From e82287c56d61f7154931da460381689c80ce35f8 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 5 Feb 2024 20:49:29 -0600 Subject: [PATCH 01/31] Adjust structure of optimization_stats->opcode Make optimization_stats.opcode an array of pointers to UOpStats objects, instead of a member array. Add a *next_stats[512] field to the UOpStats struct Add _init_pystats function to initialize these new member structs Call _init_pystats inside _PyConfig_Write --- Include/cpython/pystats.h | 3 ++- Include/internal/pycore_code.h | 5 ++++- Python/initconfig.c | 1 + Python/specialize.c | 20 ++++++++++++++++---- 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h index bf0cfe4cb695b4..c28e37af51d291 100644 --- a/Include/cpython/pystats.h +++ b/Include/cpython/pystats.h @@ -99,6 +99,7 @@ typedef struct _gc_stats { typedef struct _uop_stats { uint64_t execution_count; uint64_t miss; + struct _uop_stats *next_stats[512]; } UOpStats; #define _Py_UOP_HIST_SIZE 32 @@ -115,7 +116,7 @@ typedef struct _optimization_stats { uint64_t inner_loop; uint64_t recursive_call; uint64_t low_confidence; - UOpStats opcode[512]; + UOpStats * opcode[512]; uint64_t unsupported_opcode[256]; uint64_t trace_length_hist[_Py_UOP_HIST_SIZE]; uint64_t trace_run_length_hist[_Py_UOP_HIST_SIZE]; diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index fdd5918228455d..82c1ca5474913d 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -285,7 +285,7 @@ extern int _PyStaticCode_Init(PyCodeObject *co); do { if (_Py_stats && PyFunction_Check(callable)) _Py_stats->call_stats.eval_calls[name]++; } while (0) #define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0) #define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0) -#define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0) +#define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname]->name++; } } while (0) #define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0) #define OPT_HIST(length, name) \ do { \ @@ -300,6 +300,9 @@ extern int _PyStaticCode_Init(PyCodeObject *co); // Export for '_opcode' shared extension PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void); +// Method to initialize sub-structs of Py_Stats +void _init_pystats(PyStats *stats); + #else #define STAT_INC(opname, name) ((void)0) #define STAT_DEC(opname, name) ((void)0) diff --git a/Python/initconfig.c b/Python/initconfig.c index a6d8c176156617..44702b07ad39f6 100644 --- a/Python/initconfig.c +++ b/Python/initconfig.c @@ -2349,6 +2349,7 @@ _PyConfig_Write(const PyConfig *config, _PyRuntimeState *runtime) #ifdef Py_STATS if (config->_pystats) { _Py_StatsOn(); + _init_pystats(_Py_stats); } #endif diff --git a/Python/specialize.c b/Python/specialize.c index e38e3556a6d642..d36e086a848d51 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -23,6 +23,7 @@ */ #ifdef Py_STATS + GCStats _py_gc_stats[NUM_GENERATIONS] = { 0 }; static PyStats _Py_stats_struct = { .gc_stats = _py_gc_stats }; PyStats *_Py_stats = NULL; @@ -42,6 +43,17 @@ PyStats *_Py_stats = NULL; Py_DECREF(val); \ } while(0); +void +_init_pystats(PyStats *stats){ + //Make UOpstats structs for all initial opcodes, with null 
pointers + for (int i = 0; i < 512; i++){ + stats->optimization_stats.opcode[i] = PyMem_RawCalloc(1, sizeof(UOpStats)); + for (int j = 0; j < 512; j++){ + stats->optimization_stats.opcode[i]->next_stats[j] = NULL; + } + } +} + static PyObject* stats_to_dict(SpecializationStats *stats) { @@ -247,11 +259,11 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) } else { names = _PyOpcode_uop_name; } - if (stats->opcode[i].execution_count) { - fprintf(out, "uops[%s].execution_count : %" PRIu64 "\n", names[i], stats->opcode[i].execution_count); + if (stats->opcode[i]->execution_count) { + fprintf(out, "uops[%s].execution_count : %" PRIu64 "\n", names[i], stats->opcode[i]->execution_count); } - if (stats->opcode[i].miss) { - fprintf(out, "uops[%s].specialization.miss : %" PRIu64 "\n", names[i], stats->opcode[i].miss); + if (stats->opcode[i]->miss) { + fprintf(out, "uops[%s].specialization.miss : %" PRIu64 "\n", names[i], stats->opcode[i]->miss); } } From 7f041f0c907f053893a4a0d33b0b020168a20d09 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 5 Feb 2024 21:00:09 -0600 Subject: [PATCH 02/31] Output data from uop chains Add print_uop_sequence function to specialize.c to output stored chains of uops --- Python/specialize.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/Python/specialize.c b/Python/specialize.c index d36e086a848d51..5f63f177d00b68 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -233,6 +233,27 @@ print_histogram(FILE *out, const char *name, uint64_t hist[_Py_UOP_HIST_SIZE]) } } +// Print all recorded sequences of UOps, starting with the +// UOp with the given index +static void +print_uop_sequence(FILE *out, UOpStats *uop_stats, const char* prefix){ + for (int i = 1; i < 512; i++){ + if (uop_stats->next_stats[i]){ + if (uop_stats->next_stats[i]->execution_count){ + fprintf(out, "%s->%d : %d\n", prefix, i, uop_stats->next_stats[i]->execution_count); + char pre[strlen(prefix) + 256]; // TODO why is this constant so large? + pre[0] = '\0'; + strcat(pre, prefix); + strcat(pre, "->"); + char digits[3]; + sprintf(digits, "%d", i); + strcat(pre, digits); + print_uop_sequence(out, uop_stats->next_stats[i], pre); + } + } + } +} + static void print_optimization_stats(FILE *out, OptimizationStats *stats) { @@ -277,6 +298,11 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) ); } } + + for (int i = 0; i < 512; i++){ + print_uop_sequence(out, stats->opcode[i], ""); + } + } static void From 0262ce86e2a27e1901a5379a06dcdeaebdc9e5c6 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 5 Feb 2024 22:11:05 -0600 Subject: [PATCH 03/31] Outputing sequences of UOps as Numbers! 
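Record the last max_uop_chain_depth executed uops in optimization_stats.last_opcodes, bump the matching next_stats entries from the new UOP_CHAIN_UPDATE macro in the interpreter loop, and print each recorded chain as numeric uop indices. A rough sketch of the resulting stats lines, with purely illustrative indices and counts rather than values from a real run:

    300->301 : 1024
    300->301->302 : 512

Each line gives the chain of uop indices starting from the head opcode, followed by how many times that exact sequence was observed.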
--- Include/cpython/pystats.h | 4 +++- Include/internal/pycore_code.h | 19 +++++++++++++++++++ Python/ceval.c | 1 + Python/specialize.c | 15 +++++++++++---- 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h index c28e37af51d291..3f971dab1cb32e 100644 --- a/Include/cpython/pystats.h +++ b/Include/cpython/pystats.h @@ -116,7 +116,9 @@ typedef struct _optimization_stats { uint64_t inner_loop; uint64_t recursive_call; uint64_t low_confidence; - UOpStats * opcode[512]; + uint64_t max_uop_chain_depth; + uint64_t *last_opcodes; + UOpStats *opcode[512]; uint64_t unsupported_opcode[256]; uint64_t trace_length_hist[_Py_UOP_HIST_SIZE]; uint64_t trace_run_length_hist[_Py_UOP_HIST_SIZE]; diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index 82c1ca5474913d..711b88567eb3ce 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -295,6 +295,24 @@ extern int _PyStaticCode_Init(PyCodeObject *co); _Py_stats->optimization_stats.name[bucket]++; \ } \ } while (0) +#define UOP_CHAIN_UPDATE(op) \ + for (uint64_t i = _Py_stats->optimization_stats.max_uop_chain_depth - 1; i > 0; i--){ \ + _Py_stats->optimization_stats.last_opcodes[i] = _Py_stats->optimization_stats.last_opcodes[i-1]; \ + } \ + _Py_stats->optimization_stats.last_opcodes[0] = op; \ + do { \ + UOpStats *head = _Py_stats->optimization_stats.opcode[op]; \ + for (uint64_t i = 0; i < _Py_stats->optimization_stats.max_uop_chain_depth - 1; i++){ \ + if (!_Py_stats->optimization_stats.last_opcodes[i+1]){ break; } \ + if (head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]){ \ + head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]->execution_count++; }\ + else { \ + head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]] = calloc(1, sizeof(UOpStats)); \ + head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]->execution_count = 1; \ + } \ + head = head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]; \ + } \ + } while (0) #define RARE_EVENT_STAT_INC(name) do { if (_Py_stats) _Py_stats->rare_event_stats.name++; } while (0) // Export for '_opcode' shared extension @@ -317,6 +335,7 @@ void _init_pystats(PyStats *stats); #define UOP_STAT_INC(opname, name) ((void)0) #define OPT_UNSUPPORTED_OPCODE(opname) ((void)0) #define OPT_HIST(length, name) ((void)0) +#define UOP_CHAIN_UPDATE(op) (void(0)) #define RARE_EVENT_STAT_INC(name) ((void)0) #endif // !Py_STATS diff --git a/Python/ceval.c b/Python/ceval.c index 4f208009086191..f7b496f830c260 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1026,6 +1026,7 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int next_uop++; OPT_STAT_INC(uops_executed); UOP_STAT_INC(uopcode, execution_count); + UOP_CHAIN_UPDATE(uopcode); #ifdef Py_STATS trace_uop_execution_counter++; #endif diff --git a/Python/specialize.c b/Python/specialize.c index 5f63f177d00b68..4b4c2e0fe87172 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -52,6 +52,11 @@ _init_pystats(PyStats *stats){ stats->optimization_stats.opcode[i]->next_stats[j] = NULL; } } + + // TODO get this from an environment variable? 
+ int MAX_DEPTH = 4; + stats->optimization_stats.max_uop_chain_depth = MAX_DEPTH; + stats->optimization_stats.last_opcodes = PyMem_RawCalloc(MAX_DEPTH, sizeof(uint64_t)); } static PyObject* @@ -240,13 +245,13 @@ print_uop_sequence(FILE *out, UOpStats *uop_stats, const char* prefix){ for (int i = 1; i < 512; i++){ if (uop_stats->next_stats[i]){ if (uop_stats->next_stats[i]->execution_count){ - fprintf(out, "%s->%d : %d\n", prefix, i, uop_stats->next_stats[i]->execution_count); + fprintf(out, "%s->%d : %ld\n", prefix, i, uop_stats->next_stats[i]->execution_count); char pre[strlen(prefix) + 256]; // TODO why is this constant so large? pre[0] = '\0'; strcat(pre, prefix); strcat(pre, "->"); - char digits[3]; - sprintf(digits, "%d", i); + char digits[6]; + sprintf(digits, "%d", i); // TODO fix buffer overflow?? strcat(pre, digits); print_uop_sequence(out, uop_stats->next_stats[i], pre); } @@ -300,7 +305,9 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) } for (int i = 0; i < 512; i++){ - print_uop_sequence(out, stats->opcode[i], ""); + char uop_chars[6]; + sprintf(uop_chars, "%d", i); + print_uop_sequence(out, stats->opcode[i], uop_chars); } } From cdb740258769b3e3d7fbb62fc0df138c031f1310 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 5 Feb 2024 22:17:58 -0600 Subject: [PATCH 04/31] Output sequence chains by name --- Python/specialize.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/Python/specialize.c b/Python/specialize.c index 4b4c2e0fe87172..121daa182afd7f 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -245,14 +245,18 @@ print_uop_sequence(FILE *out, UOpStats *uop_stats, const char* prefix){ for (int i = 1; i < 512; i++){ if (uop_stats->next_stats[i]){ if (uop_stats->next_stats[i]->execution_count){ - fprintf(out, "%s->%d : %ld\n", prefix, i, uop_stats->next_stats[i]->execution_count); + const char* const* names; + if (i < 256) { + names = _PyOpcode_OpName; + } else { + names = _PyOpcode_uop_name; + } + fprintf(out, "UOp Sequence Count:%s->%s: %ld\n", prefix, names[i], uop_stats->next_stats[i]->execution_count); char pre[strlen(prefix) + 256]; // TODO why is this constant so large? pre[0] = '\0'; strcat(pre, prefix); strcat(pre, "->"); - char digits[6]; - sprintf(digits, "%d", i); // TODO fix buffer overflow?? 
- strcat(pre, digits); + strcat(pre, names[i]); print_uop_sequence(out, uop_stats->next_stats[i], pre); } } @@ -305,9 +309,12 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) } for (int i = 0; i < 512; i++){ - char uop_chars[6]; - sprintf(uop_chars, "%d", i); - print_uop_sequence(out, stats->opcode[i], uop_chars); + if (i < 256) { + names = _PyOpcode_OpName; + } else { + names = _PyOpcode_uop_name; + } + print_uop_sequence(out, stats->opcode[i], names[i]); } } From eb1daddd3dcd7d0ea852e068caec4f561d8c180f Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Tue, 6 Feb 2024 17:29:23 -0600 Subject: [PATCH 05/31] Add missing check for _Py_stats --- Include/internal/pycore_code.h | 36 ++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index 711b88567eb3ce..0cf093dacd9600 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -296,23 +296,25 @@ extern int _PyStaticCode_Init(PyCodeObject *co); } \ } while (0) #define UOP_CHAIN_UPDATE(op) \ - for (uint64_t i = _Py_stats->optimization_stats.max_uop_chain_depth - 1; i > 0; i--){ \ - _Py_stats->optimization_stats.last_opcodes[i] = _Py_stats->optimization_stats.last_opcodes[i-1]; \ - } \ - _Py_stats->optimization_stats.last_opcodes[0] = op; \ - do { \ - UOpStats *head = _Py_stats->optimization_stats.opcode[op]; \ - for (uint64_t i = 0; i < _Py_stats->optimization_stats.max_uop_chain_depth - 1; i++){ \ - if (!_Py_stats->optimization_stats.last_opcodes[i+1]){ break; } \ - if (head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]){ \ - head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]->execution_count++; }\ - else { \ - head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]] = calloc(1, sizeof(UOpStats)); \ - head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]->execution_count = 1; \ - } \ - head = head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]; \ + if (_Py_stats){ \ + for (uint64_t i = _Py_stats->optimization_stats.max_uop_chain_depth - 1; i > 0; i--){ \ + _Py_stats->optimization_stats.last_opcodes[i] = _Py_stats->optimization_stats.last_opcodes[i-1]; \ } \ - } while (0) + _Py_stats->optimization_stats.last_opcodes[0] = op; \ + do { \ + UOpStats *head = _Py_stats->optimization_stats.opcode[op]; \ + for (uint64_t i = 0; i < _Py_stats->optimization_stats.max_uop_chain_depth - 1; i++){ \ + if (!_Py_stats->optimization_stats.last_opcodes[i+1]){ break; } \ + if (head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]){ \ + head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]->execution_count++; } \ + else { \ + head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]] = calloc(1, sizeof(UOpStats)); \ + head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]->execution_count = 1; \ + } \ + head = head->next_stats[_Py_stats->optimization_stats.last_opcodes[i+1]]; \ + } \ + } while (0); \ + } #define RARE_EVENT_STAT_INC(name) do { if (_Py_stats) _Py_stats->rare_event_stats.name++; } while (0) // Export for '_opcode' shared extension @@ -335,7 +337,7 @@ void _init_pystats(PyStats *stats); #define UOP_STAT_INC(opname, name) ((void)0) #define OPT_UNSUPPORTED_OPCODE(opname) ((void)0) #define OPT_HIST(length, name) ((void)0) -#define UOP_CHAIN_UPDATE(op) (void(0)) +#define UOP_CHAIN_UPDATE(op) printf("UOP CHAINER NOT RUNNING\n"); #define RARE_EVENT_STAT_INC(name) ((void)0) #endif // !Py_STATS From 
ba7248d07548ebd0dc52364d75ba1a6d7c4e265c Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Tue, 6 Feb 2024 17:54:09 -0600 Subject: [PATCH 06/31] Adjust formatting on sequence output --- Python/specialize.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Python/specialize.c b/Python/specialize.c index 121daa182afd7f..c548efcbb357f2 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -251,11 +251,11 @@ print_uop_sequence(FILE *out, UOpStats *uop_stats, const char* prefix){ } else { names = _PyOpcode_uop_name; } - fprintf(out, "UOp Sequence Count:%s->%s: %ld\n", prefix, names[i], uop_stats->next_stats[i]->execution_count); + fprintf(out, "UOp Sequence Count[%s,%s]: %ld\n", prefix, names[i], uop_stats->next_stats[i]->execution_count); char pre[strlen(prefix) + 256]; // TODO why is this constant so large? pre[0] = '\0'; strcat(pre, prefix); - strcat(pre, "->"); + strcat(pre, ","); strcat(pre, names[i]); print_uop_sequence(out, uop_stats->next_stats[i], pre); } From 6c2fa8a275b714b8491126c5078fb44a2feb6ad1 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Tue, 6 Feb 2024 18:24:42 -0600 Subject: [PATCH 07/31] Add uop sequence pairs to info --- Tools/scripts/summarize_stats.py | 38 ++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 9b7e7b999ea7c7..d93af591a84905 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -311,6 +311,9 @@ def get_opcode_stats(self, prefix: str) -> OpcodeStats: self._data["_defines"], self._data["_specialized_instructions"], ) + + def get_uop_sequence_stats(self, length=2) -> dict[str, int]: + return {k: v for k, v in self._data.items() if k.startswith("UOp Sequence Count") and k.count(',') == length - 1} def get_call_stats(self) -> dict[str, int]: defines = self._data["_stats_defines"] @@ -665,6 +668,39 @@ def calc_pair_count_table(stats: Stats) -> Rows: comparative=False, ) +def uop_sequence_section(sequence_length: int) -> Section: + def calc_uop_sequence(stats: Stats) -> Rows: + uop_sequences = stats.get_uop_sequence_stats() + total = stats.get_optimization_stats()["Uops executed"][0] + + cumulative = 0 + rows: Rows = [] + for sequence, count in itertools.islice( + sorted(uop_sequences.items(), key=itemgetter(1), reverse=True), 100 + ): + cumulative += count + rows.append( + ( + sequence, + Count(count), + Ratio(count, total), + Ratio(cumulative, total), + ) + ) + return rows + + return Section( + "Pair counts", + "Pair counts for top 100 UOp pairs", + [ + Table( + ("Pair", "Count:", "Self:", "Cumulative:"), + calc_uop_sequence, + ) + ], + comparative=False, + ) + def pre_succ_pairs_section() -> Section: def iter_pre_succ_pairs_tables(base_stats: Stats, head_stats: Stats | None = None): @@ -1053,6 +1089,7 @@ def iter_optimization_tables(base_stats: Stats, head_stats: Stats | None = None) ) ], ) + yield uop_sequence_section(2) yield Section( "Unsupported opcodes", "", @@ -1181,6 +1218,7 @@ def output_stats(inputs: list[Path], json_output=str | None): match len(inputs): case 1: data = load_raw_data(Path(inputs[0])) + #print(data) if json_output is not None: with open(json_output, "w", encoding="utf-8") as f: save_raw_data(data, f) # type: ignore From 6365ba282998ceef7da9b99e529d535b6f4ada1d Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Tue, 6 Feb 2024 18:27:19 -0600 Subject: [PATCH 08/31] Fix multi-length sequence output --- Tools/scripts/summarize_stats.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 
deletions(-) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index d93af591a84905..55c899bebe94fb 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -312,7 +312,7 @@ def get_opcode_stats(self, prefix: str) -> OpcodeStats: self._data["_specialized_instructions"], ) - def get_uop_sequence_stats(self, length=2) -> dict[str, int]: + def get_uop_sequence_stats(self, length) -> dict[str, int]: return {k: v for k, v in self._data.items() if k.startswith("UOp Sequence Count") and k.count(',') == length - 1} def get_call_stats(self) -> dict[str, int]: @@ -668,9 +668,9 @@ def calc_pair_count_table(stats: Stats) -> Rows: comparative=False, ) -def uop_sequence_section(sequence_length: int) -> Section: +def uop_sequence_section(sequence_length: int = 2) -> Section: def calc_uop_sequence(stats: Stats) -> Rows: - uop_sequences = stats.get_uop_sequence_stats() + uop_sequences = stats.get_uop_sequence_stats(sequence_length) total = stats.get_optimization_stats()["Uops executed"][0] cumulative = 0 @@ -691,7 +691,7 @@ def calc_uop_sequence(stats: Stats) -> Rows: return Section( "Pair counts", - "Pair counts for top 100 UOp pairs", + f"Pair counts for top 100 UOp Sequences of Length {sequence_length}", [ Table( ("Pair", "Count:", "Self:", "Cumulative:"), @@ -1089,7 +1089,7 @@ def iter_optimization_tables(base_stats: Stats, head_stats: Stats | None = None) ) ], ) - yield uop_sequence_section(2) + yield uop_sequence_section() yield Section( "Unsupported opcodes", "", From 27cc9fc7bec41d4b71b03c7f35e5d78288ad18b0 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Tue, 6 Feb 2024 18:42:49 -0600 Subject: [PATCH 09/31] Output top 100 sequences of all present lengths --- Tools/scripts/summarize_stats.py | 47 ++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 55c899bebe94fb..7ed8af95edd15a 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -21,7 +21,7 @@ from pathlib import Path import re import sys -from typing import Any, Callable, TextIO, TypeAlias +from typing import Any, Callable, Generator, TextIO, TypeAlias RawData: TypeAlias = dict[str, Any] @@ -314,6 +314,10 @@ def get_opcode_stats(self, prefix: str) -> OpcodeStats: def get_uop_sequence_stats(self, length) -> dict[str, int]: return {k: v for k, v in self._data.items() if k.startswith("UOp Sequence Count") and k.count(',') == length - 1} + + def get_max_uop_sequence_length(self) -> int: + uop_sequence_lengths = [k.count(',')+1 for k in self._data.keys() if k.startswith("UOp Sequence Count")] + return max(uop_sequence_lengths) if uop_sequence_lengths else 0 def get_call_stats(self) -> dict[str, int]: defines = self._data["_stats_defines"] @@ -668,10 +672,12 @@ def calc_pair_count_table(stats: Stats) -> Rows: comparative=False, ) -def uop_sequence_section(sequence_length: int = 2) -> Section: - def calc_uop_sequence(stats: Stats) -> Rows: - uop_sequences = stats.get_uop_sequence_stats(sequence_length) - total = stats.get_optimization_stats()["Uops executed"][0] +def uop_sequence_section(base_stats: Stats, head_stats: Stats) -> Generator[Section]: + if not (longest := base_stats.get_max_uop_sequence_length()): return + + def calc_uop_sequence(base_stats: Stats ,sequence_length: int) -> Rows: + uop_sequences = base_stats.get_uop_sequence_stats(sequence_length) + total = base_stats.get_optimization_stats()["Uops executed"][0] cumulative = 0 
rows: Rows = [] @@ -688,18 +694,19 @@ def calc_uop_sequence(stats: Stats) -> Rows: ) ) return rows - - return Section( - "Pair counts", - f"Pair counts for top 100 UOp Sequences of Length {sequence_length}", - [ - Table( - ("Pair", "Count:", "Self:", "Cumulative:"), - calc_uop_sequence, - ) - ], - comparative=False, - ) + + for l in range(2, longest+1): + yield Section( + "Pair counts", + f"Counts for top 100 UOp Sequences of Length {l}", + [ + Table( + ("Pair", "Count:", "Self:", "Cumulative:"), + functools.partial(calc_uop_sequence, sequence_length=l), + ) + ], + comparative=False, + ) def pre_succ_pairs_section() -> Section: @@ -1089,7 +1096,11 @@ def iter_optimization_tables(base_stats: Stats, head_stats: Stats | None = None) ) ], ) - yield uop_sequence_section() + yield Section( + "UOp Sequences", + "", + uop_sequence_section, + ) yield Section( "Unsupported opcodes", "", From 45a57b78d3d09bbcf78ecebf90971ffbdc1c5b93 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Tue, 6 Feb 2024 19:15:36 -0600 Subject: [PATCH 10/31] Move initialization to check in '_stats_on' --- Python/initconfig.c | 3 +-- Python/specialize.c | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Python/initconfig.c b/Python/initconfig.c index 44702b07ad39f6..2ee0bb36e61034 100644 --- a/Python/initconfig.c +++ b/Python/initconfig.c @@ -2348,8 +2348,7 @@ _PyConfig_Write(const PyConfig *config, _PyRuntimeState *runtime) #ifdef Py_STATS if (config->_pystats) { - _Py_StatsOn(); - _init_pystats(_Py_stats); + _Py_StatsOn(); } #endif diff --git a/Python/specialize.c b/Python/specialize.c index c548efcbb357f2..ec6a7a3983c9fa 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -54,7 +54,7 @@ _init_pystats(PyStats *stats){ } // TODO get this from an environment variable? - int MAX_DEPTH = 4; + int MAX_DEPTH = 2; stats->optimization_stats.max_uop_chain_depth = MAX_DEPTH; stats->optimization_stats.last_opcodes = PyMem_RawCalloc(MAX_DEPTH, sizeof(uint64_t)); } @@ -344,6 +344,7 @@ void _Py_StatsOn(void) { _Py_stats = &_Py_stats_struct; + if (!_Py_stats->optimization_stats.opcode[0]) _init_pystats(_Py_stats); } void From f7508e9da9d07e76975c1f7a862d2e21b0a955f8 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 10:49:53 -0600 Subject: [PATCH 11/31] Add PYTHONSTATS_UOPDEPTH environment variable Sets the maximum depth of UOP sequences to track. Defaults to 2. --- Include/cpython/initconfig.h | 2 ++ Include/internal/pycore_code.h | 2 +- Include/internal/pycore_pystats.h | 1 + Lib/test/test_embed.py | 4 +++ Python/initconfig.c | 18 +++++++++++- Python/specialize.c | 46 +++++++++++++++++++------------ Tools/scripts/summarize_stats.py | 4 +-- 7 files changed, 56 insertions(+), 21 deletions(-) diff --git a/Include/cpython/initconfig.h b/Include/cpython/initconfig.h index 87c059c521cbc9..342d97fc7cbad3 100644 --- a/Include/cpython/initconfig.h +++ b/Include/cpython/initconfig.h @@ -224,6 +224,8 @@ typedef struct PyConfig { #ifdef Py_STATS // If non-zero, turns on statistics gathering. int _pystats; + // The maximum depth of uop-sequence to record. 
+ int _pystats_depth; #endif #ifdef Py_DEBUG diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index 0cf093dacd9600..c5137ae7f2dd54 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -321,7 +321,7 @@ extern int _PyStaticCode_Init(PyCodeObject *co); PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void); // Method to initialize sub-structs of Py_Stats -void _init_pystats(PyStats *stats); +void _init_pystats(void); #else #define STAT_INC(opname, name) ((void)0) diff --git a/Include/internal/pycore_pystats.h b/Include/internal/pycore_pystats.h index f8af398a560586..f17fa65bab0966 100644 --- a/Include/internal/pycore_pystats.h +++ b/Include/internal/pycore_pystats.h @@ -12,6 +12,7 @@ extern "C" { extern void _Py_StatsOn(void); extern void _Py_StatsOff(void); extern void _Py_StatsClear(void); +extern void _Py_Stats_Set_Depth(int depth); extern int _Py_PrintSpecializationStats(int to_file); #endif diff --git a/Lib/test/test_embed.py b/Lib/test/test_embed.py index 6c60854bbd76cc..222e92c14bc8da 100644 --- a/Lib/test/test_embed.py +++ b/Lib/test/test_embed.py @@ -521,6 +521,7 @@ class InitConfigTests(EmbeddingTestsMixin, unittest.TestCase): } if Py_STATS: CONFIG_COMPAT['_pystats'] = 0 + CONFIG_COMPAT['_pystats_depth'] = 2 if support.Py_DEBUG: CONFIG_COMPAT['run_presite'] = None if MS_WINDOWS: @@ -909,6 +910,7 @@ def test_init_from_config(self): } if Py_STATS: config['_pystats'] = 1 + config['_pystats_depth'] = 2 self.check_all_configs("test_init_from_config", config, preconfig, api=API_COMPAT) @@ -943,6 +945,7 @@ def test_init_compat_env(self): } if Py_STATS: config['_pystats'] = 1 + config['_pystats_depth'] = 2 self.check_all_configs("test_init_compat_env", config, preconfig, api=API_COMPAT) @@ -978,6 +981,7 @@ def test_init_python_env(self): } if Py_STATS: config['_pystats'] = 1 + config['_pystats_depth'] = 2 self.check_all_configs("test_init_python_env", config, preconfig, api=API_PYTHON) diff --git a/Python/initconfig.c b/Python/initconfig.c index 2ee0bb36e61034..29ae05aa8310e0 100644 --- a/Python/initconfig.c +++ b/Python/initconfig.c @@ -9,7 +9,7 @@ #include "pycore_pylifecycle.h" // _Py_PreInitializeFromConfig() #include "pycore_pymem.h" // _PyMem_SetDefaultAllocator() #include "pycore_pystate.h" // _PyThreadState_GET() -#include "pycore_pystats.h" // _Py_StatsOn() +#include "pycore_pystats.h" // _Py_StatsOn(), _Py_Stats_Set_Depth #include "osdefs.h" // DELIM @@ -117,6 +117,7 @@ static const PyConfigSpec PYCONFIG_SPEC[] = { SPEC(_is_python_build, UINT), #ifdef Py_STATS SPEC(_pystats, UINT), + SPEC(_pystats_depth, UINT), #endif #ifdef Py_DEBUG SPEC(run_presite, WSTR_OPT), @@ -310,6 +311,7 @@ static const char usage_envvars[] = "PYTHONWARNINGS=arg : warning control (-W arg)\n" #ifdef Py_STATS "PYTHONSTATS : turns on statistics gathering\n" +"PYTHONSTATS_UOPDEPTH : sets the maximum length of UOp sequences to track\n" #endif #ifdef Py_DEBUG "PYTHON_PRESITE=pkg.mod : import this module before site.py is run\n" @@ -2209,6 +2211,16 @@ config_read(PyConfig *config, int compute_path_config) if (config->_pystats < 0) { config->_pystats = 0; } + + if (config_get_env(config, "PYTHONSTATS_UOPDEPTH")) { + config->_pystats_depth = atoi(config_get_env(config, "PYTHONSTATS_UOPDEPTH")); + if (config->_pystats_depth < 2){ printf("Setting default stats depth to 2"); config->_pystats_depth = 2; } + } + else { + config->_pystats_depth = 2; + } + + #endif status = config_read_complex_options(config); @@ -2348,7 +2360,11 @@ _PyConfig_Write(const PyConfig 
*config, _PyRuntimeState *runtime) #ifdef Py_STATS if (config->_pystats) { + if (config->_pystats_depth){ + _Py_Stats_Set_Depth(config->_pystats_depth); + } _Py_StatsOn(); + } #endif diff --git a/Python/specialize.c b/Python/specialize.c index ec6a7a3983c9fa..233a4d172d9434 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -10,6 +10,7 @@ #include "pycore_moduleobject.h" #include "pycore_object.h" #include "pycore_opcode_metadata.h" // _PyOpcode_Caches +#include "pycore_pystats.h" // _Py_Stats_Set_Depth #include "pycore_uop_metadata.h" // _PyOpcode_uop_name #include "pycore_opcode_utils.h" // RESUME_AT_FUNC_START #include "pycore_pylifecycle.h" // _PyOS_URandomNonblock() @@ -23,7 +24,6 @@ */ #ifdef Py_STATS - GCStats _py_gc_stats[NUM_GENERATIONS] = { 0 }; static PyStats _Py_stats_struct = { .gc_stats = _py_gc_stats }; PyStats *_Py_stats = NULL; @@ -44,19 +44,26 @@ PyStats *_Py_stats = NULL; } while(0); void -_init_pystats(PyStats *stats){ - //Make UOpstats structs for all initial opcodes, with null pointers +_init_pystats(){ + //Make UOpstats structs for all initial opcodes, with null pointers to deeper opcodes for (int i = 0; i < 512; i++){ - stats->optimization_stats.opcode[i] = PyMem_RawCalloc(1, sizeof(UOpStats)); + _Py_stats_struct.optimization_stats.opcode[i] = PyMem_RawCalloc(1, sizeof(UOpStats)); for (int j = 0; j < 512; j++){ - stats->optimization_stats.opcode[i]->next_stats[j] = NULL; + _Py_stats_struct.optimization_stats.opcode[i]->next_stats[j] = NULL; } } - // TODO get this from an environment variable? - int MAX_DEPTH = 2; - stats->optimization_stats.max_uop_chain_depth = MAX_DEPTH; - stats->optimization_stats.last_opcodes = PyMem_RawCalloc(MAX_DEPTH, sizeof(uint64_t)); + int DEPTH = _Py_stats_struct.optimization_stats.max_uop_chain_depth ? _Py_stats_struct.optimization_stats.max_uop_chain_depth : 2; + + if (_Py_stats_struct.optimization_stats.last_opcodes){ + uint64_t *tmp; + tmp = PyMem_RawRealloc(DEPTH, sizeof(uint64_t)); + if (tmp != NULL) _Py_stats_struct.optimization_stats.last_opcodes = tmp; + else return PyErr_NoMemory(); + } + else { + _Py_stats_struct.optimization_stats.last_opcodes = PyMem_RawCalloc(DEPTH, sizeof(uint64_t)); + } } static PyObject* @@ -251,13 +258,10 @@ print_uop_sequence(FILE *out, UOpStats *uop_stats, const char* prefix){ } else { names = _PyOpcode_uop_name; } - fprintf(out, "UOp Sequence Count[%s,%s]: %ld\n", prefix, names[i], uop_stats->next_stats[i]->execution_count); - char pre[strlen(prefix) + 256]; // TODO why is this constant so large? 
- pre[0] = '\0'; - strcat(pre, prefix); - strcat(pre, ","); - strcat(pre, names[i]); - print_uop_sequence(out, uop_stats->next_stats[i], pre); + fprintf(out, "UOp sequence count[%s,%s]: %ld\n", prefix, names[i], uop_stats->next_stats[i]->execution_count); + char new_prefix[strlen(prefix) + 64]; + sprintf(new_prefix, "%s,%s", prefix, names[i]); + print_uop_sequence(out, uop_stats->next_stats[i], new_prefix); } } } @@ -344,7 +348,15 @@ void _Py_StatsOn(void) { _Py_stats = &_Py_stats_struct; - if (!_Py_stats->optimization_stats.opcode[0]) _init_pystats(_Py_stats); + if (!_Py_stats->optimization_stats.opcode[0]) _init_pystats(); +} + +void +_Py_Stats_Set_Depth(int depth){ + _Py_stats_struct.optimization_stats.max_uop_chain_depth = depth; + if (_Py_stats){ + _Py_stats->optimization_stats.last_opcodes = PyMem_RawRealloc(_Py_stats->optimization_stats.last_opcodes, depth * sizeof(uint64_t)); + } } void diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 7ed8af95edd15a..6015251d678e7b 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -313,10 +313,10 @@ def get_opcode_stats(self, prefix: str) -> OpcodeStats: ) def get_uop_sequence_stats(self, length) -> dict[str, int]: - return {k: v for k, v in self._data.items() if k.startswith("UOp Sequence Count") and k.count(',') == length - 1} + return {k: v for k, v in self._data.items() if k.startswith("UOp sequence count") and k.count(',') == length - 1} def get_max_uop_sequence_length(self) -> int: - uop_sequence_lengths = [k.count(',')+1 for k in self._data.keys() if k.startswith("UOp Sequence Count")] + uop_sequence_lengths = [k.count(',')+1 for k in self._data.keys() if k.startswith("UOp sequence count")] return max(uop_sequence_lengths) if uop_sequence_lengths else 0 def get_call_stats(self) -> dict[str, int]: From 9feb04c536973f3251f660991d5bb8b0570f4224 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 11:33:59 -0600 Subject: [PATCH 12/31] Catch up with main --- .github/workflows/build.yml | 6 +- .github/workflows/reusable-ubuntu.yml | 2 +- Doc/c-api/dict.rst | 20 + Doc/c-api/intro.rst | 11 +- Doc/faq/library.rst | 52 +- Doc/library/enum.rst | 4 +- Doc/library/sqlite3.rst | 11 +- Doc/library/test.rst | 6 +- Doc/whatsnew/3.13.rst | 46 +- Include/cpython/dictobject.h | 10 + Include/internal/pycore_critical_section.h | 46 + Include/internal/pycore_gc.h | 42 +- .../pycore_global_objects_fini_generated.h | 1 + Include/internal/pycore_global_strings.h | 1 + Include/internal/pycore_object.h | 17 +- Include/internal/pycore_pythread.h | 15 +- Include/internal/pycore_runtime_init.h | 10 +- .../internal/pycore_runtime_init_generated.h | 1 + Include/internal/pycore_symtable.h | 24 +- .../internal/pycore_unicodeobject_generated.h | 3 + Lib/mailbox.py | 5 +- Lib/pathlib/_abc.py | 2 +- Lib/sqlite3/dump.py | 19 +- Lib/test/test_capi/test_dict.py | 22 + Lib/test/test_gc.py | 22 +- Lib/test/test_listcomps.py | 12 + Lib/test/test_mailbox.py | 12 +- Lib/test/test_pathlib/test_pathlib_abc.py | 34 +- Lib/test/test_sqlite3/test_dump.py | 70 ++ Lib/test/test_sysconfig.py | 37 +- Lib/test/test_zoneinfo/test_zoneinfo.py | 17 +- Lib/threading.py | 5 +- Lib/zoneinfo/_tzpath.py | 24 +- Mac/BuildScript/build-installer.py | 12 +- Makefile.pre.in | 500 ++++----- ...-11-15-13-47-48.gh-issue-112066.22WsqR.rst | 5 + ...-01-07-04-22-51.gh-issue-108362.oB9Gcf.rst | 13 - ...-02-07-07-50-12.gh-issue-114828.nSXwMi.rst | 2 + ...-06-29-14-26-56.gh-issue-106233.Aqw2HI.rst | 2 + 
...3-07-23-12-28-26.gh-issue-75705.aB2-Ww.rst | 1 + ...4-01-24-20-51-49.gh-issue-91602.8fOH8l.rst | 3 + ...-02-05-19-00-32.gh-issue-109991.yJSEkw.rst | 2 + ...-02-06-09-05-13.gh-issue-115009.ShMjZs.rst | 1 + ...-02-05-18-30-27.gh-issue-109991.tun6Yu.rst | 1 + ...-02-06-09-01-10.gh-issue-115009.ysau7e.rst | 1 + Misc/sbom.spdx.json | 60 +- Modules/_json.c | 5 +- Modules/_sqlite/clinic/connection.c.h | 60 +- Modules/_sqlite/connection.c | 20 +- Modules/_sre/sre.c | 27 +- Modules/_testcapi/dict.c | 26 + Modules/_threadmodule.c | 53 +- Modules/gcmodule.c | 64 +- Modules/posixmodule.c | 2 +- Modules/pyexpat.c | 3 +- Objects/clinic/dictobject.c.h | 30 +- Objects/dictobject.c | 963 +++++++++++++----- Objects/listobject.c | 52 +- Objects/object.c | 15 - Objects/odictobject.c | 19 +- Objects/setobject.c | 78 +- Objects/structseq.c | 5 +- Objects/typeobject.c | 6 +- Objects/unicodeobject.c | 12 +- PCbuild/get_externals.bat | 2 +- PCbuild/python.props | 2 +- PCbuild/readme.txt | 2 +- Python/compile.c | 29 +- Python/gc.c | 824 ++++++--------- Python/gc_free_threading.c | 27 +- Python/import.c | 2 +- Python/pystate.c | 2 + Python/symtable.c | 19 + Python/thread_nt.h | 4 - Python/thread_pthread.h | 10 - Tools/build/generate_sbom.py | 12 +- Tools/gdb/libpython.py | 7 +- Tools/scripts/summarize_stats.py | 6 +- Tools/ssl/multissltests.py | 5 +- 79 files changed, 2149 insertions(+), 1456 deletions(-) create mode 100644 Misc/NEWS.d/next/C API/2023-11-15-13-47-48.gh-issue-112066.22WsqR.rst delete mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-01-07-04-22-51.gh-issue-108362.oB9Gcf.rst create mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-02-07-07-50-12.gh-issue-114828.nSXwMi.rst create mode 100644 Misc/NEWS.d/next/Library/2023-06-29-14-26-56.gh-issue-106233.Aqw2HI.rst create mode 100644 Misc/NEWS.d/next/Library/2023-07-23-12-28-26.gh-issue-75705.aB2-Ww.rst create mode 100644 Misc/NEWS.d/next/Library/2024-01-24-20-51-49.gh-issue-91602.8fOH8l.rst create mode 100644 Misc/NEWS.d/next/Tools-Demos/2024-02-05-19-00-32.gh-issue-109991.yJSEkw.rst create mode 100644 Misc/NEWS.d/next/Windows/2024-02-06-09-05-13.gh-issue-115009.ShMjZs.rst create mode 100644 Misc/NEWS.d/next/macOS/2024-02-05-18-30-27.gh-issue-109991.tun6Yu.rst create mode 100644 Misc/NEWS.d/next/macOS/2024-02-06-09-01-10.gh-issue-115009.ysau7e.rst diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 949c4ae95da07f..0a2f6da50ed8a0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -250,7 +250,7 @@ jobs: strategy: fail-fast: false matrix: - openssl_ver: [1.1.1w, 3.0.11, 3.1.3] + openssl_ver: [1.1.1w, 3.0.13, 3.1.5, 3.2.1] env: OPENSSL_VER: ${{ matrix.openssl_ver }} MULTISSL_DIR: ${{ github.workspace }}/multissl @@ -304,7 +304,7 @@ jobs: needs: check_source if: needs.check_source.outputs.run_tests == 'true' && needs.check_source.outputs.run_hypothesis == 'true' env: - OPENSSL_VER: 3.0.11 + OPENSSL_VER: 3.0.13 PYTHONSTRICTEXTENSIONBUILD: 1 steps: - uses: actions/checkout@v4 @@ -415,7 +415,7 @@ jobs: needs: check_source if: needs.check_source.outputs.run_tests == 'true' env: - OPENSSL_VER: 3.0.11 + OPENSSL_VER: 3.0.13 PYTHONSTRICTEXTENSIONBUILD: 1 ASAN_OPTIONS: detect_leaks=0:allocator_may_return_null=1:handle_segv=0 steps: diff --git a/.github/workflows/reusable-ubuntu.yml b/.github/workflows/reusable-ubuntu.yml index ef52d99c15191b..0cbad57f0c6572 100644 --- a/.github/workflows/reusable-ubuntu.yml +++ b/.github/workflows/reusable-ubuntu.yml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 60 runs-on: 
ubuntu-20.04 env: - OPENSSL_VER: 3.0.11 + OPENSSL_VER: 3.0.13 PYTHONSTRICTEXTENSIONBUILD: 1 steps: - uses: actions/checkout@v4 diff --git a/Doc/c-api/dict.rst b/Doc/c-api/dict.rst index 8471c98d044872..03f3d28187bfe9 100644 --- a/Doc/c-api/dict.rst +++ b/Doc/c-api/dict.rst @@ -174,6 +174,26 @@ Dictionary Objects .. versionadded:: 3.4 +.. c:function:: int PyDict_SetDefaultRef(PyObject *p, PyObject *key, PyObject *default_value, PyObject **result) + + Inserts *default_value* into the dictionary *p* with a key of *key* if the + key is not already present in the dictionary. If *result* is not ``NULL``, + then *\*result* is set to a :term:`strong reference` to either + *default_value*, if the key was not present, or the existing value, if *key* + was already present in the dictionary. + Returns ``1`` if the key was present and *default_value* was not inserted, + or ``0`` if the key was not present and *default_value* was inserted. + On failure, returns ``-1``, sets an exception, and sets ``*result`` + to ``NULL``. + + For clarity: if you have a strong reference to *default_value* before + calling this function, then after it returns, you hold a strong reference + to both *default_value* and *\*result* (if it's not ``NULL``). + These may refer to the same object: in that case you hold two separate + references to it. + .. versionadded:: 3.13 + + .. c:function:: int PyDict_Pop(PyObject *p, PyObject *key, PyObject **result) Remove *key* from dictionary *p* and optionally return the removed value. diff --git a/Doc/c-api/intro.rst b/Doc/c-api/intro.rst index 4dbca92b18b5cd..dcda1071a58f35 100644 --- a/Doc/c-api/intro.rst +++ b/Doc/c-api/intro.rst @@ -148,7 +148,7 @@ complete listing. worse performances (due to increased code size for example). The compiler is usually smarter than the developer for the cost/benefit analysis. - If Python is :ref:`built in debug mode ` (if the ``Py_DEBUG`` + If Python is :ref:`built in debug mode ` (if the :c:macro:`Py_DEBUG` macro is defined), the :c:macro:`Py_ALWAYS_INLINE` macro does nothing. It must be specified before the function return type. Usage:: @@ -812,12 +812,14 @@ available that support tracing of reference counts, debugging the memory allocator, or low-level profiling of the main interpreter loop. Only the most frequently used builds will be described in the remainder of this section. -Compiling the interpreter with the :c:macro:`Py_DEBUG` macro defined produces +.. c:macro:: Py_DEBUG + +Compiling the interpreter with the :c:macro:`!Py_DEBUG` macro defined produces what is generally meant by :ref:`a debug build of Python `. -:c:macro:`Py_DEBUG` is enabled in the Unix build by adding +:c:macro:`!Py_DEBUG` is enabled in the Unix build by adding :option:`--with-pydebug` to the :file:`./configure` command. It is also implied by the presence of the -not-Python-specific :c:macro:`_DEBUG` macro. When :c:macro:`Py_DEBUG` is enabled +not-Python-specific :c:macro:`!_DEBUG` macro. When :c:macro:`!Py_DEBUG` is enabled in the Unix build, compiler optimization is disabled. In addition to the reference count debugging described below, extra checks are @@ -832,4 +834,3 @@ after every statement run by the interpreter.) Please refer to :file:`Misc/SpecialBuilds.txt` in the Python source distribution for more detailed information. - diff --git a/Doc/faq/library.rst b/Doc/faq/library.rst index 476a43d9c288f1..e2f8004c7e3aea 100644 --- a/Doc/faq/library.rst +++ b/Doc/faq/library.rst @@ -405,22 +405,37 @@ lists. When in doubt, use a mutex! 
Can't we get rid of the Global Interpreter Lock? ------------------------------------------------ -.. XXX link to dbeazley's talk about GIL? - The :term:`global interpreter lock` (GIL) is often seen as a hindrance to Python's deployment on high-end multiprocessor server machines, because a multi-threaded Python program effectively only uses one CPU, due to the insistence that (almost) all Python code can only run while the GIL is held. -Back in the days of Python 1.5, Greg Stein actually implemented a comprehensive +With the approval of :pep:`703` work is now underway to remove the GIL from the +CPython implementation of Python. Initially it will be implemented as an +optional compiler flag when building the interpreter, and so separate +builds will be available with and without the GIL. Long-term, the hope is +to settle on a single build, once the performance implications of removing the +GIL are fully understood. Python 3.13 is likely to be the first release +containing this work, although it may not be completely functional in this +release. + +The current work to remove the GIL is based on a +`fork of Python 3.9 with the GIL removed `_ +by Sam Gross. +Prior to that, +in the days of Python 1.5, Greg Stein actually implemented a comprehensive patch set (the "free threading" patches) that removed the GIL and replaced it -with fine-grained locking. Adam Olsen recently did a similar experiment +with fine-grained locking. Adam Olsen did a similar experiment in his `python-safethread `_ -project. Unfortunately, both experiments exhibited a sharp drop in single-thread +project. Unfortunately, both of these earlier experiments exhibited a sharp +drop in single-thread performance (at least 30% slower), due to the amount of fine-grained locking -necessary to compensate for the removal of the GIL. +necessary to compensate for the removal of the GIL. The Python 3.9 fork +is the first attempt at removing the GIL with an acceptable performance +impact. -This doesn't mean that you can't make good use of Python on multi-CPU machines! +The presence of the GIL in current Python releases +doesn't mean that you can't make good use of Python on multi-CPU machines! You just have to be creative with dividing the work up between multiple *processes* rather than multiple *threads*. The :class:`~concurrent.futures.ProcessPoolExecutor` class in the new @@ -434,22 +449,13 @@ thread of execution is in the C code and allow other threads to get some work done. Some standard library modules such as :mod:`zlib` and :mod:`hashlib` already do this. -It has been suggested that the GIL should be a per-interpreter-state lock rather -than truly global; interpreters then wouldn't be able to share objects. -Unfortunately, this isn't likely to happen either. It would be a tremendous -amount of work, because many object implementations currently have global state. -For example, small integers and short strings are cached; these caches would -have to be moved to the interpreter state. Other object types have their own -free list; these free lists would have to be moved to the interpreter state. -And so on. - -And I doubt that it can even be done in finite time, because the same problem -exists for 3rd party extensions. It is likely that 3rd party extensions are -being written at a faster rate than you can convert them to store all their -global state in the interpreter state. - -And finally, once you have multiple interpreters not sharing any state, what -have you gained over running each interpreter in a separate process? 
+An alternative approach to reducing the impact of the GIL is +to make the GIL a per-interpreter-state lock rather than truly global. +This was :ref:`first implemented in Python 3.12 ` and is +available in the C API. A Python interface to it is expected in Python 3.13. +The main limitation to it at the moment is likely to be 3rd party extension +modules, since these must be written with multiple interpreters in mind in +order to be usable, so many older extension modules will not be usable. Input and Output diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst index f31e6ea848f3b2..534939943d3326 100644 --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -534,9 +534,7 @@ Data Types >>> list(purple) [, ] - .. versionchanged:: 3.11 - - Aliases are no longer returned during iteration. + .. versionadded:: 3.11 .. method:: __len__(self): diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst index c3406b166c3d89..87d5ef1e42ca3a 100644 --- a/Doc/library/sqlite3.rst +++ b/Doc/library/sqlite3.rst @@ -1137,12 +1137,19 @@ Connection objects .. _Loading an Extension: https://www.sqlite.org/loadext.html#loading_an_extension_ - .. method:: iterdump + .. method:: iterdump(*, filter=None) Return an :term:`iterator` to dump the database as SQL source code. Useful when saving an in-memory database for later restoration. Similar to the ``.dump`` command in the :program:`sqlite3` shell. + :param filter: + + An optional ``LIKE`` pattern for database objects to dump, e.g. ``prefix_%``. + If ``None`` (the default), all database objects will be included. + + :type filter: str | None + Example: .. testcode:: @@ -1158,6 +1165,8 @@ Connection objects :ref:`sqlite3-howto-encoding` + .. versionchanged:: 3.13 + Added the *filter* parameter. .. method:: backup(target, *, pages=-1, progress=None, name="main", sleep=0.250) diff --git a/Doc/library/test.rst b/Doc/library/test.rst index cad1023021a512..7d28f625345726 100644 --- a/Doc/library/test.rst +++ b/Doc/library/test.rst @@ -324,9 +324,9 @@ The :mod:`test.support` module defines the following constants: .. data:: Py_DEBUG - True if Python is built with the :c:macro:`Py_DEBUG` macro defined: if - Python is :ref:`built in debug mode ` - (:option:`./configure --with-pydebug <--with-pydebug>`). + True if Python was built with the :c:macro:`Py_DEBUG` macro + defined, that is, if + Python was :ref:`built in debug mode `. .. versionadded:: 3.12 diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst index 5e5f1e295f4d70..2ac5afa8ce601c 100644 --- a/Doc/whatsnew/3.13.rst +++ b/Doc/whatsnew/3.13.rst @@ -92,10 +92,6 @@ Interpreter improvements: New Features ============ -* The cyclic garbage collector is now incremental. - This means that maximum pause times are reduced, - by an order of magnitude or more for larger heaps. - Improved Error Messages ----------------------- @@ -105,13 +101,6 @@ Improved Error Messages variables. See also :ref:`using-on-controlling-color`. (Contributed by Pablo Galindo Salgado in :gh:`112730`.) -Incremental Garbage Collection ------------------------------- - -* The cycle garbage collector is now incremental. - This means that maximum pause times are reduced - by an order of magnitude or more for larger heaps. - Other Language Changes ====================== @@ -257,29 +246,6 @@ fractions sign handling, minimum width and grouping. (Contributed by Mark Dickinson in :gh:`111320`.) 
-gc --- -* The cyclic garbage collector is now incremental, which changes the meanings - of the results of :meth:`gc.get_threshold` and :meth:`gc.get_threshold` as - well as :meth:`gc.get_count` and :meth:`gc.get_stats`. -* :meth:`gc.get_threshold` returns a three-tuple for backwards compatibility, - the first value is the threshold for young collections, as before, the second - value determines the rate at which the old collection is scanned; the - default is 10 and higher values mean that the old collection is scanned more slowly. - The third value is meangless and is always zero. -* :meth:`gc.set_threshold` ignores any items after the second. -* :meth:`gc.get_count` and :meth:`gc.get_stats`. - These functions return the same format of results as before. - The only difference is that instead of the results refering to - the young, aging and old generations, the results refer to the - young generation and the aging and collecting spaces of the old generation. - -In summary, code that attempted to manipulate the behavior of the cycle GC may -not work as well as intended, but it is very unlikely to harmful. -All other code will work just fine. -Uses should avoid calling :meth:`gc.collect` unless their workload is episodic, -but that has always been the case to some extent. - glob ---- @@ -391,7 +357,7 @@ pathlib (Contributed by Barney Gale in :gh:`89812`.) * Add :meth:`pathlib.Path.from_uri`, a new constructor to create a :class:`pathlib.Path` - object from a 'file' URI (``file:/``). + object from a 'file' URI (``file://``). (Contributed by Barney Gale in :gh:`107465`.) * Add :meth:`pathlib.PurePath.full_match` for matching paths with @@ -438,6 +404,10 @@ sqlite3 object is not :meth:`closed ` explicitly. (Contributed by Erlend E. Aasland in :gh:`105539`.) +* Add *filter* keyword-only parameter to :meth:`sqlite3.Connection.iterdump` + for filtering database objects to dump. + (Contributed by Mariusz Felisiak in :gh:`91602`.) + subprocess ---------- @@ -1436,6 +1406,12 @@ New Features not needed. (Contributed by Victor Stinner in :gh:`106004`.) +* Added :c:func:`PyDict_SetDefaultRef`, which is similar to + :c:func:`PyDict_SetDefault` but returns a :term:`strong reference` instead of + a :term:`borrowed reference`. This function returns ``-1`` on error, ``0`` on + insertion, and ``1`` if the key was already present in the dictionary. + (Contributed by Sam Gross in :gh:`112066`.) + * Add :c:func:`PyDict_ContainsString` function: same as :c:func:`PyDict_Contains`, but *key* is specified as a :c:expr:`const char*` UTF-8 encoded bytes string, rather than a :c:expr:`PyObject*`. diff --git a/Include/cpython/dictobject.h b/Include/cpython/dictobject.h index 1720fe6f01ea37..35b6a822a0dfff 100644 --- a/Include/cpython/dictobject.h +++ b/Include/cpython/dictobject.h @@ -41,6 +41,16 @@ PyAPI_FUNC(PyObject *) _PyDict_GetItemStringWithError(PyObject *, const char *); PyAPI_FUNC(PyObject *) PyDict_SetDefault( PyObject *mp, PyObject *key, PyObject *defaultobj); +// Inserts `key` with a value `default_value`, if `key` is not already present +// in the dictionary. If `result` is not NULL, then the value associated +// with `key` is returned in `*result` (either the existing value, or the now +// inserted `default_value`). 
+// Returns: +// -1 on error +// 0 if `key` was not present and `default_value` was inserted +// 1 if `key` was present and `default_value` was not inserted +PyAPI_FUNC(int) PyDict_SetDefaultRef(PyObject *mp, PyObject *key, PyObject *default_value, PyObject **result); + /* Get the number of items of a dictionary. */ static inline Py_ssize_t PyDict_GET_SIZE(PyObject *op) { PyDictObject *mp; diff --git a/Include/internal/pycore_critical_section.h b/Include/internal/pycore_critical_section.h index bf2bbfffc38bd0..38ed8cd69804ba 100644 --- a/Include/internal/pycore_critical_section.h +++ b/Include/internal/pycore_critical_section.h @@ -104,12 +104,37 @@ extern "C" { # define Py_END_CRITICAL_SECTION2() \ _PyCriticalSection2_End(&_cs2); \ } + +// Asserts that the mutex is locked. The mutex must be held by the +// top-most critical section otherwise there's the possibility +// that the mutex would be swalled out in some code paths. +#define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex) \ + _PyCriticalSection_AssertHeld(mutex) + +// Asserts that the mutex for the given object is locked. The mutex must +// be held by the top-most critical section otherwise there's the +// possibility that the mutex would be swalled out in some code paths. +#ifdef Py_DEBUG + +#define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op) \ + if (Py_REFCNT(op) != 1) { \ + _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(&_PyObject_CAST(op)->ob_mutex); \ + } + +#else /* Py_DEBUG */ + +#define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op) + +#endif /* Py_DEBUG */ + #else /* !Py_GIL_DISABLED */ // The critical section APIs are no-ops with the GIL. # define Py_BEGIN_CRITICAL_SECTION(op) # define Py_END_CRITICAL_SECTION() # define Py_BEGIN_CRITICAL_SECTION2(a, b) # define Py_END_CRITICAL_SECTION2() +# define _Py_CRITICAL_SECTION_ASSERT_MUTEX_LOCKED(mutex) +# define _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op) #endif /* !Py_GIL_DISABLED */ typedef struct { @@ -236,6 +261,27 @@ _PyCriticalSection2_End(_PyCriticalSection2 *c) PyAPI_FUNC(void) _PyCriticalSection_SuspendAll(PyThreadState *tstate); +#ifdef Py_GIL_DISABLED + +static inline void +_PyCriticalSection_AssertHeld(PyMutex *mutex) { +#ifdef Py_DEBUG + PyThreadState *tstate = _PyThreadState_GET(); + uintptr_t prev = tstate->critical_section; + if (prev & _Py_CRITICAL_SECTION_TWO_MUTEXES) { + _PyCriticalSection2 *cs = (_PyCriticalSection2 *)(prev & ~_Py_CRITICAL_SECTION_MASK); + assert(cs != NULL && (cs->base.mutex == mutex || cs->mutex2 == mutex)); + } + else { + _PyCriticalSection *cs = (_PyCriticalSection *)(tstate->critical_section & ~_Py_CRITICAL_SECTION_MASK); + assert(cs != NULL && cs->mutex == mutex); + } + +#endif +} + +#endif + #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_gc.h b/Include/internal/pycore_gc.h index aeb07238fc8345..8d0bc2a218e48d 100644 --- a/Include/internal/pycore_gc.h +++ b/Include/internal/pycore_gc.h @@ -88,15 +88,11 @@ static inline void _PyObject_GC_SET_SHARED(PyObject *op) { /* Bit flags for _gc_prev */ /* Bit 0 is set when tp_finalize is called */ -#define _PyGC_PREV_MASK_FINALIZED 1 +#define _PyGC_PREV_MASK_FINALIZED (1) /* Bit 1 is set when the object is in generation which is GCed currently. */ -#define _PyGC_PREV_MASK_COLLECTING 2 - -/* Bit 0 is set if the object belongs to old space 1 */ -#define _PyGC_NEXT_MASK_OLD_SPACE_1 1 - +#define _PyGC_PREV_MASK_COLLECTING (2) /* The (N-2) most significant bits contain the real address. 
*/ -#define _PyGC_PREV_SHIFT 2 +#define _PyGC_PREV_SHIFT (2) #define _PyGC_PREV_MASK (((uintptr_t) -1) << _PyGC_PREV_SHIFT) /* set for debugging information */ @@ -122,13 +118,11 @@ typedef enum { // Lowest bit of _gc_next is used for flags only in GC. // But it is always 0 for normal code. static inline PyGC_Head* _PyGCHead_NEXT(PyGC_Head *gc) { - uintptr_t next = gc->_gc_next & _PyGC_PREV_MASK; + uintptr_t next = gc->_gc_next; return (PyGC_Head*)next; } static inline void _PyGCHead_SET_NEXT(PyGC_Head *gc, PyGC_Head *next) { - uintptr_t unext = (uintptr_t)next; - assert((unext & ~_PyGC_PREV_MASK) == 0); - gc->_gc_next = (gc->_gc_next & ~_PyGC_PREV_MASK) | unext; + gc->_gc_next = (uintptr_t)next; } // Lowest two bits of _gc_prev is used for _PyGC_PREV_MASK_* flags. @@ -136,7 +130,6 @@ static inline PyGC_Head* _PyGCHead_PREV(PyGC_Head *gc) { uintptr_t prev = (gc->_gc_prev & _PyGC_PREV_MASK); return (PyGC_Head*)prev; } - static inline void _PyGCHead_SET_PREV(PyGC_Head *gc, PyGC_Head *prev) { uintptr_t uprev = (uintptr_t)prev; assert((uprev & ~_PyGC_PREV_MASK) == 0); @@ -222,13 +215,6 @@ struct gc_generation { generations */ }; -struct gc_collection_stats { - /* number of collected objects */ - Py_ssize_t collected; - /* total number of uncollectable objects (put into gc.garbage) */ - Py_ssize_t uncollectable; -}; - /* Running stats per generation */ struct gc_generation_stats { /* total number of collections */ @@ -250,8 +236,8 @@ struct _gc_runtime_state { int enabled; int debug; /* linked lists of container objects */ - struct gc_generation young; - struct gc_generation old[2]; + struct gc_generation generations[NUM_GENERATIONS]; + PyGC_Head *generation0; /* a permanent generation which won't be collected */ struct gc_generation permanent_generation; struct gc_generation_stats generation_stats[NUM_GENERATIONS]; @@ -264,20 +250,22 @@ struct _gc_runtime_state { /* This is the number of objects that survived the last full collection. It approximates the number of long lived objects tracked by the GC. + (by "full collection", we mean a collection of the oldest generation). */ Py_ssize_t long_lived_total; - - Py_ssize_t work_to_do; - /* Which of the old spaces is the visited space */ - int visited_space; + /* This is the number of objects that survived all "non-full" + collections, and are awaiting to undergo a full collection for + the first time. */ + Py_ssize_t long_lived_pending; }; extern void _PyGC_InitState(struct _gc_runtime_state *); -extern Py_ssize_t _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason); -extern void _PyGC_CollectNoFail(PyThreadState *tstate); +extern Py_ssize_t _PyGC_Collect(PyThreadState *tstate, int generation, + _PyGC_Reason reason); +extern Py_ssize_t _PyGC_CollectNoFail(PyThreadState *tstate); /* Freeze objects tracked by the GC and ignore them in future collections. 
*/ extern void _PyGC_Freeze(PyInterpreterState *interp); diff --git a/Include/internal/pycore_global_objects_fini_generated.h b/Include/internal/pycore_global_objects_fini_generated.h index dd09ff40f39fe6..932738c3049882 100644 --- a/Include/internal/pycore_global_objects_fini_generated.h +++ b/Include/internal/pycore_global_objects_fini_generated.h @@ -940,6 +940,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fileno)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(filepath)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(fillvalue)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(filter)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(filters)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(final)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(find_class)); diff --git a/Include/internal/pycore_global_strings.h b/Include/internal/pycore_global_strings.h index 79d6509abcdfd9..da62b4f0a951ff 100644 --- a/Include/internal/pycore_global_strings.h +++ b/Include/internal/pycore_global_strings.h @@ -429,6 +429,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(fileno) STRUCT_FOR_ID(filepath) STRUCT_FOR_ID(fillvalue) + STRUCT_FOR_ID(filter) STRUCT_FOR_ID(filters) STRUCT_FOR_ID(final) STRUCT_FOR_ID(find_class) diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h index efa712c4a0b458..34a83ea228e8b1 100644 --- a/Include/internal/pycore_object.h +++ b/Include/internal/pycore_object.h @@ -125,7 +125,19 @@ static inline void _Py_RefcntAdd(PyObject* op, Py_ssize_t n) } #define _Py_RefcntAdd(op, n) _Py_RefcntAdd(_PyObject_CAST(op), n) -extern void _Py_SetImmortal(PyObject *op); +static inline void _Py_SetImmortal(PyObject *op) +{ + if (op) { +#ifdef Py_GIL_DISABLED + op->ob_tid = _Py_UNOWNED_TID; + op->ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL; + op->ob_ref_shared = 0; +#else + op->ob_refcnt = _Py_IMMORTAL_REFCNT; +#endif + } +} +#define _Py_SetImmortal(op) _Py_SetImmortal(_PyObject_CAST(op)) // Makes an immortal object mortal again with the specified refcnt. Should only // be used during runtime finalization. @@ -313,12 +325,11 @@ static inline void _PyObject_GC_TRACK( filename, lineno, __func__); PyInterpreterState *interp = _PyInterpreterState_GET(); - PyGC_Head *generation0 = &interp->gc.young.head; + PyGC_Head *generation0 = interp->gc.generation0; PyGC_Head *last = (PyGC_Head*)(generation0->_gc_prev); _PyGCHead_SET_NEXT(last, gc); _PyGCHead_SET_PREV(gc, last); _PyGCHead_SET_NEXT(gc, generation0); - assert((gc->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1) == 0); generation0->_gc_prev = (uintptr_t)gc; #endif } diff --git a/Include/internal/pycore_pythread.h b/Include/internal/pycore_pythread.h index 9c9a09f60f3441..265299d7574838 100644 --- a/Include/internal/pycore_pythread.h +++ b/Include/internal/pycore_pythread.h @@ -9,6 +9,7 @@ extern "C" { #endif #include "dynamic_annotations.h" // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX +#include "pycore_llist.h" // struct llist_node // Get _POSIX_THREADS and _POSIX_SEMAPHORES macros if available #if (defined(HAVE_UNISTD_H) && !defined(_POSIX_THREADS) \ @@ -75,14 +76,22 @@ struct _pythread_runtime_state { struct py_stub_tls_entry tls_entries[PTHREAD_KEYS_MAX]; } stubs; #endif + + // Linked list of ThreadHandleObjects + struct llist_node handles; }; +#define _pythread_RUNTIME_INIT(pythread) \ + { \ + .handles = LLIST_INIT(pythread.handles), \ + } #ifdef HAVE_FORK /* Private function to reinitialize a lock at fork in the child process. 
Reset the lock to the unlocked state. Return 0 on success, return -1 on error. */ extern int _PyThread_at_fork_reinit(PyThread_type_lock *lock); +extern void _PyThread_AfterFork(struct _pythread_runtime_state *state); #endif /* HAVE_FORK */ @@ -143,12 +152,6 @@ PyAPI_FUNC(int) PyThread_join_thread(PyThread_handle_t); */ PyAPI_FUNC(int) PyThread_detach_thread(PyThread_handle_t); -/* - * Obtain the new thread ident and handle in a forked child process. - */ -PyAPI_FUNC(void) PyThread_update_thread_after_fork(PyThread_ident_t* ident, - PyThread_handle_t* handle); - #ifdef __cplusplus } #endif diff --git a/Include/internal/pycore_runtime_init.h b/Include/internal/pycore_runtime_init.h index 4370ad05bdc058..571a7d612c94e2 100644 --- a/Include/internal/pycore_runtime_init.h +++ b/Include/internal/pycore_runtime_init.h @@ -16,6 +16,7 @@ extern "C" { #include "pycore_parser.h" // _parser_runtime_state_INIT #include "pycore_pyhash.h" // pyhash_state_INIT #include "pycore_pymem_init.h" // _pymem_allocators_standard_INIT +#include "pycore_pythread.h" // _pythread_RUNTIME_INIT #include "pycore_runtime_init_generated.h" // _Py_bytes_characters_INIT #include "pycore_signal.h" // _signals_RUNTIME_INIT #include "pycore_tracemalloc.h" // _tracemalloc_runtime_state_INIT @@ -90,6 +91,7 @@ extern PyTypeObject _PyExc_MemoryError; }, \ .obmalloc = _obmalloc_global_state_INIT, \ .pyhash_state = pyhash_state_INIT, \ + .threads = _pythread_RUNTIME_INIT(runtime.threads), \ .signals = _signals_RUNTIME_INIT, \ .interpreters = { \ /* This prevents interpreters from getting created \ @@ -160,12 +162,12 @@ extern PyTypeObject _PyExc_MemoryError; }, \ .gc = { \ .enabled = 1, \ - .young = { .threshold = 2000, }, \ - .old = { \ + .generations = { \ + /* .head is set in _PyGC_InitState(). 
*/ \ + { .threshold = 700, }, \ + { .threshold = 10, }, \ { .threshold = 10, }, \ - { .threshold = 0, }, \ }, \ - .work_to_do = -5000, \ }, \ .object_state = _py_object_state_INIT(INTERP), \ .dtoa = _dtoa_state_INIT(&(INTERP)), \ diff --git a/Include/internal/pycore_runtime_init_generated.h b/Include/internal/pycore_runtime_init_generated.h index f3c55acfb3c282..68fbbcb4378e17 100644 --- a/Include/internal/pycore_runtime_init_generated.h +++ b/Include/internal/pycore_runtime_init_generated.h @@ -938,6 +938,7 @@ extern "C" { INIT_ID(fileno), \ INIT_ID(filepath), \ INIT_ID(fillvalue), \ + INIT_ID(filter), \ INIT_ID(filters), \ INIT_ID(final), \ INIT_ID(find_class), \ diff --git a/Include/internal/pycore_symtable.h b/Include/internal/pycore_symtable.h index 1d782ca2c96e05..b44393b5644673 100644 --- a/Include/internal/pycore_symtable.h +++ b/Include/internal/pycore_symtable.h @@ -109,18 +109,18 @@ extern PyObject* _Py_Mangle(PyObject *p, PyObject *name); /* Flags for def-use information */ -#define DEF_GLOBAL 1 /* global stmt */ -#define DEF_LOCAL 2 /* assignment in code block */ -#define DEF_PARAM 2<<1 /* formal parameter */ -#define DEF_NONLOCAL 2<<2 /* nonlocal stmt */ -#define USE 2<<3 /* name is used */ -#define DEF_FREE 2<<4 /* name used but not defined in nested block */ -#define DEF_FREE_CLASS 2<<5 /* free variable from class's method */ -#define DEF_IMPORT 2<<6 /* assignment occurred via import */ -#define DEF_ANNOT 2<<7 /* this name is annotated */ -#define DEF_COMP_ITER 2<<8 /* this name is a comprehension iteration variable */ -#define DEF_TYPE_PARAM 2<<9 /* this name is a type parameter */ -#define DEF_COMP_CELL 2<<10 /* this name is a cell in an inlined comprehension */ +#define DEF_GLOBAL 1 /* global stmt */ +#define DEF_LOCAL 2 /* assignment in code block */ +#define DEF_PARAM (2<<1) /* formal parameter */ +#define DEF_NONLOCAL (2<<2) /* nonlocal stmt */ +#define USE (2<<3) /* name is used */ +#define DEF_FREE (2<<4) /* name used but not defined in nested block */ +#define DEF_FREE_CLASS (2<<5) /* free variable from class's method */ +#define DEF_IMPORT (2<<6) /* assignment occurred via import */ +#define DEF_ANNOT (2<<7) /* this name is annotated */ +#define DEF_COMP_ITER (2<<8) /* this name is a comprehension iteration variable */ +#define DEF_TYPE_PARAM (2<<9) /* this name is a type parameter */ +#define DEF_COMP_CELL (2<<10) /* this name is a cell in an inlined comprehension */ #define DEF_BOUND (DEF_LOCAL | DEF_PARAM | DEF_IMPORT) diff --git a/Include/internal/pycore_unicodeobject_generated.h b/Include/internal/pycore_unicodeobject_generated.h index 2e9572382fe033..c8458b4e36ccc9 100644 --- a/Include/internal/pycore_unicodeobject_generated.h +++ b/Include/internal/pycore_unicodeobject_generated.h @@ -1128,6 +1128,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(fillvalue); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(filter); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(filters); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); diff --git a/Lib/mailbox.py b/Lib/mailbox.py index 81ea210cf815a4..746811bd559412 100644 --- a/Lib/mailbox.py +++ b/Lib/mailbox.py @@ -830,10 +830,11 @@ def get_message(self, key): """Return a Message representation or raise a KeyError.""" start, stop = self._lookup(key) self._file.seek(start) - from_line = self._file.readline().replace(linesep, 
b'') + from_line = self._file.readline().replace(linesep, b'').decode('ascii') string = self._file.read(stop - self._file.tell()) msg = self._message_factory(string.replace(linesep, b'\n')) - msg.set_from(from_line[5:].decode('ascii')) + msg.set_unixfrom(from_line) + msg.set_from(from_line[5:]) return msg def get_string(self, key, from_=False): diff --git a/Lib/pathlib/_abc.py b/Lib/pathlib/_abc.py index 91f5cd6c01e9d0..e4b1201a3703c3 100644 --- a/Lib/pathlib/_abc.py +++ b/Lib/pathlib/_abc.py @@ -95,7 +95,7 @@ def _select_recursive(parent_paths, dir_only, follow_symlinks): if follow_symlinks is None: follow_symlinks = False for parent_path in parent_paths: - paths = [parent_path] + paths = [parent_path._make_child_relpath('')] while paths: path = paths.pop() yield path diff --git a/Lib/sqlite3/dump.py b/Lib/sqlite3/dump.py index 719dfc8947697d..9dcce7dc76ced4 100644 --- a/Lib/sqlite3/dump.py +++ b/Lib/sqlite3/dump.py @@ -15,7 +15,7 @@ def _quote_value(value): return "'{0}'".format(value.replace("'", "''")) -def _iterdump(connection): +def _iterdump(connection, *, filter=None): """ Returns an iterator to the dump of the database in an SQL text format. @@ -32,15 +32,23 @@ def _iterdump(connection): yield('PRAGMA foreign_keys=OFF;') yield('BEGIN TRANSACTION;') + if filter: + # Return database objects which match the filter pattern. + filter_name_clause = 'AND "name" LIKE ?' + params = [filter] + else: + filter_name_clause = "" + params = [] # sqlite_master table contains the SQL CREATE statements for the database. - q = """ + q = f""" SELECT "name", "type", "sql" FROM "sqlite_master" WHERE "sql" NOT NULL AND "type" == 'table' + {filter_name_clause} ORDER BY "name" """ - schema_res = cu.execute(q) + schema_res = cu.execute(q, params) sqlite_sequence = [] for table_name, type, sql in schema_res.fetchall(): if table_name == 'sqlite_sequence': @@ -82,13 +90,14 @@ def _iterdump(connection): yield("{0};".format(row[0])) # Now when the type is 'index', 'trigger', or 'view' - q = """ + q = f""" SELECT "name", "type", "sql" FROM "sqlite_master" WHERE "sql" NOT NULL AND "type" IN ('index', 'trigger', 'view') + {filter_name_clause} """ - schema_res = cu.execute(q) + schema_res = cu.execute(q, params) for name, type, sql in schema_res.fetchall(): yield('{0};'.format(sql)) diff --git a/Lib/test/test_capi/test_dict.py b/Lib/test/test_capi/test_dict.py index 57a7238588eae0..cca6145bc90c04 100644 --- a/Lib/test/test_capi/test_dict.py +++ b/Lib/test/test_capi/test_dict.py @@ -339,6 +339,28 @@ def test_dict_setdefault(self): # CRASHES setdefault({}, 'a', NULL) # CRASHES setdefault(NULL, 'a', 5) + def test_dict_setdefaultref(self): + setdefault = _testcapi.dict_setdefaultref + dct = {} + self.assertEqual(setdefault(dct, 'a', 5), 5) + self.assertEqual(dct, {'a': 5}) + self.assertEqual(setdefault(dct, 'a', 8), 5) + self.assertEqual(dct, {'a': 5}) + + dct2 = DictSubclass() + self.assertEqual(setdefault(dct2, 'a', 5), 5) + self.assertEqual(dct2, {'a': 5}) + self.assertEqual(setdefault(dct2, 'a', 8), 5) + self.assertEqual(dct2, {'a': 5}) + + self.assertRaises(TypeError, setdefault, {}, [], 5) # unhashable + self.assertRaises(SystemError, setdefault, UserDict(), 'a', 5) + self.assertRaises(SystemError, setdefault, [1], 0, 5) + self.assertRaises(SystemError, setdefault, 42, 'a', 5) + # CRASHES setdefault({}, NULL, 5) + # CRASHES setdefault({}, 'a', NULL) + # CRASHES setdefault(NULL, 'a', 5) + def test_mapping_keys_valuesitems(self): class BadMapping(dict): def keys(self): diff --git a/Lib/test/test_gc.py 
b/Lib/test/test_gc.py index 0002852fce9643..b01f344cb14a1a 100644 --- a/Lib/test/test_gc.py +++ b/Lib/test/test_gc.py @@ -383,11 +383,19 @@ def test_collect_generations(self): # each call to collect(N) x = [] gc.collect(0) - # x is now in the old gen + # x is now in gen 1 a, b, c = gc.get_count() - # We don't check a since its exact values depends on + gc.collect(1) + # x is now in gen 2 + d, e, f = gc.get_count() + gc.collect(2) + # x is now in gen 3 + g, h, i = gc.get_count() + # We don't check a, d, g since their exact values depends on # internal implementation details of the interpreter. self.assertEqual((b, c), (1, 0)) + self.assertEqual((e, f), (0, 1)) + self.assertEqual((h, i), (0, 0)) def test_trashcan(self): class Ouch: @@ -838,6 +846,16 @@ def test_get_objects_generations(self): self.assertFalse( any(l is element for element in gc.get_objects(generation=2)) ) + gc.collect(generation=1) + self.assertFalse( + any(l is element for element in gc.get_objects(generation=0)) + ) + self.assertFalse( + any(l is element for element in gc.get_objects(generation=1)) + ) + self.assertTrue( + any(l is element for element in gc.get_objects(generation=2)) + ) gc.collect(generation=2) self.assertFalse( any(l is element for element in gc.get_objects(generation=0)) diff --git a/Lib/test/test_listcomps.py b/Lib/test/test_listcomps.py index f95a78aff0c711..2868dd01545b95 100644 --- a/Lib/test/test_listcomps.py +++ b/Lib/test/test_listcomps.py @@ -156,6 +156,18 @@ def method(self): self.assertEqual(C.y, [4, 4, 4, 4, 4]) self.assertIs(C().method(), C) + def test_references_super(self): + code = """ + res = [super for x in [1]] + """ + self._check_in_scopes(code, outputs={"res": [super]}) + + def test_references___class__(self): + code = """ + res = [__class__ for x in [1]] + """ + self._check_in_scopes(code, raises=NameError) + def test_inner_cell_shadows_outer(self): code = """ items = [(lambda: i) for i in range(5)] diff --git a/Lib/test/test_mailbox.py b/Lib/test/test_mailbox.py index d84faad0eb3406..c52c014185bec7 100644 --- a/Lib/test/test_mailbox.py +++ b/Lib/test/test_mailbox.py @@ -1127,12 +1127,14 @@ def test_add_from_string(self): # Add a string starting with 'From ' to the mailbox key = self._box.add('From foo@bar blah\nFrom: foo\n\n0\n') self.assertEqual(self._box[key].get_from(), 'foo@bar blah') + self.assertEqual(self._box[key].get_unixfrom(), 'From foo@bar blah') self.assertEqual(self._box[key].get_payload(), '0\n') def test_add_from_bytes(self): # Add a byte string starting with 'From ' to the mailbox key = self._box.add(b'From foo@bar blah\nFrom: foo\n\n0\n') self.assertEqual(self._box[key].get_from(), 'foo@bar blah') + self.assertEqual(self._box[key].get_unixfrom(), 'From foo@bar blah') self.assertEqual(self._box[key].get_payload(), '0\n') def test_add_mbox_or_mmdf_message(self): @@ -1667,18 +1669,23 @@ def test_initialize_with_unixfrom(self): msg = mailbox.Message(_sample_message) msg.set_unixfrom('From foo@bar blah') msg = mailbox.mboxMessage(msg) - self.assertEqual(msg.get_from(), 'foo@bar blah', msg.get_from()) + self.assertEqual(msg.get_from(), 'foo@bar blah') + self.assertEqual(msg.get_unixfrom(), 'From foo@bar blah') def test_from(self): # Get and set "From " line msg = mailbox.mboxMessage(_sample_message) self._check_from(msg) + self.assertIsNone(msg.get_unixfrom()) msg.set_from('foo bar') self.assertEqual(msg.get_from(), 'foo bar') + self.assertIsNone(msg.get_unixfrom()) msg.set_from('foo@bar', True) self._check_from(msg, 'foo@bar') + self.assertIsNone(msg.get_unixfrom()) 
msg.set_from('blah@temp', time.localtime()) self._check_from(msg, 'blah@temp') + self.assertIsNone(msg.get_unixfrom()) def test_flags(self): # Use get_flags(), set_flags(), add_flag(), remove_flag() @@ -1866,6 +1873,7 @@ def test_maildir_to_mboxmmdf(self): self.assertEqual(msg.get_flags(), result) self.assertEqual(msg.get_from(), 'MAILER-DAEMON %s' % time.asctime(time.gmtime(0.0))) + self.assertIsNone(msg.get_unixfrom()) msg_maildir.set_subdir('cur') self.assertEqual(class_(msg_maildir).get_flags(), 'RODFA') @@ -1914,10 +1922,12 @@ def test_mboxmmdf_to_mboxmmdf(self): msg_mboxMMDF = class_(_sample_message) msg_mboxMMDF.set_flags('RODFA') msg_mboxMMDF.set_from('foo@bar') + self.assertIsNone(msg_mboxMMDF.get_unixfrom()) for class2_ in (mailbox.mboxMessage, mailbox.MMDFMessage): msg2 = class2_(msg_mboxMMDF) self.assertEqual(msg2.get_flags(), 'RODFA') self.assertEqual(msg2.get_from(), 'foo@bar') + self.assertIsNone(msg2.get_unixfrom()) def test_mboxmmdf_to_mh(self): # Convert mboxMessage and MMDFMessage to MHMessage diff --git a/Lib/test/test_pathlib/test_pathlib_abc.py b/Lib/test/test_pathlib/test_pathlib_abc.py index 207579ccbf443b..1d30deca8f7a1b 100644 --- a/Lib/test/test_pathlib/test_pathlib_abc.py +++ b/Lib/test/test_pathlib/test_pathlib_abc.py @@ -1791,25 +1791,25 @@ def _check(path, glob, expected): _check(p, "*/", ["dirA/", "dirB/", "dirC/", "dirE/", "linkB/"]) _check(p, "dir*/*/..", ["dirC/dirD/..", "dirA/linkC/..", "dirB/linkD/.."]) _check(p, "dir*/**", [ - "dirA", "dirA/linkC", "dirA/linkC/fileB", "dirA/linkC/linkD", "dirA/linkC/linkD/fileB", - "dirB", "dirB/fileB", "dirB/linkD", "dirB/linkD/fileB", - "dirC", "dirC/fileC", "dirC/dirD", "dirC/dirD/fileD", "dirC/novel.txt", - "dirE"]) + "dirA/", "dirA/linkC", "dirA/linkC/fileB", "dirA/linkC/linkD", "dirA/linkC/linkD/fileB", + "dirB/", "dirB/fileB", "dirB/linkD", "dirB/linkD/fileB", + "dirC/", "dirC/fileC", "dirC/dirD", "dirC/dirD/fileD", "dirC/novel.txt", + "dirE/"]) _check(p, "dir*/**/", ["dirA/", "dirA/linkC/", "dirA/linkC/linkD/", "dirB/", "dirB/linkD/", "dirC/", "dirC/dirD/", "dirE/"]) _check(p, "dir*/**/..", ["dirA/..", "dirA/linkC/..", "dirB/..", "dirB/linkD/..", "dirA/linkC/linkD/..", "dirC/..", "dirC/dirD/..", "dirE/.."]) _check(p, "dir*/*/**", [ - "dirA/linkC", "dirA/linkC/linkD", "dirA/linkC/fileB", "dirA/linkC/linkD/fileB", - "dirB/linkD", "dirB/linkD/fileB", - "dirC/dirD", "dirC/dirD/fileD"]) + "dirA/linkC/", "dirA/linkC/linkD", "dirA/linkC/fileB", "dirA/linkC/linkD/fileB", + "dirB/linkD/", "dirB/linkD/fileB", + "dirC/dirD/", "dirC/dirD/fileD"]) _check(p, "dir*/*/**/", ["dirA/linkC/", "dirA/linkC/linkD/", "dirB/linkD/", "dirC/dirD/"]) _check(p, "dir*/*/**/..", ["dirA/linkC/..", "dirA/linkC/linkD/..", "dirB/linkD/..", "dirC/dirD/.."]) _check(p, "dir*/**/fileC", ["dirC/fileC"]) _check(p, "dir*/*/../dirD/**/", ["dirC/dirD/../dirD/"]) - _check(p, "*/dirD/**", ["dirC/dirD", "dirC/dirD/fileD"]) + _check(p, "*/dirD/**", ["dirC/dirD/", "dirC/dirD/fileD"]) _check(p, "*/dirD/**/", ["dirC/dirD/"]) @needs_symlinks @@ -1827,19 +1827,19 @@ def _check(path, glob, expected): _check(p, "*/", ["dirA/", "dirB/", "dirC/", "dirE/"]) _check(p, "dir*/*/..", ["dirC/dirD/.."]) _check(p, "dir*/**", [ - "dirA", "dirA/linkC", - "dirB", "dirB/fileB", "dirB/linkD", - "dirC", "dirC/fileC", "dirC/dirD", "dirC/dirD/fileD", "dirC/novel.txt", - "dirE"]) + "dirA/", "dirA/linkC", + "dirB/", "dirB/fileB", "dirB/linkD", + "dirC/", "dirC/fileC", "dirC/dirD", "dirC/dirD/fileD", "dirC/novel.txt", + "dirE/"]) _check(p, "dir*/**/", ["dirA/", "dirB/", "dirC/", 
"dirC/dirD/", "dirE/"]) _check(p, "dir*/**/..", ["dirA/..", "dirB/..", "dirC/..", "dirC/dirD/..", "dirE/.."]) - _check(p, "dir*/*/**", ["dirC/dirD", "dirC/dirD/fileD"]) + _check(p, "dir*/*/**", ["dirC/dirD/", "dirC/dirD/fileD"]) _check(p, "dir*/*/**/", ["dirC/dirD/"]) _check(p, "dir*/*/**/..", ["dirC/dirD/.."]) _check(p, "dir*/**/fileC", ["dirC/fileC"]) - _check(p, "dir*/*/../dirD/**", ["dirC/dirD/../dirD", "dirC/dirD/../dirD/fileD"]) + _check(p, "dir*/*/../dirD/**", ["dirC/dirD/../dirD/", "dirC/dirD/../dirD/fileD"]) _check(p, "dir*/*/../dirD/**/", ["dirC/dirD/../dirD/"]) - _check(p, "*/dirD/**", ["dirC/dirD", "dirC/dirD/fileD"]) + _check(p, "*/dirD/**", ["dirC/dirD/", "dirC/dirD/fileD"]) _check(p, "*/dirD/**/", ["dirC/dirD/"]) def test_rglob_common(self): @@ -1876,13 +1876,13 @@ def _check(glob, expected): "dirC/dirD", "dirC/dirD/fileD"]) _check(p.rglob("file*"), ["dirC/fileC", "dirC/dirD/fileD"]) _check(p.rglob("**/file*"), ["dirC/fileC", "dirC/dirD/fileD"]) - _check(p.rglob("dir*/**"), ["dirC/dirD", "dirC/dirD/fileD"]) + _check(p.rglob("dir*/**"), ["dirC/dirD/", "dirC/dirD/fileD"]) _check(p.rglob("dir*/**/"), ["dirC/dirD/"]) _check(p.rglob("*/*"), ["dirC/dirD/fileD"]) _check(p.rglob("*/"), ["dirC/dirD/"]) _check(p.rglob(""), ["dirC/", "dirC/dirD/"]) _check(p.rglob("**"), [ - "dirC", "dirC/fileC", "dirC/dirD", "dirC/dirD/fileD", "dirC/novel.txt"]) + "dirC/", "dirC/fileC", "dirC/dirD", "dirC/dirD/fileD", "dirC/novel.txt"]) _check(p.rglob("**/"), ["dirC/", "dirC/dirD/"]) # gh-91616, a re module regression _check(p.rglob("*.txt"), ["dirC/novel.txt"]) diff --git a/Lib/test/test_sqlite3/test_dump.py b/Lib/test/test_sqlite3/test_dump.py index 2e1f0b80c10f46..7261b7f0dc93d0 100644 --- a/Lib/test/test_sqlite3/test_dump.py +++ b/Lib/test/test_sqlite3/test_dump.py @@ -54,6 +54,76 @@ def test_table_dump(self): [self.assertEqual(expected_sqls[i], actual_sqls[i]) for i in range(len(expected_sqls))] + def test_table_dump_filter(self): + all_table_sqls = [ + """CREATE TABLE "some_table_2" ("id_1" INTEGER);""", + """INSERT INTO "some_table_2" VALUES(3);""", + """INSERT INTO "some_table_2" VALUES(4);""", + """CREATE TABLE "test_table_1" ("id_2" INTEGER);""", + """INSERT INTO "test_table_1" VALUES(1);""", + """INSERT INTO "test_table_1" VALUES(2);""", + ] + all_views_sqls = [ + """CREATE VIEW "view_1" AS SELECT * FROM "some_table_2";""", + """CREATE VIEW "view_2" AS SELECT * FROM "test_table_1";""", + ] + # Create database structure. + for sql in [*all_table_sqls, *all_views_sqls]: + self.cu.execute(sql) + # %_table_% matches all tables. + dump_sqls = list(self.cx.iterdump(filter="%_table_%")) + self.assertEqual( + dump_sqls, + ["BEGIN TRANSACTION;", *all_table_sqls, "COMMIT;"], + ) + # view_% matches all views. + dump_sqls = list(self.cx.iterdump(filter="view_%")) + self.assertEqual( + dump_sqls, + ["BEGIN TRANSACTION;", *all_views_sqls, "COMMIT;"], + ) + # %_1 matches tables and views with the _1 suffix. + dump_sqls = list(self.cx.iterdump(filter="%_1")) + self.assertEqual( + dump_sqls, + [ + "BEGIN TRANSACTION;", + """CREATE TABLE "test_table_1" ("id_2" INTEGER);""", + """INSERT INTO "test_table_1" VALUES(1);""", + """INSERT INTO "test_table_1" VALUES(2);""", + """CREATE VIEW "view_1" AS SELECT * FROM "some_table_2";""", + "COMMIT;" + ], + ) + # some_% matches some_table_2. 
+ dump_sqls = list(self.cx.iterdump(filter="some_%")) + self.assertEqual( + dump_sqls, + [ + "BEGIN TRANSACTION;", + """CREATE TABLE "some_table_2" ("id_1" INTEGER);""", + """INSERT INTO "some_table_2" VALUES(3);""", + """INSERT INTO "some_table_2" VALUES(4);""", + "COMMIT;" + ], + ) + # Only single object. + dump_sqls = list(self.cx.iterdump(filter="view_2")) + self.assertEqual( + dump_sqls, + [ + "BEGIN TRANSACTION;", + """CREATE VIEW "view_2" AS SELECT * FROM "test_table_1";""", + "COMMIT;" + ], + ) + # % matches all objects. + dump_sqls = list(self.cx.iterdump(filter="%")) + self.assertEqual( + dump_sqls, + ["BEGIN TRANSACTION;", *all_table_sqls, *all_views_sqls, "COMMIT;"], + ) + def test_dump_autoincrement(self): expected = [ 'CREATE TABLE "t1" (id integer primary key autoincrement);', diff --git a/Lib/test/test_sysconfig.py b/Lib/test/test_sysconfig.py index be609a0abd29c8..bb87bf00dc2d1a 100644 --- a/Lib/test/test_sysconfig.py +++ b/Lib/test/test_sysconfig.py @@ -154,17 +154,21 @@ def test_posix_venv_scheme(self): 'python%d.%d' % sys.version_info[:2], 'site-packages') - # Resolve the paths in prefix - binpath = os.path.join(sys.prefix, binpath) - incpath = os.path.join(sys.prefix, incpath) - libpath = os.path.join(sys.prefix, libpath) + # Resolve the paths in an imaginary venv/ directory + binpath = os.path.join('venv', binpath) + incpath = os.path.join('venv', incpath) + libpath = os.path.join('venv', libpath) - self.assertEqual(binpath, sysconfig.get_path('scripts', scheme='posix_venv')) - self.assertEqual(libpath, sysconfig.get_path('purelib', scheme='posix_venv')) + # Mimic the venv module, set all bases to the venv directory + bases = ('base', 'platbase', 'installed_base', 'installed_platbase') + vars = {base: 'venv' for base in bases} + + self.assertEqual(binpath, sysconfig.get_path('scripts', scheme='posix_venv', vars=vars)) + self.assertEqual(libpath, sysconfig.get_path('purelib', scheme='posix_venv', vars=vars)) # The include directory on POSIX isn't exactly the same as before, # but it is "within" - sysconfig_includedir = sysconfig.get_path('include', scheme='posix_venv') + sysconfig_includedir = sysconfig.get_path('include', scheme='posix_venv', vars=vars) self.assertTrue(sysconfig_includedir.startswith(incpath + os.sep)) def test_nt_venv_scheme(self): @@ -174,14 +178,19 @@ def test_nt_venv_scheme(self): incpath = 'Include' libpath = os.path.join('Lib', 'site-packages') - # Resolve the paths in prefix - binpath = os.path.join(sys.prefix, binpath) - incpath = os.path.join(sys.prefix, incpath) - libpath = os.path.join(sys.prefix, libpath) + # Resolve the paths in an imaginary venv\ directory + venv = 'venv' + binpath = os.path.join(venv, binpath) + incpath = os.path.join(venv, incpath) + libpath = os.path.join(venv, libpath) + + # Mimic the venv module, set all bases to the venv directory + bases = ('base', 'platbase', 'installed_base', 'installed_platbase') + vars = {base: 'venv' for base in bases} - self.assertEqual(binpath, sysconfig.get_path('scripts', scheme='nt_venv')) - self.assertEqual(incpath, sysconfig.get_path('include', scheme='nt_venv')) - self.assertEqual(libpath, sysconfig.get_path('purelib', scheme='nt_venv')) + self.assertEqual(binpath, sysconfig.get_path('scripts', scheme='nt_venv', vars=vars)) + self.assertEqual(incpath, sysconfig.get_path('include', scheme='nt_venv', vars=vars)) + self.assertEqual(libpath, sysconfig.get_path('purelib', scheme='nt_venv', vars=vars)) def test_venv_scheme(self): if sys.platform == 'win32': diff --git 
a/Lib/test/test_zoneinfo/test_zoneinfo.py b/Lib/test/test_zoneinfo/test_zoneinfo.py index 18eab5b33540c9..8414721555731e 100644 --- a/Lib/test/test_zoneinfo/test_zoneinfo.py +++ b/Lib/test/test_zoneinfo/test_zoneinfo.py @@ -20,7 +20,7 @@ from test.support import MISSING_C_DOCSTRINGS from test.test_zoneinfo import _support as test_support from test.test_zoneinfo._support import OS_ENV_LOCK, TZPATH_TEST_LOCK, ZoneInfoTestBase -from test.support.import_helper import import_module +from test.support.import_helper import import_module, CleanImport lzma = import_module('lzma') py_zoneinfo, c_zoneinfo = test_support.get_modules() @@ -1720,13 +1720,26 @@ def test_env_variable_relative_paths(self): with self.subTest("warning", path_var=path_var): # Note: Per PEP 615 the warning is implementation-defined # behavior, other implementations need not warn. - with self.assertWarns(self.module.InvalidTZPathWarning): + with self.assertWarns(self.module.InvalidTZPathWarning) as w: self.module.reset_tzpath() + self.assertEqual(w.warnings[0].filename, __file__) tzpath = self.module.TZPATH with self.subTest("filtered", path_var=path_var): self.assertSequenceEqual(tzpath, expected_paths) + def test_env_variable_relative_paths_warning_location(self): + path_var = "path/to/somewhere" + + with self.python_tzpath_context(path_var): + with CleanImport("zoneinfo", "zoneinfo._tzpath"): + with self.assertWarns(RuntimeWarning) as w: + import zoneinfo + InvalidTZPathWarning = zoneinfo.InvalidTZPathWarning + self.assertIsInstance(w.warnings[0].message, InvalidTZPathWarning) + # It should represent the current file: + self.assertEqual(w.warnings[0].filename, __file__) + def test_reset_tzpath_kwarg(self): self.module.reset_tzpath(to=[f"{DRIVE}/a/b/c"]) diff --git a/Lib/threading.py b/Lib/threading.py index 75a08e5aac97d6..b6ff00acadd58f 100644 --- a/Lib/threading.py +++ b/Lib/threading.py @@ -949,7 +949,6 @@ def _after_fork(self, new_ident=None): # This thread is alive. self._ident = new_ident if self._handle is not None: - self._handle.after_fork_alive() assert self._handle.ident == new_ident # bpo-42350: If the fork happens when the thread is already stopped # (ex: after threading._shutdown() has been called), _tstate_lock @@ -965,9 +964,7 @@ def _after_fork(self, new_ident=None): self._is_stopped = True self._tstate_lock = None self._join_lock = None - if self._handle is not None: - self._handle.after_fork_dead() - self._handle = None + self._handle = None def __repr__(self): assert self._initialized, "Thread.__init__() was not called" diff --git a/Lib/zoneinfo/_tzpath.py b/Lib/zoneinfo/_tzpath.py index 4985dce2dc36d0..5db17bea045d8c 100644 --- a/Lib/zoneinfo/_tzpath.py +++ b/Lib/zoneinfo/_tzpath.py @@ -2,7 +2,7 @@ import sysconfig -def reset_tzpath(to=None): +def _reset_tzpath(to=None, stacklevel=4): global TZPATH tzpaths = to @@ -18,17 +18,22 @@ def reset_tzpath(to=None): base_tzpath = tzpaths else: env_var = os.environ.get("PYTHONTZPATH", None) - if env_var is not None: - base_tzpath = _parse_python_tzpath(env_var) - else: - base_tzpath = _parse_python_tzpath( - sysconfig.get_config_var("TZPATH") - ) + if env_var is None: + env_var = sysconfig.get_config_var("TZPATH") + base_tzpath = _parse_python_tzpath(env_var, stacklevel) TZPATH = tuple(base_tzpath) -def _parse_python_tzpath(env_var): +def reset_tzpath(to=None): + """Reset global TZPATH.""" + # We need `_reset_tzpath` helper function because it produces a warning, + # it is used as both a module-level call and a public API. 
+ # This is how we equalize the stacklevel for both calls. + _reset_tzpath(to) + + +def _parse_python_tzpath(env_var, stacklevel): if not env_var: return () @@ -45,6 +50,7 @@ def _parse_python_tzpath(env_var): "Invalid paths specified in PYTHONTZPATH environment variable. " + msg, InvalidTZPathWarning, + stacklevel=stacklevel, ) return new_tzpath @@ -172,4 +178,4 @@ class InvalidTZPathWarning(RuntimeWarning): TZPATH = () -reset_tzpath() +_reset_tzpath(stacklevel=5) diff --git a/Mac/BuildScript/build-installer.py b/Mac/BuildScript/build-installer.py index 32de56bcf13086..0af90563cbbb2b 100755 --- a/Mac/BuildScript/build-installer.py +++ b/Mac/BuildScript/build-installer.py @@ -246,9 +246,9 @@ def library_recipes(): result.extend([ dict( - name="OpenSSL 3.0.11", - url="https://www.openssl.org/source/openssl-3.0.11.tar.gz", - checksum='b3425d3bb4a2218d0697eb41f7fc0cdede016ed19ca49d168b78e8d947887f55', + name="OpenSSL 3.0.13", + url="https://www.openssl.org/source/openssl-3.0.13.tar.gz", + checksum='88525753f79d3bec27d2fa7c66aa0b92b3aa9498dafd93d7cfa4b3780cdae313', buildrecipe=build_universal_openssl, configure=None, install=None, @@ -359,9 +359,9 @@ def library_recipes(): ), ), dict( - name="SQLite 3.44.2", - url="https://sqlite.org/2023/sqlite-autoconf-3440200.tar.gz", - checksum="c02f40fd4f809ced95096250adc5764a", + name="SQLite 3.45.1", + url="https://sqlite.org/2024/sqlite-autoconf-3450100.tar.gz", + checksum="cd9c27841b7a5932c9897651e20b86c701dd740556989b01ca596fcfa3d49a0a", extra_cflags=('-Os ' '-DSQLITE_ENABLE_FTS5 ' '-DSQLITE_ENABLE_FTS4 ' diff --git a/Makefile.pre.in b/Makefile.pre.in index aad637876ead80..07b2ec7adde78a 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -936,6 +936,261 @@ python.html: $(srcdir)/Tools/wasm/python.html python.worker.js python.worker.js: $(srcdir)/Tools/wasm/python.worker.js @cp $(srcdir)/Tools/wasm/python.worker.js $@ +############################################################################ +# Header files + +PYTHON_HEADERS= \ + $(srcdir)/Include/Python.h \ + $(srcdir)/Include/abstract.h \ + $(srcdir)/Include/bltinmodule.h \ + $(srcdir)/Include/boolobject.h \ + $(srcdir)/Include/bytearrayobject.h \ + $(srcdir)/Include/bytesobject.h \ + $(srcdir)/Include/ceval.h \ + $(srcdir)/Include/codecs.h \ + $(srcdir)/Include/compile.h \ + $(srcdir)/Include/complexobject.h \ + $(srcdir)/Include/descrobject.h \ + $(srcdir)/Include/dictobject.h \ + $(srcdir)/Include/dynamic_annotations.h \ + $(srcdir)/Include/enumobject.h \ + $(srcdir)/Include/errcode.h \ + $(srcdir)/Include/exports.h \ + $(srcdir)/Include/fileobject.h \ + $(srcdir)/Include/fileutils.h \ + $(srcdir)/Include/floatobject.h \ + $(srcdir)/Include/frameobject.h \ + $(srcdir)/Include/genericaliasobject.h \ + $(srcdir)/Include/import.h \ + $(srcdir)/Include/interpreteridobject.h \ + $(srcdir)/Include/intrcheck.h \ + $(srcdir)/Include/iterobject.h \ + $(srcdir)/Include/listobject.h \ + $(srcdir)/Include/longobject.h \ + $(srcdir)/Include/marshal.h \ + $(srcdir)/Include/memoryobject.h \ + $(srcdir)/Include/methodobject.h \ + $(srcdir)/Include/modsupport.h \ + $(srcdir)/Include/moduleobject.h \ + $(srcdir)/Include/object.h \ + $(srcdir)/Include/objimpl.h \ + $(srcdir)/Include/opcode.h \ + $(srcdir)/Include/opcode_ids.h \ + $(srcdir)/Include/osdefs.h \ + $(srcdir)/Include/osmodule.h \ + $(srcdir)/Include/patchlevel.h \ + $(srcdir)/Include/pyatomic.h \ + $(srcdir)/Include/pybuffer.h \ + $(srcdir)/Include/pycapsule.h \ + $(srcdir)/Include/pydtrace.h \ + $(srcdir)/Include/pyerrors.h \ + 
$(srcdir)/Include/pyexpat.h \ + $(srcdir)/Include/pyframe.h \ + $(srcdir)/Include/pyhash.h \ + $(srcdir)/Include/pylifecycle.h \ + $(srcdir)/Include/pymacconfig.h \ + $(srcdir)/Include/pymacro.h \ + $(srcdir)/Include/pymath.h \ + $(srcdir)/Include/pymem.h \ + $(srcdir)/Include/pyport.h \ + $(srcdir)/Include/pystate.h \ + $(srcdir)/Include/pystats.h \ + $(srcdir)/Include/pystrcmp.h \ + $(srcdir)/Include/pystrtod.h \ + $(srcdir)/Include/pythonrun.h \ + $(srcdir)/Include/pythread.h \ + $(srcdir)/Include/pytypedefs.h \ + $(srcdir)/Include/rangeobject.h \ + $(srcdir)/Include/setobject.h \ + $(srcdir)/Include/sliceobject.h \ + $(srcdir)/Include/structmember.h \ + $(srcdir)/Include/structseq.h \ + $(srcdir)/Include/sysmodule.h \ + $(srcdir)/Include/traceback.h \ + $(srcdir)/Include/tupleobject.h \ + $(srcdir)/Include/typeslots.h \ + $(srcdir)/Include/unicodeobject.h \ + $(srcdir)/Include/warnings.h \ + $(srcdir)/Include/weakrefobject.h \ + \ + pyconfig.h \ + $(PARSER_HEADERS) \ + \ + $(srcdir)/Include/cpython/abstract.h \ + $(srcdir)/Include/cpython/bytearrayobject.h \ + $(srcdir)/Include/cpython/bytesobject.h \ + $(srcdir)/Include/cpython/cellobject.h \ + $(srcdir)/Include/cpython/ceval.h \ + $(srcdir)/Include/cpython/classobject.h \ + $(srcdir)/Include/cpython/code.h \ + $(srcdir)/Include/cpython/compile.h \ + $(srcdir)/Include/cpython/complexobject.h \ + $(srcdir)/Include/cpython/context.h \ + $(srcdir)/Include/cpython/descrobject.h \ + $(srcdir)/Include/cpython/dictobject.h \ + $(srcdir)/Include/cpython/fileobject.h \ + $(srcdir)/Include/cpython/fileutils.h \ + $(srcdir)/Include/cpython/floatobject.h \ + $(srcdir)/Include/cpython/frameobject.h \ + $(srcdir)/Include/cpython/funcobject.h \ + $(srcdir)/Include/cpython/genobject.h \ + $(srcdir)/Include/cpython/import.h \ + $(srcdir)/Include/cpython/initconfig.h \ + $(srcdir)/Include/cpython/interpreteridobject.h \ + $(srcdir)/Include/cpython/listobject.h \ + $(srcdir)/Include/cpython/longintrepr.h \ + $(srcdir)/Include/cpython/longobject.h \ + $(srcdir)/Include/cpython/memoryobject.h \ + $(srcdir)/Include/cpython/methodobject.h \ + $(srcdir)/Include/cpython/object.h \ + $(srcdir)/Include/cpython/objimpl.h \ + $(srcdir)/Include/cpython/odictobject.h \ + $(srcdir)/Include/cpython/optimizer.h \ + $(srcdir)/Include/cpython/picklebufobject.h \ + $(srcdir)/Include/cpython/pthread_stubs.h \ + $(srcdir)/Include/cpython/pyatomic.h \ + $(srcdir)/Include/cpython/pyatomic_gcc.h \ + $(srcdir)/Include/cpython/pyatomic_std.h \ + $(srcdir)/Include/cpython/pyctype.h \ + $(srcdir)/Include/cpython/pydebug.h \ + $(srcdir)/Include/cpython/pyerrors.h \ + $(srcdir)/Include/cpython/pyfpe.h \ + $(srcdir)/Include/cpython/pyframe.h \ + $(srcdir)/Include/cpython/pyhash.h \ + $(srcdir)/Include/cpython/pylifecycle.h \ + $(srcdir)/Include/cpython/pymem.h \ + $(srcdir)/Include/cpython/pystate.h \ + $(srcdir)/Include/cpython/pystats.h \ + $(srcdir)/Include/cpython/pythonrun.h \ + $(srcdir)/Include/cpython/pythread.h \ + $(srcdir)/Include/cpython/setobject.h \ + $(srcdir)/Include/cpython/sysmodule.h \ + $(srcdir)/Include/cpython/traceback.h \ + $(srcdir)/Include/cpython/tracemalloc.h \ + $(srcdir)/Include/cpython/tupleobject.h \ + $(srcdir)/Include/cpython/unicodeobject.h \ + $(srcdir)/Include/cpython/warnings.h \ + $(srcdir)/Include/cpython/weakrefobject.h \ + \ + $(MIMALLOC_HEADERS) \ + \ + $(srcdir)/Include/internal/pycore_abstract.h \ + $(srcdir)/Include/internal/pycore_asdl.h \ + $(srcdir)/Include/internal/pycore_ast.h \ + $(srcdir)/Include/internal/pycore_ast_state.h \ + 
$(srcdir)/Include/internal/pycore_atexit.h \ + $(srcdir)/Include/internal/pycore_bitutils.h \ + $(srcdir)/Include/internal/pycore_blocks_output_buffer.h \ + $(srcdir)/Include/internal/pycore_bytes_methods.h \ + $(srcdir)/Include/internal/pycore_bytesobject.h \ + $(srcdir)/Include/internal/pycore_call.h \ + $(srcdir)/Include/internal/pycore_capsule.h \ + $(srcdir)/Include/internal/pycore_ceval.h \ + $(srcdir)/Include/internal/pycore_ceval_state.h \ + $(srcdir)/Include/internal/pycore_code.h \ + $(srcdir)/Include/internal/pycore_codecs.h \ + $(srcdir)/Include/internal/pycore_compile.h \ + $(srcdir)/Include/internal/pycore_complexobject.h \ + $(srcdir)/Include/internal/pycore_condvar.h \ + $(srcdir)/Include/internal/pycore_context.h \ + $(srcdir)/Include/internal/pycore_critical_section.h \ + $(srcdir)/Include/internal/pycore_crossinterp.h \ + $(srcdir)/Include/internal/pycore_descrobject.h \ + $(srcdir)/Include/internal/pycore_dict.h \ + $(srcdir)/Include/internal/pycore_dict_state.h \ + $(srcdir)/Include/internal/pycore_dtoa.h \ + $(srcdir)/Include/internal/pycore_exceptions.h \ + $(srcdir)/Include/internal/pycore_faulthandler.h \ + $(srcdir)/Include/internal/pycore_fileutils.h \ + $(srcdir)/Include/internal/pycore_floatobject.h \ + $(srcdir)/Include/internal/pycore_flowgraph.h \ + $(srcdir)/Include/internal/pycore_format.h \ + $(srcdir)/Include/internal/pycore_frame.h \ + $(srcdir)/Include/internal/pycore_freelist.h \ + $(srcdir)/Include/internal/pycore_function.h \ + $(srcdir)/Include/internal/pycore_gc.h \ + $(srcdir)/Include/internal/pycore_genobject.h \ + $(srcdir)/Include/internal/pycore_getopt.h \ + $(srcdir)/Include/internal/pycore_gil.h \ + $(srcdir)/Include/internal/pycore_global_objects.h \ + $(srcdir)/Include/internal/pycore_global_objects_fini_generated.h \ + $(srcdir)/Include/internal/pycore_global_strings.h \ + $(srcdir)/Include/internal/pycore_hamt.h \ + $(srcdir)/Include/internal/pycore_hashtable.h \ + $(srcdir)/Include/internal/pycore_identifier.h \ + $(srcdir)/Include/internal/pycore_import.h \ + $(srcdir)/Include/internal/pycore_importdl.h \ + $(srcdir)/Include/internal/pycore_initconfig.h \ + $(srcdir)/Include/internal/pycore_instruments.h \ + $(srcdir)/Include/internal/pycore_interp.h \ + $(srcdir)/Include/internal/pycore_intrinsics.h \ + $(srcdir)/Include/internal/pycore_jit.h \ + $(srcdir)/Include/internal/pycore_list.h \ + $(srcdir)/Include/internal/pycore_llist.h \ + $(srcdir)/Include/internal/pycore_lock.h \ + $(srcdir)/Include/internal/pycore_long.h \ + $(srcdir)/Include/internal/pycore_memoryobject.h \ + $(srcdir)/Include/internal/pycore_mimalloc.h \ + $(srcdir)/Include/internal/pycore_modsupport.h \ + $(srcdir)/Include/internal/pycore_moduleobject.h \ + $(srcdir)/Include/internal/pycore_namespace.h \ + $(srcdir)/Include/internal/pycore_object.h \ + $(srcdir)/Include/internal/pycore_object_alloc.h \ + $(srcdir)/Include/internal/pycore_object_stack.h \ + $(srcdir)/Include/internal/pycore_object_state.h \ + $(srcdir)/Include/internal/pycore_obmalloc.h \ + $(srcdir)/Include/internal/pycore_obmalloc_init.h \ + $(srcdir)/Include/internal/pycore_opcode_metadata.h \ + $(srcdir)/Include/internal/pycore_opcode_utils.h \ + $(srcdir)/Include/internal/pycore_optimizer.h \ + $(srcdir)/Include/internal/pycore_parking_lot.h \ + $(srcdir)/Include/internal/pycore_parser.h \ + $(srcdir)/Include/internal/pycore_pathconfig.h \ + $(srcdir)/Include/internal/pycore_pyarena.h \ + $(srcdir)/Include/internal/pycore_pybuffer.h \ + $(srcdir)/Include/internal/pycore_pyerrors.h \ + 
$(srcdir)/Include/internal/pycore_pyhash.h \ + $(srcdir)/Include/internal/pycore_pylifecycle.h \ + $(srcdir)/Include/internal/pycore_pymath.h \ + $(srcdir)/Include/internal/pycore_pymem.h \ + $(srcdir)/Include/internal/pycore_pymem_init.h \ + $(srcdir)/Include/internal/pycore_pystate.h \ + $(srcdir)/Include/internal/pycore_pystats.h \ + $(srcdir)/Include/internal/pycore_pythonrun.h \ + $(srcdir)/Include/internal/pycore_pythread.h \ + $(srcdir)/Include/internal/pycore_range.h \ + $(srcdir)/Include/internal/pycore_runtime.h \ + $(srcdir)/Include/internal/pycore_runtime_init.h \ + $(srcdir)/Include/internal/pycore_runtime_init_generated.h \ + $(srcdir)/Include/internal/pycore_semaphore.h \ + $(srcdir)/Include/internal/pycore_setobject.h \ + $(srcdir)/Include/internal/pycore_signal.h \ + $(srcdir)/Include/internal/pycore_sliceobject.h \ + $(srcdir)/Include/internal/pycore_strhex.h \ + $(srcdir)/Include/internal/pycore_structseq.h \ + $(srcdir)/Include/internal/pycore_symtable.h \ + $(srcdir)/Include/internal/pycore_sysmodule.h \ + $(srcdir)/Include/internal/pycore_time.h \ + $(srcdir)/Include/internal/pycore_token.h \ + $(srcdir)/Include/internal/pycore_traceback.h \ + $(srcdir)/Include/internal/pycore_tracemalloc.h \ + $(srcdir)/Include/internal/pycore_tstate.h \ + $(srcdir)/Include/internal/pycore_tuple.h \ + $(srcdir)/Include/internal/pycore_typeobject.h \ + $(srcdir)/Include/internal/pycore_typevarobject.h \ + $(srcdir)/Include/internal/pycore_ucnhash.h \ + $(srcdir)/Include/internal/pycore_unicodeobject.h \ + $(srcdir)/Include/internal/pycore_unicodeobject_generated.h \ + $(srcdir)/Include/internal/pycore_unionobject.h \ + $(srcdir)/Include/internal/pycore_uop_ids.h \ + $(srcdir)/Include/internal/pycore_uop_metadata.h \ + $(srcdir)/Include/internal/pycore_warnings.h \ + $(srcdir)/Include/internal/pycore_weakref.h \ + $(DTRACE_HEADERS) \ + @PLATFORM_HEADERS@ \ + \ + $(srcdir)/Python/stdlib_module_names.h + ########################################################################## # Build static libmpdec.a LIBMPDEC_CFLAGS=@LIBMPDEC_CFLAGS@ $(PY_STDMODULE_CFLAGS) $(CCSHARED) @@ -1400,7 +1655,7 @@ Modules/getpath.o: $(srcdir)/Modules/getpath.c Python/frozen_modules/getpath.h M Programs/python.o: $(srcdir)/Programs/python.c $(CC) -c $(PY_CORE_CFLAGS) -o $@ $(srcdir)/Programs/python.c -Programs/_testembed.o: $(srcdir)/Programs/_testembed.c Programs/test_frozenmain.h +Programs/_testembed.o: $(srcdir)/Programs/_testembed.c Programs/test_frozenmain.h $(PYTHON_HEADERS) $(CC) -c $(PY_CORE_CFLAGS) -o $@ $(srcdir)/Programs/_testembed.c Modules/_sre/sre.o: $(srcdir)/Modules/_sre/sre.c $(srcdir)/Modules/_sre/sre.h $(srcdir)/Modules/_sre/sre_constants.h $(srcdir)/Modules/_sre/sre_lib.h @@ -1669,246 +1924,6 @@ regen-typeslots: $(srcdir)/Objects/typeslots.inc.new $(UPDATE_FILE) $(srcdir)/Objects/typeslots.inc $(srcdir)/Objects/typeslots.inc.new -############################################################################ -# Header files - -PYTHON_HEADERS= \ - $(srcdir)/Include/Python.h \ - $(srcdir)/Include/abstract.h \ - $(srcdir)/Include/bltinmodule.h \ - $(srcdir)/Include/boolobject.h \ - $(srcdir)/Include/bytearrayobject.h \ - $(srcdir)/Include/bytesobject.h \ - $(srcdir)/Include/ceval.h \ - $(srcdir)/Include/codecs.h \ - $(srcdir)/Include/compile.h \ - $(srcdir)/Include/complexobject.h \ - $(srcdir)/Include/descrobject.h \ - $(srcdir)/Include/dictobject.h \ - $(srcdir)/Include/dynamic_annotations.h \ - $(srcdir)/Include/enumobject.h \ - $(srcdir)/Include/errcode.h \ - $(srcdir)/Include/fileobject.h \ 
- $(srcdir)/Include/fileutils.h \ - $(srcdir)/Include/floatobject.h \ - $(srcdir)/Include/frameobject.h \ - $(srcdir)/Include/import.h \ - $(srcdir)/Include/interpreteridobject.h \ - $(srcdir)/Include/intrcheck.h \ - $(srcdir)/Include/iterobject.h \ - $(srcdir)/Include/listobject.h \ - $(srcdir)/Include/longobject.h \ - $(srcdir)/Include/marshal.h \ - $(srcdir)/Include/memoryobject.h \ - $(srcdir)/Include/methodobject.h \ - $(srcdir)/Include/modsupport.h \ - $(srcdir)/Include/moduleobject.h \ - $(srcdir)/Include/object.h \ - $(srcdir)/Include/objimpl.h \ - $(srcdir)/Include/opcode.h \ - $(srcdir)/Include/opcode_ids.h \ - $(srcdir)/Include/osdefs.h \ - $(srcdir)/Include/osmodule.h \ - $(srcdir)/Include/patchlevel.h \ - $(srcdir)/Include/pybuffer.h \ - $(srcdir)/Include/pycapsule.h \ - $(srcdir)/Include/pydtrace.h \ - $(srcdir)/Include/pyerrors.h \ - $(srcdir)/Include/pyframe.h \ - $(srcdir)/Include/pyhash.h \ - $(srcdir)/Include/pylifecycle.h \ - $(srcdir)/Include/pymacconfig.h \ - $(srcdir)/Include/pymacro.h \ - $(srcdir)/Include/pymath.h \ - $(srcdir)/Include/pymem.h \ - $(srcdir)/Include/pyport.h \ - $(srcdir)/Include/pystate.h \ - $(srcdir)/Include/pystats.h \ - $(srcdir)/Include/pystrcmp.h \ - $(srcdir)/Include/pystrtod.h \ - $(srcdir)/Include/pythonrun.h \ - $(srcdir)/Include/pythread.h \ - $(srcdir)/Include/pytypedefs.h \ - $(srcdir)/Include/rangeobject.h \ - $(srcdir)/Include/setobject.h \ - $(srcdir)/Include/sliceobject.h \ - $(srcdir)/Include/structmember.h \ - $(srcdir)/Include/structseq.h \ - $(srcdir)/Include/sysmodule.h \ - $(srcdir)/Include/traceback.h \ - $(srcdir)/Include/tupleobject.h \ - $(srcdir)/Include/unicodeobject.h \ - $(srcdir)/Include/warnings.h \ - $(srcdir)/Include/weakrefobject.h \ - \ - pyconfig.h \ - $(PARSER_HEADERS) \ - \ - $(srcdir)/Include/cpython/abstract.h \ - $(srcdir)/Include/cpython/bytearrayobject.h \ - $(srcdir)/Include/cpython/bytesobject.h \ - $(srcdir)/Include/cpython/cellobject.h \ - $(srcdir)/Include/cpython/ceval.h \ - $(srcdir)/Include/cpython/classobject.h \ - $(srcdir)/Include/cpython/code.h \ - $(srcdir)/Include/cpython/compile.h \ - $(srcdir)/Include/cpython/complexobject.h \ - $(srcdir)/Include/cpython/context.h \ - $(srcdir)/Include/cpython/descrobject.h \ - $(srcdir)/Include/cpython/dictobject.h \ - $(srcdir)/Include/cpython/fileobject.h \ - $(srcdir)/Include/cpython/fileutils.h \ - $(srcdir)/Include/cpython/floatobject.h \ - $(srcdir)/Include/cpython/frameobject.h \ - $(srcdir)/Include/cpython/funcobject.h \ - $(srcdir)/Include/cpython/genobject.h \ - $(srcdir)/Include/cpython/import.h \ - $(srcdir)/Include/cpython/initconfig.h \ - $(srcdir)/Include/cpython/interpreteridobject.h \ - $(srcdir)/Include/cpython/listobject.h \ - $(srcdir)/Include/cpython/longintrepr.h \ - $(srcdir)/Include/cpython/longobject.h \ - $(srcdir)/Include/cpython/memoryobject.h \ - $(srcdir)/Include/cpython/methodobject.h \ - $(srcdir)/Include/cpython/object.h \ - $(srcdir)/Include/cpython/objimpl.h \ - $(srcdir)/Include/cpython/odictobject.h \ - $(srcdir)/Include/cpython/optimizer.h \ - $(srcdir)/Include/cpython/picklebufobject.h \ - $(srcdir)/Include/cpython/pthread_stubs.h \ - $(srcdir)/Include/cpython/pyatomic.h \ - $(srcdir)/Include/cpython/pyatomic_gcc.h \ - $(srcdir)/Include/cpython/pyatomic_std.h \ - $(srcdir)/Include/cpython/pyctype.h \ - $(srcdir)/Include/cpython/pydebug.h \ - $(srcdir)/Include/cpython/pyerrors.h \ - $(srcdir)/Include/cpython/pyfpe.h \ - $(srcdir)/Include/cpython/pyframe.h \ - $(srcdir)/Include/cpython/pyhash.h \ - 
$(srcdir)/Include/cpython/pylifecycle.h \ - $(srcdir)/Include/cpython/pymem.h \ - $(srcdir)/Include/cpython/pystate.h \ - $(srcdir)/Include/cpython/pystats.h \ - $(srcdir)/Include/cpython/pythonrun.h \ - $(srcdir)/Include/cpython/pythread.h \ - $(srcdir)/Include/cpython/setobject.h \ - $(srcdir)/Include/cpython/sysmodule.h \ - $(srcdir)/Include/cpython/traceback.h \ - $(srcdir)/Include/cpython/tracemalloc.h \ - $(srcdir)/Include/cpython/tupleobject.h \ - $(srcdir)/Include/cpython/unicodeobject.h \ - $(srcdir)/Include/cpython/warnings.h \ - $(srcdir)/Include/cpython/weakrefobject.h \ - \ - $(MIMALLOC_HEADERS) \ - \ - $(srcdir)/Include/internal/pycore_abstract.h \ - $(srcdir)/Include/internal/pycore_asdl.h \ - $(srcdir)/Include/internal/pycore_ast.h \ - $(srcdir)/Include/internal/pycore_ast_state.h \ - $(srcdir)/Include/internal/pycore_atexit.h \ - $(srcdir)/Include/internal/pycore_bitutils.h \ - $(srcdir)/Include/internal/pycore_bytes_methods.h \ - $(srcdir)/Include/internal/pycore_bytesobject.h \ - $(srcdir)/Include/internal/pycore_call.h \ - $(srcdir)/Include/internal/pycore_capsule.h \ - $(srcdir)/Include/internal/pycore_ceval.h \ - $(srcdir)/Include/internal/pycore_ceval_state.h \ - $(srcdir)/Include/internal/pycore_code.h \ - $(srcdir)/Include/internal/pycore_codecs.h \ - $(srcdir)/Include/internal/pycore_compile.h \ - $(srcdir)/Include/internal/pycore_complexobject.h \ - $(srcdir)/Include/internal/pycore_condvar.h \ - $(srcdir)/Include/internal/pycore_context.h \ - $(srcdir)/Include/internal/pycore_critical_section.h \ - $(srcdir)/Include/internal/pycore_crossinterp.h \ - $(srcdir)/Include/internal/pycore_dict.h \ - $(srcdir)/Include/internal/pycore_dict_state.h \ - $(srcdir)/Include/internal/pycore_descrobject.h \ - $(srcdir)/Include/internal/pycore_dtoa.h \ - $(srcdir)/Include/internal/pycore_exceptions.h \ - $(srcdir)/Include/internal/pycore_faulthandler.h \ - $(srcdir)/Include/internal/pycore_fileutils.h \ - $(srcdir)/Include/internal/pycore_floatobject.h \ - $(srcdir)/Include/internal/pycore_format.h \ - $(srcdir)/Include/internal/pycore_frame.h \ - $(srcdir)/Include/internal/pycore_freelist.h \ - $(srcdir)/Include/internal/pycore_function.h \ - $(srcdir)/Include/internal/pycore_gc.h \ - $(srcdir)/Include/internal/pycore_genobject.h \ - $(srcdir)/Include/internal/pycore_getopt.h \ - $(srcdir)/Include/internal/pycore_gil.h \ - $(srcdir)/Include/internal/pycore_global_objects.h \ - $(srcdir)/Include/internal/pycore_global_objects_fini_generated.h \ - $(srcdir)/Include/internal/pycore_hamt.h \ - $(srcdir)/Include/internal/pycore_hashtable.h \ - $(srcdir)/Include/internal/pycore_identifier.h \ - $(srcdir)/Include/internal/pycore_import.h \ - $(srcdir)/Include/internal/pycore_initconfig.h \ - $(srcdir)/Include/internal/pycore_interp.h \ - $(srcdir)/Include/internal/pycore_intrinsics.h \ - $(srcdir)/Include/internal/pycore_jit.h \ - $(srcdir)/Include/internal/pycore_list.h \ - $(srcdir)/Include/internal/pycore_llist.h \ - $(srcdir)/Include/internal/pycore_lock.h \ - $(srcdir)/Include/internal/pycore_long.h \ - $(srcdir)/Include/internal/pycore_modsupport.h \ - $(srcdir)/Include/internal/pycore_moduleobject.h \ - $(srcdir)/Include/internal/pycore_namespace.h \ - $(srcdir)/Include/internal/pycore_object.h \ - $(srcdir)/Include/internal/pycore_object_alloc.h \ - $(srcdir)/Include/internal/pycore_object_stack.h \ - $(srcdir)/Include/internal/pycore_object_state.h \ - $(srcdir)/Include/internal/pycore_obmalloc.h \ - $(srcdir)/Include/internal/pycore_obmalloc_init.h \ - 
$(srcdir)/Include/internal/pycore_opcode_metadata.h \ - $(srcdir)/Include/internal/pycore_opcode_utils.h \ - $(srcdir)/Include/internal/pycore_optimizer.h \ - $(srcdir)/Include/internal/pycore_parking_lot.h \ - $(srcdir)/Include/internal/pycore_pathconfig.h \ - $(srcdir)/Include/internal/pycore_pyarena.h \ - $(srcdir)/Include/internal/pycore_pybuffer.h \ - $(srcdir)/Include/internal/pycore_pyerrors.h \ - $(srcdir)/Include/internal/pycore_pyhash.h \ - $(srcdir)/Include/internal/pycore_pylifecycle.h \ - $(srcdir)/Include/internal/pycore_pymem.h \ - $(srcdir)/Include/internal/pycore_pymem_init.h \ - $(srcdir)/Include/internal/pycore_pystate.h \ - $(srcdir)/Include/internal/pycore_pystats.h \ - $(srcdir)/Include/internal/pycore_pythonrun.h \ - $(srcdir)/Include/internal/pycore_pythread.h \ - $(srcdir)/Include/internal/pycore_range.h \ - $(srcdir)/Include/internal/pycore_runtime.h \ - $(srcdir)/Include/internal/pycore_runtime_init_generated.h \ - $(srcdir)/Include/internal/pycore_runtime_init.h \ - $(srcdir)/Include/internal/pycore_semaphore.h \ - $(srcdir)/Include/internal/pycore_setobject.h \ - $(srcdir)/Include/internal/pycore_signal.h \ - $(srcdir)/Include/internal/pycore_sliceobject.h \ - $(srcdir)/Include/internal/pycore_strhex.h \ - $(srcdir)/Include/internal/pycore_structseq.h \ - $(srcdir)/Include/internal/pycore_symtable.h \ - $(srcdir)/Include/internal/pycore_sysmodule.h \ - $(srcdir)/Include/internal/pycore_time.h \ - $(srcdir)/Include/internal/pycore_token.h \ - $(srcdir)/Include/internal/pycore_traceback.h \ - $(srcdir)/Include/internal/pycore_tracemalloc.h \ - $(srcdir)/Include/internal/pycore_tstate.h \ - $(srcdir)/Include/internal/pycore_tuple.h \ - $(srcdir)/Include/internal/pycore_typeobject.h \ - $(srcdir)/Include/internal/pycore_typevarobject.h \ - $(srcdir)/Include/internal/pycore_ucnhash.h \ - $(srcdir)/Include/internal/pycore_unionobject.h \ - $(srcdir)/Include/internal/pycore_unicodeobject.h \ - $(srcdir)/Include/internal/pycore_unicodeobject_generated.h \ - $(srcdir)/Include/internal/pycore_uop_metadata.h \ - $(srcdir)/Include/internal/pycore_warnings.h \ - $(srcdir)/Include/internal/pycore_weakref.h \ - $(DTRACE_HEADERS) \ - @PLATFORM_HEADERS@ \ - \ - $(srcdir)/Python/stdlib_module_names.h - $(LIBRARY_OBJS) $(MODOBJS) Programs/python.o: $(PYTHON_HEADERS) @@ -2877,6 +2892,9 @@ Python/thread.o: @THREADHEADERS@ $(srcdir)/Python/condvar.h MODULE_DEPS_STATIC=Modules/config.c MODULE_DEPS_SHARED=$(MODULE_DEPS_STATIC) $(EXPORTSYMS) +MODULE__CURSES_DEPS=$(srcdir)/Include/py_curses.h +MODULE__CURSES_PANEL_DEPS=$(srcdir)/Include/py_curses.h +MODULE__DATETIME_DEPS=$(srcdir)/Include/datetime.h MODULE_CMATH_DEPS=$(srcdir)/Modules/_math.h MODULE_MATH_DEPS=$(srcdir)/Modules/_math.h MODULE_PYEXPAT_DEPS=@LIBEXPAT_INTERNAL@ diff --git a/Misc/NEWS.d/next/C API/2023-11-15-13-47-48.gh-issue-112066.22WsqR.rst b/Misc/NEWS.d/next/C API/2023-11-15-13-47-48.gh-issue-112066.22WsqR.rst new file mode 100644 index 00000000000000..ae2b8b2444de97 --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-11-15-13-47-48.gh-issue-112066.22WsqR.rst @@ -0,0 +1,5 @@ +Add :c:func:`PyDict_SetDefaultRef`: insert a key and value into a dictionary +if the key is not already present. This is similar to +:meth:`dict.setdefault`, but returns an integer value indicating if the key +was already present. It is also similar to :c:func:`PyDict_SetDefault`, but +returns a strong reference instead of a borrowed reference. 
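As a quick illustration of the return-value contract described in the entry above, here is a hypothetical Python-level analogue (a sketch, not the C implementation): the pair it returns mirrors the C function's int result plus the strong reference written to ``*result``, and the ``-1`` error case corresponds to a raised exception in Python. The function name ``set_default_ref`` is made up for this sketch.

    # Hypothetical sketch of the PyDict_SetDefaultRef contract, for illustration only.
    def set_default_ref(mapping, key, default_value):
        if key in mapping:
            # 1: key already present, default_value is not inserted;
            # the C API stores a new strong reference to the existing value in *result.
            return 1, mapping[key]
        # 0: key was absent, default_value has been inserted.
        mapping[key] = default_value
        return 0, default_value

    d = {}
    assert set_default_ref(d, "a", 5) == (0, 5)   # inserted
    assert set_default_ref(d, "a", 8) == (1, 5)   # already present, 8 is ignored

The behaviour sketched here matches the new ``test_dict_setdefaultref`` cases added to ``Lib/test/test_capi/test_dict.py`` earlier in this patch.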
diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-01-07-04-22-51.gh-issue-108362.oB9Gcf.rst b/Misc/NEWS.d/next/Core and Builtins/2024-01-07-04-22-51.gh-issue-108362.oB9Gcf.rst deleted file mode 100644 index 1fe4e0f41e1295..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2024-01-07-04-22-51.gh-issue-108362.oB9Gcf.rst +++ /dev/null @@ -1,13 +0,0 @@ -Implements an incremental cyclic garbage collector. By collecting the old -generation in increments, there is no need for a full heap scan. This can -hugely reduce maximum pause time for programs with large heaps. - -Reduces the number of generations from three to two. The old generation is -split into two spaces, "aging" and "collecting". - -Collection happens in two steps:: * First, the young generation is scanned -and the survivors moved to the end of the aging space. * Then objects are -taken from the collecting space, at such a rate that all cycles are -collected eventually. Those objects are then scanned and the survivors -moved to the end of the aging space. When the collecting space becomes -empty, the two spaces are swapped. diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-02-07-07-50-12.gh-issue-114828.nSXwMi.rst b/Misc/NEWS.d/next/Core and Builtins/2024-02-07-07-50-12.gh-issue-114828.nSXwMi.rst new file mode 100644 index 00000000000000..b1c63e0a1518fd --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2024-02-07-07-50-12.gh-issue-114828.nSXwMi.rst @@ -0,0 +1,2 @@ +Fix compilation crashes in uncommon code examples using :func:`super` inside +a comprehension in a class body. diff --git a/Misc/NEWS.d/next/Library/2023-06-29-14-26-56.gh-issue-106233.Aqw2HI.rst b/Misc/NEWS.d/next/Library/2023-06-29-14-26-56.gh-issue-106233.Aqw2HI.rst new file mode 100644 index 00000000000000..345c8b20815c95 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-06-29-14-26-56.gh-issue-106233.Aqw2HI.rst @@ -0,0 +1,2 @@ +Fix stacklevel in ``InvalidTZPathWarning`` during :mod:`zoneinfo` module +import. diff --git a/Misc/NEWS.d/next/Library/2023-07-23-12-28-26.gh-issue-75705.aB2-Ww.rst b/Misc/NEWS.d/next/Library/2023-07-23-12-28-26.gh-issue-75705.aB2-Ww.rst new file mode 100644 index 00000000000000..272e31d64cfbd9 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-07-23-12-28-26.gh-issue-75705.aB2-Ww.rst @@ -0,0 +1 @@ +Set unixfrom envelope in :class:`mailbox.mbox` and :class:`mailbox.MMDF`. diff --git a/Misc/NEWS.d/next/Library/2024-01-24-20-51-49.gh-issue-91602.8fOH8l.rst b/Misc/NEWS.d/next/Library/2024-01-24-20-51-49.gh-issue-91602.8fOH8l.rst new file mode 100644 index 00000000000000..21d39df43e035b --- /dev/null +++ b/Misc/NEWS.d/next/Library/2024-01-24-20-51-49.gh-issue-91602.8fOH8l.rst @@ -0,0 +1,3 @@ +Add *filter* keyword-only parameter to +:meth:`sqlite3.Connection.iterdump` for filtering database objects to dump. +Patch by Mariusz Felisiak. diff --git a/Misc/NEWS.d/next/Tools-Demos/2024-02-05-19-00-32.gh-issue-109991.yJSEkw.rst b/Misc/NEWS.d/next/Tools-Demos/2024-02-05-19-00-32.gh-issue-109991.yJSEkw.rst new file mode 100644 index 00000000000000..4eb4d39629b9bc --- /dev/null +++ b/Misc/NEWS.d/next/Tools-Demos/2024-02-05-19-00-32.gh-issue-109991.yJSEkw.rst @@ -0,0 +1,2 @@ +Update GitHub CI workflows to use OpenSSL 3.0.13 and multissltests to use +1.1.1w, 3.0.13, 3.1.5, and 3.2.1. 
diff --git a/Misc/NEWS.d/next/Windows/2024-02-06-09-05-13.gh-issue-115009.ShMjZs.rst b/Misc/NEWS.d/next/Windows/2024-02-06-09-05-13.gh-issue-115009.ShMjZs.rst new file mode 100644 index 00000000000000..5bdb6963a24311 --- /dev/null +++ b/Misc/NEWS.d/next/Windows/2024-02-06-09-05-13.gh-issue-115009.ShMjZs.rst @@ -0,0 +1 @@ +Update Windows installer to use SQLite 3.45.1. diff --git a/Misc/NEWS.d/next/macOS/2024-02-05-18-30-27.gh-issue-109991.tun6Yu.rst b/Misc/NEWS.d/next/macOS/2024-02-05-18-30-27.gh-issue-109991.tun6Yu.rst new file mode 100644 index 00000000000000..79b45e7d51da3f --- /dev/null +++ b/Misc/NEWS.d/next/macOS/2024-02-05-18-30-27.gh-issue-109991.tun6Yu.rst @@ -0,0 +1 @@ +Update macOS installer to use OpenSSL 3.0.13. diff --git a/Misc/NEWS.d/next/macOS/2024-02-06-09-01-10.gh-issue-115009.ysau7e.rst b/Misc/NEWS.d/next/macOS/2024-02-06-09-01-10.gh-issue-115009.ysau7e.rst new file mode 100644 index 00000000000000..47ec488c3cced2 --- /dev/null +++ b/Misc/NEWS.d/next/macOS/2024-02-06-09-01-10.gh-issue-115009.ysau7e.rst @@ -0,0 +1 @@ +Update macOS installer to use SQLite 3.45.1. diff --git a/Misc/sbom.spdx.json b/Misc/sbom.spdx.json index e94dcb83dd4e40..d783d14255e66f 100644 --- a/Misc/sbom.spdx.json +++ b/Misc/sbom.spdx.json @@ -1601,7 +1601,7 @@ "referenceType": "cpe23Type" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "expat", "originator": "Organization: Expat development team", "primaryPackagePurpose": "SOURCE", @@ -1623,7 +1623,7 @@ "referenceType": "cpe23Type" } ], - "licenseConcluded": "Apache-2.0", + "licenseConcluded": "NOASSERTION", "name": "hacl-star", "originator": "Organization: HACL* Developers", "primaryPackagePurpose": "SOURCE", @@ -1645,7 +1645,7 @@ "referenceType": "cpe23Type" } ], - "licenseConcluded": "CC0-1.0", + "licenseConcluded": "NOASSERTION", "name": "libb2", "originator": "Organization: BLAKE2 - fast secure hashing", "primaryPackagePurpose": "SOURCE", @@ -1667,7 +1667,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "macholib", "originator": "Person: Ronald Oussoren (ronaldoussoren@mac.com)", "primaryPackagePurpose": "SOURCE", @@ -1689,7 +1689,7 @@ "referenceType": "cpe23Type" } ], - "licenseConcluded": "BSD-2-Clause", + "licenseConcluded": "NOASSERTION", "name": "mpdecimal", "originator": "Organization: bytereef.org", "primaryPackagePurpose": "SOURCE", @@ -1711,7 +1711,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "cachecontrol", "primaryPackagePurpose": "SOURCE", "versionInfo": "0.13.1" @@ -1732,7 +1732,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "colorama", "primaryPackagePurpose": "SOURCE", "versionInfo": "0.4.6" @@ -1753,7 +1753,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "distlib", "primaryPackagePurpose": "SOURCE", "versionInfo": "0.3.8" @@ -1774,7 +1774,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "distro", "primaryPackagePurpose": "SOURCE", "versionInfo": "1.8.0" @@ -1795,7 +1795,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "msgpack", "primaryPackagePurpose": "SOURCE", "versionInfo": "1.0.5" @@ -1816,7 +1816,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "packaging", 
"primaryPackagePurpose": "SOURCE", "versionInfo": "21.3" @@ -1837,7 +1837,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "platformdirs", "primaryPackagePurpose": "SOURCE", "versionInfo": "3.8.1" @@ -1858,7 +1858,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "pyparsing", "primaryPackagePurpose": "SOURCE", "versionInfo": "3.1.0" @@ -1879,7 +1879,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "pyproject-hooks", "primaryPackagePurpose": "SOURCE", "versionInfo": "1.0.0" @@ -1900,7 +1900,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "requests", "primaryPackagePurpose": "SOURCE", "versionInfo": "2.31.0" @@ -1921,7 +1921,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "certifi", "primaryPackagePurpose": "SOURCE", "versionInfo": "2023.7.22" @@ -1942,7 +1942,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "chardet", "primaryPackagePurpose": "SOURCE", "versionInfo": "5.1.0" @@ -1963,7 +1963,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "idna", "primaryPackagePurpose": "SOURCE", "versionInfo": "3.4" @@ -1984,7 +1984,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "rich", "primaryPackagePurpose": "SOURCE", "versionInfo": "13.4.2" @@ -2005,7 +2005,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "pygments", "primaryPackagePurpose": "SOURCE", "versionInfo": "2.15.1" @@ -2026,7 +2026,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "typing_extensions", "primaryPackagePurpose": "SOURCE", "versionInfo": "4.7.1" @@ -2047,7 +2047,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "resolvelib", "primaryPackagePurpose": "SOURCE", "versionInfo": "1.0.1" @@ -2068,7 +2068,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "setuptools", "primaryPackagePurpose": "SOURCE", "versionInfo": "68.0.0" @@ -2089,7 +2089,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "six", "primaryPackagePurpose": "SOURCE", "versionInfo": "1.16.0" @@ -2110,7 +2110,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "tenacity", "primaryPackagePurpose": "SOURCE", "versionInfo": "8.2.2" @@ -2131,7 +2131,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "tomli", "primaryPackagePurpose": "SOURCE", "versionInfo": "2.0.1" @@ -2152,7 +2152,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "truststore", "primaryPackagePurpose": "SOURCE", "versionInfo": "0.8.0" @@ -2173,7 +2173,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "webencodings", "primaryPackagePurpose": "SOURCE", "versionInfo": "0.5.1" @@ -2194,7 +2194,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "urllib3", "primaryPackagePurpose": 
"SOURCE", "versionInfo": "1.26.17" @@ -2220,7 +2220,7 @@ "referenceType": "purl" } ], - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "name": "pip", "originator": "Organization: Python Packaging Authority", "primaryPackagePurpose": "SOURCE", diff --git a/Modules/_json.c b/Modules/_json.c index 24b292ce70e5eb..c55299899e77fe 100644 --- a/Modules/_json.c +++ b/Modules/_json.c @@ -691,11 +691,10 @@ _parse_object_unicode(PyScannerObject *s, PyObject *memo, PyObject *pystr, Py_ss key = scanstring_unicode(pystr, idx + 1, s->strict, &next_idx); if (key == NULL) goto bail; - memokey = PyDict_SetDefault(memo, key, key); - if (memokey == NULL) { + if (PyDict_SetDefaultRef(memo, key, key, &memokey) < 0) { goto bail; } - Py_SETREF(key, Py_NewRef(memokey)); + Py_SETREF(key, memokey); idx = next_idx; /* skip whitespace between key and : delimiter, read :, skip whitespace */ diff --git a/Modules/_sqlite/clinic/connection.c.h b/Modules/_sqlite/clinic/connection.c.h index f2cff6a7b421f3..811314b5cd8aed 100644 --- a/Modules/_sqlite/clinic/connection.c.h +++ b/Modules/_sqlite/clinic/connection.c.h @@ -1204,21 +1204,67 @@ pysqlite_connection_interrupt(pysqlite_Connection *self, PyObject *Py_UNUSED(ign } PyDoc_STRVAR(pysqlite_connection_iterdump__doc__, -"iterdump($self, /)\n" +"iterdump($self, /, *, filter=None)\n" "--\n" "\n" -"Returns iterator to the dump of the database in an SQL text format."); +"Returns iterator to the dump of the database in an SQL text format.\n" +"\n" +" filter\n" +" An optional LIKE pattern for database objects to dump"); #define PYSQLITE_CONNECTION_ITERDUMP_METHODDEF \ - {"iterdump", (PyCFunction)pysqlite_connection_iterdump, METH_NOARGS, pysqlite_connection_iterdump__doc__}, + {"iterdump", _PyCFunction_CAST(pysqlite_connection_iterdump), METH_FASTCALL|METH_KEYWORDS, pysqlite_connection_iterdump__doc__}, static PyObject * -pysqlite_connection_iterdump_impl(pysqlite_Connection *self); +pysqlite_connection_iterdump_impl(pysqlite_Connection *self, + PyObject *filter); static PyObject * -pysqlite_connection_iterdump(pysqlite_Connection *self, PyObject *Py_UNUSED(ignored)) +pysqlite_connection_iterdump(pysqlite_Connection *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { - return pysqlite_connection_iterdump_impl(self); + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(filter), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"filter", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .fname = "iterdump", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + PyObject *argsbuf[1]; + Py_ssize_t noptargs = nargs + (kwnames ? 
PyTuple_GET_SIZE(kwnames) : 0) - 0; + PyObject *filter = Py_None; + + args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 0, 0, 0, argsbuf); + if (!args) { + goto exit; + } + if (!noptargs) { + goto skip_optional_kwonly; + } + filter = args[0]; +skip_optional_kwonly: + return_value = pysqlite_connection_iterdump_impl(self, filter); + +exit: + return return_value; } PyDoc_STRVAR(pysqlite_connection_backup__doc__, @@ -1820,4 +1866,4 @@ getconfig(pysqlite_Connection *self, PyObject *arg) #ifndef DESERIALIZE_METHODDEF #define DESERIALIZE_METHODDEF #endif /* !defined(DESERIALIZE_METHODDEF) */ -/*[clinic end generated code: output=99299d3ee2c247ab input=a9049054013a1b77]*/ +/*[clinic end generated code: output=3c6d0b748fac016f input=a9049054013a1b77]*/ diff --git a/Modules/_sqlite/connection.c b/Modules/_sqlite/connection.c index 0a6633972cc5ef..f97afcf5fcf16e 100644 --- a/Modules/_sqlite/connection.c +++ b/Modules/_sqlite/connection.c @@ -1979,12 +1979,17 @@ pysqlite_connection_interrupt_impl(pysqlite_Connection *self) /*[clinic input] _sqlite3.Connection.iterdump as pysqlite_connection_iterdump + * + filter: object = None + An optional LIKE pattern for database objects to dump + Returns iterator to the dump of the database in an SQL text format. [clinic start generated code]*/ static PyObject * -pysqlite_connection_iterdump_impl(pysqlite_Connection *self) -/*[clinic end generated code: output=586997aaf9808768 input=1911ca756066da89]*/ +pysqlite_connection_iterdump_impl(pysqlite_Connection *self, + PyObject *filter) +/*[clinic end generated code: output=fd81069c4bdeb6b0 input=4ae6d9a898f108df]*/ { if (!pysqlite_check_connection(self)) { return NULL; @@ -1998,9 +2003,16 @@ pysqlite_connection_iterdump_impl(pysqlite_Connection *self) } return NULL; } - - PyObject *retval = PyObject_CallOneArg(iterdump, (PyObject *)self); + PyObject *args[3] = {NULL, (PyObject *)self, filter}; + PyObject *kwnames = Py_BuildValue("(s)", "filter"); + if (!kwnames) { + Py_DECREF(iterdump); + return NULL; + } + Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; + PyObject *retval = PyObject_Vectorcall(iterdump, args + 1, nargsf, kwnames); Py_DECREF(iterdump); + Py_DECREF(kwnames); return retval; } diff --git a/Modules/_sre/sre.c b/Modules/_sre/sre.c index d451974b9cf81e..00fbd9674b8cdd 100644 --- a/Modules/_sre/sre.c +++ b/Modules/_sre/sre.c @@ -39,13 +39,14 @@ static const char copyright[] = " SRE 2.2.2 Copyright (c) 1997-2002 by Secret Labs AB "; #include "Python.h" -#include "pycore_dict.h" // _PyDict_Next() -#include "pycore_long.h" // _PyLong_GetZero() -#include "pycore_moduleobject.h" // _PyModule_GetState() +#include "pycore_critical_section.h" // Py_BEGIN_CRITICAL_SECTION +#include "pycore_dict.h" // _PyDict_Next() +#include "pycore_long.h" // _PyLong_GetZero() +#include "pycore_moduleobject.h" // _PyModule_GetState() -#include "sre.h" // SRE_CODE +#include "sre.h" // SRE_CODE -#include // tolower(), toupper(), isalnum() +#include // tolower(), toupper(), isalnum() #define SRE_CODE_BITS (8 * sizeof(SRE_CODE)) @@ -2349,26 +2350,28 @@ _sre_SRE_Match_groupdict_impl(MatchObject *self, PyObject *default_value) if (!result || !self->pattern->groupindex) return result; + Py_BEGIN_CRITICAL_SECTION(self->pattern->groupindex); while (_PyDict_Next(self->pattern->groupindex, &pos, &key, &value, &hash)) { int status; Py_INCREF(key); value = match_getslice(self, key, default_value); if (!value) { Py_DECREF(key); - goto failed; + Py_CLEAR(result); + goto exit; } status = _PyDict_SetItem_KnownHash(result, 
key, value, hash); Py_DECREF(value); Py_DECREF(key); - if (status < 0) - goto failed; + if (status < 0) { + Py_CLEAR(result); + goto exit; + } } +exit: + Py_END_CRITICAL_SECTION(); return result; - -failed: - Py_DECREF(result); - return NULL; } /*[clinic input] diff --git a/Modules/_testcapi/dict.c b/Modules/_testcapi/dict.c index 42e056b7d07a31..fe03c24f75e196 100644 --- a/Modules/_testcapi/dict.c +++ b/Modules/_testcapi/dict.c @@ -225,6 +225,31 @@ dict_setdefault(PyObject *self, PyObject *args) return PyDict_SetDefault(mapping, key, defaultobj); } +static PyObject * +dict_setdefaultref(PyObject *self, PyObject *args) +{ + PyObject *obj, *key, *default_value, *result = UNINITIALIZED_PTR; + if (!PyArg_ParseTuple(args, "OOO", &obj, &key, &default_value)) { + return NULL; + } + NULLABLE(obj); + NULLABLE(key); + NULLABLE(default_value); + switch (PyDict_SetDefaultRef(obj, key, default_value, &result)) { + case -1: + assert(result == NULL); + return NULL; + case 0: + assert(result == default_value); + return result; + case 1: + return result; + default: + Py_FatalError("PyDict_SetDefaultRef() returned invalid code"); + Py_UNREACHABLE(); + } +} + static PyObject * dict_delitem(PyObject *self, PyObject *args) { @@ -433,6 +458,7 @@ static PyMethodDef test_methods[] = { {"dict_delitem", dict_delitem, METH_VARARGS}, {"dict_delitemstring", dict_delitemstring, METH_VARARGS}, {"dict_setdefault", dict_setdefault, METH_VARARGS}, + {"dict_setdefaultref", dict_setdefaultref, METH_VARARGS}, {"dict_keys", dict_keys, METH_O}, {"dict_values", dict_values, METH_O}, {"dict_items", dict_items, METH_O}, diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c index 5cceb84658deb7..df02b023012fbd 100644 --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -44,6 +44,7 @@ get_thread_state(PyObject *module) typedef struct { PyObject_HEAD + struct llist_node node; // linked list node (see _pythread_runtime_state) PyThread_ident_t ident; PyThread_handle_t handle; char joinable; @@ -59,6 +60,11 @@ new_thread_handle(thread_module_state* state) self->ident = 0; self->handle = 0; self->joinable = 0; + + HEAD_LOCK(&_PyRuntime); + llist_insert_tail(&_PyRuntime.threads.handles, &self->node); + HEAD_UNLOCK(&_PyRuntime); + return self; } @@ -66,6 +72,14 @@ static void ThreadHandle_dealloc(ThreadHandleObject *self) { PyObject *tp = (PyObject *) Py_TYPE(self); + + // Remove ourself from the global list of handles + HEAD_LOCK(&_PyRuntime); + if (self->node.next != NULL) { + llist_remove(&self->node); + } + HEAD_UNLOCK(&_PyRuntime); + if (self->joinable) { int ret = PyThread_detach_thread(self->handle); if (ret) { @@ -77,6 +91,28 @@ ThreadHandle_dealloc(ThreadHandleObject *self) Py_DECREF(tp); } +void +_PyThread_AfterFork(struct _pythread_runtime_state *state) +{ + // gh-115035: We mark ThreadHandles as not joinable early in the child's + // after-fork handler. We do this before calling any Python code to ensure + // that it happens before any ThreadHandles are deallocated, such as by a + // GC cycle. + PyThread_ident_t current = PyThread_get_thread_ident_ex(); + + struct llist_node *node; + llist_for_each_safe(node, &state->handles) { + ThreadHandleObject *hobj = llist_data(node, ThreadHandleObject, node); + if (hobj->ident == current) { + continue; + } + + // Disallow calls to detach() and join() as they could crash. 
+ hobj->joinable = 0; + llist_remove(node); + } +} + static PyObject * ThreadHandle_repr(ThreadHandleObject *self) { @@ -91,21 +127,6 @@ ThreadHandle_get_ident(ThreadHandleObject *self, void *ignored) } -static PyObject * -ThreadHandle_after_fork_alive(ThreadHandleObject *self, void* ignored) -{ - PyThread_update_thread_after_fork(&self->ident, &self->handle); - Py_RETURN_NONE; -} - -static PyObject * -ThreadHandle_after_fork_dead(ThreadHandleObject *self, void* ignored) -{ - // Disallow calls to detach() and join() as they could crash. - self->joinable = 0; - Py_RETURN_NONE; -} - static PyObject * ThreadHandle_detach(ThreadHandleObject *self, void* ignored) { @@ -157,8 +178,6 @@ static PyGetSetDef ThreadHandle_getsetlist[] = { static PyMethodDef ThreadHandle_methods[] = { - {"after_fork_alive", (PyCFunction)ThreadHandle_after_fork_alive, METH_NOARGS}, - {"after_fork_dead", (PyCFunction)ThreadHandle_after_fork_dead, METH_NOARGS}, {"detach", (PyCFunction)ThreadHandle_detach, METH_NOARGS}, {"join", (PyCFunction)ThreadHandle_join, METH_NOARGS}, {0, 0} diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c index 3b63dd7a9a8353..a2b66b9b78c169 100644 --- a/Modules/gcmodule.c +++ b/Modules/gcmodule.c @@ -158,12 +158,17 @@ gc_set_threshold_impl(PyObject *module, int threshold0, int group_right_1, { GCState *gcstate = get_gc_state(); - gcstate->young.threshold = threshold0; + gcstate->generations[0].threshold = threshold0; if (group_right_1) { - gcstate->old[0].threshold = threshold1; + gcstate->generations[1].threshold = threshold1; } if (group_right_2) { - gcstate->old[1].threshold = threshold2; + gcstate->generations[2].threshold = threshold2; + + /* generations higher than 2 get the same threshold */ + for (int i = 3; i < NUM_GENERATIONS; i++) { + gcstate->generations[i].threshold = gcstate->generations[2].threshold; + } } Py_RETURN_NONE; } @@ -180,9 +185,9 @@ gc_get_threshold_impl(PyObject *module) { GCState *gcstate = get_gc_state(); return Py_BuildValue("(iii)", - gcstate->young.threshold, - gcstate->old[0].threshold, - 0); + gcstate->generations[0].threshold, + gcstate->generations[1].threshold, + gcstate->generations[2].threshold); } /*[clinic input] @@ -197,9 +202,9 @@ gc_get_count_impl(PyObject *module) { GCState *gcstate = get_gc_state(); return Py_BuildValue("(iii)", - gcstate->young.count, - gcstate->old[gcstate->visited_space].count, - gcstate->old[gcstate->visited_space^1].count); + gcstate->generations[0].count, + gcstate->generations[1].count, + gcstate->generations[2].count); } /*[clinic input] @@ -230,6 +235,26 @@ referentsvisit(PyObject *obj, void *arg) return PyList_Append(list, obj) < 0; } +static int +append_referrents(PyObject *result, PyObject *args) +{ + for (Py_ssize_t i = 0; i < PyTuple_GET_SIZE(args); i++) { + PyObject *obj = PyTuple_GET_ITEM(args, i); + if (!_PyObject_IS_GC(obj)) { + continue; + } + + traverseproc traverse = Py_TYPE(obj)->tp_traverse; + if (!traverse) { + continue; + } + if (traverse(obj, referentsvisit, result)) { + return -1; + } + } + return 0; +} + /*[clinic input] gc.get_referents @@ -242,29 +267,24 @@ static PyObject * gc_get_referents_impl(PyObject *module, PyObject *args) /*[clinic end generated code: output=d47dc02cefd06fe8 input=b3ceab0c34038cbf]*/ { - Py_ssize_t i; if (PySys_Audit("gc.get_referents", "(O)", args) < 0) { return NULL; } + PyInterpreterState *interp = _PyInterpreterState_GET(); PyObject *result = PyList_New(0); if (result == NULL) return NULL; - for (i = 0; i < PyTuple_GET_SIZE(args); i++) { - traverseproc traverse; - PyObject 
*obj = PyTuple_GET_ITEM(args, i); + // NOTE: stop the world is a no-op in default build + _PyEval_StopTheWorld(interp); + int err = append_referrents(result, args); + _PyEval_StartTheWorld(interp); - if (!_PyObject_IS_GC(obj)) - continue; - traverse = Py_TYPE(obj)->tp_traverse; - if (! traverse) - continue; - if (traverse(obj, referentsvisit, result)) { - Py_DECREF(result); - return NULL; - } + if (err < 0) { + Py_CLEAR(result); } + return result; } diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index 22891135bde0af..e26265fc874ebb 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -1627,7 +1627,7 @@ convertenviron(void) Py_DECREF(d); return NULL; } - if (PyDict_SetDefault(d, k, v) == NULL) { + if (PyDict_SetDefaultRef(d, k, v, NULL) < 0) { Py_DECREF(v); Py_DECREF(k); Py_DECREF(d); diff --git a/Modules/pyexpat.c b/Modules/pyexpat.c index 7c08eda83e66b2..62cd262a7885e9 100644 --- a/Modules/pyexpat.c +++ b/Modules/pyexpat.c @@ -1615,7 +1615,8 @@ static int init_handler_descrs(pyexpat_state *state) if (descr == NULL) return -1; - if (PyDict_SetDefault(state->xml_parse_type->tp_dict, PyDescr_NAME(descr), descr) == NULL) { + if (PyDict_SetDefaultRef(state->xml_parse_type->tp_dict, + PyDescr_NAME(descr), descr, NULL) < 0) { Py_DECREF(descr); return -1; } diff --git a/Objects/clinic/dictobject.c.h b/Objects/clinic/dictobject.c.h index 8f532f454156de..daaef211b1db49 100644 --- a/Objects/clinic/dictobject.c.h +++ b/Objects/clinic/dictobject.c.h @@ -2,6 +2,7 @@ preserve [clinic start generated code]*/ +#include "pycore_critical_section.h"// Py_BEGIN_CRITICAL_SECTION() #include "pycore_modsupport.h" // _PyArg_CheckPositional() PyDoc_STRVAR(dict_fromkeys__doc__, @@ -65,6 +66,21 @@ PyDoc_STRVAR(dict___contains____doc__, #define DICT___CONTAINS___METHODDEF \ {"__contains__", (PyCFunction)dict___contains__, METH_O|METH_COEXIST, dict___contains____doc__}, +static PyObject * +dict___contains___impl(PyDictObject *self, PyObject *key); + +static PyObject * +dict___contains__(PyDictObject *self, PyObject *key) +{ + PyObject *return_value = NULL; + + Py_BEGIN_CRITICAL_SECTION(self); + return_value = dict___contains___impl(self, key); + Py_END_CRITICAL_SECTION(); + + return return_value; +} + PyDoc_STRVAR(dict_get__doc__, "get($self, key, default=None, /)\n" "--\n" @@ -93,7 +109,9 @@ dict_get(PyDictObject *self, PyObject *const *args, Py_ssize_t nargs) } default_value = args[1]; skip_optional: + Py_BEGIN_CRITICAL_SECTION(self); return_value = dict_get_impl(self, key, default_value); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -130,7 +148,9 @@ dict_setdefault(PyDictObject *self, PyObject *const *args, Py_ssize_t nargs) } default_value = args[1]; skip_optional: + Py_BEGIN_CRITICAL_SECTION(self); return_value = dict_setdefault_impl(self, key, default_value); + Py_END_CRITICAL_SECTION(); exit: return return_value; @@ -209,7 +229,13 @@ dict_popitem_impl(PyDictObject *self); static PyObject * dict_popitem(PyDictObject *self, PyObject *Py_UNUSED(ignored)) { - return dict_popitem_impl(self); + PyObject *return_value = NULL; + + Py_BEGIN_CRITICAL_SECTION(self); + return_value = dict_popitem_impl(self); + Py_END_CRITICAL_SECTION(); + + return return_value; } PyDoc_STRVAR(dict___sizeof____doc__, @@ -301,4 +327,4 @@ dict_values(PyDictObject *self, PyObject *Py_UNUSED(ignored)) { return dict_values_impl(self); } -/*[clinic end generated code: output=f3ac47dfbf341b23 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=c8fda06bac5b05f3 input=a9049054013a1b77]*/ diff --git 
a/Objects/dictobject.c b/Objects/dictobject.c index 4bb818b90a4a72..2df95e977a180f 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -113,18 +113,19 @@ As a consequence of this, split keys have a maximum size of 16. #define PyDict_MINSIZE 8 #include "Python.h" -#include "pycore_bitutils.h" // _Py_bit_length -#include "pycore_call.h" // _PyObject_CallNoArgs() -#include "pycore_ceval.h" // _PyEval_GetBuiltin() -#include "pycore_code.h" // stats -#include "pycore_dict.h" // export _PyDict_SizeOf() -#include "pycore_freelist.h" // _PyFreeListState_GET() -#include "pycore_gc.h" // _PyObject_GC_IS_TRACKED() -#include "pycore_object.h" // _PyObject_GC_TRACK(), _PyDebugAllocatorStats() -#include "pycore_pyerrors.h" // _PyErr_GetRaisedException() -#include "pycore_pystate.h" // _PyThreadState_GET() -#include "pycore_setobject.h" // _PySet_NextEntry() -#include "stringlib/eq.h" // unicode_eq() +#include "pycore_bitutils.h" // _Py_bit_length +#include "pycore_call.h" // _PyObject_CallNoArgs() +#include "pycore_ceval.h" // _PyEval_GetBuiltin() +#include "pycore_code.h" // stats +#include "pycore_critical_section.h" // Py_BEGIN_CRITICAL_SECTION, Py_END_CRITICAL_SECTION +#include "pycore_dict.h" // export _PyDict_SizeOf() +#include "pycore_freelist.h" // _PyFreeListState_GET() +#include "pycore_gc.h" // _PyObject_GC_IS_TRACKED() +#include "pycore_object.h" // _PyObject_GC_TRACK(), _PyDebugAllocatorStats() +#include "pycore_pyerrors.h" // _PyErr_GetRaisedException() +#include "pycore_pystate.h" // _PyThreadState_GET() +#include "pycore_setobject.h" // _PySet_NextEntry() +#include "stringlib/eq.h" // unicode_eq() #include @@ -141,6 +142,21 @@ To avoid slowing down lookups on a near-full table, we resize the table when it's USABLE_FRACTION (currently two-thirds) full. 
*/ +#ifdef Py_GIL_DISABLED + +static inline void +ASSERT_DICT_LOCKED(PyObject *op) +{ + _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op); +} +#define ASSERT_DICT_LOCKED(op) ASSERT_DICT_LOCKED(_Py_CAST(PyObject*, op)) + +#else + +#define ASSERT_DICT_LOCKED(op) + +#endif + #define PERTURB_SHIFT 5 /* @@ -240,6 +256,16 @@ static int dictresize(PyInterpreterState *interp, PyDictObject *mp, static PyObject* dict_iter(PyObject *dict); +static int +contains_lock_held(PyDictObject *mp, PyObject *key); +static int +contains_known_hash_lock_held(PyDictObject *mp, PyObject *key, Py_ssize_t hash); +static int +setitem_lock_held(PyDictObject *mp, PyObject *key, PyObject *value); +static int +dict_setdefault_ref_lock_held(PyObject *d, PyObject *key, PyObject *default_value, + PyObject **result, int incref_result); + #include "clinic/dictobject.c.h" @@ -789,6 +815,8 @@ clone_combined_dict_keys(PyDictObject *orig) assert(orig->ma_keys != Py_EMPTY_KEYS); assert(orig->ma_keys->dk_refcnt == 1); + ASSERT_DICT_LOCKED(orig); + size_t keys_size = _PyDict_KeysSize(orig->ma_keys); PyDictKeysObject *keys = PyMem_Malloc(keys_size); if (keys == NULL) { @@ -1230,6 +1258,8 @@ insertdict(PyInterpreterState *interp, PyDictObject *mp, { PyObject *old_value; + ASSERT_DICT_LOCKED(mp); + if (DK_IS_UNICODE(mp->ma_keys) && !PyUnicode_CheckExact(key)) { if (insertion_resize(interp, mp, 0) < 0) goto Fail; @@ -1326,6 +1356,7 @@ insert_to_emptydict(PyInterpreterState *interp, PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject *value) { assert(mp->ma_keys == Py_EMPTY_KEYS); + ASSERT_DICT_LOCKED(mp); uint64_t new_version = _PyDict_NotifyEvent( interp, PyDict_EVENT_ADDED, mp, key, value); @@ -1419,6 +1450,8 @@ dictresize(PyInterpreterState *interp, PyDictObject *mp, PyDictKeysObject *oldkeys; PyDictValues *oldvalues; + ASSERT_DICT_LOCKED(mp); + if (log2_newsize >= SIZEOF_SIZE_T*8) { PyErr_NoMemory(); return -1; @@ -1613,7 +1646,7 @@ _PyDict_FromItems(PyObject *const *keys, Py_ssize_t keys_offset, for (Py_ssize_t i = 0; i < length; i++) { PyObject *key = *ks; PyObject *value = *vs; - if (PyDict_SetItem(dict, key, value) < 0) { + if (setitem_lock_held((PyDictObject *)dict, key, value) < 0) { Py_DECREF(dict); return NULL; } @@ -1688,6 +1721,7 @@ PyDict_GetItem(PyObject *op, PyObject *key) Py_ssize_t _PyDict_LookupIndex(PyDictObject *mp, PyObject *key) { + // TODO: Thread safety PyObject *value; assert(PyDict_CheckExact((PyObject*)mp)); assert(PyUnicode_CheckExact(key)); @@ -1864,9 +1898,11 @@ _PyDict_LoadGlobal(PyDictObject *globals, PyDictObject *builtins, PyObject *key) } /* Consumes references to key and value */ -int -_PyDict_SetItem_Take2(PyDictObject *mp, PyObject *key, PyObject *value) +static int +setitem_take2_lock_held(PyDictObject *mp, PyObject *key, PyObject *value) { + ASSERT_DICT_LOCKED(mp); + assert(key); assert(value); assert(PyDict_Check(mp)); @@ -1879,7 +1915,9 @@ _PyDict_SetItem_Take2(PyDictObject *mp, PyObject *key, PyObject *value) return -1; } } + PyInterpreterState *interp = _PyInterpreterState_GET(); + if (mp->ma_keys == Py_EMPTY_KEYS) { return insert_to_emptydict(interp, mp, key, hash, value); } @@ -1887,6 +1925,16 @@ _PyDict_SetItem_Take2(PyDictObject *mp, PyObject *key, PyObject *value) return insertdict(interp, mp, key, hash, value); } +int +_PyDict_SetItem_Take2(PyDictObject *mp, PyObject *key, PyObject *value) +{ + int res; + Py_BEGIN_CRITICAL_SECTION(mp); + res = setitem_take2_lock_held(mp, key, value); + Py_END_CRITICAL_SECTION(); + return res; +} + /* CAUTION: PyDict_SetItem() must guarantee that it 
won't resize the * dictionary if it's merely replacing the value for an existing key. * This means that it's safe to loop over a dictionary with PyDict_Next() @@ -1906,6 +1954,16 @@ PyDict_SetItem(PyObject *op, PyObject *key, PyObject *value) Py_NewRef(key), Py_NewRef(value)); } +static int +setitem_lock_held(PyDictObject *mp, PyObject *key, PyObject *value) +{ + assert(key); + assert(value); + return setitem_take2_lock_held(mp, + Py_NewRef(key), Py_NewRef(value)); +} + + int _PyDict_SetItem_KnownHash(PyObject *op, PyObject *key, PyObject *value, Py_hash_t hash) @@ -1921,12 +1979,21 @@ _PyDict_SetItem_KnownHash(PyObject *op, PyObject *key, PyObject *value, assert(hash != -1); mp = (PyDictObject *)op; + int res; PyInterpreterState *interp = _PyInterpreterState_GET(); + + Py_BEGIN_CRITICAL_SECTION(mp); + if (mp->ma_keys == Py_EMPTY_KEYS) { - return insert_to_emptydict(interp, mp, Py_NewRef(key), hash, Py_NewRef(value)); + res = insert_to_emptydict(interp, mp, Py_NewRef(key), hash, Py_NewRef(value)); } - /* insertdict() handles any resizing that might be necessary */ - return insertdict(interp, mp, Py_NewRef(key), hash, Py_NewRef(value)); + else { + /* insertdict() handles any resizing that might be necessary */ + res = insertdict(interp, mp, Py_NewRef(key), hash, Py_NewRef(value)); + } + + Py_END_CRITICAL_SECTION(); + return res; } static void @@ -1951,6 +2018,8 @@ delitem_common(PyDictObject *mp, Py_hash_t hash, Py_ssize_t ix, { PyObject *old_key; + ASSERT_DICT_LOCKED(mp); + Py_ssize_t hashpos = lookdict_index(mp->ma_keys, hash, ix); assert(hashpos >= 0); @@ -2002,8 +2071,8 @@ PyDict_DelItem(PyObject *op, PyObject *key) return _PyDict_DelItem_KnownHash(op, key, hash); } -int -_PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash) +static int +delitem_knownhash_lock_held(PyObject *op, PyObject *key, Py_hash_t hash) { Py_ssize_t ix; PyDictObject *mp; @@ -2013,6 +2082,9 @@ _PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash) PyErr_BadInternalCall(); return -1; } + + ASSERT_DICT_LOCKED(op); + assert(key); assert(hash != -1); mp = (PyDictObject *)op; @@ -2030,13 +2102,19 @@ _PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash) return delitem_common(mp, hash, ix, old_value, new_version); } -/* This function promises that the predicate -> deletion sequence is atomic - * (i.e. protected by the GIL), assuming the predicate itself doesn't - * release the GIL. - */ int -_PyDict_DelItemIf(PyObject *op, PyObject *key, - int (*predicate)(PyObject *value)) +_PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash) +{ + int res; + Py_BEGIN_CRITICAL_SECTION(op); + res = delitem_knownhash_lock_held(op, key, hash); + Py_END_CRITICAL_SECTION(); + return res; +} + +static int +delitemif_lock_held(PyObject *op, PyObject *key, + int (*predicate)(PyObject *value)) { Py_ssize_t hashpos, ix; PyDictObject *mp; @@ -2044,6 +2122,8 @@ _PyDict_DelItemIf(PyObject *op, PyObject *key, PyObject *old_value; int res; + ASSERT_DICT_LOCKED(op); + if (!PyDict_Check(op)) { PyErr_BadInternalCall(); return -1; @@ -2077,16 +2157,32 @@ _PyDict_DelItemIf(PyObject *op, PyObject *key, return 0; } } +/* This function promises that the predicate -> deletion sequence is atomic + * (i.e. 
protected by the GIL or the per-dict mutex in free threaded builds), + * assuming the predicate itself doesn't release the GIL (or cause re-entrancy + * which would release the per-dict mutex) + */ +int +_PyDict_DelItemIf(PyObject *op, PyObject *key, + int (*predicate)(PyObject *value)) +{ + int res; + Py_BEGIN_CRITICAL_SECTION(op); + res = delitemif_lock_held(op, key, predicate); + Py_END_CRITICAL_SECTION(); + return res; +} - -void -PyDict_Clear(PyObject *op) +static void +clear_lock_held(PyObject *op) { PyDictObject *mp; PyDictKeysObject *oldkeys; PyDictValues *oldvalues; Py_ssize_t i, n; + ASSERT_DICT_LOCKED(op); + if (!PyDict_Check(op)) return; mp = ((PyDictObject *)op); @@ -2119,6 +2215,14 @@ PyDict_Clear(PyObject *op) ASSERT_CONSISTENT(mp); } +void +PyDict_Clear(PyObject *op) +{ + Py_BEGIN_CRITICAL_SECTION(op); + clear_lock_held(op); + Py_END_CRITICAL_SECTION(); +} + /* Internal version of PyDict_Next that returns a hash value in addition * to the key and value. * Return 1 on success, return 0 when the reached the end of the dictionary @@ -2135,6 +2239,9 @@ _PyDict_Next(PyObject *op, Py_ssize_t *ppos, PyObject **pkey, if (!PyDict_Check(op)) return 0; + + ASSERT_DICT_LOCKED(op); + mp = (PyDictObject *)op; i = *ppos; if (mp->ma_values) { @@ -2208,7 +2315,11 @@ _PyDict_Next(PyObject *op, Py_ssize_t *ppos, PyObject **pkey, int PyDict_Next(PyObject *op, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue) { - return _PyDict_Next(op, ppos, pkey, pvalue, NULL); + int res; + Py_BEGIN_CRITICAL_SECTION(op); + res = _PyDict_Next(op, ppos, pkey, pvalue, NULL); + Py_END_CRITICAL_SECTION(); + return res; } @@ -2219,6 +2330,8 @@ _PyDict_Pop_KnownHash(PyDictObject *mp, PyObject *key, Py_hash_t hash, { assert(PyDict_Check(mp)); + ASSERT_DICT_LOCKED(mp); + if (mp->ma_used == 0) { if (result) { *result = NULL; @@ -2258,10 +2371,11 @@ _PyDict_Pop_KnownHash(PyDictObject *mp, PyObject *key, Py_hash_t hash, return 1; } - -int -PyDict_Pop(PyObject *op, PyObject *key, PyObject **result) +static int +pop_lock_held(PyObject *op, PyObject *key, PyObject **result) { + ASSERT_DICT_LOCKED(op); + if (!PyDict_Check(op)) { if (result) { *result = NULL; @@ -2291,6 +2405,17 @@ PyDict_Pop(PyObject *op, PyObject *key, PyObject **result) return _PyDict_Pop_KnownHash(dict, key, hash, result); } +int +PyDict_Pop(PyObject *op, PyObject *key, PyObject **result) +{ + int err; + Py_BEGIN_CRITICAL_SECTION(op); + err = pop_lock_held(op, key, result); + Py_END_CRITICAL_SECTION(); + + return err; +} + int PyDict_PopString(PyObject *op, const char *key, PyObject **result) @@ -2323,6 +2448,55 @@ _PyDict_Pop(PyObject *dict, PyObject *key, PyObject *default_value) return result; } +static PyDictObject * +dict_dict_fromkeys(PyInterpreterState *interp, PyDictObject *mp, + PyObject *iterable, PyObject *value) +{ + PyObject *oldvalue; + Py_ssize_t pos = 0; + PyObject *key; + Py_hash_t hash; + int unicode = DK_IS_UNICODE(((PyDictObject*)iterable)->ma_keys); + uint8_t new_size = Py_MAX( + estimate_log2_keysize(PyDict_GET_SIZE(iterable)), + DK_LOG_SIZE(mp->ma_keys)); + if (dictresize(interp, mp, new_size, unicode)) { + Py_DECREF(mp); + return NULL; + } + + while (_PyDict_Next(iterable, &pos, &key, &oldvalue, &hash)) { + if (insertdict(interp, mp, + Py_NewRef(key), hash, Py_NewRef(value))) { + Py_DECREF(mp); + return NULL; + } + } + return mp; +} + +static PyDictObject * +dict_set_fromkeys(PyInterpreterState *interp, PyDictObject *mp, + PyObject *iterable, PyObject *value) +{ + Py_ssize_t pos = 0; + PyObject *key; + Py_hash_t hash; + + if 
(dictresize(interp, mp, + estimate_log2_keysize(PySet_GET_SIZE(iterable)), 0)) { + Py_DECREF(mp); + return NULL; + } + + while (_PySet_NextEntry(iterable, &pos, &key, &hash)) { + if (insertdict(interp, mp, Py_NewRef(key), hash, Py_NewRef(value))) { + Py_DECREF(mp); + return NULL; + } + } + return mp; +} /* Internal version of dict.from_keys(). It is subclass-friendly. */ PyObject * @@ -2338,49 +2512,22 @@ _PyDict_FromKeys(PyObject *cls, PyObject *iterable, PyObject *value) if (d == NULL) return NULL; - if (PyDict_CheckExact(d) && ((PyDictObject *)d)->ma_used == 0) { + + if (PyDict_CheckExact(d)) { if (PyDict_CheckExact(iterable)) { PyDictObject *mp = (PyDictObject *)d; - PyObject *oldvalue; - Py_ssize_t pos = 0; - PyObject *key; - Py_hash_t hash; - - int unicode = DK_IS_UNICODE(((PyDictObject*)iterable)->ma_keys); - if (dictresize(interp, mp, - estimate_log2_keysize(PyDict_GET_SIZE(iterable)), - unicode)) { - Py_DECREF(d); - return NULL; - } - while (_PyDict_Next(iterable, &pos, &key, &oldvalue, &hash)) { - if (insertdict(interp, mp, - Py_NewRef(key), hash, Py_NewRef(value))) { - Py_DECREF(d); - return NULL; - } - } + Py_BEGIN_CRITICAL_SECTION2(d, iterable); + d = (PyObject *)dict_dict_fromkeys(interp, mp, iterable, value); + Py_END_CRITICAL_SECTION2(); return d; } - if (PyAnySet_CheckExact(iterable)) { + else if (PyAnySet_CheckExact(iterable)) { PyDictObject *mp = (PyDictObject *)d; - Py_ssize_t pos = 0; - PyObject *key; - Py_hash_t hash; - if (dictresize(interp, mp, - estimate_log2_keysize(PySet_GET_SIZE(iterable)), 0)) { - Py_DECREF(d); - return NULL; - } - - while (_PySet_NextEntry(iterable, &pos, &key, &hash)) { - if (insertdict(interp, mp, Py_NewRef(key), hash, Py_NewRef(value))) { - Py_DECREF(d); - return NULL; - } - } + Py_BEGIN_CRITICAL_SECTION2(d, iterable); + d = (PyObject *)dict_set_fromkeys(interp, mp, iterable, value); + Py_END_CRITICAL_SECTION2(); return d; } } @@ -2392,12 +2539,17 @@ _PyDict_FromKeys(PyObject *cls, PyObject *iterable, PyObject *value) } if (PyDict_CheckExact(d)) { + Py_BEGIN_CRITICAL_SECTION(d); while ((key = PyIter_Next(it)) != NULL) { - status = PyDict_SetItem(d, key, value); + status = setitem_lock_held((PyDictObject *)d, key, value); Py_DECREF(key); - if (status < 0) - goto Fail; + if (status < 0) { + assert(PyErr_Occurred()); + goto dict_iter_exit; + } } +dict_iter_exit: + Py_END_CRITICAL_SECTION(); } else { while ((key = PyIter_Next(it)) != NULL) { status = PyObject_SetItem(d, key, value); @@ -2468,7 +2620,7 @@ dict_dealloc(PyObject *self) static PyObject * -dict_repr(PyObject *self) +dict_repr_lock_held(PyObject *self) { PyDictObject *mp = (PyDictObject *)self; Py_ssize_t i; @@ -2498,7 +2650,7 @@ dict_repr(PyObject *self) Note that repr may mutate the dict. 
*/ i = 0; first = 1; - while (PyDict_Next((PyObject *)mp, &i, &key, &value)) { + while (_PyDict_Next((PyObject *)mp, &i, &key, &value, NULL)) { PyObject *s; int res; @@ -2551,15 +2703,25 @@ dict_repr(PyObject *self) return NULL; } +static PyObject * +dict_repr(PyObject *self) +{ + PyObject *res; + Py_BEGIN_CRITICAL_SECTION(self); + res = dict_repr_lock_held(self); + Py_END_CRITICAL_SECTION(); + return res; +} + static Py_ssize_t dict_length(PyObject *self) { PyDictObject *mp = (PyDictObject *)self; - return mp->ma_used; + return _Py_atomic_load_ssize_relaxed(&mp->ma_used); } static PyObject * -dict_subscript(PyObject *self, PyObject *key) +dict_subscript_lock_held(PyObject *self, PyObject *key) { PyDictObject *mp = (PyDictObject *)self; Py_ssize_t ix; @@ -2594,6 +2756,16 @@ dict_subscript(PyObject *self, PyObject *key) return Py_NewRef(value); } +static PyObject * +dict_subscript(PyObject *self, PyObject *key) +{ + PyObject *res; + Py_BEGIN_CRITICAL_SECTION(self); + res = dict_subscript_lock_held(self, key); + Py_END_CRITICAL_SECTION(); + return res; +} + static int dict_ass_sub(PyObject *mp, PyObject *v, PyObject *w) { @@ -2609,9 +2781,11 @@ static PyMappingMethods dict_as_mapping = { dict_ass_sub, /*mp_ass_subscript*/ }; -PyObject * -PyDict_Keys(PyObject *dict) +static PyObject * +keys_lock_held(PyObject *dict) { + ASSERT_DICT_LOCKED(dict); + if (dict == NULL || !PyDict_Check(dict)) { PyErr_BadInternalCall(); return NULL; @@ -2646,8 +2820,21 @@ PyDict_Keys(PyObject *dict) } PyObject * -PyDict_Values(PyObject *dict) +PyDict_Keys(PyObject *dict) +{ + PyObject *res; + Py_BEGIN_CRITICAL_SECTION(dict); + res = keys_lock_held(dict); + Py_END_CRITICAL_SECTION(); + + return res; +} + +static PyObject * +values_lock_held(PyObject *dict) { + ASSERT_DICT_LOCKED(dict); + if (dict == NULL || !PyDict_Check(dict)) { PyErr_BadInternalCall(); return NULL; @@ -2682,8 +2869,20 @@ PyDict_Values(PyObject *dict) } PyObject * -PyDict_Items(PyObject *dict) +PyDict_Values(PyObject *dict) +{ + PyObject *res; + Py_BEGIN_CRITICAL_SECTION(dict); + res = values_lock_held(dict); + Py_END_CRITICAL_SECTION(); + return res; +} + +static PyObject * +items_lock_held(PyObject *dict) { + ASSERT_DICT_LOCKED(dict); + if (dict == NULL || !PyDict_Check(dict)) { PyErr_BadInternalCall(); return NULL; @@ -2732,6 +2931,17 @@ PyDict_Items(PyObject *dict) return v; } +PyObject * +PyDict_Items(PyObject *dict) +{ + PyObject *res; + Py_BEGIN_CRITICAL_SECTION(dict); + res = items_lock_held(dict); + Py_END_CRITICAL_SECTION(); + + return res; +} + /*[clinic input] @classmethod dict.fromkeys @@ -2810,8 +3020,8 @@ dict_update(PyObject *self, PyObject *args, PyObject *kwds) producing iterable objects of length 2. 
*/ -int -PyDict_MergeFromSeq2(PyObject *d, PyObject *seq2, int override) +static int +merge_from_seq2_lock_held(PyObject *d, PyObject *seq2, int override) { PyObject *it; /* iter(seq2) */ Py_ssize_t i; /* index into seq2 of current element */ @@ -2863,14 +3073,14 @@ PyDict_MergeFromSeq2(PyObject *d, PyObject *seq2, int override) Py_INCREF(key); Py_INCREF(value); if (override) { - if (PyDict_SetItem(d, key, value) < 0) { + if (setitem_lock_held((PyDictObject *)d, key, value) < 0) { Py_DECREF(key); Py_DECREF(value); goto Fail; } } else { - if (PyDict_SetDefault(d, key, value) == NULL) { + if (dict_setdefault_ref_lock_held(d, key, value, NULL, 0) < 0) { Py_DECREF(key); Py_DECREF(value); goto Fail; @@ -2895,6 +3105,117 @@ PyDict_MergeFromSeq2(PyObject *d, PyObject *seq2, int override) return Py_SAFE_DOWNCAST(i, Py_ssize_t, int); } +int +PyDict_MergeFromSeq2(PyObject *d, PyObject *seq2, int override) +{ + int res; + Py_BEGIN_CRITICAL_SECTION(d); + res = merge_from_seq2_lock_held(d, seq2, override); + Py_END_CRITICAL_SECTION(); + + return res; +} + +static int +dict_dict_merge(PyInterpreterState *interp, PyDictObject *mp, PyDictObject *other, int override) +{ + if (other == mp || other->ma_used == 0) + /* a.update(a) or a.update({}); nothing to do */ + return 0; + if (mp->ma_used == 0) { + /* Since the target dict is empty, PyDict_GetItem() + * always returns NULL. Setting override to 1 + * skips the unnecessary test. + */ + override = 1; + PyDictKeysObject *okeys = other->ma_keys; + + // If other is clean, combined, and just allocated, just clone it. + if (other->ma_values == NULL && + other->ma_used == okeys->dk_nentries && + (DK_LOG_SIZE(okeys) == PyDict_LOG_MINSIZE || + USABLE_FRACTION(DK_SIZE(okeys)/2) < other->ma_used)) { + uint64_t new_version = _PyDict_NotifyEvent( + interp, PyDict_EVENT_CLONED, mp, (PyObject *)other, NULL); + PyDictKeysObject *keys = clone_combined_dict_keys(other); + if (keys == NULL) + return -1; + + dictkeys_decref(interp, mp->ma_keys); + mp->ma_keys = keys; + if (mp->ma_values != NULL) { + free_values(mp->ma_values); + mp->ma_values = NULL; + } + + mp->ma_used = other->ma_used; + mp->ma_version_tag = new_version; + ASSERT_CONSISTENT(mp); + + if (_PyObject_GC_IS_TRACKED(other) && !_PyObject_GC_IS_TRACKED(mp)) { + /* Maintain tracking. */ + _PyObject_GC_TRACK(mp); + } + + return 0; + } + } + /* Do one big resize at the start, rather than + * incrementally resizing as we insert new items. Expect + * that there will be no (or few) overlapping keys. 
+ */ + if (USABLE_FRACTION(DK_SIZE(mp->ma_keys)) < other->ma_used) { + int unicode = DK_IS_UNICODE(other->ma_keys); + if (dictresize(interp, mp, + estimate_log2_keysize(mp->ma_used + other->ma_used), + unicode)) { + return -1; + } + } + + Py_ssize_t orig_size = other->ma_keys->dk_nentries; + Py_ssize_t pos = 0; + Py_hash_t hash; + PyObject *key, *value; + + while (_PyDict_Next((PyObject*)other, &pos, &key, &value, &hash)) { + int err = 0; + Py_INCREF(key); + Py_INCREF(value); + if (override == 1) { + err = insertdict(interp, mp, + Py_NewRef(key), hash, Py_NewRef(value)); + } + else { + err = contains_known_hash_lock_held(mp, key, hash); + if (err == 0) { + err = insertdict(interp, mp, + Py_NewRef(key), hash, Py_NewRef(value)); + } + else if (err > 0) { + if (override != 0) { + _PyErr_SetKeyError(key); + Py_DECREF(value); + Py_DECREF(key); + return -1; + } + err = 0; + } + } + Py_DECREF(value); + Py_DECREF(key); + if (err != 0) + return -1; + + if (orig_size != other->ma_keys->dk_nentries) { + PyErr_SetString(PyExc_RuntimeError, + "dict mutated during update"); + return -1; + } + } + return 0; +} + static int dict_merge(PyInterpreterState *interp, PyObject *a, PyObject *b, int override) { @@ -2912,127 +3233,44 @@ dict_merge(PyInterpreterState *interp, PyObject *a, PyObject *b, int override) return -1; } mp = (PyDictObject*)a; + int res = 0; if (PyDict_Check(b) && (Py_TYPE(b)->tp_iter == dict_iter)) { other = (PyDictObject*)b; - if (other == mp || other->ma_used == 0) - /* a.update(a) or a.update({}); nothing to do */ - return 0; - if (mp->ma_used == 0) { - /* Since the target dict is empty, PyDict_GetItem() - * always returns NULL. Setting override to 1 - * skips the unnecessary test. - */ - override = 1; - PyDictKeysObject *okeys = other->ma_keys; - - // If other is clean, combined, and just allocated, just clone it. - if (other->ma_values == NULL && - other->ma_used == okeys->dk_nentries && - (DK_LOG_SIZE(okeys) == PyDict_LOG_MINSIZE || - USABLE_FRACTION(DK_SIZE(okeys)/2) < other->ma_used)) { - uint64_t new_version = _PyDict_NotifyEvent( - interp, PyDict_EVENT_CLONED, mp, b, NULL); - PyDictKeysObject *keys = clone_combined_dict_keys(other); - if (keys == NULL) { - return -1; - } + int res; + Py_BEGIN_CRITICAL_SECTION2(a, b); + res = dict_dict_merge(interp, (PyDictObject *)a, other, override); + ASSERT_CONSISTENT(a); + Py_END_CRITICAL_SECTION2(); + return res; + } + else { + /* Do it the generic, slower way */ + Py_BEGIN_CRITICAL_SECTION(a); + PyObject *keys = PyMapping_Keys(b); + PyObject *iter; + PyObject *key, *value; + int status; - dictkeys_decref(interp, mp->ma_keys); - mp->ma_keys = keys; - if (mp->ma_values != NULL) { - free_values(mp->ma_values); - mp->ma_values = NULL; - } - - mp->ma_used = other->ma_used; - mp->ma_version_tag = new_version; - ASSERT_CONSISTENT(mp); - - if (_PyObject_GC_IS_TRACKED(other) && !_PyObject_GC_IS_TRACKED(mp)) { - /* Maintain tracking. */ - _PyObject_GC_TRACK(mp); - } - - return 0; - } - } - /* Do one big resize at the start, rather than - * incrementally resizing as we insert new items. Expect - * that there will be no (or few) overlapping keys. 
- */ - if (USABLE_FRACTION(DK_SIZE(mp->ma_keys)) < other->ma_used) { - int unicode = DK_IS_UNICODE(other->ma_keys); - if (dictresize(interp, mp, - estimate_log2_keysize(mp->ma_used + other->ma_used), - unicode)) { - return -1; - } - } - - Py_ssize_t orig_size = other->ma_keys->dk_nentries; - Py_ssize_t pos = 0; - Py_hash_t hash; - PyObject *key, *value; - - while (_PyDict_Next((PyObject*)other, &pos, &key, &value, &hash)) { - int err = 0; - Py_INCREF(key); - Py_INCREF(value); - if (override == 1) { - err = insertdict(interp, mp, - Py_NewRef(key), hash, Py_NewRef(value)); - } - else { - err = _PyDict_Contains_KnownHash(a, key, hash); - if (err == 0) { - err = insertdict(interp, mp, - Py_NewRef(key), hash, Py_NewRef(value)); - } - else if (err > 0) { - if (override != 0) { - _PyErr_SetKeyError(key); - Py_DECREF(value); - Py_DECREF(key); - return -1; - } - err = 0; - } - } - Py_DECREF(value); - Py_DECREF(key); - if (err != 0) - return -1; - - if (orig_size != other->ma_keys->dk_nentries) { - PyErr_SetString(PyExc_RuntimeError, - "dict mutated during update"); - return -1; - } - } - } - else { - /* Do it the generic, slower way */ - PyObject *keys = PyMapping_Keys(b); - PyObject *iter; - PyObject *key, *value; - int status; - - if (keys == NULL) + if (keys == NULL) { /* Docstring says this is equivalent to E.keys() so * if E doesn't have a .keys() method we want * AttributeError to percolate up. Might as well * do the same for any other error. */ - return -1; + res = -1; + goto slow_exit; + } iter = PyObject_GetIter(keys); Py_DECREF(keys); - if (iter == NULL) - return -1; + if (iter == NULL) { + res = -1; + goto slow_exit; + } for (key = PyIter_Next(iter); key; key = PyIter_Next(iter)) { if (override != 1) { - status = PyDict_Contains(a, key); + status = contains_lock_held(mp, key); if (status != 0) { if (status > 0) { if (override == 0) { @@ -3043,30 +3281,39 @@ dict_merge(PyInterpreterState *interp, PyObject *a, PyObject *b, int override) } Py_DECREF(key); Py_DECREF(iter); - return -1; + res = -1; + goto slow_exit; } } value = PyObject_GetItem(b, key); if (value == NULL) { Py_DECREF(iter); Py_DECREF(key); - return -1; + res = -1; + goto slow_exit; } - status = PyDict_SetItem(a, key, value); + status = setitem_lock_held(mp, key, value); Py_DECREF(key); Py_DECREF(value); if (status < 0) { Py_DECREF(iter); + res = -1; + goto slow_exit; return -1; } } Py_DECREF(iter); - if (PyErr_Occurred()) + if (PyErr_Occurred()) { /* Iterator completed, via error */ - return -1; + res = -1; + goto slow_exit; + } + +slow_exit: + ASSERT_CONSISTENT(a); + Py_END_CRITICAL_SECTION(); + return res; } - ASSERT_CONSISTENT(a); - return 0; } int @@ -3104,17 +3351,14 @@ dict_copy_impl(PyDictObject *self) return PyDict_Copy((PyObject *)self); } -PyObject * -PyDict_Copy(PyObject *o) +static PyObject * +copy_lock_held(PyObject *o) { PyObject *copy; PyDictObject *mp; PyInterpreterState *interp = _PyInterpreterState_GET(); - if (o == NULL || !PyDict_Check(o)) { - PyErr_BadInternalCall(); - return NULL; - } + ASSERT_DICT_LOCKED(o); mp = (PyDictObject *)o; if (mp->ma_used == 0) { @@ -3197,6 +3441,23 @@ PyDict_Copy(PyObject *o) return NULL; } +PyObject * +PyDict_Copy(PyObject *o) +{ + if (o == NULL || !PyDict_Check(o)) { + PyErr_BadInternalCall(); + return NULL; + } + + PyObject *res; + Py_BEGIN_CRITICAL_SECTION(o); + + res = copy_lock_held(o); + + Py_END_CRITICAL_SECTION(); + return res; +} + Py_ssize_t PyDict_Size(PyObject *mp) { @@ -3212,10 +3473,13 @@ PyDict_Size(PyObject *mp) * Uses only Py_EQ comparison. 
*/ static int -dict_equal(PyDictObject *a, PyDictObject *b) +dict_equal_lock_held(PyDictObject *a, PyDictObject *b) { Py_ssize_t i; + ASSERT_DICT_LOCKED(a); + ASSERT_DICT_LOCKED(b); + if (a->ma_used != b->ma_used) /* can't be equal if # of entries differ */ return 0; @@ -3270,6 +3534,17 @@ dict_equal(PyDictObject *a, PyDictObject *b) return 1; } +static int +dict_equal(PyDictObject *a, PyDictObject *b) +{ + int res; + Py_BEGIN_CRITICAL_SECTION2(a, b); + res = dict_equal_lock_held(a, b); + Py_END_CRITICAL_SECTION2(); + + return res; +} + static PyObject * dict_richcompare(PyObject *v, PyObject *w, int op) { @@ -3293,6 +3568,7 @@ dict_richcompare(PyObject *v, PyObject *w, int op) /*[clinic input] @coexist +@critical_section dict.__contains__ key: object @@ -3302,8 +3578,8 @@ True if the dictionary has the specified key, else False. [clinic start generated code]*/ static PyObject * -dict___contains__(PyDictObject *self, PyObject *key) -/*[clinic end generated code: output=a3d03db709ed6e6b input=fe1cb42ad831e820]*/ +dict___contains___impl(PyDictObject *self, PyObject *key) +/*[clinic end generated code: output=1b314e6da7687dae input=bc76ec9c157cb81b]*/ { register PyDictObject *mp = self; Py_hash_t hash; @@ -3324,6 +3600,7 @@ dict___contains__(PyDictObject *self, PyObject *key) } /*[clinic input] +@critical_section dict.get key: object @@ -3335,7 +3612,7 @@ Return the value for key if key is in the dictionary, else default. static PyObject * dict_get_impl(PyDictObject *self, PyObject *key, PyObject *default_value) -/*[clinic end generated code: output=bba707729dee05bf input=279ddb5790b6b107]*/ +/*[clinic end generated code: output=bba707729dee05bf input=a631d3f18f584c60]*/ { PyObject *val = NULL; Py_hash_t hash; @@ -3355,51 +3632,77 @@ dict_get_impl(PyDictObject *self, PyObject *key, PyObject *default_value) return Py_NewRef(val); } -PyObject * -PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *defaultobj) +static int +dict_setdefault_ref_lock_held(PyObject *d, PyObject *key, PyObject *default_value, + PyObject **result, int incref_result) { PyDictObject *mp = (PyDictObject *)d; PyObject *value; Py_hash_t hash; PyInterpreterState *interp = _PyInterpreterState_GET(); + ASSERT_DICT_LOCKED(d); + if (!PyDict_Check(d)) { PyErr_BadInternalCall(); - return NULL; + if (result) { + *result = NULL; + } + return -1; } if (!PyUnicode_CheckExact(key) || (hash = unicode_get_hash(key)) == -1) { hash = PyObject_Hash(key); - if (hash == -1) - return NULL; + if (hash == -1) { + if (result) { + *result = NULL; + } + return -1; + } } if (mp->ma_keys == Py_EMPTY_KEYS) { if (insert_to_emptydict(interp, mp, Py_NewRef(key), hash, - Py_NewRef(defaultobj)) < 0) { - return NULL; + Py_NewRef(default_value)) < 0) { + if (result) { + *result = NULL; + } + return -1; + } + if (result) { + *result = incref_result ? 
Py_NewRef(default_value) : default_value; } - return defaultobj; + return 0; } if (!PyUnicode_CheckExact(key) && DK_IS_UNICODE(mp->ma_keys)) { if (insertion_resize(interp, mp, 0) < 0) { - return NULL; + if (result) { + *result = NULL; + } + return -1; } } Py_ssize_t ix = _Py_dict_lookup(mp, key, hash, &value); - if (ix == DKIX_ERROR) - return NULL; + if (ix == DKIX_ERROR) { + if (result) { + *result = NULL; + } + return -1; + } if (ix == DKIX_EMPTY) { uint64_t new_version = _PyDict_NotifyEvent( - interp, PyDict_EVENT_ADDED, mp, key, defaultobj); + interp, PyDict_EVENT_ADDED, mp, key, default_value); mp->ma_keys->dk_version = 0; - value = defaultobj; + value = default_value; if (mp->ma_keys->dk_usable <= 0) { if (insertion_resize(interp, mp, 1) < 0) { - return NULL; + if (result) { + *result = NULL; + } + return -1; } } Py_ssize_t hashpos = find_empty_slot(mp->ma_keys, hash); @@ -3431,11 +3734,16 @@ PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *defaultobj) mp->ma_keys->dk_usable--; mp->ma_keys->dk_nentries++; assert(mp->ma_keys->dk_usable >= 0); + ASSERT_CONSISTENT(mp); + if (result) { + *result = incref_result ? Py_NewRef(value) : value; + } + return 0; } else if (value == NULL) { uint64_t new_version = _PyDict_NotifyEvent( - interp, PyDict_EVENT_ADDED, mp, key, defaultobj); - value = defaultobj; + interp, PyDict_EVENT_ADDED, mp, key, default_value); + value = default_value; assert(_PyDict_HasSplitTable(mp)); assert(mp->ma_values->values[ix] == NULL); MAINTAIN_TRACKING(mp, key, value); @@ -3443,13 +3751,43 @@ PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *defaultobj) _PyDictValues_AddToInsertionOrder(mp->ma_values, ix); mp->ma_used++; mp->ma_version_tag = new_version; + ASSERT_CONSISTENT(mp); + if (result) { + *result = incref_result ? Py_NewRef(value) : value; + } + return 0; } ASSERT_CONSISTENT(mp); - return value; + if (result) { + *result = incref_result ? Py_NewRef(value) : value; + } + return 1; +} + +int +PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value, + PyObject **result) +{ + int res; + Py_BEGIN_CRITICAL_SECTION(d); + res = dict_setdefault_ref_lock_held(d, key, default_value, result, 1); + Py_END_CRITICAL_SECTION(); + return res; +} + +PyObject * +PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *defaultobj) +{ + PyObject *result; + Py_BEGIN_CRITICAL_SECTION(d); + dict_setdefault_ref_lock_held(d, key, defaultobj, &result, 0); + Py_END_CRITICAL_SECTION(); + return result; } /*[clinic input] +@critical_section dict.setdefault key: object @@ -3464,12 +3802,11 @@ Return the value for key if key is in the dictionary, else default. static PyObject * dict_setdefault_impl(PyDictObject *self, PyObject *key, PyObject *default_value) -/*[clinic end generated code: output=f8c1101ebf69e220 input=0f063756e815fd9d]*/ +/*[clinic end generated code: output=f8c1101ebf69e220 input=9237af9a0a224302]*/ { PyObject *val; - - val = PyDict_SetDefault((PyObject *)self, key, default_value); - return Py_XNewRef(val); + dict_setdefault_ref_lock_held((PyObject *)self, key, default_value, &val, 1); + return val; } @@ -3508,6 +3845,7 @@ dict_pop_impl(PyDictObject *self, PyObject *key, PyObject *default_value) } /*[clinic input] +@critical_section dict.popitem Remove and return a (key, value) pair as a 2-tuple. @@ -3518,7 +3856,7 @@ Raises KeyError if the dict is empty. 
static PyObject * dict_popitem_impl(PyDictObject *self) -/*[clinic end generated code: output=e65fcb04420d230d input=1c38a49f21f64941]*/ +/*[clinic end generated code: output=e65fcb04420d230d input=ef28b4da5f0f762e]*/ { Py_ssize_t i, j; PyObject *res; @@ -3644,8 +3982,8 @@ dict_tp_clear(PyObject *op) static PyObject *dictiter_new(PyDictObject *, PyTypeObject *); -Py_ssize_t -_PyDict_SizeOf(PyDictObject *mp) +static Py_ssize_t +sizeof_lock_held(PyDictObject *mp) { size_t res = _PyObject_SIZE(Py_TYPE(mp)); if (mp->ma_values) { @@ -3660,6 +3998,17 @@ _PyDict_SizeOf(PyDictObject *mp) return (Py_ssize_t)res; } +Py_ssize_t +_PyDict_SizeOf(PyDictObject *mp) +{ + Py_ssize_t res; + Py_BEGIN_CRITICAL_SECTION(mp); + res = sizeof_lock_held(mp); + Py_END_CRITICAL_SECTION(); + + return res; +} + size_t _PyDict_KeysSize(PyDictKeysObject *keys) { @@ -3743,15 +4092,29 @@ static PyMethodDef mapp_methods[] = { {NULL, NULL} /* sentinel */ }; -/* Return 1 if `key` is in dict `op`, 0 if not, and -1 on error. */ -int -PyDict_Contains(PyObject *op, PyObject *key) +static int +contains_known_hash_lock_held(PyDictObject *mp, PyObject *key, Py_ssize_t hash) +{ + Py_ssize_t ix; + PyObject *value; + + ASSERT_DICT_LOCKED(mp); + + ix = _Py_dict_lookup(mp, key, hash, &value); + if (ix == DKIX_ERROR) + return -1; + return (ix != DKIX_EMPTY && value != NULL); +} + +static int +contains_lock_held(PyDictObject *mp, PyObject *key) { Py_hash_t hash; Py_ssize_t ix; - PyDictObject *mp = (PyDictObject *)op; PyObject *value; + ASSERT_DICT_LOCKED(mp); + if (!PyUnicode_CheckExact(key) || (hash = unicode_get_hash(key)) == -1) { hash = PyObject_Hash(key); if (hash == -1) @@ -3763,6 +4126,17 @@ PyDict_Contains(PyObject *op, PyObject *key) return (ix != DKIX_EMPTY && value != NULL); } +/* Return 1 if `key` is in dict `op`, 0 if not, and -1 on error. 
*/ +int +PyDict_Contains(PyObject *op, PyObject *key) +{ + int res; + Py_BEGIN_CRITICAL_SECTION(op); + res = contains_lock_held((PyDictObject *)op, key); + Py_END_CRITICAL_SECTION(); + return res; +} + int PyDict_ContainsString(PyObject *op, const char *key) { @@ -4129,17 +4503,15 @@ static PyMethodDef dictiter_methods[] = { }; static PyObject* -dictiter_iternextkey(PyObject *self) +dictiter_iternextkey_lock_held(PyDictObject *d, PyObject *self) { dictiterobject *di = (dictiterobject *)self; PyObject *key; Py_ssize_t i; PyDictKeysObject *k; - PyDictObject *d = di->di_dict; - if (d == NULL) - return NULL; assert (PyDict_Check(d)); + ASSERT_DICT_LOCKED(d); if (di->di_used != d->ma_used) { PyErr_SetString(PyExc_RuntimeError, @@ -4197,6 +4569,23 @@ dictiter_iternextkey(PyObject *self) return NULL; } +static PyObject* +dictiter_iternextkey(PyObject *self) +{ + dictiterobject *di = (dictiterobject *)self; + PyDictObject *d = di->di_dict; + + if (d == NULL) + return NULL; + + PyObject *value; + Py_BEGIN_CRITICAL_SECTION(d); + value = dictiter_iternextkey_lock_held(d, self); + Py_END_CRITICAL_SECTION(); + + return value; +} + PyTypeObject PyDictIterKey_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "dict_keyiterator", /* tp_name */ @@ -4231,16 +4620,14 @@ PyTypeObject PyDictIterKey_Type = { }; static PyObject * -dictiter_iternextvalue(PyObject *self) +dictiter_iternextvalue_lock_held(PyDictObject *d, PyObject *self) { dictiterobject *di = (dictiterobject *)self; PyObject *value; Py_ssize_t i; - PyDictObject *d = di->di_dict; - if (d == NULL) - return NULL; assert (PyDict_Check(d)); + ASSERT_DICT_LOCKED(d); if (di->di_used != d->ma_used) { PyErr_SetString(PyExc_RuntimeError, @@ -4297,6 +4684,23 @@ dictiter_iternextvalue(PyObject *self) return NULL; } +static PyObject * +dictiter_iternextvalue(PyObject *self) +{ + dictiterobject *di = (dictiterobject *)self; + PyDictObject *d = di->di_dict; + + if (d == NULL) + return NULL; + + PyObject *value; + Py_BEGIN_CRITICAL_SECTION(d); + value = dictiter_iternextvalue_lock_held(d, self); + Py_END_CRITICAL_SECTION(); + + return value; +} + PyTypeObject PyDictIterValue_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "dict_valueiterator", /* tp_name */ @@ -4331,15 +4735,12 @@ PyTypeObject PyDictIterValue_Type = { }; static PyObject * -dictiter_iternextitem(PyObject *self) +dictiter_iternextitem_lock_held(PyDictObject *d, PyObject *self) { dictiterobject *di = (dictiterobject *)self; PyObject *key, *value, *result; Py_ssize_t i; - PyDictObject *d = di->di_dict; - if (d == NULL) - return NULL; assert (PyDict_Check(d)); if (di->di_used != d->ma_used) { @@ -4422,6 +4823,22 @@ dictiter_iternextitem(PyObject *self) return NULL; } +static PyObject * +dictiter_iternextitem(PyObject *self) +{ + dictiterobject *di = (dictiterobject *)self; + PyDictObject *d = di->di_dict; + + if (d == NULL) + return NULL; + + PyObject *item; + Py_BEGIN_CRITICAL_SECTION(d); + item = dictiter_iternextitem_lock_held(d, self); + Py_END_CRITICAL_SECTION(); + return item; +} + PyTypeObject PyDictIterItem_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "dict_itemiterator", /* tp_name */ @@ -4459,15 +4876,12 @@ PyTypeObject PyDictIterItem_Type = { /* dictreviter */ static PyObject * -dictreviter_iternext(PyObject *self) +dictreviter_iter_PyDict_Next(PyDictObject *d, PyObject *self) { dictiterobject *di = (dictiterobject *)self; - PyDictObject *d = di->di_dict; - if (d == NULL) { - return NULL; - } assert (PyDict_Check(d)); + ASSERT_DICT_LOCKED(d); if (di->di_used != d->ma_used) { 
PyErr_SetString(PyExc_RuntimeError, @@ -4558,6 +4972,23 @@ dictreviter_iternext(PyObject *self) return NULL; } +static PyObject * +dictreviter_iternext(PyObject *self) +{ + dictiterobject *di = (dictiterobject *)self; + PyDictObject *d = di->di_dict; + + if (d == NULL) + return NULL; + + PyObject *value; + Py_BEGIN_CRITICAL_SECTION(d); + value = dictreviter_iter_PyDict_Next(d, self); + Py_END_CRITICAL_SECTION(); + + return value; +} + PyTypeObject PyDictRevIterKey_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "dict_reversekeyiterator", @@ -4986,14 +5417,12 @@ dictviews_or(PyObject* self, PyObject *other) } static PyObject * -dictitems_xor(PyObject *self, PyObject *other) +dictitems_xor_lock_held(PyObject *d1, PyObject *d2) { - assert(PyDictItems_Check(self)); - assert(PyDictItems_Check(other)); - PyObject *d1 = (PyObject *)((_PyDictViewObject *)self)->dv_dict; - PyObject *d2 = (PyObject *)((_PyDictViewObject *)other)->dv_dict; + ASSERT_DICT_LOCKED(d1); + ASSERT_DICT_LOCKED(d2); - PyObject *temp_dict = PyDict_Copy(d1); + PyObject *temp_dict = copy_lock_held(d1); if (temp_dict == NULL) { return NULL; } @@ -5071,6 +5500,22 @@ dictitems_xor(PyObject *self, PyObject *other) return NULL; } +static PyObject * +dictitems_xor(PyObject *self, PyObject *other) +{ + assert(PyDictItems_Check(self)); + assert(PyDictItems_Check(other)); + PyObject *d1 = (PyObject *)((_PyDictViewObject *)self)->dv_dict; + PyObject *d2 = (PyObject *)((_PyDictViewObject *)other)->dv_dict; + + PyObject *res; + Py_BEGIN_CRITICAL_SECTION2(d1, d2); + res = dictitems_xor_lock_held(d1, d2); + Py_END_CRITICAL_SECTION2(); + + return res; +} + static PyObject* dictviews_xor(PyObject* self, PyObject *other) { diff --git a/Objects/listobject.c b/Objects/listobject.c index 82a4ba952de07d..307b8f1bd76cac 100644 --- a/Objects/listobject.c +++ b/Objects/listobject.c @@ -272,6 +272,15 @@ PyList_GetItemRef(PyObject *op, Py_ssize_t i) return Py_NewRef(PyList_GET_ITEM(op, i)); } +static inline PyObject* +list_get_item_ref(PyListObject *op, Py_ssize_t i) +{ + if (!valid_index(i, Py_SIZE(op))) { + return NULL; + } + return Py_NewRef(PyList_GET_ITEM(op, i)); +} + int PyList_SetItem(PyObject *op, Py_ssize_t i, PyObject *newitem) @@ -478,18 +487,20 @@ list_length(PyObject *a) static int list_contains(PyObject *aa, PyObject *el) { - PyListObject *a = (PyListObject *)aa; - PyObject *item; - Py_ssize_t i; - int cmp; - for (i = 0, cmp = 0 ; cmp == 0 && i < Py_SIZE(a); ++i) { - item = PyList_GET_ITEM(a, i); - Py_INCREF(item); - cmp = PyObject_RichCompareBool(item, el, Py_EQ); + for (Py_ssize_t i = 0; ; i++) { + PyObject *item = list_get_item_ref((PyListObject *)aa, i); + if (item == NULL) { + // out-of-bounds + return 0; + } + int cmp = PyObject_RichCompareBool(item, el, Py_EQ); Py_DECREF(item); + if (cmp != 0) { + return cmp; + } } - return cmp; + return 0; } static PyObject * @@ -2724,8 +2735,6 @@ list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start, Py_ssize_t stop) /*[clinic end generated code: output=ec51b88787e4e481 input=40ec5826303a0eb1]*/ { - Py_ssize_t i; - if (start < 0) { start += Py_SIZE(self); if (start < 0) @@ -2736,9 +2745,12 @@ list_index_impl(PyListObject *self, PyObject *value, Py_ssize_t start, if (stop < 0) stop = 0; } - for (i = start; i < stop && i < Py_SIZE(self); i++) { - PyObject *obj = self->ob_item[i]; - Py_INCREF(obj); + for (Py_ssize_t i = start; i < stop; i++) { + PyObject *obj = list_get_item_ref(self, i); + if (obj == NULL) { + // out-of-bounds + break; + } int cmp = PyObject_RichCompareBool(obj, 
value, Py_EQ);
         Py_DECREF(obj);
         if (cmp > 0)
@@ -2764,15 +2776,17 @@ list_count(PyListObject *self, PyObject *value)
 /*[clinic end generated code: output=b1f5d284205ae714 input=3bdc3a5e6f749565]*/
 {
     Py_ssize_t count = 0;
-    Py_ssize_t i;
-
-    for (i = 0; i < Py_SIZE(self); i++) {
-        PyObject *obj = self->ob_item[i];
+    for (Py_ssize_t i = 0; ; i++) {
+        PyObject *obj = list_get_item_ref(self, i);
+        if (obj == NULL) {
+            // out-of-bounds
+            break;
+        }
         if (obj == value) {
            count++;
+           Py_DECREF(obj);
            continue;
         }
-        Py_INCREF(obj);
         int cmp = PyObject_RichCompareBool(obj, value, Py_EQ);
         Py_DECREF(obj);
         if (cmp > 0)
diff --git a/Objects/object.c b/Objects/object.c
index 7247eb21df6b6e..bbf7f98ae3daf9 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2387,21 +2387,6 @@ _Py_NewReferenceNoTotal(PyObject *op)
     new_reference(op);
 }
 
-void
-_Py_SetImmortal(PyObject *op)
-{
-    if (PyObject_IS_GC(op) && _PyObject_GC_IS_TRACKED(op)) {
-        _PyObject_GC_UNTRACK(op);
-    }
-#ifdef Py_GIL_DISABLED
-    op->ob_tid = _Py_UNOWNED_TID;
-    op->ob_ref_local = _Py_IMMORTAL_REFCNT_LOCAL;
-    op->ob_ref_shared = 0;
-#else
-    op->ob_refcnt = _Py_IMMORTAL_REFCNT;
-#endif
-}
-
 void
 _Py_ResurrectReference(PyObject *op)
 {
diff --git a/Objects/odictobject.c b/Objects/odictobject.c
index b5280c39e1be54..421bc52992d735 100644
--- a/Objects/odictobject.c
+++ b/Objects/odictobject.c
@@ -465,12 +465,13 @@ Potential Optimizations
 */
 
 #include "Python.h"
-#include "pycore_call.h"          // _PyObject_CallNoArgs()
-#include "pycore_ceval.h"         // _PyEval_GetBuiltin()
-#include "pycore_dict.h"          // _Py_dict_lookup()
-#include "pycore_object.h"        // _PyObject_GC_UNTRACK()
-#include "pycore_pyerrors.h"      // _PyErr_ChainExceptions1()
-#include <stddef.h>               // offsetof()
+#include "pycore_call.h"              // _PyObject_CallNoArgs()
+#include "pycore_ceval.h"             // _PyEval_GetBuiltin()
+#include "pycore_critical_section.h"  //_Py_BEGIN_CRITICAL_SECTION
+#include "pycore_dict.h"              // _Py_dict_lookup()
+#include "pycore_object.h"            // _PyObject_GC_UNTRACK()
+#include "pycore_pyerrors.h"          // _PyErr_ChainExceptions1()
+#include <stddef.h>                   // offsetof()
 
 #include "clinic/odictobject.c.h"
 
@@ -1039,6 +1040,8 @@ _odict_popkey_hash(PyObject *od, PyObject *key, PyObject *failobj,
 {
     PyObject *value = NULL;
 
+    Py_BEGIN_CRITICAL_SECTION(od);
+
     _ODictNode *node = _odict_find_node_hash((PyODictObject *)od, key, hash);
     if (node != NULL) {
         /* Pop the node first to avoid a possible dict resize (due to
@@ -1046,7 +1049,7 @@ _odict_popkey_hash(PyObject *od, PyObject *key, PyObject *failobj,
            resolution. */
         int res = _odict_clear_node((PyODictObject *)od, node, key, hash);
         if (res < 0) {
-            return NULL;
+            goto done;
         }
         /* Now delete the value from the dict. */
         if (_PyDict_Pop_KnownHash((PyDictObject *)od, key, hash,
@@ -1063,6 +1066,8 @@ _odict_popkey_hash(PyObject *od, PyObject *key, PyObject *failobj,
             PyErr_SetObject(PyExc_KeyError, key);
         }
     }
+    Py_END_CRITICAL_SECTION();
+done:
 
     return value;
 }
diff --git a/Objects/setobject.c b/Objects/setobject.c
index 93de8e84f2ddf9..3acf2a7a74890b 100644
--- a/Objects/setobject.c
+++ b/Objects/setobject.c
@@ -32,13 +32,14 @@
 */
 
 #include "Python.h"
-#include "pycore_ceval.h"         // _PyEval_GetBuiltin()
-#include "pycore_dict.h"          // _PyDict_Contains_KnownHash()
-#include "pycore_modsupport.h"    // _PyArg_NoKwnames()
-#include "pycore_object.h"        // _PyObject_GC_UNTRACK()
-#include "pycore_pyerrors.h"      // _PyErr_SetKeyError()
-#include "pycore_setobject.h"     // _PySet_NextEntry() definition
-#include <stddef.h>               // offsetof()
+#include "pycore_ceval.h"             // _PyEval_GetBuiltin()
+#include "pycore_critical_section.h"  // Py_BEGIN_CRITICAL_SECTION, Py_END_CRITICAL_SECTION
+#include "pycore_dict.h"              // _PyDict_Contains_KnownHash()
+#include "pycore_modsupport.h"        // _PyArg_NoKwnames()
+#include "pycore_object.h"            // _PyObject_GC_UNTRACK()
+#include "pycore_pyerrors.h"          // _PyErr_SetKeyError()
+#include "pycore_setobject.h"         // _PySet_NextEntry() definition
+#include <stddef.h>                   // offsetof()
 
 /* Object used as dummy key to fill deleted entries */
 static PyObject _dummy_struct;
@@ -903,11 +904,17 @@ set_update_internal(PySetObject *so, PyObject *other)
             if (set_table_resize(so, (so->used + dictsize)*2) != 0)
                 return -1;
         }
+        int err = 0;
+        Py_BEGIN_CRITICAL_SECTION(other);
         while (_PyDict_Next(other, &pos, &key, &value, &hash)) {
-            if (set_add_entry(so, key, hash))
-                return -1;
+            if (set_add_entry(so, key, hash)) {
+                err = -1;
+                goto exit;
+            }
         }
-        return 0;
+exit:
+        Py_END_CRITICAL_SECTION();
+        return err;
     }
 
     it = PyObject_GetIter(other);
@@ -1620,6 +1627,33 @@ set_isub(PySetObject *so, PyObject *other)
     return Py_NewRef(so);
 }
 
+static PyObject *
+set_symmetric_difference_update_dict(PySetObject *so, PyObject *other)
+{
+    PyObject *key;
+    Py_ssize_t pos = 0;
+    Py_hash_t hash;
+    PyObject *value;
+    int rv;
+
+    while (_PyDict_Next(other, &pos, &key, &value, &hash)) {
+        Py_INCREF(key);
+        rv = set_discard_entry(so, key, hash);
+        if (rv < 0) {
+            Py_DECREF(key);
+            return NULL;
+        }
+        if (rv == DISCARD_NOTFOUND) {
+            if (set_add_entry(so, key, hash)) {
+                Py_DECREF(key);
+                return NULL;
+            }
+        }
+        Py_DECREF(key);
+    }
+    Py_RETURN_NONE;
+}
+
 static PyObject *
 set_symmetric_difference_update(PySetObject *so, PyObject *other)
 {
@@ -1634,23 +1668,13 @@ set_symmetric_difference_update(PySetObject *so, PyObject *other)
         return set_clear(so, NULL);
 
     if (PyDict_CheckExact(other)) {
-        PyObject *value;
-        while (_PyDict_Next(other, &pos, &key, &value, &hash)) {
-            Py_INCREF(key);
-            rv = set_discard_entry(so, key, hash);
-            if (rv < 0) {
-                Py_DECREF(key);
-                return NULL;
-            }
-            if (rv == DISCARD_NOTFOUND) {
-                if (set_add_entry(so, key, hash)) {
-                    Py_DECREF(key);
-                    return NULL;
-                }
-            }
-            Py_DECREF(key);
-        }
-        Py_RETURN_NONE;
+        PyObject *res;
+
+        Py_BEGIN_CRITICAL_SECTION(other);
+        res = set_symmetric_difference_update_dict(so, other);
+        Py_END_CRITICAL_SECTION();
+
+        return res;
     }
 
     if (PyAnySet_Check(other)) {
diff --git a/Objects/structseq.c b/Objects/structseq.c
index 661d96a968fb80..581d6ad240885a 100644
--- a/Objects/structseq.c
+++ b/Objects/structseq.c
@@ -603,9 +603,6 @@ _PyStructSequence_InitBuiltinWithFlags(PyInterpreterState *interp,
                                         PyStructSequence_Desc *desc,
                                         unsigned long tp_flags)
 {
-    if (Py_TYPE(type) == NULL) {
-        Py_SET_TYPE(type, &PyType_Type);
-    }
     Py_ssize_t n_unnamed_members;
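
Throughout the dictobject.c and setobject.c hunks above the same refactoring pattern recurs: the real work moves into a *_lock_held() helper that asserts the per-object lock is held, and a thin public wrapper only acquires the lock(s). A minimal sketch of that pattern, with illustrative names only (thing_do_work is not a CPython function), assuming the internal ASSERT_DICT_LOCKED and Py_BEGIN_CRITICAL_SECTION2 macros used by those hunks:

    /* Illustrative only -- mirrors the wrapper pattern in the hunks above. */
    static int
    thing_do_work_lock_held(PyObject *a, PyObject *b)
    {
        ASSERT_DICT_LOCKED(a);   /* body runs with both objects locked */
        ASSERT_DICT_LOCKED(b);
        /* ... actual work on a and b ... */
        return 0;
    }

    static int
    thing_do_work(PyObject *a, PyObject *b)
    {
        int res;
        Py_BEGIN_CRITICAL_SECTION2(a, b);   /* locks both objects */
        res = thing_do_work_lock_held(a, b);
        Py_END_CRITICAL_SECTION2();
        return res;
    }

Keeping the lock acquisition in one shallow wrapper is what lets callers that already hold the lock -- for example dict_setdefault_impl above, whose clinic declaration now carries @critical_section -- call the _lock_held() variant directly.
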
Py_ssize_t n_members = count_members(desc, &n_unnamed_members); PyMemberDef *members = NULL; @@ -621,7 +618,7 @@ _PyStructSequence_InitBuiltinWithFlags(PyInterpreterState *interp, } initialize_static_fields(type, desc, members, tp_flags); - _Py_SetImmortal((PyObject *)type); + _Py_SetImmortal(type); } #ifndef NDEBUG else { diff --git a/Objects/typeobject.c b/Objects/typeobject.c index e220d10ce563c2..c65d0ec2acae52 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -6683,7 +6683,7 @@ type_add_method(PyTypeObject *type, PyMethodDef *meth) int err; PyObject *dict = lookup_tp_dict(type); if (!(meth->ml_flags & METH_COEXIST)) { - err = PyDict_SetDefault(dict, name, descr) == NULL; + err = PyDict_SetDefaultRef(dict, name, descr, NULL) < 0; } else { err = PyDict_SetItem(dict, name, descr) < 0; @@ -6731,7 +6731,7 @@ type_add_members(PyTypeObject *type) if (descr == NULL) return -1; - if (PyDict_SetDefault(dict, PyDescr_NAME(descr), descr) == NULL) { + if (PyDict_SetDefaultRef(dict, PyDescr_NAME(descr), descr, NULL) < 0) { Py_DECREF(descr); return -1; } @@ -6756,7 +6756,7 @@ type_add_getset(PyTypeObject *type) return -1; } - if (PyDict_SetDefault(dict, PyDescr_NAME(descr), descr) == NULL) { + if (PyDict_SetDefaultRef(dict, PyDescr_NAME(descr), descr, NULL) < 0) { Py_DECREF(descr); return -1; } diff --git a/Objects/unicodeobject.c b/Objects/unicodeobject.c index b236ddba9cdc69..0a569a950e88e2 100644 --- a/Objects/unicodeobject.c +++ b/Objects/unicodeobject.c @@ -14894,16 +14894,18 @@ _PyUnicode_InternInPlace(PyInterpreterState *interp, PyObject **p) PyObject *interned = get_interned_dict(interp); assert(interned != NULL); - PyObject *t = PyDict_SetDefault(interned, s, s); - if (t == NULL) { + PyObject *t; + int res = PyDict_SetDefaultRef(interned, s, s, &t); + if (res < 0) { PyErr_Clear(); return; } - - if (t != s) { - Py_SETREF(*p, Py_NewRef(t)); + else if (res == 1) { + // value was already present (not inserted) + Py_SETREF(*p, t); return; } + Py_DECREF(t); if (_Py_IsImmortal(s)) { // XXX Restrict this to the main interpreter? diff --git a/PCbuild/get_externals.bat b/PCbuild/get_externals.bat index 0989bd46a580f7..60ce12b725e233 100644 --- a/PCbuild/get_externals.bat +++ b/PCbuild/get_externals.bat @@ -54,7 +54,7 @@ set libraries= set libraries=%libraries% bzip2-1.0.8 if NOT "%IncludeLibffiSrc%"=="false" set libraries=%libraries% libffi-3.4.4 if NOT "%IncludeSSLSrc%"=="false" set libraries=%libraries% openssl-3.0.13 -set libraries=%libraries% sqlite-3.44.2.0 +set libraries=%libraries% sqlite-3.45.1.0 if NOT "%IncludeTkinterSrc%"=="false" set libraries=%libraries% tcl-core-8.6.13.1 if NOT "%IncludeTkinterSrc%"=="false" set libraries=%libraries% tk-8.6.13.1 set libraries=%libraries% xz-5.2.5 diff --git a/PCbuild/python.props b/PCbuild/python.props index 54553db4057288..e21f1f60464bc8 100644 --- a/PCbuild/python.props +++ b/PCbuild/python.props @@ -68,7 +68,7 @@ - $(ExternalsDir)sqlite-3.44.2.0\ + $(ExternalsDir)sqlite-3.45.1.0\ $(ExternalsDir)bzip2-1.0.8\ $(ExternalsDir)xz-5.2.5\ $(ExternalsDir)libffi-3.4.4\ diff --git a/PCbuild/readme.txt b/PCbuild/readme.txt index b9d76515c383f7..387565515fa0b0 100644 --- a/PCbuild/readme.txt +++ b/PCbuild/readme.txt @@ -189,7 +189,7 @@ _ssl again when building. 
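
The typeobject.c and unicodeobject.c hunks above switch callers from PyDict_SetDefault() to the PyDict_SetDefaultRef() API introduced earlier in this patch. Its return convention, as implemented in the dictobject.c hunks: -1 on error, 0 if the default was inserted, 1 if the key was already present; when the result pointer is non-NULL it always receives a strong reference. A hypothetical caller (cache_get_or_insert is not part of the patch) might look like:

    #include "Python.h"   /* assumes a build in which PyDict_SetDefaultRef() exists */

    static int
    cache_get_or_insert(PyObject *cache, PyObject *key, PyObject *fallback,
                        PyObject **value_out)
    {
        int res = PyDict_SetDefaultRef(cache, key, fallback, value_out);
        if (res < 0) {
            /* error: an exception is set and *value_out is NULL */
            return -1;
        }
        /* res == 0: fallback was inserted; res == 1: key was already present.
           Either way *value_out is a strong reference owned by the caller. */
        return res;
    }

Unlike PyDict_SetDefault(), which returns a borrowed reference, the caller here must eventually Py_DECREF(*value_out); passing NULL for the result (as type_add_method() does above) skips the reference entirely.
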
_sqlite3 - Wraps SQLite 3.44.2, which is itself built by sqlite3.vcxproj + Wraps SQLite 3.45.1, which is itself built by sqlite3.vcxproj Homepage: https://www.sqlite.org/ _tkinter diff --git a/Python/compile.c b/Python/compile.c index 4c1d3bb2d2b475..15e5cf38a37b97 100644 --- a/Python/compile.c +++ b/Python/compile.c @@ -958,14 +958,15 @@ merge_consts_recursive(PyObject *const_cache, PyObject *o) return NULL; } - // t is borrowed reference - PyObject *t = PyDict_SetDefault(const_cache, key, key); - if (t != key) { - // o is registered in const_cache. Just use it. - Py_XINCREF(t); + PyObject *t; + int res = PyDict_SetDefaultRef(const_cache, key, key, &t); + if (res != 0) { + // o was not inserted into const_cache. t is either the existing value + // or NULL (on error). Py_DECREF(key); return t; } + Py_DECREF(t); // We registered o in const_cache. // When o is a tuple or frozenset, we want to merge its @@ -7527,22 +7528,26 @@ _PyCompile_ConstCacheMergeOne(PyObject *const_cache, PyObject **obj) return ERROR; } - // t is borrowed reference - PyObject *t = PyDict_SetDefault(const_cache, key, key); + PyObject *t; + int res = PyDict_SetDefaultRef(const_cache, key, key, &t); Py_DECREF(key); - if (t == NULL) { + if (res < 0) { return ERROR; } - if (t == key) { // obj is new constant. + if (res == 0) { // inserted: obj is new constant. + Py_DECREF(t); return SUCCESS; } if (PyTuple_CheckExact(t)) { - // t is still borrowed reference - t = PyTuple_GET_ITEM(t, 1); + PyObject *item = PyTuple_GET_ITEM(t, 1); + Py_SETREF(*obj, Py_NewRef(item)); + Py_DECREF(t); + } + else { + Py_SETREF(*obj, t); } - Py_SETREF(*obj, Py_NewRef(t)); return SUCCESS; } diff --git a/Python/gc.c b/Python/gc.c index cda12ff7fbc982..46646760291526 100644 --- a/Python/gc.c +++ b/Python/gc.c @@ -45,7 +45,7 @@ typedef struct _gc_runtime_state GCState; // move_legacy_finalizers() removes this flag instead. // Between them, unreachable list is not normal list and we can not use // most gc_list_* functions for it. 
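
The comment above is why the gc.c hunks below replace GC_NEXT() with an explicit mask when walking the unreachable list: NEXT_MASK_UNREACHABLE is kept in a low bit of the _gc_next pointer word, so while the flag may be set the next pointer has to be recovered by masking the flag off before it is dereferenced. A sketch of the two operations those hunks rely on, written as if inside gc.c (the helper names are illustrative, not from CPython, which open-codes the expressions):

    /* Illustrative helpers only. */
    static inline PyGC_Head *
    gc_next_ignoring_flag(PyGC_Head *gc)
    {
        /* strip the tag bit before using the value as a pointer */
        return (PyGC_Head *)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
    }

    static inline void
    gc_clear_unreachable_flag(PyGC_Head *gc)
    {
        /* drop the tag bit, leaving the pointer bits intact */
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
    }

This is the same untagging that visit_reachable(), move_legacy_finalizers() and clear_unreachable_mask() perform inline in the hunks that follow.
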
-#define NEXT_MASK_UNREACHABLE 2 +#define NEXT_MASK_UNREACHABLE (1) #define AS_GC(op) _Py_AS_GC(op) #define FROM_GC(gc) _Py_FROM_GC(gc) @@ -95,48 +95,9 @@ gc_decref(PyGC_Head *g) g->_gc_prev -= 1 << _PyGC_PREV_SHIFT; } -static inline int -gc_old_space(PyGC_Head *g) -{ - return g->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1; -} -static inline int -flip_old_space(int space) -{ - assert(space == 0 || space == 1); - return space ^ _PyGC_NEXT_MASK_OLD_SPACE_1; -} +#define GEN_HEAD(gcstate, n) (&(gcstate)->generations[n].head) -static inline void -gc_flip_old_space(PyGC_Head *g) -{ - g->_gc_next ^= _PyGC_NEXT_MASK_OLD_SPACE_1; -} - -static inline void -gc_set_old_space(PyGC_Head *g, int space) -{ - assert(space == 0 || space == _PyGC_NEXT_MASK_OLD_SPACE_1); - g->_gc_next &= ~_PyGC_NEXT_MASK_OLD_SPACE_1; - g->_gc_next |= space; -} - -static PyGC_Head * -GEN_HEAD(GCState *gcstate, int n) -{ - assert((gcstate->visited_space & (~1)) == 0); - switch(n) { - case 0: - return &gcstate->young.head; - case 1: - return &gcstate->old[gcstate->visited_space].head; - case 2: - return &gcstate->old[gcstate->visited_space^1].head; - default: - Py_UNREACHABLE(); - } -} static GCState * get_gc_state(void) @@ -155,12 +116,11 @@ _PyGC_InitState(GCState *gcstate) GEN.head._gc_prev = (uintptr_t)&GEN.head; \ } while (0) - assert(gcstate->young.count == 0); - assert(gcstate->old[0].count == 0); - assert(gcstate->old[1].count == 0); - INIT_HEAD(gcstate->young); - INIT_HEAD(gcstate->old[0]); - INIT_HEAD(gcstate->old[1]); + for (int i = 0; i < NUM_GENERATIONS; i++) { + assert(gcstate->generations[i].count == 0); + INIT_HEAD(gcstate->generations[i]); + }; + gcstate->generation0 = GEN_HEAD(gcstate, 0); INIT_HEAD(gcstate->permanent_generation); #undef INIT_HEAD @@ -258,7 +218,6 @@ gc_list_is_empty(PyGC_Head *list) static inline void gc_list_append(PyGC_Head *node, PyGC_Head *list) { - assert((list->_gc_prev & ~_PyGC_PREV_MASK) == 0); PyGC_Head *last = (PyGC_Head *)list->_gc_prev; // last <-> node @@ -316,8 +275,6 @@ gc_list_merge(PyGC_Head *from, PyGC_Head *to) PyGC_Head *from_tail = GC_PREV(from); assert(from_head != from); assert(from_tail != from); - assert(gc_list_is_empty(to) || - gc_old_space(to_tail) == gc_old_space(from_tail)); _PyGCHead_SET_NEXT(to_tail, from_head); _PyGCHead_SET_PREV(from_head, to_tail); @@ -386,8 +343,8 @@ enum flagstates {collecting_clear_unreachable_clear, static void validate_list(PyGC_Head *head, enum flagstates flags) { - assert((head->_gc_prev & ~_PyGC_PREV_MASK) == 0); - assert((head->_gc_next & ~_PyGC_PREV_MASK) == 0); + assert((head->_gc_prev & PREV_MASK_COLLECTING) == 0); + assert((head->_gc_next & NEXT_MASK_UNREACHABLE) == 0); uintptr_t prev_value = 0, next_value = 0; switch (flags) { case collecting_clear_unreachable_clear: @@ -409,7 +366,7 @@ validate_list(PyGC_Head *head, enum flagstates flags) PyGC_Head *gc = GC_NEXT(head); while (gc != head) { PyGC_Head *trueprev = GC_PREV(gc); - PyGC_Head *truenext = GC_NEXT(gc); + PyGC_Head *truenext = (PyGC_Head *)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); assert(truenext != NULL); assert(trueprev == prev); assert((gc->_gc_prev & PREV_MASK_COLLECTING) == prev_value); @@ -419,44 +376,8 @@ validate_list(PyGC_Head *head, enum flagstates flags) } assert(prev == GC_PREV(head)); } - -static void -validate_old(GCState *gcstate) -{ - for (int space = 0; space < 2; space++) { - PyGC_Head *head = &gcstate->old[space].head; - PyGC_Head *gc = GC_NEXT(head); - while (gc != head) { - PyGC_Head *next = GC_NEXT(gc); - assert(gc_old_space(gc) == space); - gc = next; - 
} - } -} - -static void -validate_consistent_old_space(PyGC_Head *head) -{ - PyGC_Head *prev = head; - PyGC_Head *gc = GC_NEXT(head); - if (gc == head) { - return; - } - int old_space = gc_old_space(gc); - while (gc != head) { - PyGC_Head *truenext = GC_NEXT(gc); - assert(truenext != NULL); - assert(gc_old_space(gc) == old_space); - prev = gc; - gc = truenext; - } - assert(prev == GC_PREV(head)); -} - #else #define validate_list(x, y) do{}while(0) -#define validate_old(g) do{}while(0) -#define validate_consistent_old_space(l) do{}while(0) #endif /*** end of list stuff ***/ @@ -473,7 +394,15 @@ update_refs(PyGC_Head *containers) while (gc != containers) { next = GC_NEXT(gc); - assert(!_Py_IsImmortal(FROM_GC(gc))); + /* Move any object that might have become immortal to the + * permanent generation as the reference count is not accurately + * reflecting the actual number of live references to this object + */ + if (_Py_IsImmortal(FROM_GC(gc))) { + gc_list_move(gc, &get_gc_state()->permanent_generation.head); + gc = next; + continue; + } gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc))); /* Python's cyclic gc should never see an incoming refcount * of 0: if something decref'ed to 0, it should have been @@ -571,13 +500,12 @@ visit_reachable(PyObject *op, void *arg) // Manually unlink gc from unreachable list because the list functions // don't work right in the presence of NEXT_MASK_UNREACHABLE flags. PyGC_Head *prev = GC_PREV(gc); - PyGC_Head *next = GC_NEXT(gc); + PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); _PyObject_ASSERT(FROM_GC(prev), prev->_gc_next & NEXT_MASK_UNREACHABLE); _PyObject_ASSERT(FROM_GC(next), next->_gc_next & NEXT_MASK_UNREACHABLE); - prev->_gc_next = gc->_gc_next; // copy flag bits - gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; + prev->_gc_next = gc->_gc_next; // copy NEXT_MASK_UNREACHABLE _PyGCHead_SET_PREV(next, prev); gc_list_append(gc, reachable); @@ -629,9 +557,6 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) * or to the right have been scanned yet. */ - validate_consistent_old_space(young); - /* Record which old space we are in, and set NEXT_MASK_UNREACHABLE bit for convenience */ - uintptr_t flags = NEXT_MASK_UNREACHABLE | (gc->_gc_next & _PyGC_NEXT_MASK_OLD_SPACE_1); while (gc != young) { if (gc_get_refs(gc)) { /* gc is definitely reachable from outside the @@ -677,18 +602,17 @@ move_unreachable(PyGC_Head *young, PyGC_Head *unreachable) // But this may pollute the unreachable list head's 'next' pointer // too. That's semantically senseless but expedient here - the // damage is repaired when this function ends. - last->_gc_next = flags | (uintptr_t)gc; + last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc); _PyGCHead_SET_PREV(gc, last); - gc->_gc_next = flags | (uintptr_t)unreachable; + gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable); unreachable->_gc_prev = (uintptr_t)gc; } - gc = _PyGCHead_NEXT(prev); + gc = (PyGC_Head*)prev->_gc_next; } // young->_gc_prev must be last element remained in the list. 
young->_gc_prev = (uintptr_t)prev; - young->_gc_next &= _PyGC_PREV_MASK; // don't let the pollution of the list head's next pointer leak - unreachable->_gc_next &= _PyGC_PREV_MASK; + unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE; } static void @@ -745,8 +669,8 @@ move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers) PyObject *op = FROM_GC(gc); _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE); - next = GC_NEXT(gc); gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; + next = (PyGC_Head*)gc->_gc_next; if (has_legacy_finalizer(op)) { gc_clear_collecting(gc); @@ -765,8 +689,8 @@ clear_unreachable_mask(PyGC_Head *unreachable) assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0); for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) { _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE); - next = GC_NEXT(gc); gc->_gc_next &= ~NEXT_MASK_UNREACHABLE; + next = (PyGC_Head*)gc->_gc_next; } validate_list(unreachable, collecting_set_unreachable_clear); } @@ -1099,6 +1023,25 @@ delete_garbage(PyThreadState *tstate, GCState *gcstate, } +// Show stats for objects in each generations +static void +show_stats_each_generations(GCState *gcstate) +{ + char buf[100]; + size_t pos = 0; + + for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { + pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos, + " %zd", + gc_list_size(GEN_HEAD(gcstate, i))); + } + + PySys_FormatStderr( + "gc: objects in each generation:%s\n" + "gc: objects in permanent generation: %zd\n", + buf, gc_list_size(&gcstate->permanent_generation.head)); +} + /* Deduce which objects among "base" are unreachable from outside the list and move them to 'unreachable'. The process consist in the following steps: @@ -1172,6 +1115,7 @@ deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) { * the reachable objects instead. But this is a one-time cost, probably not * worth complicating the code to speed just a little. */ + gc_list_init(unreachable); move_unreachable(base, unreachable); // gc_prev is pointer again validate_list(base, collecting_clear_unreachable_clear); validate_list(unreachable, collecting_set_unreachable_set); @@ -1210,272 +1154,219 @@ handle_resurrected_objects(PyGC_Head *unreachable, PyGC_Head* still_unreachable, } -#define UNTRACK_TUPLES 1 -#define UNTRACK_DICTS 2 - -static void -gc_collect_region(PyThreadState *tstate, - PyGC_Head *from, - PyGC_Head *to, - int untrack, - struct gc_collection_stats *stats); - -static inline Py_ssize_t -gc_list_set_space(PyGC_Head *list, int space) -{ - Py_ssize_t size = 0; - PyGC_Head *gc; - for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) { - gc_set_old_space(gc, space); - size++; - } - return size; -} - - +/* Invoke progress callbacks to notify clients that garbage collection + * is starting or stopping + */ static void -add_stats(GCState *gcstate, int gen, struct gc_collection_stats *stats) +invoke_gc_callback(PyThreadState *tstate, const char *phase, + int generation, Py_ssize_t collected, + Py_ssize_t uncollectable) { - gcstate->generation_stats[gen].collected += stats->collected; - gcstate->generation_stats[gen].uncollectable += stats->uncollectable; - gcstate->generation_stats[gen].collections += 1; -} - - -/* Multiply by 4 so that the default incremental threshold of 10 - * scans objects at 40% the rate that the young gen tenures them. 
*/ -#define SCAN_RATE_MULTIPLIER 4 - + assert(!_PyErr_Occurred(tstate)); -static void -gc_collect_young(PyThreadState *tstate, - struct gc_collection_stats *stats) -{ + /* we may get called very early */ GCState *gcstate = &tstate->interp->gc; - PyGC_Head *young = &gcstate->young.head; - PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head; -#ifdef Py_STATS - { - Py_ssize_t count = 0; - PyGC_Head *gc; - for (gc = GC_NEXT(young); gc != young; gc = GC_NEXT(gc)) { - count++; - } + if (gcstate->callbacks == NULL) { + return; } -#endif - PyGC_Head survivors; - gc_list_init(&survivors); - gc_collect_region(tstate, young, &survivors, UNTRACK_TUPLES, stats); - Py_ssize_t survivor_count = 0; - if (gcstate->visited_space) { - /* objects in visited space have bit set, so we set it here */ - survivor_count = gc_list_set_space(&survivors, 1); - } - else { - PyGC_Head *gc; - for (gc = GC_NEXT(&survivors); gc != &survivors; gc = GC_NEXT(gc)) { -#ifdef GC_DEBUG - assert(gc_old_space(gc) == 0); -#endif - survivor_count++; + /* The local variable cannot be rebound, check it for sanity */ + assert(PyList_CheckExact(gcstate->callbacks)); + PyObject *info = NULL; + if (PyList_GET_SIZE(gcstate->callbacks) != 0) { + info = Py_BuildValue("{sisnsn}", + "generation", generation, + "collected", collected, + "uncollectable", uncollectable); + if (info == NULL) { + PyErr_FormatUnraisable("Exception ignored on invoking gc callbacks"); + return; } } - gc_list_merge(&survivors, visited); - validate_old(gcstate); - gcstate->young.count = 0; - gcstate->old[gcstate->visited_space].count++; - Py_ssize_t scale_factor = gcstate->old[0].threshold; - if (scale_factor < 1) { - scale_factor = 1; - } - gcstate->work_to_do += survivor_count + survivor_count * SCAN_RATE_MULTIPLIER / scale_factor; - add_stats(gcstate, 0, stats); -} -static inline int -is_in_visited(PyGC_Head *gc, int visited_space) -{ - assert(visited_space == 0 || flip_old_space(visited_space) == 0); - return gc_old_space(gc) == visited_space; -} - -struct container_and_flag { - PyGC_Head *container; - int visited_space; -}; + PyObject *phase_obj = PyUnicode_FromString(phase); + if (phase_obj == NULL) { + Py_XDECREF(info); + PyErr_FormatUnraisable("Exception ignored on invoking gc callbacks"); + return; + } -/* A traversal callback for adding to container) */ -static int -visit_add_to_container(PyObject *op, void *arg) -{ - OBJECT_STAT_INC(object_visits); - struct container_and_flag *cf = (struct container_and_flag *)arg; - int visited = cf->visited_space; - assert(visited == get_gc_state()->visited_space); - if (_PyObject_IS_GC(op)) { - PyGC_Head *gc = AS_GC(op); - if (_PyObject_GC_IS_TRACKED(op) && - gc_old_space(gc) != visited) { - assert(!_Py_IsImmortal(op)); - gc_flip_old_space(gc); - gc_list_move(gc, cf->container); + PyObject *stack[] = {phase_obj, info}; + for (Py_ssize_t i=0; icallbacks); i++) { + PyObject *r, *cb = PyList_GET_ITEM(gcstate->callbacks, i); + Py_INCREF(cb); /* make sure cb doesn't go away */ + r = PyObject_Vectorcall(cb, stack, 2, NULL); + if (r == NULL) { + PyErr_WriteUnraisable(cb); } + else { + Py_DECREF(r); + } + Py_DECREF(cb); } - return 0; + Py_DECREF(phase_obj); + Py_XDECREF(info); + assert(!_PyErr_Occurred(tstate)); } -static uintptr_t -expand_region_transitively_reachable(PyGC_Head *container, PyGC_Head *gc, GCState *gcstate) -{ - validate_list(container, collecting_clear_unreachable_clear); - struct container_and_flag arg = { - .container = container, - .visited_space = gcstate->visited_space, - }; - uintptr_t size = 0; - 
assert(GC_NEXT(gc) == container); - while (gc != container) { - /* Survivors will be moved to visited space, so they should - * have been marked as visited */ - assert(is_in_visited(gc, gcstate->visited_space)); - PyObject *op = FROM_GC(gc); - if (_Py_IsImmortal(op)) { - PyGC_Head *next = GC_NEXT(gc); - gc_list_move(gc, &get_gc_state()->permanent_generation.head); - gc = next; - continue; + +/* Find the oldest generation (highest numbered) where the count + * exceeds the threshold. Objects in the that generation and + * generations younger than it will be collected. */ +static int +gc_select_generation(GCState *gcstate) +{ + for (int i = NUM_GENERATIONS-1; i >= 0; i--) { + if (gcstate->generations[i].count > gcstate->generations[i].threshold) { + /* Avoid quadratic performance degradation in number + of tracked objects (see also issue #4074): + + To limit the cost of garbage collection, there are two strategies; + - make each collection faster, e.g. by scanning fewer objects + - do less collections + This heuristic is about the latter strategy. + + In addition to the various configurable thresholds, we only trigger a + full collection if the ratio + + long_lived_pending / long_lived_total + + is above a given value (hardwired to 25%). + + The reason is that, while "non-full" collections (i.e., collections of + the young and middle generations) will always examine roughly the same + number of objects -- determined by the aforementioned thresholds --, + the cost of a full collection is proportional to the total number of + long-lived objects, which is virtually unbounded. + + Indeed, it has been remarked that doing a full collection every + of object creations entails a dramatic performance + degradation in workloads which consist in creating and storing lots of + long-lived objects (e.g. building a large list of GC-tracked objects would + show quadratic performance, instead of linear as expected: see issue #4074). + + Using the above ratio, instead, yields amortized linear performance in + the total number of objects (the effect of which can be summarized + thusly: "each full garbage collection is more and more costly as the + number of objects grows, but we do fewer and fewer of them"). + + This heuristic was suggested by Martin von Löwis on python-dev in + June 2008. His original analysis and proposal can be found at: + http://mail.python.org/pipermail/python-dev/2008-June/080579.html + */ + if (i == NUM_GENERATIONS - 1 + && gcstate->long_lived_pending < gcstate->long_lived_total / 4) + { + continue; + } + return i; } - traverseproc traverse = Py_TYPE(op)->tp_traverse; - (void) traverse(op, - visit_add_to_container, - &arg); - gc = GC_NEXT(gc); - size++; } - return size; + return -1; } -/* Do bookkeeping for a completed GC cycle */ -static void -completed_cycle(GCState *gcstate) -{ - assert(gc_list_is_empty(&gcstate->old[gcstate->visited_space^1].head)); - assert(gc_list_is_empty(&gcstate->young.head)); - gcstate->visited_space = flip_old_space(gcstate->visited_space); - if (gcstate->work_to_do > 0) { - gcstate->work_to_do = 0; - } -} -static void -gc_collect_increment(PyThreadState *tstate, struct gc_collection_stats *stats) +/* This is the main function. Read this to understand how the + * collection process works. 
*/ +static Py_ssize_t +gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) { + int i; + Py_ssize_t m = 0; /* # objects collected */ + Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */ + PyGC_Head *young; /* the generation we are examining */ + PyGC_Head *old; /* next older generation */ + PyGC_Head unreachable; /* non-problematic unreachable trash */ + PyGC_Head finalizers; /* objects with, & reachable from, __del__ */ + PyGC_Head *gc; + _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */ GCState *gcstate = &tstate->interp->gc; - if (gcstate->work_to_do <= 0) { - /* No work to do */ - return; - } - PyGC_Head *not_visited = &gcstate->old[gcstate->visited_space^1].head; - PyGC_Head *visited = &gcstate->old[gcstate->visited_space].head; - PyGC_Head increment; - gc_list_init(&increment); - if (gc_list_is_empty(not_visited)) { - completed_cycle(gcstate); - return; + + // gc_collect_main() must not be called before _PyGC_Init + // or after _PyGC_Fini() + assert(gcstate->garbage != NULL); + assert(!_PyErr_Occurred(tstate)); + + int expected = 0; + if (!_Py_atomic_compare_exchange_int(&gcstate->collecting, &expected, 1)) { + // Don't start a garbage collection if one is already in progress. + return 0; } - Py_ssize_t region_size = 0; - while (region_size < gcstate->work_to_do) { - if (gc_list_is_empty(not_visited)) { - break; + + if (generation == GENERATION_AUTO) { + // Select the oldest generation that needs collecting. We will collect + // objects from that generation and all generations younger than it. + generation = gc_select_generation(gcstate); + if (generation < 0) { + // No generation needs to be collected. + _Py_atomic_store_int(&gcstate->collecting, 0); + return 0; } - PyGC_Head *gc = _PyGCHead_NEXT(not_visited); - gc_list_move(gc, &increment); - gc_set_old_space(gc, gcstate->visited_space); - region_size += expand_region_transitively_reachable(&increment, gc, gcstate); - } - assert(region_size == gc_list_size(&increment)); - PyGC_Head survivors; - gc_list_init(&survivors); - gc_collect_region(tstate, &increment, &survivors, UNTRACK_TUPLES, stats); - gc_list_merge(&survivors, visited); - assert(gc_list_is_empty(&increment)); - gcstate->work_to_do -= region_size; - validate_old(gcstate); - add_stats(gcstate, 1, stats); - if (gc_list_is_empty(not_visited)) { - completed_cycle(gcstate); } -} + assert(generation >= 0 && generation < NUM_GENERATIONS); -static void -gc_collect_full(PyThreadState *tstate, - struct gc_collection_stats *stats) -{ - GCState *gcstate = &tstate->interp->gc; - validate_old(gcstate); - PyGC_Head *young = &gcstate->young.head; - PyGC_Head *old0 = &gcstate->old[0].head; - PyGC_Head *old1 = &gcstate->old[1].head; - /* merge all generations into old0 */ - gc_list_merge(young, old0); - gcstate->young.count = 0; - PyGC_Head *gc = GC_NEXT(old1); - while (gc != old1) { - PyGC_Head *next = GC_NEXT(gc); - gc_set_old_space(gc, 0); - gc = next; +#ifdef Py_STATS + if (_Py_stats) { + _Py_stats->object_stats.object_visits = 0; } - gc_list_merge(old1, old0); - - gc_collect_region(tstate, old0, old0, - UNTRACK_TUPLES | UNTRACK_DICTS, - stats); - gcstate->visited_space = 1; - gcstate->young.count = 0; - gcstate->old[0].count = 0; - gcstate->old[1].count = 0; +#endif + GC_STAT_ADD(generation, collections, 1); - gcstate->work_to_do = - gcstate->young.threshold * 2; + if (reason != _Py_GC_REASON_SHUTDOWN) { + invoke_gc_callback(tstate, "start", generation, 0, 0); + } - _PyGC_ClearAllFreeLists(tstate->interp); - 
validate_old(gcstate); - add_stats(gcstate, 2, stats); -} + if (gcstate->debug & _PyGC_DEBUG_STATS) { + PySys_WriteStderr("gc: collecting generation %d...\n", generation); + show_stats_each_generations(gcstate); + t1 = _PyTime_GetPerfCounter(); + } -/* This is the main function. Read this to understand how the - * collection process works. */ -static void -gc_collect_region(PyThreadState *tstate, - PyGC_Head *from, - PyGC_Head *to, - int untrack, - struct gc_collection_stats *stats) -{ - PyGC_Head unreachable; /* non-problematic unreachable trash */ - PyGC_Head finalizers; /* objects with, & reachable from, __del__ */ - PyGC_Head *gc; /* initialize to prevent a compiler warning */ - GCState *gcstate = &tstate->interp->gc; + if (PyDTrace_GC_START_ENABLED()) { + PyDTrace_GC_START(generation); + } - assert(gcstate->garbage != NULL); - assert(!_PyErr_Occurred(tstate)); + /* update collection and allocation counters */ + if (generation+1 < NUM_GENERATIONS) { + gcstate->generations[generation+1].count += 1; + } + for (i = 0; i <= generation; i++) { + gcstate->generations[i].count = 0; + } - gc_list_init(&unreachable); - deduce_unreachable(from, &unreachable); - validate_consistent_old_space(from); - if (untrack & UNTRACK_TUPLES) { - untrack_tuples(from); + /* merge younger generations with one we are currently collecting */ + for (i = 0; i < generation; i++) { + gc_list_merge(GEN_HEAD(gcstate, i), GEN_HEAD(gcstate, generation)); } - if (untrack & UNTRACK_DICTS) { - untrack_dicts(from); + + /* handy references */ + young = GEN_HEAD(gcstate, generation); + if (generation < NUM_GENERATIONS-1) { + old = GEN_HEAD(gcstate, generation+1); } - validate_consistent_old_space(to); - if (from != to) { - gc_list_merge(from, to); + else { + old = young; } - validate_consistent_old_space(to); + validate_list(old, collecting_clear_unreachable_clear); + + deduce_unreachable(young, &unreachable); + + untrack_tuples(young); /* Move reachable objects to next generation. */ + if (young != old) { + if (generation == NUM_GENERATIONS - 2) { + gcstate->long_lived_pending += gc_list_size(young); + } + gc_list_merge(young, old); + } + else { + /* We only un-track dicts in full collections, to avoid quadratic + dict build-up. See issue #14775. */ + untrack_dicts(young); + gcstate->long_lived_pending = 0; + gcstate->long_lived_total = gc_list_size(young); + } /* All objects in unreachable are trash, but objects reachable from * legacy finalizers (e.g. tp_del) can't safely be deleted. @@ -1489,8 +1380,10 @@ gc_collect_region(PyThreadState *tstate, * and we move those into the finalizers list too. */ move_legacy_finalizer_reachable(&finalizers); + validate_list(&finalizers, collecting_clear_unreachable_clear); validate_list(&unreachable, collecting_set_unreachable_clear); + /* Print debugging information. */ if (gcstate->debug & _PyGC_DEBUG_COLLECTABLE) { for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) { @@ -1499,99 +1392,89 @@ gc_collect_region(PyThreadState *tstate, } /* Clear weakrefs and invoke callbacks as necessary. */ - stats->collected += handle_weakrefs(&unreachable, to); - validate_list(to, collecting_clear_unreachable_clear); + m += handle_weakrefs(&unreachable, old); + + validate_list(old, collecting_clear_unreachable_clear); validate_list(&unreachable, collecting_set_unreachable_clear); /* Call tp_finalize on objects which have one. 
*/ finalize_garbage(tstate, &unreachable); + /* Handle any objects that may have resurrected after the call * to 'finalize_garbage' and continue the collection with the * objects that are still unreachable */ PyGC_Head final_unreachable; - gc_list_init(&final_unreachable); - handle_resurrected_objects(&unreachable, &final_unreachable, to); + handle_resurrected_objects(&unreachable, &final_unreachable, old); /* Call tp_clear on objects in the final_unreachable set. This will cause * the reference cycles to be broken. It may also cause some objects * in finalizers to be freed. */ - stats->collected += gc_list_size(&final_unreachable); - delete_garbage(tstate, gcstate, &final_unreachable, to); + m += gc_list_size(&final_unreachable); + delete_garbage(tstate, gcstate, &final_unreachable, old); /* Collect statistics on uncollectable objects found and print * debugging information. */ - Py_ssize_t n = 0; for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) { n++; if (gcstate->debug & _PyGC_DEBUG_UNCOLLECTABLE) debug_cycle("uncollectable", FROM_GC(gc)); } - stats->uncollectable = n; + if (gcstate->debug & _PyGC_DEBUG_STATS) { + double d = _PyTime_AsSecondsDouble(_PyTime_GetPerfCounter() - t1); + PySys_WriteStderr( + "gc: done, %zd unreachable, %zd uncollectable, %.4fs elapsed\n", + n+m, n, d); + } + /* Append instances in the uncollectable set to a Python * reachable list of garbage. The programmer has to deal with * this if they insist on creating this type of structure. */ - handle_legacy_finalizers(tstate, gcstate, &finalizers, to); - validate_list(to, collecting_clear_unreachable_clear); -} + handle_legacy_finalizers(tstate, gcstate, &finalizers, old); + validate_list(old, collecting_clear_unreachable_clear); -/* Invoke progress callbacks to notify clients that garbage collection - * is starting or stopping - */ -static void -do_gc_callback(GCState *gcstate, const char *phase, - int generation, struct gc_collection_stats *stats) -{ - assert(!PyErr_Occurred()); + /* Clear free list only during the collection of the highest + * generation */ + if (generation == NUM_GENERATIONS-1) { + _PyGC_ClearAllFreeLists(tstate->interp); + } - /* The local variable cannot be rebound, check it for sanity */ - assert(PyList_CheckExact(gcstate->callbacks)); - PyObject *info = NULL; - if (PyList_GET_SIZE(gcstate->callbacks) != 0) { - info = Py_BuildValue("{sisnsn}", - "generation", generation, - "collected", stats->collected, - "uncollectable", stats->uncollectable); - if (info == NULL) { - PyErr_FormatUnraisable("Exception ignored on invoking gc callbacks"); - return; + if (_PyErr_Occurred(tstate)) { + if (reason == _Py_GC_REASON_SHUTDOWN) { + _PyErr_Clear(tstate); + } + else { + PyErr_FormatUnraisable("Exception ignored in garbage collection"); } } - PyObject *phase_obj = PyUnicode_FromString(phase); - if (phase_obj == NULL) { - Py_XDECREF(info); - PyErr_FormatUnraisable("Exception ignored on invoking gc callbacks"); - return; + /* Update stats */ + struct gc_generation_stats *stats = &gcstate->generation_stats[generation]; + stats->collections++; + stats->collected += m; + stats->uncollectable += n; + + GC_STAT_ADD(generation, objects_collected, m); +#ifdef Py_STATS + if (_Py_stats) { + GC_STAT_ADD(generation, object_visits, + _Py_stats->object_stats.object_visits); + _Py_stats->object_stats.object_visits = 0; } +#endif - PyObject *stack[] = {phase_obj, info}; - for (Py_ssize_t i=0; icallbacks); i++) { - PyObject *r, *cb = PyList_GET_ITEM(gcstate->callbacks, i); - Py_INCREF(cb); /* make sure 
cb doesn't go away */ - r = PyObject_Vectorcall(cb, stack, 2, NULL); - if (r == NULL) { - PyErr_WriteUnraisable(cb); - } - else { - Py_DECREF(r); - } - Py_DECREF(cb); + if (PyDTrace_GC_DONE_ENABLED()) { + PyDTrace_GC_DONE(n + m); } - Py_DECREF(phase_obj); - Py_XDECREF(info); - assert(!PyErr_Occurred()); -} -static void -invoke_gc_callback(GCState *gcstate, const char *phase, - int generation, struct gc_collection_stats *stats) -{ - if (gcstate->callbacks == NULL) { - return; + if (reason != _Py_GC_REASON_SHUTDOWN) { + invoke_gc_callback(tstate, "stop", generation, m, n); } - do_gc_callback(gcstate, phase, generation, stats); + + assert(!_PyErr_Occurred(tstate)); + _Py_atomic_store_int(&gcstate->collecting, 0); + return n + m; } static int @@ -1666,7 +1549,7 @@ _PyGC_GetObjects(PyInterpreterState *interp, Py_ssize_t generation) } } else { - if (append_objects(result, GEN_HEAD(gcstate, (int)generation))) { + if (append_objects(result, GEN_HEAD(gcstate, generation))) { goto error; } } @@ -1681,16 +1564,10 @@ void _PyGC_Freeze(PyInterpreterState *interp) { GCState *gcstate = &interp->gc; - gc_list_merge(&gcstate->young.head, &gcstate->permanent_generation.head); - gcstate->young.count = 0; - PyGC_Head*old0 = &gcstate->old[0].head; - PyGC_Head*old1 = &gcstate->old[1].head; - gc_list_merge(old0, &gcstate->permanent_generation.head); - gcstate->old[0].count = 0; - gc_list_set_space(old1, 0); - gc_list_merge(old1, &gcstate->permanent_generation.head); - gcstate->old[1].count = 0; - validate_old(gcstate); + for (int i = 0; i < NUM_GENERATIONS; ++i) { + gc_list_merge(GEN_HEAD(gcstate, i), &gcstate->permanent_generation.head); + gcstate->generations[i].count = 0; + } } void @@ -1698,8 +1575,7 @@ _PyGC_Unfreeze(PyInterpreterState *interp) { GCState *gcstate = &interp->gc; gc_list_merge(&gcstate->permanent_generation.head, - &gcstate->old[0].head); - validate_old(gcstate); + GEN_HEAD(gcstate, NUM_GENERATIONS-1)); } Py_ssize_t @@ -1735,100 +1611,32 @@ PyGC_IsEnabled(void) return gcstate->enabled; } -// Show stats for objects in each generations -static void -show_stats_each_generations(GCState *gcstate) -{ - char buf[100]; - size_t pos = 0; - - for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) { - pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos, - " %zd", - gc_list_size(GEN_HEAD(gcstate, i))); - } - - PySys_FormatStderr( - "gc: objects in each generation:%s\n" - "gc: objects in permanent generation: %zd\n", - buf, gc_list_size(&gcstate->permanent_generation.head)); -} - +/* Public API to invoke gc.collect() from C */ Py_ssize_t -_PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason) +PyGC_Collect(void) { + PyThreadState *tstate = _PyThreadState_GET(); GCState *gcstate = &tstate->interp->gc; - int expected = 0; - if (!_Py_atomic_compare_exchange_int(&gcstate->collecting, &expected, 1)) { - // Don't start a garbage collection if one is already in progress. 
+ if (!gcstate->enabled) { return 0; } - struct gc_collection_stats stats = { 0 }; - if (reason != _Py_GC_REASON_SHUTDOWN) { - invoke_gc_callback(gcstate, "start", generation, &stats); - } - _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */ - if (gcstate->debug & _PyGC_DEBUG_STATS) { - PySys_WriteStderr("gc: collecting generation %d...\n", generation); - show_stats_each_generations(gcstate); - t1 = _PyTime_GetPerfCounter(); - } - if (PyDTrace_GC_START_ENABLED()) { - PyDTrace_GC_START(generation); - } - GC_STAT_ADD(generation, collections, 1); + Py_ssize_t n; PyObject *exc = _PyErr_GetRaisedException(tstate); - switch(generation) { - case 0: - gc_collect_young(tstate, &stats); - break; - case 1: - gc_collect_young(tstate, &stats); - gc_collect_increment(tstate, &stats); - break; - case 2: - gc_collect_full(tstate, &stats); - break; - default: - Py_UNREACHABLE(); - } - if (PyDTrace_GC_DONE_ENABLED()) { - PyDTrace_GC_DONE(stats.uncollectable + stats.collected); - } - if (reason != _Py_GC_REASON_SHUTDOWN) { - invoke_gc_callback(gcstate, "stop", generation, &stats); - } + n = gc_collect_main(tstate, NUM_GENERATIONS - 1, _Py_GC_REASON_MANUAL); _PyErr_SetRaisedException(tstate, exc); - GC_STAT_ADD(generation, objects_collected, stats.collected); -#ifdef Py_STATS - if (_Py_stats) { - GC_STAT_ADD(generation, object_visits, - _Py_stats->object_stats.object_visits); - _Py_stats->object_stats.object_visits = 0; - } -#endif - validate_old(gcstate); - if (gcstate->debug & _PyGC_DEBUG_STATS) { - double d = _PyTime_AsSecondsDouble(_PyTime_GetPerfCounter() - t1); - PySys_WriteStderr( - "gc: done, %zd collected, %zd uncollectable, %.4fs elapsed\n", - stats.collected, stats.uncollectable, d); - } - _Py_atomic_store_int(&gcstate->collecting, 0); - return stats.uncollectable + stats.collected; + return n; } -/* Public API to invoke gc.collect() from C */ Py_ssize_t -PyGC_Collect(void) +_PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason) { - return _PyGC_Collect(_PyThreadState_GET(), 2, _Py_GC_REASON_MANUAL); + return gc_collect_main(tstate, generation, reason); } -void +Py_ssize_t _PyGC_CollectNoFail(PyThreadState *tstate) { /* Ideally, this function is only called on interpreter shutdown, @@ -1837,7 +1645,7 @@ _PyGC_CollectNoFail(PyThreadState *tstate) during interpreter shutdown (and then never finish it). See http://bugs.python.org/issue8713#msg195178 for an example. 
*/ - _PyGC_Collect(_PyThreadState_GET(), 2, _Py_GC_REASON_SHUTDOWN); + return gc_collect_main(tstate, NUM_GENERATIONS - 1, _Py_GC_REASON_SHUTDOWN); } void @@ -1972,10 +1780,10 @@ _PyObject_GC_Link(PyObject *op) GCState *gcstate = &tstate->interp->gc; g->_gc_next = 0; g->_gc_prev = 0; - gcstate->young.count++; /* number of allocated GC objects */ - if (gcstate->young.count > gcstate->young.threshold && + gcstate->generations[0].count++; /* number of allocated GC objects */ + if (gcstate->generations[0].count > gcstate->generations[0].threshold && gcstate->enabled && - gcstate->young.threshold && + gcstate->generations[0].threshold && !_Py_atomic_load_int_relaxed(&gcstate->collecting) && !_PyErr_Occurred(tstate)) { @@ -1986,9 +1794,7 @@ _PyObject_GC_Link(PyObject *op) void _Py_RunGC(PyThreadState *tstate) { - if (tstate->interp->gc.enabled) { - _PyGC_Collect(tstate, 1, _Py_GC_REASON_HEAP); - } + gc_collect_main(tstate, GENERATION_AUTO, _Py_GC_REASON_HEAP); } static PyObject * @@ -2091,8 +1897,8 @@ PyObject_GC_Del(void *op) #endif } GCState *gcstate = get_gc_state(); - if (gcstate->young.count > 0) { - gcstate->young.count--; + if (gcstate->generations[0].count > 0) { + gcstate->generations[0].count--; } PyObject_Free(((char *)op)-presize); } @@ -2115,36 +1921,26 @@ PyObject_GC_IsFinalized(PyObject *obj) return 0; } -static int -visit_generation(gcvisitobjects_t callback, void *arg, struct gc_generation *gen) -{ - PyGC_Head *gc_list, *gc; - gc_list = &gen->head; - for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) { - PyObject *op = FROM_GC(gc); - Py_INCREF(op); - int res = callback(op, arg); - Py_DECREF(op); - if (!res) { - return -1; - } - } - return 0; -} - void PyUnstable_GC_VisitObjects(gcvisitobjects_t callback, void *arg) { + size_t i; GCState *gcstate = get_gc_state(); int origenstate = gcstate->enabled; gcstate->enabled = 0; - if (visit_generation(callback, arg, &gcstate->young)) { - goto done; - } - if (visit_generation(callback, arg, &gcstate->old[0])) { - goto done; + for (i = 0; i < NUM_GENERATIONS; i++) { + PyGC_Head *gc_list, *gc; + gc_list = GEN_HEAD(gcstate, i); + for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) { + PyObject *op = FROM_GC(gc); + Py_INCREF(op); + int res = callback(op, arg); + Py_DECREF(op); + if (!res) { + goto done; + } + } } - visit_generation(callback, arg, &gcstate->old[1]); done: gcstate->enabled = origenstate; } diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index 1c4da726866e4e..8fbcdb15109b76 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -616,7 +616,7 @@ void _PyGC_InitState(GCState *gcstate) { // TODO: move to pycore_runtime_init.h once the incremental GC lands. - gcstate->young.threshold = 2000; + gcstate->generations[0].threshold = 2000; } @@ -911,8 +911,8 @@ cleanup_worklist(struct worklist *worklist) static bool gc_should_collect(GCState *gcstate) { - int count = _Py_atomic_load_int_relaxed(&gcstate->young.count); - int threshold = gcstate->young.threshold; + int count = _Py_atomic_load_int_relaxed(&gcstate->generations[0].count); + int threshold = gcstate->generations[0].threshold; if (count <= threshold || threshold == 0 || !gcstate->enabled) { return false; } @@ -920,7 +920,7 @@ gc_should_collect(GCState *gcstate) // objects. A few tests rely on immediate scheduling of the GC so we ignore // the scaled threshold if generations[1].threshold is set to zero. 
return (count > gcstate->long_lived_total / 4 || - gcstate->old[0].threshold == 0); + gcstate->generations[1].threshold == 0); } static void @@ -1031,15 +1031,10 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason) /* update collection and allocation counters */ if (generation+1 < NUM_GENERATIONS) { - gcstate->old[generation].count += 1; + gcstate->generations[generation+1].count += 1; } for (i = 0; i <= generation; i++) { - if (i == 0) { - gcstate->young.count = 0; - } - else { - gcstate->old[i-1].count = 0; - } + gcstate->generations[i].count = 0; } PyInterpreterState *interp = tstate->interp; @@ -1362,7 +1357,7 @@ _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason) return gc_collect_main(tstate, generation, reason); } -void +Py_ssize_t _PyGC_CollectNoFail(PyThreadState *tstate) { /* Ideally, this function is only called on interpreter shutdown, @@ -1371,7 +1366,7 @@ _PyGC_CollectNoFail(PyThreadState *tstate) during interpreter shutdown (and then never finish it). See http://bugs.python.org/issue8713#msg195178 for an example. */ - gc_collect_main(tstate, NUM_GENERATIONS - 1, _Py_GC_REASON_SHUTDOWN); + return gc_collect_main(tstate, NUM_GENERATIONS - 1, _Py_GC_REASON_SHUTDOWN); } void @@ -1495,7 +1490,7 @@ _PyObject_GC_Link(PyObject *op) { PyThreadState *tstate = _PyThreadState_GET(); GCState *gcstate = &tstate->interp->gc; - gcstate->young.count++; + gcstate->generations[0].count++; if (gc_should_collect(gcstate) && !_Py_atomic_load_int_relaxed(&gcstate->collecting)) @@ -1610,8 +1605,8 @@ PyObject_GC_Del(void *op) #endif } GCState *gcstate = get_gc_state(); - if (gcstate->young.count > 0) { - gcstate->young.count--; + if (gcstate->generations[0].count > 0) { + gcstate->generations[0].count--; } PyObject_Free(((char *)op)-presize); } diff --git a/Python/import.c b/Python/import.c index dfc5ec1f2f2927..2fd0c08a6bb5ae 100644 --- a/Python/import.c +++ b/Python/import.c @@ -1030,7 +1030,7 @@ _extensions_cache_set(PyObject *filename, PyObject *name, PyModuleDef *def) if (!already_set) { /* We assume that all module defs are statically allocated and will never be freed. Otherwise, we would incref here. 
*/ - _Py_SetImmortal((PyObject *)def); + _Py_SetImmortal(def); } res = 0; diff --git a/Python/pystate.c b/Python/pystate.c index 7836c172bbfb61..e77e5bfa7e2df8 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -517,6 +517,8 @@ _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime) return _PyStatus_NO_MEMORY(); } + _PyThread_AfterFork(&runtime->threads); + return _PyStatus_OK(); } #endif diff --git a/Python/symtable.c b/Python/symtable.c index 743029956e32fa..d69516351efba2 100644 --- a/Python/symtable.c +++ b/Python/symtable.c @@ -758,6 +758,8 @@ inline_comprehension(PySTEntryObject *ste, PySTEntryObject *comp, { PyObject *k, *v; Py_ssize_t pos = 0; + int remove_dunder_class = 0; + while (PyDict_Next(comp->ste_symbols, &pos, &k, &v)) { // skip comprehension parameter long comp_flags = PyLong_AS_LONG(v); @@ -779,6 +781,19 @@ inline_comprehension(PySTEntryObject *ste, PySTEntryObject *comp, if (!existing) { // name does not exist in scope, copy from comprehension assert(scope != FREE || PySet_Contains(comp_free, k) == 1); + if (scope == FREE && ste->ste_type == ClassBlock && + _PyUnicode_EqualToASCIIString(k, "__class__")) { + // if __class__ is unbound in the enclosing class scope and free + // in the comprehension scope, it needs special handling; just + // letting it be marked as free in class scope will break due to + // drop_class_free + scope = GLOBAL_IMPLICIT; + only_flags &= ~DEF_FREE; + if (PySet_Discard(comp_free, k) < 0) { + return 0; + } + remove_dunder_class = 1; + } PyObject *v_flags = PyLong_FromLong(only_flags); if (v_flags == NULL) { return 0; @@ -803,6 +818,10 @@ inline_comprehension(PySTEntryObject *ste, PySTEntryObject *comp, } } } + comp->ste_free = PySet_Size(comp_free) > 0; + if (remove_dunder_class && PyDict_DelItemString(comp->ste_symbols, "__class__") < 0) { + return 0; + } return 1; } diff --git a/Python/thread_nt.h b/Python/thread_nt.h index 044e9fa111e979..ad467e0e7840e7 100644 --- a/Python/thread_nt.h +++ b/Python/thread_nt.h @@ -242,10 +242,6 @@ PyThread_detach_thread(PyThread_handle_t handle) { return (CloseHandle(hThread) == 0); } -void -PyThread_update_thread_after_fork(PyThread_ident_t* ident, PyThread_handle_t* handle) { -} - /* * Return the thread Id instead of a handle. The Id is said to uniquely identify the * thread in the system diff --git a/Python/thread_pthread.h b/Python/thread_pthread.h index fb3b79fc160502..556e3de0b071f8 100644 --- a/Python/thread_pthread.h +++ b/Python/thread_pthread.h @@ -339,16 +339,6 @@ PyThread_detach_thread(PyThread_handle_t th) { return pthread_detach((pthread_t) th); } -void -PyThread_update_thread_after_fork(PyThread_ident_t* ident, PyThread_handle_t* handle) { - // The thread id might have been updated in the forked child - pthread_t th = pthread_self(); - *ident = (PyThread_ident_t) th; - *handle = (PyThread_handle_t) th; - assert(th == (pthread_t) *ident); - assert(th == (pthread_t) *handle); -} - /* XXX This implementation is considered (to quote Tim Peters) "inherently hosed" because: - It does not guarantee the promise that a non-zero integer is returned. 
diff --git a/Tools/build/generate_sbom.py b/Tools/build/generate_sbom.py index aceb13f141cba4..442487f2d2546b 100644 --- a/Tools/build/generate_sbom.py +++ b/Tools/build/generate_sbom.py @@ -338,7 +338,7 @@ def discover_pip_sbom_package(sbom_data: dict[str, typing.Any]) -> None: "name": "pip", "versionInfo": pip_version, "originator": "Organization: Python Packaging Authority", - "licenseConcluded": "MIT", + "licenseConcluded": "NOASSERTION", "downloadLocation": pip_download_url, "checksums": [ {"algorithm": "SHA256", "checksumValue": pip_checksum_sha256} @@ -383,9 +383,11 @@ def main() -> None: discover_pip_sbom_package(sbom_data) # Ensure all packages in this tool are represented also in the SBOM file. + actual_names = {package["name"] for package in sbom_data["packages"]} + expected_names = set(PACKAGE_TO_FILES) error_if( - {package["name"] for package in sbom_data["packages"]} != set(PACKAGE_TO_FILES), - "Packages defined in SBOM tool don't match those defined in SBOM file.", + actual_names != expected_names, + f"Packages defined in SBOM tool don't match those defined in SBOM file: {actual_names}, {expected_names}", ) # Make a bunch of assertions about the SBOM data to ensure it's consistent. @@ -422,8 +424,8 @@ def main() -> None: # License must be on the approved list for SPDX. license_concluded = package["licenseConcluded"] error_if( - license_concluded not in ALLOWED_LICENSE_EXPRESSIONS, - f"License identifier '{license_concluded}' not in SBOM tool allowlist" + license_concluded != "NOASSERTION", + f"License identifier must be 'NOASSERTION'" ) # We call 'sorted()' here a lot to avoid filesystem scan order issues. diff --git a/Tools/gdb/libpython.py b/Tools/gdb/libpython.py index 96b891481d9f46..483f28b46dfec7 100755 --- a/Tools/gdb/libpython.py +++ b/Tools/gdb/libpython.py @@ -1753,11 +1753,8 @@ def is_waiting_for_gil(self): return (name == 'take_gil') def is_gc_collect(self): - '''Is this frame a collector within the garbage-collector?''' - return self._gdbframe.name() in ( - 'collect', 'gc_collect_full', 'gc_collect_main', - 'gc_collect_young', 'gc_collect_increment' - ) + '''Is this frame gc_collect_main() within the garbage-collector?''' + return self._gdbframe.name() in ('collect', 'gc_collect_main') def get_pyop(self): try: diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 6015251d678e7b..8534f7fe5d53a4 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -311,10 +311,10 @@ def get_opcode_stats(self, prefix: str) -> OpcodeStats: self._data["_defines"], self._data["_specialized_instructions"], ) - + def get_uop_sequence_stats(self, length) -> dict[str, int]: return {k: v for k, v in self._data.items() if k.startswith("UOp sequence count") and k.count(',') == length - 1} - + def get_max_uop_sequence_length(self) -> int: uop_sequence_lengths = [k.count(',')+1 for k in self._data.keys() if k.startswith("UOp sequence count")] return max(uop_sequence_lengths) if uop_sequence_lengths else 0 @@ -694,7 +694,7 @@ def calc_uop_sequence(base_stats: Stats ,sequence_length: int) -> Rows: ) ) return rows - + for l in range(2, longest+1): yield Section( "Pair counts", diff --git a/Tools/ssl/multissltests.py b/Tools/ssl/multissltests.py index 120e3883adc795..baa16102068aa0 100755 --- a/Tools/ssl/multissltests.py +++ b/Tools/ssl/multissltests.py @@ -47,8 +47,9 @@ OPENSSL_RECENT_VERSIONS = [ "1.1.1w", - "3.0.11", - "3.1.3", + "3.0.13", + "3.1.5", + "3.2.1", ] LIBRESSL_OLD_VERSIONS = [ From 
3a45127a580a5b659bb29e10cebef85bff371f8b Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 11:39:18 -0600 Subject: [PATCH 13/31] Catch up with main again --- Doc/whatsnew/3.13.rst | 11 + Lib/pickletools.py | 2 +- Lib/test/test_call.py | 32 +- Lib/test/test_capi/test_getargs.py | 26 +- Lib/test/test_exceptions.py | 2 +- ...-01-31-09-10-10.gh-issue-107944.XWm1B-.rst | 1 + ...-02-07-00-18-42.gh-issue-112069.jRDRR5.rst | 1 + Modules/_xxtestfuzz/fuzzer.c | 11 +- Modules/getpath.c | 4 + Objects/clinic/setobject.c.h | 414 +++++++++++++++++ Objects/setobject.c | 416 +++++++++++------- Python/getargs.c | 70 ++- 12 files changed, 801 insertions(+), 189 deletions(-) create mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-01-31-09-10-10.gh-issue-107944.XWm1B-.rst create mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-02-07-00-18-42.gh-issue-112069.jRDRR5.rst create mode 100644 Objects/clinic/setobject.c.h diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst index 2ac5afa8ce601c..50a2a69c75ac70 100644 --- a/Doc/whatsnew/3.13.rst +++ b/Doc/whatsnew/3.13.rst @@ -101,6 +101,17 @@ Improved Error Messages variables. See also :ref:`using-on-controlling-color`. (Contributed by Pablo Galindo Salgado in :gh:`112730`.) +* When an incorrect keyword argument is passed to a function, the error message + now potentially suggests the correct keyword argument. + (Contributed by Pablo Galindo Salgado and Shantanu Jain in :gh:`107944`.) + + >>> "better error messages!".split(max_split=1) + Traceback (most recent call last): + File "", line 1, in + "better error messages!".split(max_split=1) + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^ + TypeError: split() got an unexpected keyword argument 'max_split'. Did you mean 'maxsplit'? + Other Language Changes ====================== diff --git a/Lib/pickletools.py b/Lib/pickletools.py index 95a77aeb2afe2a..51ee4a7a2632ac 100644 --- a/Lib/pickletools.py +++ b/Lib/pickletools.py @@ -1253,7 +1253,7 @@ def __init__(self, name, code, arg, stack_before=[], stack_after=[pyint], proto=2, - doc="""Long integer using found-byte length. + doc="""Long integer using four-byte length. 
A more efficient encoding of a Python long; the long4 encoding says it all."""), diff --git a/Lib/test/test_call.py b/Lib/test/test_call.py index 3c8fc35e3c116d..2a6a5d287b04ee 100644 --- a/Lib/test/test_call.py +++ b/Lib/test/test_call.py @@ -155,7 +155,7 @@ def test_varargs16_kw(self): min, 0, default=1, key=2, foo=3) def test_varargs17_kw(self): - msg = r"'foo' is an invalid keyword argument for print\(\)$" + msg = r"print\(\) got an unexpected keyword argument 'foo'$" self.assertRaisesRegex(TypeError, msg, print, 0, sep=1, end=2, file=3, flush=4, foo=5) @@ -928,7 +928,7 @@ def check_suggestion_includes(self, message): self.assertIn(f"Did you mean '{message}'?", str(cm.exception)) @contextlib.contextmanager - def check_suggestion_not_pressent(self): + def check_suggestion_not_present(self): with self.assertRaises(TypeError) as cm: yield self.assertNotIn("Did you mean", str(cm.exception)) @@ -946,7 +946,7 @@ def foo(blech=None, /, aaa=None, *args, late1=None): for keyword, suggestion in cases: with self.subTest(keyword): - ctx = self.check_suggestion_includes(suggestion) if suggestion else self.check_suggestion_not_pressent() + ctx = self.check_suggestion_includes(suggestion) if suggestion else self.check_suggestion_not_present() with ctx: foo(**{keyword:None}) @@ -987,6 +987,32 @@ def case_change_over_substitution(BLuch=None, Luch = None, fluch = None): with self.check_suggestion_includes(suggestion): func(bluch=None) + def test_unexpected_keyword_suggestion_via_getargs(self): + with self.check_suggestion_includes("maxsplit"): + "foo".split(maxsplt=1) + + self.assertRaisesRegex( + TypeError, r"split\(\) got an unexpected keyword argument 'blech'$", + "foo".split, blech=1 + ) + with self.check_suggestion_not_present(): + "foo".split(blech=1) + with self.check_suggestion_not_present(): + "foo".split(more_noise=1, maxsplt=1) + + # Also test the vgetargskeywords path + with self.check_suggestion_includes("name"): + ImportError(namez="oops") + + self.assertRaisesRegex( + TypeError, r"ImportError\(\) got an unexpected keyword argument 'blech'$", + ImportError, blech=1 + ) + with self.check_suggestion_not_present(): + ImportError(blech=1) + with self.check_suggestion_not_present(): + ImportError(blech=1, namez="oops") + @cpython_only class TestRecursion(unittest.TestCase): diff --git a/Lib/test/test_capi/test_getargs.py b/Lib/test/test_capi/test_getargs.py index 9b6aef27625ad0..12039803ba543e 100644 --- a/Lib/test/test_capi/test_getargs.py +++ b/Lib/test/test_capi/test_getargs.py @@ -667,7 +667,7 @@ def test_invalid_keyword(self): try: getargs_keywords((1,2),3,arg5=10,arg666=666) except TypeError as err: - self.assertEqual(str(err), "'arg666' is an invalid keyword argument for this function") + self.assertEqual(str(err), "this function got an unexpected keyword argument 'arg666'") else: self.fail('TypeError should have been raised') @@ -675,7 +675,7 @@ def test_surrogate_keyword(self): try: getargs_keywords((1,2), 3, (4,(5,6)), (7,8,9), **{'\uDC80': 10}) except TypeError as err: - self.assertEqual(str(err), "'\udc80' is an invalid keyword argument for this function") + self.assertEqual(str(err), "this function got an unexpected keyword argument '\udc80'") else: self.fail('TypeError should have been raised') @@ -742,12 +742,12 @@ def test_too_many_args(self): def test_invalid_keyword(self): # extraneous keyword arg with self.assertRaisesRegex(TypeError, - "'monster' is an invalid keyword argument for this function"): + "this function got an unexpected keyword argument 'monster'"): 
getargs_keyword_only(1, 2, monster=666) def test_surrogate_keyword(self): with self.assertRaisesRegex(TypeError, - "'\udc80' is an invalid keyword argument for this function"): + "this function got an unexpected keyword argument '\udc80'"): getargs_keyword_only(1, 2, **{'\uDC80': 10}) def test_weird_str_subclass(self): @@ -761,7 +761,7 @@ def __hash__(self): "invalid keyword argument for this function"): getargs_keyword_only(1, 2, **{BadStr("keyword_only"): 3}) with self.assertRaisesRegex(TypeError, - "invalid keyword argument for this function"): + "this function got an unexpected keyword argument"): getargs_keyword_only(1, 2, **{BadStr("monster"): 666}) def test_weird_str_subclass2(self): @@ -774,7 +774,7 @@ def __hash__(self): "invalid keyword argument for this function"): getargs_keyword_only(1, 2, **{BadStr("keyword_only"): 3}) with self.assertRaisesRegex(TypeError, - "invalid keyword argument for this function"): + "this function got an unexpected keyword argument"): getargs_keyword_only(1, 2, **{BadStr("monster"): 666}) @@ -807,7 +807,7 @@ def test_required_args(self): def test_empty_keyword(self): with self.assertRaisesRegex(TypeError, - "'' is an invalid keyword argument for this function"): + "this function got an unexpected keyword argument ''"): self.getargs(1, 2, **{'': 666}) @@ -1204,7 +1204,7 @@ def test_basic(self): "function missing required argument 'a'"): parse((), {}, 'O', ['a']) with self.assertRaisesRegex(TypeError, - "'b' is an invalid keyword argument"): + "this function got an unexpected keyword argument 'b'"): parse((), {'b': 1}, '|O', ['a']) with self.assertRaisesRegex(TypeError, fr"argument for function given by name \('a'\) " @@ -1278,10 +1278,10 @@ def test_nonascii_keywords(self): fr"and position \(1\)"): parse((1,), {name: 2}, 'O|O', [name, 'b']) with self.assertRaisesRegex(TypeError, - f"'{name}' is an invalid keyword argument"): + f"this function got an unexpected keyword argument '{name}'"): parse((), {name: 1}, '|O', ['b']) with self.assertRaisesRegex(TypeError, - "'b' is an invalid keyword argument"): + "this function got an unexpected keyword argument 'b'"): parse((), {'b': 1}, '|O', [name]) invalid = name.encode() + (name.encode()[:-1] or b'\x80') @@ -1301,17 +1301,17 @@ def test_nonascii_keywords(self): for name2 in ('b', 'ë', 'ĉ', 'Ɐ', '𐀁'): with self.subTest(name2=name2): with self.assertRaisesRegex(TypeError, - f"'{name2}' is an invalid keyword argument"): + f"this function got an unexpected keyword argument '{name2}'"): parse((), {name2: 1}, '|O', [name]) name2 = name.encode().decode('latin1') if name2 != name: with self.assertRaisesRegex(TypeError, - f"'{name2}' is an invalid keyword argument"): + f"this function got an unexpected keyword argument '{name2}'"): parse((), {name2: 1}, '|O', [name]) name3 = name + '3' with self.assertRaisesRegex(TypeError, - f"'{name2}' is an invalid keyword argument"): + f"this function got an unexpected keyword argument '{name2}'"): parse((), {name2: 1, name3: 2}, '|OO', [name, name3]) def test_nested_tuple(self): diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py index c57488e44aecc6..c7e76414ff0715 100644 --- a/Lib/test/test_exceptions.py +++ b/Lib/test/test_exceptions.py @@ -1917,7 +1917,7 @@ def test_attributes(self): self.assertEqual(exc.name, 'somename') self.assertEqual(exc.path, 'somepath') - msg = "'invalid' is an invalid keyword argument for ImportError" + msg = r"ImportError\(\) got an unexpected keyword argument 'invalid'" with self.assertRaisesRegex(TypeError, msg): 
ImportError('test', invalid='keyword') diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-01-31-09-10-10.gh-issue-107944.XWm1B-.rst b/Misc/NEWS.d/next/Core and Builtins/2024-01-31-09-10-10.gh-issue-107944.XWm1B-.rst new file mode 100644 index 00000000000000..8e3fb786c11055 --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2024-01-31-09-10-10.gh-issue-107944.XWm1B-.rst @@ -0,0 +1 @@ +Improve error message for function calls with bad keyword arguments via getargs diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-02-07-00-18-42.gh-issue-112069.jRDRR5.rst b/Misc/NEWS.d/next/Core and Builtins/2024-02-07-00-18-42.gh-issue-112069.jRDRR5.rst new file mode 100644 index 00000000000000..51ba6bd1ddaac3 --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2024-02-07-00-18-42.gh-issue-112069.jRDRR5.rst @@ -0,0 +1 @@ +Adapt :class:`set` and :class:`frozenset` methods to Argument Clinic. diff --git a/Modules/_xxtestfuzz/fuzzer.c b/Modules/_xxtestfuzz/fuzzer.c index e133b4d3c44480..6ea9f64d628530 100644 --- a/Modules/_xxtestfuzz/fuzzer.c +++ b/Modules/_xxtestfuzz/fuzzer.c @@ -502,7 +502,6 @@ static int fuzz_elementtree_parsewhole(const char* data, size_t size) { } #define MAX_PYCOMPILE_TEST_SIZE 16384 -static char pycompile_scratch[MAX_PYCOMPILE_TEST_SIZE]; static const int start_vals[] = {Py_eval_input, Py_single_input, Py_file_input}; const size_t NUM_START_VALS = sizeof(start_vals) / sizeof(start_vals[0]); @@ -531,6 +530,8 @@ static int fuzz_pycompile(const char* data, size_t size) { unsigned char optimize_idx = (unsigned char) data[1]; int optimize = optimize_vals[optimize_idx % NUM_OPTIMIZE_VALS]; + char pycompile_scratch[MAX_PYCOMPILE_TEST_SIZE]; + // Create a NUL-terminated C string from the remaining input memcpy(pycompile_scratch, data + 2, size - 2); // Put a NUL terminator just after the copied data. (Space was reserved already.) @@ -549,7 +550,13 @@ static int fuzz_pycompile(const char* data, size_t size) { PyObject *result = Py_CompileStringExFlags(pycompile_scratch, "", start, flags, optimize); if (result == NULL) { - /* compilation failed, most likely from a syntax error */ + /* Compilation failed, most likely from a syntax error. If it was a + SystemError we abort. There's no non-bug reason to raise a + SystemError. 
*/ + if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_SystemError)) { + PyErr_Print(); + abort(); + } PyErr_Clear(); } else { Py_DECREF(result); diff --git a/Modules/getpath.c b/Modules/getpath.c index a3c8fc269d1c3c..abed139028244a 100644 --- a/Modules/getpath.c +++ b/Modules/getpath.c @@ -262,6 +262,10 @@ getpath_joinpath(PyObject *Py_UNUSED(self), PyObject *args) } /* Convert all parts to wchar and accumulate max final length */ wchar_t **parts = (wchar_t **)PyMem_Malloc(n * sizeof(wchar_t *)); + if (parts == NULL) { + PyErr_NoMemory(); + return NULL; + } memset(parts, 0, n * sizeof(wchar_t *)); Py_ssize_t cchFinal = 0; Py_ssize_t first = 0; diff --git a/Objects/clinic/setobject.c.h b/Objects/clinic/setobject.c.h new file mode 100644 index 00000000000000..f3c96995ede60d --- /dev/null +++ b/Objects/clinic/setobject.c.h @@ -0,0 +1,414 @@ +/*[clinic input] +preserve +[clinic start generated code]*/ + +#include "pycore_modsupport.h" // _PyArg_CheckPositional() + +PyDoc_STRVAR(set_pop__doc__, +"pop($self, /)\n" +"--\n" +"\n" +"Remove and return an arbitrary set element.\n" +"\n" +"Raises KeyError if the set is empty."); + +#define SET_POP_METHODDEF \ + {"pop", (PyCFunction)set_pop, METH_NOARGS, set_pop__doc__}, + +static PyObject * +set_pop_impl(PySetObject *so); + +static PyObject * +set_pop(PySetObject *so, PyObject *Py_UNUSED(ignored)) +{ + return set_pop_impl(so); +} + +PyDoc_STRVAR(set_update__doc__, +"update($self, /, *others)\n" +"--\n" +"\n" +"Update the set, adding elements from all others."); + +#define SET_UPDATE_METHODDEF \ + {"update", _PyCFunction_CAST(set_update), METH_FASTCALL, set_update__doc__}, + +static PyObject * +set_update_impl(PySetObject *so, PyObject *args); + +static PyObject * +set_update(PySetObject *so, PyObject *const *args, Py_ssize_t nargs) +{ + PyObject *return_value = NULL; + PyObject *__clinic_args = NULL; + + if (!_PyArg_CheckPositional("update", nargs, 0, PY_SSIZE_T_MAX)) { + goto exit; + } + __clinic_args = PyTuple_New(nargs - 0); + if (!__clinic_args) { + goto exit; + } + for (Py_ssize_t i = 0; i < nargs - 0; ++i) { + PyTuple_SET_ITEM(__clinic_args, i, Py_NewRef(args[0 + i])); + } + return_value = set_update_impl(so, __clinic_args); + +exit: + Py_XDECREF(__clinic_args); + return return_value; +} + +PyDoc_STRVAR(set_copy__doc__, +"copy($self, /)\n" +"--\n" +"\n" +"Return a shallow copy of a set."); + +#define SET_COPY_METHODDEF \ + {"copy", (PyCFunction)set_copy, METH_NOARGS, set_copy__doc__}, + +static PyObject * +set_copy_impl(PySetObject *so); + +static PyObject * +set_copy(PySetObject *so, PyObject *Py_UNUSED(ignored)) +{ + return set_copy_impl(so); +} + +PyDoc_STRVAR(frozenset_copy__doc__, +"copy($self, /)\n" +"--\n" +"\n" +"Return a shallow copy of a set."); + +#define FROZENSET_COPY_METHODDEF \ + {"copy", (PyCFunction)frozenset_copy, METH_NOARGS, frozenset_copy__doc__}, + +static PyObject * +frozenset_copy_impl(PySetObject *so); + +static PyObject * +frozenset_copy(PySetObject *so, PyObject *Py_UNUSED(ignored)) +{ + return frozenset_copy_impl(so); +} + +PyDoc_STRVAR(set_clear__doc__, +"clear($self, /)\n" +"--\n" +"\n" +"Remove all elements from this set."); + +#define SET_CLEAR_METHODDEF \ + {"clear", (PyCFunction)set_clear, METH_NOARGS, set_clear__doc__}, + +static PyObject * +set_clear_impl(PySetObject *so); + +static PyObject * +set_clear(PySetObject *so, PyObject *Py_UNUSED(ignored)) +{ + return set_clear_impl(so); +} + +PyDoc_STRVAR(set_union__doc__, +"union($self, /, *others)\n" +"--\n" +"\n" +"Return a new set with elements from the 
set and all others."); + +#define SET_UNION_METHODDEF \ + {"union", _PyCFunction_CAST(set_union), METH_FASTCALL, set_union__doc__}, + +static PyObject * +set_union_impl(PySetObject *so, PyObject *args); + +static PyObject * +set_union(PySetObject *so, PyObject *const *args, Py_ssize_t nargs) +{ + PyObject *return_value = NULL; + PyObject *__clinic_args = NULL; + + if (!_PyArg_CheckPositional("union", nargs, 0, PY_SSIZE_T_MAX)) { + goto exit; + } + __clinic_args = PyTuple_New(nargs - 0); + if (!__clinic_args) { + goto exit; + } + for (Py_ssize_t i = 0; i < nargs - 0; ++i) { + PyTuple_SET_ITEM(__clinic_args, i, Py_NewRef(args[0 + i])); + } + return_value = set_union_impl(so, __clinic_args); + +exit: + Py_XDECREF(__clinic_args); + return return_value; +} + +PyDoc_STRVAR(set_intersection_multi__doc__, +"intersection($self, /, *others)\n" +"--\n" +"\n" +"Return a new set with elements common to the set and all others."); + +#define SET_INTERSECTION_MULTI_METHODDEF \ + {"intersection", _PyCFunction_CAST(set_intersection_multi), METH_FASTCALL, set_intersection_multi__doc__}, + +static PyObject * +set_intersection_multi_impl(PySetObject *so, PyObject *args); + +static PyObject * +set_intersection_multi(PySetObject *so, PyObject *const *args, Py_ssize_t nargs) +{ + PyObject *return_value = NULL; + PyObject *__clinic_args = NULL; + + if (!_PyArg_CheckPositional("intersection", nargs, 0, PY_SSIZE_T_MAX)) { + goto exit; + } + __clinic_args = PyTuple_New(nargs - 0); + if (!__clinic_args) { + goto exit; + } + for (Py_ssize_t i = 0; i < nargs - 0; ++i) { + PyTuple_SET_ITEM(__clinic_args, i, Py_NewRef(args[0 + i])); + } + return_value = set_intersection_multi_impl(so, __clinic_args); + +exit: + Py_XDECREF(__clinic_args); + return return_value; +} + +PyDoc_STRVAR(set_intersection_update_multi__doc__, +"intersection_update($self, /, *others)\n" +"--\n" +"\n" +"Update the set, keeping only elements found in it and all others."); + +#define SET_INTERSECTION_UPDATE_MULTI_METHODDEF \ + {"intersection_update", _PyCFunction_CAST(set_intersection_update_multi), METH_FASTCALL, set_intersection_update_multi__doc__}, + +static PyObject * +set_intersection_update_multi_impl(PySetObject *so, PyObject *args); + +static PyObject * +set_intersection_update_multi(PySetObject *so, PyObject *const *args, Py_ssize_t nargs) +{ + PyObject *return_value = NULL; + PyObject *__clinic_args = NULL; + + if (!_PyArg_CheckPositional("intersection_update", nargs, 0, PY_SSIZE_T_MAX)) { + goto exit; + } + __clinic_args = PyTuple_New(nargs - 0); + if (!__clinic_args) { + goto exit; + } + for (Py_ssize_t i = 0; i < nargs - 0; ++i) { + PyTuple_SET_ITEM(__clinic_args, i, Py_NewRef(args[0 + i])); + } + return_value = set_intersection_update_multi_impl(so, __clinic_args); + +exit: + Py_XDECREF(__clinic_args); + return return_value; +} + +PyDoc_STRVAR(set_isdisjoint__doc__, +"isdisjoint($self, other, /)\n" +"--\n" +"\n" +"Return True if two sets have a null intersection."); + +#define SET_ISDISJOINT_METHODDEF \ + {"isdisjoint", (PyCFunction)set_isdisjoint, METH_O, set_isdisjoint__doc__}, + +PyDoc_STRVAR(set_difference_update__doc__, +"difference_update($self, /, *others)\n" +"--\n" +"\n" +"Update the set, removing elements found in others."); + +#define SET_DIFFERENCE_UPDATE_METHODDEF \ + {"difference_update", _PyCFunction_CAST(set_difference_update), METH_FASTCALL, set_difference_update__doc__}, + +static PyObject * +set_difference_update_impl(PySetObject *so, PyObject *args); + +static PyObject * +set_difference_update(PySetObject *so, 
PyObject *const *args, Py_ssize_t nargs) +{ + PyObject *return_value = NULL; + PyObject *__clinic_args = NULL; + + if (!_PyArg_CheckPositional("difference_update", nargs, 0, PY_SSIZE_T_MAX)) { + goto exit; + } + __clinic_args = PyTuple_New(nargs - 0); + if (!__clinic_args) { + goto exit; + } + for (Py_ssize_t i = 0; i < nargs - 0; ++i) { + PyTuple_SET_ITEM(__clinic_args, i, Py_NewRef(args[0 + i])); + } + return_value = set_difference_update_impl(so, __clinic_args); + +exit: + Py_XDECREF(__clinic_args); + return return_value; +} + +PyDoc_STRVAR(set_difference_multi__doc__, +"difference($self, /, *others)\n" +"--\n" +"\n" +"Return a new set with elements in the set that are not in the others."); + +#define SET_DIFFERENCE_MULTI_METHODDEF \ + {"difference", _PyCFunction_CAST(set_difference_multi), METH_FASTCALL, set_difference_multi__doc__}, + +static PyObject * +set_difference_multi_impl(PySetObject *so, PyObject *args); + +static PyObject * +set_difference_multi(PySetObject *so, PyObject *const *args, Py_ssize_t nargs) +{ + PyObject *return_value = NULL; + PyObject *__clinic_args = NULL; + + if (!_PyArg_CheckPositional("difference", nargs, 0, PY_SSIZE_T_MAX)) { + goto exit; + } + __clinic_args = PyTuple_New(nargs - 0); + if (!__clinic_args) { + goto exit; + } + for (Py_ssize_t i = 0; i < nargs - 0; ++i) { + PyTuple_SET_ITEM(__clinic_args, i, Py_NewRef(args[0 + i])); + } + return_value = set_difference_multi_impl(so, __clinic_args); + +exit: + Py_XDECREF(__clinic_args); + return return_value; +} + +PyDoc_STRVAR(set_symmetric_difference_update__doc__, +"symmetric_difference_update($self, other, /)\n" +"--\n" +"\n" +"Update the set, keeping only elements found in either set, but not in both."); + +#define SET_SYMMETRIC_DIFFERENCE_UPDATE_METHODDEF \ + {"symmetric_difference_update", (PyCFunction)set_symmetric_difference_update, METH_O, set_symmetric_difference_update__doc__}, + +PyDoc_STRVAR(set_symmetric_difference__doc__, +"symmetric_difference($self, other, /)\n" +"--\n" +"\n" +"Return a new set with elements in either the set or other but not both."); + +#define SET_SYMMETRIC_DIFFERENCE_METHODDEF \ + {"symmetric_difference", (PyCFunction)set_symmetric_difference, METH_O, set_symmetric_difference__doc__}, + +PyDoc_STRVAR(set_issubset__doc__, +"issubset($self, other, /)\n" +"--\n" +"\n" +"Report whether another set contains this set."); + +#define SET_ISSUBSET_METHODDEF \ + {"issubset", (PyCFunction)set_issubset, METH_O, set_issubset__doc__}, + +PyDoc_STRVAR(set_issuperset__doc__, +"issuperset($self, other, /)\n" +"--\n" +"\n" +"Report whether this set contains another set."); + +#define SET_ISSUPERSET_METHODDEF \ + {"issuperset", (PyCFunction)set_issuperset, METH_O, set_issuperset__doc__}, + +PyDoc_STRVAR(set_add__doc__, +"add($self, object, /)\n" +"--\n" +"\n" +"Add an element to a set.\n" +"\n" +"This has no effect if the element is already present."); + +#define SET_ADD_METHODDEF \ + {"add", (PyCFunction)set_add, METH_O, set_add__doc__}, + +PyDoc_STRVAR(set___contains____doc__, +"__contains__($self, object, /)\n" +"--\n" +"\n" +"x.__contains__(y) <==> y in x."); + +#define SET___CONTAINS___METHODDEF \ + {"__contains__", (PyCFunction)set___contains__, METH_O|METH_COEXIST, set___contains____doc__}, + +PyDoc_STRVAR(set_remove__doc__, +"remove($self, object, /)\n" +"--\n" +"\n" +"Remove an element from a set; it must be a member.\n" +"\n" +"If the element is not a member, raise a KeyError."); + +#define SET_REMOVE_METHODDEF \ + {"remove", (PyCFunction)set_remove, METH_O, set_remove__doc__}, + 
+PyDoc_STRVAR(set_discard__doc__, +"discard($self, object, /)\n" +"--\n" +"\n" +"Remove an element from a set if it is a member.\n" +"\n" +"Unlike set.remove(), the discard() method does not raise\n" +"an exception when an element is missing from the set."); + +#define SET_DISCARD_METHODDEF \ + {"discard", (PyCFunction)set_discard, METH_O, set_discard__doc__}, + +PyDoc_STRVAR(set___reduce____doc__, +"__reduce__($self, /)\n" +"--\n" +"\n" +"Return state information for pickling."); + +#define SET___REDUCE___METHODDEF \ + {"__reduce__", (PyCFunction)set___reduce__, METH_NOARGS, set___reduce____doc__}, + +static PyObject * +set___reduce___impl(PySetObject *so); + +static PyObject * +set___reduce__(PySetObject *so, PyObject *Py_UNUSED(ignored)) +{ + return set___reduce___impl(so); +} + +PyDoc_STRVAR(set___sizeof____doc__, +"__sizeof__($self, /)\n" +"--\n" +"\n" +"S.__sizeof__() -> size of S in memory, in bytes."); + +#define SET___SIZEOF___METHODDEF \ + {"__sizeof__", (PyCFunction)set___sizeof__, METH_NOARGS, set___sizeof____doc__}, + +static PyObject * +set___sizeof___impl(PySetObject *so); + +static PyObject * +set___sizeof__(PySetObject *so, PyObject *Py_UNUSED(ignored)) +{ + return set___sizeof___impl(so); +} +/*[clinic end generated code: output=34a30591148da884 input=a9049054013a1b77]*/ diff --git a/Objects/setobject.c b/Objects/setobject.c index 3acf2a7a74890b..6a4c8c45f0836d 100644 --- a/Objects/setobject.c +++ b/Objects/setobject.c @@ -40,6 +40,19 @@ #include "pycore_pyerrors.h" // _PyErr_SetKeyError() #include "pycore_setobject.h" // _PySet_NextEntry() definition #include // offsetof() +#include "clinic/setobject.c.h" + +/*[clinic input] +class set "PySetObject *" "&PySet_Type" +class frozenset "PySetObject *" "&PyFrozenSet_Type" +[clinic start generated code]*/ +/*[clinic end generated code: output=da39a3ee5e6b4b0d input=97ad1d3e9f117079]*/ + +/*[python input] +class setobject_converter(self_converter): + type = "PySetObject *" +[python start generated code]*/ +/*[python end generated code: output=da39a3ee5e6b4b0d input=33a44506d4d57793]*/ /* Object used as dummy key to fill deleted entries */ static PyObject _dummy_struct; @@ -631,8 +644,18 @@ set_merge(PySetObject *so, PyObject *otherset) return 0; } +/*[clinic input] +set.pop + so: setobject + +Remove and return an arbitrary set element. + +Raises KeyError if the set is empty. +[clinic start generated code]*/ + static PyObject * -set_pop(PySetObject *so, PyObject *Py_UNUSED(ignored)) +set_pop_impl(PySetObject *so) +/*[clinic end generated code: output=4d65180f1271871b input=4a3f5552e660a260]*/ { /* Make sure the search finger is in bounds */ setentry *entry = so->table + (so->finger & so->mask); @@ -656,9 +679,6 @@ set_pop(PySetObject *so, PyObject *Py_UNUSED(ignored)) return key; } -PyDoc_STRVAR(pop_doc, "Remove and return an arbitrary set element.\n\ -Raises KeyError if the set is empty."); - static int set_traverse(PySetObject *so, visitproc visit, void *arg) { @@ -935,8 +955,18 @@ set_update_internal(PySetObject *so, PyObject *other) return 0; } +/*[clinic input] +set.update + so: setobject + *others as args: object + / + +Update the set, adding elements from all others. 
+[clinic start generated code]*/ + static PyObject * -set_update(PySetObject *so, PyObject *args) +set_update_impl(PySetObject *so, PyObject *args) +/*[clinic end generated code: output=34f6371704974c8a input=eb47c4fbaeb3286e]*/ { Py_ssize_t i; @@ -948,12 +978,6 @@ set_update(PySetObject *so, PyObject *args) Py_RETURN_NONE; } -PyDoc_STRVAR(update_doc, -"update($self, /, *others)\n\ ---\n\ -\n\ -Update the set, adding elements from all others."); - /* XXX Todo: If aligned memory allocations become available, make the set object 64 byte aligned so that most of the fields @@ -1101,14 +1125,30 @@ set_swap_bodies(PySetObject *a, PySetObject *b) } } +/*[clinic input] +set.copy + so: setobject + +Return a shallow copy of a set. +[clinic start generated code]*/ + static PyObject * -set_copy(PySetObject *so, PyObject *Py_UNUSED(ignored)) +set_copy_impl(PySetObject *so) +/*[clinic end generated code: output=c9223a1e1cc6b041 input=2b80b288d47b8cf1]*/ { return make_new_set_basetype(Py_TYPE(so), (PyObject *)so); } +/*[clinic input] +frozenset.copy + so: setobject + +Return a shallow copy of a set. +[clinic start generated code]*/ + static PyObject * -frozenset_copy(PySetObject *so, PyObject *Py_UNUSED(ignored)) +frozenset_copy_impl(PySetObject *so) +/*[clinic end generated code: output=b356263526af9e70 input=3dc65577d344eff7]*/ { if (PyFrozenSet_CheckExact(so)) { return Py_NewRef(so); @@ -1116,19 +1156,33 @@ frozenset_copy(PySetObject *so, PyObject *Py_UNUSED(ignored)) return set_copy(so, NULL); } -PyDoc_STRVAR(copy_doc, "Return a shallow copy of a set."); +/*[clinic input] +set.clear + so: setobject + +Remove all elements from this set. +[clinic start generated code]*/ static PyObject * -set_clear(PySetObject *so, PyObject *Py_UNUSED(ignored)) +set_clear_impl(PySetObject *so) +/*[clinic end generated code: output=4e71d5a83904161a input=74ac19794da81a39]*/ { set_clear_internal(so); Py_RETURN_NONE; } -PyDoc_STRVAR(clear_doc, "Remove all elements from this set."); +/*[clinic input] +set.union + so: setobject + *others as args: object + / + +Return a new set with elements from the set and all others. +[clinic start generated code]*/ static PyObject * -set_union(PySetObject *so, PyObject *args) +set_union_impl(PySetObject *so, PyObject *args) +/*[clinic end generated code: output=2c83d05a446a1477 input=2e2024fa1e40ac84]*/ { PySetObject *result; PyObject *other; @@ -1150,12 +1204,6 @@ set_union(PySetObject *so, PyObject *args) return (PyObject *)result; } -PyDoc_STRVAR(union_doc, -"union($self, /, *others)\n\ ---\n\ -\n\ -Return a new set with elements from the set and all others."); - static PyObject * set_or(PySetObject *so, PyObject *other) { @@ -1270,8 +1318,18 @@ set_intersection(PySetObject *so, PyObject *other) return NULL; } +/*[clinic input] +set.intersection as set_intersection_multi + so: setobject + *others as args: object + / + +Return a new set with elements common to the set and all others. 
+[clinic start generated code]*/ + static PyObject * -set_intersection_multi(PySetObject *so, PyObject *args) +set_intersection_multi_impl(PySetObject *so, PyObject *args) +/*[clinic end generated code: output=2406ef3387adbe2f input=04108ea6d7f0532b]*/ { Py_ssize_t i; @@ -1291,12 +1349,6 @@ set_intersection_multi(PySetObject *so, PyObject *args) return result; } -PyDoc_STRVAR(intersection_doc, -"intersection($self, /, *others)\n\ ---\n\ -\n\ -Return a new set with elements common to the set and all others."); - static PyObject * set_intersection_update(PySetObject *so, PyObject *other) { @@ -1310,12 +1362,22 @@ set_intersection_update(PySetObject *so, PyObject *other) Py_RETURN_NONE; } +/*[clinic input] +set.intersection_update as set_intersection_update_multi + so: setobject + *others as args: object + / + +Update the set, keeping only elements found in it and all others. +[clinic start generated code]*/ + static PyObject * -set_intersection_update_multi(PySetObject *so, PyObject *args) +set_intersection_update_multi_impl(PySetObject *so, PyObject *args) +/*[clinic end generated code: output=251c1f729063609d input=ff8f119f97458d16]*/ { PyObject *tmp; - tmp = set_intersection_multi(so, args); + tmp = set_intersection_multi_impl(so, args); if (tmp == NULL) return NULL; set_swap_bodies(so, (PySetObject *)tmp); @@ -1323,12 +1385,6 @@ set_intersection_update_multi(PySetObject *so, PyObject *args) Py_RETURN_NONE; } -PyDoc_STRVAR(intersection_update_doc, -"intersection_update($self, /, *others)\n\ ---\n\ -\n\ -Update the set, keeping only elements found in it and all others."); - static PyObject * set_and(PySetObject *so, PyObject *other) { @@ -1351,8 +1407,18 @@ set_iand(PySetObject *so, PyObject *other) return Py_NewRef(so); } +/*[clinic input] +set.isdisjoint + so: setobject + other: object + / + +Return True if two sets have a null intersection. +[clinic start generated code]*/ + static PyObject * set_isdisjoint(PySetObject *so, PyObject *other) +/*[clinic end generated code: output=a92bbf9a2db6a3da input=c254ddec8a2326e3]*/ { PyObject *key, *it, *tmp; int rv; @@ -1410,9 +1476,6 @@ set_isdisjoint(PySetObject *so, PyObject *other) Py_RETURN_TRUE; } -PyDoc_STRVAR(isdisjoint_doc, -"Return True if two sets have a null intersection."); - static int set_difference_update_internal(PySetObject *so, PyObject *other) { @@ -1471,8 +1534,18 @@ set_difference_update_internal(PySetObject *so, PyObject *other) return set_table_resize(so, so->used>50000 ? so->used*2 : so->used*4); } +/*[clinic input] +set.difference_update + so: setobject + *others as args: object + / + +Update the set, removing elements found in others. +[clinic start generated code]*/ + static PyObject * -set_difference_update(PySetObject *so, PyObject *args) +set_difference_update_impl(PySetObject *so, PyObject *args) +/*[clinic end generated code: output=28685b2fc63e41c4 input=e7abb43c9f2c5a73]*/ { Py_ssize_t i; @@ -1484,12 +1557,6 @@ set_difference_update(PySetObject *so, PyObject *args) Py_RETURN_NONE; } -PyDoc_STRVAR(difference_update_doc, -"difference_update($self, /, *others)\n\ ---\n\ -\n\ -Update the set, removing elements found in others."); - static PyObject * set_copy_and_difference(PySetObject *so, PyObject *other) { @@ -1580,8 +1647,18 @@ set_difference(PySetObject *so, PyObject *other) return result; } +/*[clinic input] +set.difference as set_difference_multi + so: setobject + *others as args: object + / + +Return a new set with elements in the set that are not in the others. 
+[clinic start generated code]*/ + static PyObject * -set_difference_multi(PySetObject *so, PyObject *args) +set_difference_multi_impl(PySetObject *so, PyObject *args) +/*[clinic end generated code: output=3130c3bb3cac873d input=d8ae9bb6d518ab95]*/ { Py_ssize_t i; PyObject *result, *other; @@ -1604,11 +1681,6 @@ set_difference_multi(PySetObject *so, PyObject *args) return result; } -PyDoc_STRVAR(difference_doc, -"difference($self, /, *others)\n\ ---\n\ -\n\ -Return a new set with elements in the set that are not in the others."); static PyObject * set_sub(PySetObject *so, PyObject *other) { @@ -1654,8 +1726,18 @@ set_symmetric_difference_update_dict(PySetObject *so, PyObject *other) Py_RETURN_NONE; } +/*[clinic input] +set.symmetric_difference_update + so: setobject + other: object + / + +Update the set, keeping only elements found in either set, but not in both. +[clinic start generated code]*/ + static PyObject * set_symmetric_difference_update(PySetObject *so, PyObject *other) +/*[clinic end generated code: output=fbb049c0806028de input=a50acf0365e1f0a5]*/ { PySetObject *otherset; PyObject *key; @@ -1708,14 +1790,18 @@ set_symmetric_difference_update(PySetObject *so, PyObject *other) Py_RETURN_NONE; } -PyDoc_STRVAR(symmetric_difference_update_doc, -"symmetric_difference_update($self, other, /)\n\ ---\n\ -\n\ -Update the set, keeping only elements found in either set, but not in both."); +/*[clinic input] +set.symmetric_difference + so: setobject + other: object + / + +Return a new set with elements in either the set or other but not both. +[clinic start generated code]*/ static PyObject * set_symmetric_difference(PySetObject *so, PyObject *other) +/*[clinic end generated code: output=f95364211b88775a input=f18af370ad72ebac]*/ { PyObject *rv; PySetObject *otherset; @@ -1732,12 +1818,6 @@ set_symmetric_difference(PySetObject *so, PyObject *other) return (PyObject *)otherset; } -PyDoc_STRVAR(symmetric_difference_doc, -"symmetric_difference($self, other, /)\n\ ---\n\ -\n\ -Return a new set with elements in either the set or other but not both."); - static PyObject * set_xor(PySetObject *so, PyObject *other) { @@ -1760,8 +1840,18 @@ set_ixor(PySetObject *so, PyObject *other) return Py_NewRef(so); } +/*[clinic input] +set.issubset + so: setobject + other: object + / + +Report whether another set contains this set. +[clinic start generated code]*/ + static PyObject * set_issubset(PySetObject *so, PyObject *other) +/*[clinic end generated code: output=78aef1f377aedef1 input=37fbc579b609db0c]*/ { setentry *entry; Py_ssize_t pos = 0; @@ -1794,14 +1884,18 @@ set_issubset(PySetObject *so, PyObject *other) Py_RETURN_TRUE; } -PyDoc_STRVAR(issubset_doc, -"issubset($self, other, /)\n\ ---\n\ -\n\ -Test whether every element in the set is in other."); +/*[clinic input] +set.issuperset + so: setobject + other: object + / + +Report whether this set contains another set. 
+[clinic start generated code]*/ static PyObject * set_issuperset(PySetObject *so, PyObject *other) +/*[clinic end generated code: output=7d2b71dd714a7ec7 input=fd5dab052f2e9bb3]*/ { if (PyAnySet_Check(other)) { return set_issubset((PySetObject *)other, (PyObject *)so); @@ -1830,12 +1924,6 @@ set_issuperset(PySetObject *so, PyObject *other) Py_RETURN_TRUE; } -PyDoc_STRVAR(issuperset_doc, -"issuperset($self, other, /)\n\ ---\n\ -\n\ -Test whether every element in other is in the set."); - static PyObject * set_richcompare(PySetObject *v, PyObject *w, int op) { @@ -1879,19 +1967,26 @@ set_richcompare(PySetObject *v, PyObject *w, int op) Py_RETURN_NOTIMPLEMENTED; } +/*[clinic input] +set.add + so: setobject + object as key: object + / + +Add an element to a set. + +This has no effect if the element is already present. +[clinic start generated code]*/ + static PyObject * set_add(PySetObject *so, PyObject *key) +/*[clinic end generated code: output=cd9c2d5c2069c2ba input=96f1efe029e47972]*/ { if (set_add_key(so, key)) return NULL; Py_RETURN_NONE; } -PyDoc_STRVAR(add_doc, -"Add an element to a set.\n\ -\n\ -This has no effect if the element is already present."); - static int set_contains(PySetObject *so, PyObject *key) { @@ -1912,8 +2007,19 @@ set_contains(PySetObject *so, PyObject *key) return rv; } +/*[clinic input] +@coexist +set.__contains__ + so: setobject + object as key: object + / + +x.__contains__(y) <==> y in x. +[clinic start generated code]*/ + static PyObject * -set_direct_contains(PySetObject *so, PyObject *key) +set___contains__(PySetObject *so, PyObject *key) +/*[clinic end generated code: output=b5948bc5c590d3ca input=cf4c72db704e4cf0]*/ { long result; @@ -1923,10 +2029,20 @@ set_direct_contains(PySetObject *so, PyObject *key) return PyBool_FromLong(result); } -PyDoc_STRVAR(contains_doc, "x.__contains__(y) <==> y in x."); +/*[clinic input] +set.remove + so: setobject + object as key: object + / + +Remove an element from a set; it must be a member. + +If the element is not a member, raise a KeyError. +[clinic start generated code]*/ static PyObject * set_remove(PySetObject *so, PyObject *key) +/*[clinic end generated code: output=08ae496d0cd2b8c1 input=10132515dfe8ebd7]*/ { PyObject *tmpkey; int rv; @@ -1952,13 +2068,21 @@ set_remove(PySetObject *so, PyObject *key) Py_RETURN_NONE; } -PyDoc_STRVAR(remove_doc, -"Remove an element from a set; it must be a member.\n\ -\n\ -If the element is not a member, raise a KeyError."); +/*[clinic input] +set.discard + so: setobject + object as key: object + / + +Remove an element from a set if it is a member. + +Unlike set.remove(), the discard() method does not raise +an exception when an element is missing from the set. +[clinic start generated code]*/ static PyObject * set_discard(PySetObject *so, PyObject *key) +/*[clinic end generated code: output=9181b60d7bb7d480 input=82a689eba94d5ad9]*/ { PyObject *tmpkey; int rv; @@ -1979,14 +2103,16 @@ set_discard(PySetObject *so, PyObject *key) Py_RETURN_NONE; } -PyDoc_STRVAR(discard_doc, -"Remove an element from a set if it is a member.\n\ -\n\ -Unlike set.remove(), the discard() method does not raise\n\ -an exception when an element is missing from the set."); +/*[clinic input] +set.__reduce__ + so: setobject + +Return state information for pickling. 
+[clinic start generated code]*/ static PyObject * -set_reduce(PySetObject *so, PyObject *Py_UNUSED(ignored)) +set___reduce___impl(PySetObject *so) +/*[clinic end generated code: output=9af7d0e029df87ee input=531375e87a24a449]*/ { PyObject *keys=NULL, *args=NULL, *result=NULL, *state=NULL; @@ -2007,8 +2133,16 @@ set_reduce(PySetObject *so, PyObject *Py_UNUSED(ignored)) return result; } +/*[clinic input] +set.__sizeof__ + so: setobject + +S.__sizeof__() -> size of S in memory, in bytes. +[clinic start generated code]*/ + static PyObject * -set_sizeof(PySetObject *so, PyObject *Py_UNUSED(ignored)) +set___sizeof___impl(PySetObject *so) +/*[clinic end generated code: output=4bfa3df7bd38ed88 input=0f214fc2225319fc]*/ { size_t res = _PyObject_SIZE(Py_TYPE(so)); if (so->table != so->smalltable) { @@ -2017,7 +2151,6 @@ set_sizeof(PySetObject *so, PyObject *Py_UNUSED(ignored)) return PyLong_FromSize_t(res); } -PyDoc_STRVAR(sizeof_doc, "S.__sizeof__() -> size of S in memory, in bytes"); static int set_init(PySetObject *self, PyObject *args, PyObject *kwds) { @@ -2071,46 +2204,26 @@ static PySequenceMethods set_as_sequence = { /* set object ********************************************************/ static PyMethodDef set_methods[] = { - {"add", (PyCFunction)set_add, METH_O, - add_doc}, - {"clear", (PyCFunction)set_clear, METH_NOARGS, - clear_doc}, - {"__contains__",(PyCFunction)set_direct_contains, METH_O | METH_COEXIST, - contains_doc}, - {"copy", (PyCFunction)set_copy, METH_NOARGS, - copy_doc}, - {"discard", (PyCFunction)set_discard, METH_O, - discard_doc}, - {"difference", (PyCFunction)set_difference_multi, METH_VARARGS, - difference_doc}, - {"difference_update", (PyCFunction)set_difference_update, METH_VARARGS, - difference_update_doc}, - {"intersection",(PyCFunction)set_intersection_multi, METH_VARARGS, - intersection_doc}, - {"intersection_update",(PyCFunction)set_intersection_update_multi, METH_VARARGS, - intersection_update_doc}, - {"isdisjoint", (PyCFunction)set_isdisjoint, METH_O, - isdisjoint_doc}, - {"issubset", (PyCFunction)set_issubset, METH_O, - issubset_doc}, - {"issuperset", (PyCFunction)set_issuperset, METH_O, - issuperset_doc}, - {"pop", (PyCFunction)set_pop, METH_NOARGS, - pop_doc}, - {"__reduce__", (PyCFunction)set_reduce, METH_NOARGS, - reduce_doc}, - {"remove", (PyCFunction)set_remove, METH_O, - remove_doc}, - {"__sizeof__", (PyCFunction)set_sizeof, METH_NOARGS, - sizeof_doc}, - {"symmetric_difference",(PyCFunction)set_symmetric_difference, METH_O, - symmetric_difference_doc}, - {"symmetric_difference_update",(PyCFunction)set_symmetric_difference_update, METH_O, - symmetric_difference_update_doc}, - {"union", (PyCFunction)set_union, METH_VARARGS, - union_doc}, - {"update", (PyCFunction)set_update, METH_VARARGS, - update_doc}, + SET_ADD_METHODDEF + SET_CLEAR_METHODDEF + SET___CONTAINS___METHODDEF + SET_COPY_METHODDEF + SET_DISCARD_METHODDEF + SET_DIFFERENCE_MULTI_METHODDEF + SET_DIFFERENCE_UPDATE_METHODDEF + SET_INTERSECTION_MULTI_METHODDEF + SET_INTERSECTION_UPDATE_MULTI_METHODDEF + SET_ISDISJOINT_METHODDEF + SET_ISSUBSET_METHODDEF + SET_ISSUPERSET_METHODDEF + SET_POP_METHODDEF + SET___REDUCE___METHODDEF + SET_REMOVE_METHODDEF + SET___SIZEOF___METHODDEF + SET_SYMMETRIC_DIFFERENCE_METHODDEF + SET_SYMMETRIC_DIFFERENCE_UPDATE_METHODDEF + SET_UNION_METHODDEF + SET_UPDATE_METHODDEF {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")}, {NULL, NULL} /* sentinel */ }; @@ -2203,28 +2316,17 @@ PyTypeObject PySet_Type = { static PyMethodDef 
frozenset_methods[] = { - {"__contains__",(PyCFunction)set_direct_contains, METH_O | METH_COEXIST, - contains_doc}, - {"copy", (PyCFunction)frozenset_copy, METH_NOARGS, - copy_doc}, - {"difference", (PyCFunction)set_difference_multi, METH_VARARGS, - difference_doc}, - {"intersection", (PyCFunction)set_intersection_multi, METH_VARARGS, - intersection_doc}, - {"isdisjoint", (PyCFunction)set_isdisjoint, METH_O, - isdisjoint_doc}, - {"issubset", (PyCFunction)set_issubset, METH_O, - issubset_doc}, - {"issuperset", (PyCFunction)set_issuperset, METH_O, - issuperset_doc}, - {"__reduce__", (PyCFunction)set_reduce, METH_NOARGS, - reduce_doc}, - {"__sizeof__", (PyCFunction)set_sizeof, METH_NOARGS, - sizeof_doc}, - {"symmetric_difference",(PyCFunction)set_symmetric_difference, METH_O, - symmetric_difference_doc}, - {"union", (PyCFunction)set_union, METH_VARARGS, - union_doc}, + SET___CONTAINS___METHODDEF + FROZENSET_COPY_METHODDEF + SET_DIFFERENCE_MULTI_METHODDEF + SET_INTERSECTION_MULTI_METHODDEF + SET_ISDISJOINT_METHODDEF + SET_ISSUBSET_METHODDEF + SET_ISSUPERSET_METHODDEF + SET___REDUCE___METHODDEF + SET___SIZEOF___METHODDEF + SET_SYMMETRIC_DIFFERENCE_METHODDEF + SET_UNION_METHODDEF {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")}, {NULL, NULL} /* sentinel */ }; diff --git a/Python/getargs.c b/Python/getargs.c index 0c4ce282f48764..08e97ee3e627b5 100644 --- a/Python/getargs.c +++ b/Python/getargs.c @@ -8,6 +8,7 @@ #include "pycore_modsupport.h" // export _PyArg_NoKeywords() #include "pycore_pylifecycle.h" // _PyArg_Fini #include "pycore_tuple.h" // _PyTuple_ITEMS() +#include "pycore_pyerrors.h" // _Py_CalculateSuggestions() /* Export Stable ABIs (abi only) */ PyAPI_FUNC(int) _PyArg_Parse_SizeT(PyObject *, const char *, ...); @@ -1424,12 +1425,31 @@ error_unexpected_keyword_arg(PyObject *kwargs, PyObject *kwnames, PyObject *kwtu int match = PySequence_Contains(kwtuple, keyword); if (match <= 0) { if (!match) { - PyErr_Format(PyExc_TypeError, - "'%S' is an invalid keyword " - "argument for %.200s%s", - keyword, - (fname == NULL) ? "this function" : fname, - (fname == NULL) ? "" : "()"); + PyObject *kwlist = PySequence_List(kwtuple); + if (!kwlist) { + return; + } + PyObject *suggestion_keyword = _Py_CalculateSuggestions(kwlist, keyword); + Py_DECREF(kwlist); + + if (suggestion_keyword) { + PyErr_Format(PyExc_TypeError, + "%.200s%s got an unexpected keyword argument '%S'." + " Did you mean '%S'?", + (fname == NULL) ? "this function" : fname, + (fname == NULL) ? "" : "()", + keyword, + suggestion_keyword); + Py_DECREF(suggestion_keyword); + } + else { + PyErr_Format(PyExc_TypeError, + "%.200s%s got an unexpected keyword argument '%S'", + (fname == NULL) ? "this function" : fname, + (fname == NULL) ? "" : "()", + keyword); + } + } return; } @@ -1457,6 +1477,9 @@ PyArg_ValidateKeywordArguments(PyObject *kwargs) return 1; } +static PyObject * +new_kwtuple(const char * const *keywords, int total, int pos); + #define IS_END_OF_FORMAT(c) (c == '\0' || c == ';' || c == ':') static int @@ -1722,12 +1745,35 @@ vgetargskeywords(PyObject *args, PyObject *kwargs, const char *format, } } if (!match) { - PyErr_Format(PyExc_TypeError, - "'%U' is an invalid keyword " - "argument for %.200s%s", - key, - (fname == NULL) ? "this function" : fname, - (fname == NULL) ? 
"" : "()"); + PyObject *_pykwtuple = new_kwtuple(kwlist, len, pos); + if (!_pykwtuple) { + return cleanreturn(0, &freelist); + } + PyObject *pykwlist = PySequence_List(_pykwtuple); + Py_DECREF(_pykwtuple); + if (!pykwlist) { + return cleanreturn(0, &freelist); + } + PyObject *suggestion_keyword = _Py_CalculateSuggestions(pykwlist, key); + Py_DECREF(pykwlist); + + if (suggestion_keyword) { + PyErr_Format(PyExc_TypeError, + "%.200s%s got an unexpected keyword argument '%S'." + " Did you mean '%S'?", + (fname == NULL) ? "this function" : fname, + (fname == NULL) ? "" : "()", + key, + suggestion_keyword); + Py_DECREF(suggestion_keyword); + } + else { + PyErr_Format(PyExc_TypeError, + "%.200s%s got an unexpected keyword argument '%S'", + (fname == NULL) ? "this function" : fname, + (fname == NULL) ? "" : "()", + key); + } return cleanreturn(0, &freelist); } } From 58c1854654c103400b43f0310e117f7c90e73579 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 12:12:38 -0600 Subject: [PATCH 14/31] Formatting cleanup --- Python/initconfig.c | 7 +++++-- Python/specialize.c | 12 +++++++----- Tools/scripts/summarize_stats.py | 7 +++++-- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/Python/initconfig.c b/Python/initconfig.c index 29ae05aa8310e0..ebcf47703230cb 100644 --- a/Python/initconfig.c +++ b/Python/initconfig.c @@ -2213,8 +2213,11 @@ config_read(PyConfig *config, int compute_path_config) } if (config_get_env(config, "PYTHONSTATS_UOPDEPTH")) { - config->_pystats_depth = atoi(config_get_env(config, "PYTHONSTATS_UOPDEPTH")); - if (config->_pystats_depth < 2){ printf("Setting default stats depth to 2"); config->_pystats_depth = 2; } + int uop_depth = atoi(config_get_env(config, "PYTHONSTATS_UOPDEPTH")); + config->_pystats_depth = uop_depth; + if (config->_pystats_depth < 2){ + printf("Setting default stats depth to 2"); config->_pystats_depth = 2; + } } else { config->_pystats_depth = 2; diff --git a/Python/specialize.c b/Python/specialize.c index 233a4d172d9434..fe5f8e4201923b 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -47,22 +47,24 @@ void _init_pystats(){ //Make UOpstats structs for all initial opcodes, with null pointers to deeper opcodes for (int i = 0; i < 512; i++){ - _Py_stats_struct.optimization_stats.opcode[i] = PyMem_RawCalloc(1, sizeof(UOpStats)); + UOpStats *opcode_stat = _Py_stats_struct.optimization_stats.opcode[i]; + opcode_stat = PyMem_RawCalloc(1, sizeof(UOpStats)); for (int j = 0; j < 512; j++){ - _Py_stats_struct.optimization_stats.opcode[i]->next_stats[j] = NULL; + opcode_stat->next_stats[j] = NULL; } } - int DEPTH = _Py_stats_struct.optimization_stats.max_uop_chain_depth ? _Py_stats_struct.optimization_stats.max_uop_chain_depth : 2; + int previous_depth = _Py_stats_struct.optimization_stats.max_uop_chain_depth; + int depth = previous_depth ? 
previous_depth : 2; if (_Py_stats_struct.optimization_stats.last_opcodes){ uint64_t *tmp; - tmp = PyMem_RawRealloc(DEPTH, sizeof(uint64_t)); + tmp = PyMem_RawRealloc(depth, sizeof(uint64_t)); if (tmp != NULL) _Py_stats_struct.optimization_stats.last_opcodes = tmp; else return PyErr_NoMemory(); } else { - _Py_stats_struct.optimization_stats.last_opcodes = PyMem_RawCalloc(DEPTH, sizeof(uint64_t)); + _Py_stats_struct.optimization_stats.last_opcodes = PyMem_RawCalloc(depth, sizeof(uint64_t)); } } diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 8534f7fe5d53a4..9b294926611b1f 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -313,10 +313,13 @@ def get_opcode_stats(self, prefix: str) -> OpcodeStats: ) def get_uop_sequence_stats(self, length) -> dict[str, int]: - return {k: v for k, v in self._data.items() if k.startswith("UOp sequence count") and k.count(',') == length - 1} + return {k: v for k, v in self._data.items() + if k.startswith("UOp sequence count") and + k.count(',') == length - 1} def get_max_uop_sequence_length(self) -> int: - uop_sequence_lengths = [k.count(',')+1 for k in self._data.keys() if k.startswith("UOp sequence count")] + uop_sequence_lengths = [k.count(',')+1 for k in self._data.keys() + if k.startswith("UOp sequence count")] return max(uop_sequence_lengths) if uop_sequence_lengths else 0 def get_call_stats(self) -> dict[str, int]: From 26a1c02f5bb79c15f6cbc9102af14cd56832402b Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 14:07:33 -0600 Subject: [PATCH 15/31] Add docs, ACK --- Doc/using/configure.rst | 6 ++++++ Misc/ACKS | 1 + 2 files changed, 7 insertions(+) diff --git a/Doc/using/configure.rst b/Doc/using/configure.rst index aab9469b44828a..e10ffda51b7f9b 100644 --- a/Doc/using/configure.rst +++ b/Doc/using/configure.rst @@ -235,6 +235,7 @@ General Options * Add :option:`-X pystats <-X>` command line option. * Add :envvar:`!PYTHONSTATS` environment variable. + * Add :envvar:`!PYTHONSTATS_UOPDEPTH` environment variable * Define the ``Py_STATS`` macro. * Add functions to the :mod:`sys` module: @@ -247,6 +248,11 @@ General Options ``/tmp/py_stats/`` (Unix) or ``C:\temp\py_stats\`` (Windows). If that directory does not exist, results will be printed on stderr. + If the :envvar:`!PYTHONSTATS_UOPDEPTH` environment variable is set to an + integer, that number will be used as the maximum length UOP chain to + record an execution count for. If it is unset, the default maximum length is + 2. + Use ``Tools/scripts/summarize_stats.py`` to read the stats. Statistics: diff --git a/Misc/ACKS b/Misc/ACKS index 466023f390a421..af685d17ec5b96 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -639,6 +639,7 @@ Neil Girdhar Matt Giuca Andrea Giudiceandrea Franz Glasner +Jeff Glass Wim Glenn Michael Goderbauer Karan Goel From b5ee34ee1a7849d0306038167372ffb3ea6d9e39 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 14:20:59 -0600 Subject: [PATCH 16/31] Add docs, ACK --- Doc/using/configure.rst | 6 ++++++ Misc/ACKS | 1 + 2 files changed, 7 insertions(+) diff --git a/Doc/using/configure.rst b/Doc/using/configure.rst index aab9469b44828a..e10ffda51b7f9b 100644 --- a/Doc/using/configure.rst +++ b/Doc/using/configure.rst @@ -235,6 +235,7 @@ General Options * Add :option:`-X pystats <-X>` command line option. * Add :envvar:`!PYTHONSTATS` environment variable. + * Add :envvar:`!PYTHONSTATS_UOPDEPTH` environment variable * Define the ``Py_STATS`` macro. 
* Add functions to the :mod:`sys` module: @@ -247,6 +248,11 @@ General Options ``/tmp/py_stats/`` (Unix) or ``C:\temp\py_stats\`` (Windows). If that directory does not exist, results will be printed on stderr. + If the :envvar:`!PYTHONSTATS_UOPDEPTH` environment variable is set to an + integer, that number will be used as the maximum length UOP chain to + record an execution count for. If it is unset, the default maximum length is + 2. + Use ``Tools/scripts/summarize_stats.py`` to read the stats. Statistics: diff --git a/Misc/ACKS b/Misc/ACKS index 466023f390a421..af685d17ec5b96 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -639,6 +639,7 @@ Neil Girdhar Matt Giuca Andrea Giudiceandrea Franz Glasner +Jeff Glass Wim Glenn Michael Goderbauer Karan Goel From 94c4f195af503fadf0493ea0db4428293d9d37d3 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 15:31:25 -0600 Subject: [PATCH 17/31] Address linting errors --- Include/cpython/pystats.h | 2 +- Python/initconfig.c | 8 ++++---- Python/specialize.c | 14 +++++++------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h index 3f971dab1cb32e..3a580da6205b53 100644 --- a/Include/cpython/pystats.h +++ b/Include/cpython/pystats.h @@ -117,7 +117,7 @@ typedef struct _optimization_stats { uint64_t recursive_call; uint64_t low_confidence; uint64_t max_uop_chain_depth; - uint64_t *last_opcodes; + uint64_t *last_opcodes; UOpStats *opcode[512]; uint64_t unsupported_opcode[256]; uint64_t trace_length_hist[_Py_UOP_HIST_SIZE]; diff --git a/Python/initconfig.c b/Python/initconfig.c index ebcf47703230cb..71fba052a18096 100644 --- a/Python/initconfig.c +++ b/Python/initconfig.c @@ -2216,14 +2216,14 @@ config_read(PyConfig *config, int compute_path_config) int uop_depth = atoi(config_get_env(config, "PYTHONSTATS_UOPDEPTH")); config->_pystats_depth = uop_depth; if (config->_pystats_depth < 2){ - printf("Setting default stats depth to 2"); config->_pystats_depth = 2; + printf("Setting default stats depth to 2"); config->_pystats_depth = 2; } } else { config->_pystats_depth = 2; } - + #endif status = config_read_complex_options(config); @@ -2366,8 +2366,8 @@ _PyConfig_Write(const PyConfig *config, _PyRuntimeState *runtime) if (config->_pystats_depth){ _Py_Stats_Set_Depth(config->_pystats_depth); } - _Py_StatsOn(); - + _Py_StatsOn(); + } #endif diff --git a/Python/specialize.c b/Python/specialize.c index fe5f8e4201923b..566e6e0e397b33 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -43,7 +43,7 @@ PyStats *_Py_stats = NULL; Py_DECREF(val); \ } while(0); -void +void _init_pystats(){ //Make UOpstats structs for all initial opcodes, with null pointers to deeper opcodes for (int i = 0; i < 512; i++){ @@ -56,16 +56,16 @@ _init_pystats(){ int previous_depth = _Py_stats_struct.optimization_stats.max_uop_chain_depth; int depth = previous_depth ? 
previous_depth : 2; - + if (_Py_stats_struct.optimization_stats.last_opcodes){ uint64_t *tmp; tmp = PyMem_RawRealloc(depth, sizeof(uint64_t)); if (tmp != NULL) _Py_stats_struct.optimization_stats.last_opcodes = tmp; - else return PyErr_NoMemory(); - } + else return PyErr_NoMemory(); + } else { _Py_stats_struct.optimization_stats.last_opcodes = PyMem_RawCalloc(depth, sizeof(uint64_t)); - } + } } static PyObject* @@ -266,7 +266,7 @@ print_uop_sequence(FILE *out, UOpStats *uop_stats, const char* prefix){ print_uop_sequence(out, uop_stats->next_stats[i], new_prefix); } } - } + } } static void @@ -353,7 +353,7 @@ _Py_StatsOn(void) if (!_Py_stats->optimization_stats.opcode[0]) _init_pystats(); } -void +void _Py_Stats_Set_Depth(int depth){ _Py_stats_struct.optimization_stats.max_uop_chain_depth = depth; if (_Py_stats){ From 870a7cebc70401b25b962bca3cd660b3ef514402 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 15:35:16 -0600 Subject: [PATCH 18/31] Minor formatting, table renaming in summarize_stats.py --- Tools/scripts/summarize_stats.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 9b294926611b1f..2a9a6b4af2f57a 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -678,7 +678,7 @@ def calc_pair_count_table(stats: Stats) -> Rows: def uop_sequence_section(base_stats: Stats, head_stats: Stats) -> Generator[Section]: if not (longest := base_stats.get_max_uop_sequence_length()): return - def calc_uop_sequence(base_stats: Stats ,sequence_length: int) -> Rows: + def calc_uop_sequence(base_stats: Stats, sequence_length: int) -> Rows: uop_sequences = base_stats.get_uop_sequence_stats(sequence_length) total = base_stats.get_optimization_stats()["Uops executed"][0] @@ -700,11 +700,11 @@ def calc_uop_sequence(base_stats: Stats ,sequence_length: int) -> Rows: for l in range(2, longest+1): yield Section( - "Pair counts", + f"Uop {l}-Sequence Counts", f"Counts for top 100 UOp Sequences of Length {l}", [ Table( - ("Pair", "Count:", "Self:", "Cumulative:"), + ("Sequence", "Count:", "Self:", "Cumulative:"), functools.partial(calc_uop_sequence, sequence_length=l), ) ], From eab5abe151ee59ca732f7c899e2150600d951002 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 8 Feb 2024 17:59:30 -0600 Subject: [PATCH 19/31] Remove my 'formatting fix' that was causing segfaults --- Python/specialize.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Python/specialize.c b/Python/specialize.c index 566e6e0e397b33..4354d4761098d9 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -47,10 +47,10 @@ void _init_pystats(){ //Make UOpstats structs for all initial opcodes, with null pointers to deeper opcodes for (int i = 0; i < 512; i++){ - UOpStats *opcode_stat = _Py_stats_struct.optimization_stats.opcode[i]; - opcode_stat = PyMem_RawCalloc(1, sizeof(UOpStats)); + _Py_stats_struct.optimization_stats.opcode[i] = PyMem_RawCalloc(1, sizeof(UOpStats)); for (int j = 0; j < 512; j++){ - opcode_stat->next_stats[j] = NULL; + _Py_stats_struct.optimization_stats.opcode[i]->next_stats[j] = NULL; + } } From 09a65555e40176c022484fc97d2d21b320ad4a63 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 12 Feb 2024 11:05:37 -0600 Subject: [PATCH 20/31] Implement pair counts --- Include/cpython/pystats.h | 1 + Misc/ACKS | 1 + Python/ceval.c | 7 +++++++ Python/specialize.c | 9 +++++++++ Tools/scripts/summarize_stats.py | 11 ++++++----- 5 files changed, 24 
insertions(+), 5 deletions(-) diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h index 0f50439b73848e..2354f0cce3396e 100644 --- a/Include/cpython/pystats.h +++ b/Include/cpython/pystats.h @@ -99,6 +99,7 @@ typedef struct _gc_stats { typedef struct _uop_stats { uint64_t execution_count; uint64_t miss; + uint64_t pair_count[512]; } UOpStats; #define _Py_UOP_HIST_SIZE 32 diff --git a/Misc/ACKS b/Misc/ACKS index 8a80e02ecba26a..755069826dec66 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -639,6 +639,7 @@ Neil Girdhar Matt Giuca Andrea Giudiceandrea Franz Glasner +Jeff Glass Wim Glenn Michael Goderbauer Karan Goel diff --git a/Python/ceval.c b/Python/ceval.c index 4f208009086191..06ed8c19a52e2b 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1010,6 +1010,8 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int _PyUOpInstruction *next_uop = current_executor->trace; uint16_t uopcode; #ifdef Py_STATS + int lastuop = 0; + int first_uop_flag = 1; uint64_t trace_uop_execution_counter = 0; #endif @@ -1026,6 +1028,11 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int next_uop++; OPT_STAT_INC(uops_executed); UOP_STAT_INC(uopcode, execution_count); + if (!first_uop_flag){ + UOP_STAT_INC(uopcode, pair_count[lastuop]); + lastuop = uopcode; + } + first_uop_flag = 0; #ifdef Py_STATS trace_uop_execution_counter++; #endif diff --git a/Python/specialize.c b/Python/specialize.c index ea2638570f22d0..0c7279a7871f94 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -265,6 +265,15 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) ); } } + + for (int i = 0; i < 512; i++){ + for (int j = 0; j < 512; j++) { + if (stats->opcode[i].pair_count[j]) { + fprintf(out, "uop[%s].pair_count[%s] : %" PRIu64 "\n", + _PyOpcode_uop_name[i], _PyOpcode_uop_name[j], stats->opcode[i].pair_count[j]); + } + } + } } static void diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 7891b9cf923d33..6b513f2537f1f2 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -21,7 +21,7 @@ from pathlib import Path import re import sys -from typing import Any, Callable, TextIO, TypeAlias +from typing import Any, Callable, Generator, TextIO, TypeAlias RawData: TypeAlias = dict[str, Any] @@ -631,9 +631,9 @@ def execution_count_section() -> Section: ) -def pair_count_section() -> Section: +def pair_count_section(prefix: str) -> Section: def calc_pair_count_table(stats: Stats) -> Rows: - opcode_stats = stats.get_opcode_stats("opcode") + opcode_stats = stats.get_opcode_stats(prefix) pair_counts = opcode_stats.get_pair_counts() total = opcode_stats.get_total_execution_count() @@ -655,7 +655,7 @@ def calc_pair_count_table(stats: Stats) -> Rows: return Section( "Pair counts", - "Pair counts for top 100 pairs", + f"Pair counts for top 100 {prefix} pairs", [ Table( ("Pair", "Count:", "Self:", "Cumulative:"), @@ -1053,6 +1053,7 @@ def iter_optimization_tables(base_stats: Stats, head_stats: Stats | None = None) ) ], ) + yield pair_count_section("uop") yield Section( "Unsupported opcodes", "", @@ -1096,7 +1097,7 @@ def calc_rows(stats: Stats) -> Rows: LAYOUT = [ execution_count_section(), - pair_count_section(), + pair_count_section('opcode'), pre_succ_pairs_section(), specialization_section(), specialization_effectiveness_section(), From 094e88fe5a1ca3d0306f91f1995a386acc26b5d5 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 12 Feb 2024 11:08:53 -0600 Subject: [PATCH 21/31] Format python 
with Black --- Tools/scripts/summarize_stats.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 6b513f2537f1f2..71a3f5e86b63c2 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -21,7 +21,7 @@ from pathlib import Path import re import sys -from typing import Any, Callable, Generator, TextIO, TypeAlias +from typing import Any, Callable, TextIO, TypeAlias RawData: TypeAlias = dict[str, Any] @@ -415,7 +415,7 @@ def get_histogram(self, prefix: str) -> list[tuple[int, int]]: def get_rare_events(self) -> list[tuple[str, int]]: prefix = "Rare event " return [ - (key[len(prefix) + 1:-1].replace("_", " "), val) + (key[len(prefix) + 1 : -1].replace("_", " "), val) for key, val in self._data.items() if key.startswith(prefix) ] @@ -1097,7 +1097,7 @@ def calc_rows(stats: Stats) -> Rows: LAYOUT = [ execution_count_section(), - pair_count_section('opcode'), + pair_count_section("opcode"), pre_succ_pairs_section(), specialization_section(), specialization_effectiveness_section(), From e27d88661bf1fc27af88b5d0ac8adfec60d073db Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 12 Feb 2024 11:18:25 -0600 Subject: [PATCH 22/31] Rework UOP_PAIR_INC to be no-op in non-stats case --- Include/internal/pycore_code.h | 2 ++ Python/ceval.c | 7 +------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index fdd5918228455d..f16a7241fecdb6 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -286,6 +286,7 @@ extern int _PyStaticCode_Init(PyCodeObject *co); #define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0) #define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0) #define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0) +#define UOP_PAIR_INC(uopcode, lastuop) do { if (lastuop) { _Py_stats->optimization_stats.opcode[uopcode].pair_count[lastuop]++; } lastuop = uopcode; } while (0) #define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0) #define OPT_HIST(length, name) \ do { \ @@ -312,6 +313,7 @@ PyAPI_FUNC(PyObject*) _Py_GetSpecializationStats(void); #define GC_STAT_ADD(gen, name, n) ((void)0) #define OPT_STAT_INC(name) ((void)0) #define UOP_STAT_INC(opname, name) ((void)0) +#define UOP_PAIR_INC(uopcode, lastuop) ((void)0) #define OPT_UNSUPPORTED_OPCODE(opname) ((void)0) #define OPT_HIST(length, name) ((void)0) #define RARE_EVENT_STAT_INC(name) ((void)0) diff --git a/Python/ceval.c b/Python/ceval.c index 06ed8c19a52e2b..ac07a8d396384a 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -1011,7 +1011,6 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int uint16_t uopcode; #ifdef Py_STATS int lastuop = 0; - int first_uop_flag = 1; uint64_t trace_uop_execution_counter = 0; #endif @@ -1028,11 +1027,7 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int next_uop++; OPT_STAT_INC(uops_executed); UOP_STAT_INC(uopcode, execution_count); - if (!first_uop_flag){ - UOP_STAT_INC(uopcode, pair_count[lastuop]); - lastuop = uopcode; - } - first_uop_flag = 0; + UOP_PAIR_INC(uopcode, lastuop); #ifdef Py_STATS trace_uop_execution_counter++; #endif From 
6ee92ae9bf8b2b8f051384184134dd93a05f83b2 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 12 Feb 2024 13:44:28 -0600 Subject: [PATCH 23/31] Add check for _Py_stats to avoid segfault --- Include/internal/pycore_code.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index f16a7241fecdb6..9f5240f77c8d5b 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -286,7 +286,9 @@ extern int _PyStaticCode_Init(PyCodeObject *co); #define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0) #define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0) #define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0) -#define UOP_PAIR_INC(uopcode, lastuop) do { if (lastuop) { _Py_stats->optimization_stats.opcode[uopcode].pair_count[lastuop]++; } lastuop = uopcode; } while (0) +#define UOP_PAIR_INC(uopcode, lastuop) do { if (lastuop && _Py_stats) { \ + _Py_stats->optimization_stats.opcode[uopcode].pair_count[lastuop]++; } \ + lastuop = uopcode; } while (0) #define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0) #define OPT_HIST(length, name) \ do { \ From ba7ba11b2fefffcf756c9f984a64e297fe952b49 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Mon, 12 Feb 2024 15:15:11 -0600 Subject: [PATCH 24/31] Undo some of Black's formatting --- Tools/scripts/summarize_stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index 71a3f5e86b63c2..ccacf7a10db219 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -415,7 +415,7 @@ def get_histogram(self, prefix: str) -> list[tuple[int, int]]: def get_rare_events(self) -> list[tuple[str, int]]: prefix = "Rare event " return [ - (key[len(prefix) + 1 : -1].replace("_", " "), val) + (key[len(prefix) + 1: -1].replace("_", " "), val) for key, val in self._data.items() if key.startswith(prefix) ] From cbb5f70d5b2d58ce2265119e2066d4190ba7ee47 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Tue, 13 Feb 2024 07:29:23 -0600 Subject: [PATCH 25/31] Swap lastuop/current uop in counts --- Include/internal/pycore_code.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index 9f5240f77c8d5b..a465238d39faca 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -287,7 +287,7 @@ extern int _PyStaticCode_Init(PyCodeObject *co); #define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0) #define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0) #define UOP_PAIR_INC(uopcode, lastuop) do { if (lastuop && _Py_stats) { \ - _Py_stats->optimization_stats.opcode[uopcode].pair_count[lastuop]++; } \ + _Py_stats->optimization_stats.opcode[lastuop].pair_count[uopcode]++; } \ lastuop = uopcode; } while (0) #define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0) #define OPT_HIST(length, name) \ From 03db7a5e950639cbbaea38ad4521f55ab39b1495 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Wed, 14 Feb 2024 13:35:58 -0600 Subject: [PATCH 
26/31] Catch up with main --- .editorconfig | 4 +- .gitattributes | 1 + Doc/c-api/long.rst | 70 + Doc/c-api/time.rst | 83 + Doc/c-api/unicode.rst | 7 +- Doc/c-api/utilities.rst | 1 + Doc/conf.py | 17 +- Doc/reference/datamodel.rst | 2 + Doc/tools/static/changelog_search.js | 102 +- Doc/using/mac.rst | 105 +- Doc/whatsnew/3.13.rst | 17 +- Include/Python.h | 1 + Include/cpython/longobject.h | 36 +- Include/cpython/optimizer.h | 10 +- Include/cpython/pystats.h | 3 + Include/cpython/pytime.h | 23 + .../pycore_global_objects_fini_generated.h | 10 + Include/internal/pycore_global_strings.h | 10 + Include/internal/pycore_interp.h | 8 +- Include/internal/pycore_opcode_metadata.h | 10 +- Include/internal/pycore_optimizer.h | 7 + .../internal/pycore_runtime_init_generated.h | 10 + Include/internal/pycore_time.h | 99 +- .../internal/pycore_unicodeobject_generated.h | 30 + Include/internal/pycore_uop_ids.h | 3 +- Include/internal/pycore_uop_metadata.h | 14 +- Lib/logging/config.py | 7 +- Lib/multiprocessing/connection.py | 14 +- Lib/pydoc.py | 2 +- Lib/test/_test_multiprocessing.py | 18 + Lib/test/libregrtest/findtests.py | 1 + Lib/test/test_capi/test_long.py | 145 ++ Lib/test/test_capi/test_opt.py | 209 ++ Lib/test/test_capi/test_time.py | 71 + Lib/test/test_dataclasses/__init__.py | 3 + Lib/test/test_enum.py | 10 +- Lib/test/test_generated_cases.py | 153 ++ Lib/test/test_pydoc/__init__.py | 6 + Lib/test/{ => test_pydoc}/pydoc_mod.py | 0 Lib/test/{ => test_pydoc}/pydocfodder.py | 0 Lib/test/{ => test_pydoc}/test_pydoc.py | 51 +- Lib/test/test_queue.py | 4 +- Lib/test/test_time.py | 17 +- Lib/test/test_winapi.py | 94 + Makefile.pre.in | 20 +- Misc/NEWS.d/3.12.0a1.rst | 4 +- Misc/NEWS.d/3.12.0b1.rst | 4 +- Misc/NEWS.d/3.13.0a1.rst | 2 +- ...-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst | 2 + ...-11-16-02-07-48.gh-issue-110850.DQGNfF.rst | 9 + ...-02-05-17-11-15.gh-issue-111140.WMEjid.rst | 2 + ...-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst | 1 + ...-02-09-07-20-16.gh-issue-115165.yfJLXA.rst | 2 +- ...3-08-11-18-21-38.gh-issue-89240.dtSOLG.rst | 1 + ...-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst | 1 + Modules/Setup.stdlib.in | 2 +- Modules/_io/textio.c | 2 +- Modules/_pickle.c | 3 +- Modules/_randommodule.c | 5 +- Modules/_sqlite/util.c | 2 +- Modules/_struct.c | 20 +- Modules/_testcapi/long.c | 48 +- Modules/_testcapi/parts.h | 1 + Modules/_testcapi/time.c | 104 + Modules/_testcapimodule.c | 3 + Modules/_testinternalcapi/pytime.c | 16 - Modules/_threadmodule.c | 6 +- Modules/_tkinter.c | 3 +- Modules/_winapi.c | 506 +++++ Modules/cjkcodecs/multibytecodec.c | 6 +- Modules/clinic/_winapi.c.h | 498 ++++- Objects/exceptions.c | 7 +- Objects/longobject.c | 216 ++- PC/errmap.h | 3 + PC/launcher2.c | 15 +- PCbuild/_testcapi.vcxproj | 1 + PCbuild/_testcapi.vcxproj.filters | 3 + Python/bytecodes.c | 85 +- Python/executor_cases.c.h | 15 +- Python/generated_cases.c.h | 31 +- Python/optimizer.c | 66 +- Python/optimizer_analysis.c | 580 +++++- Python/pylifecycle.c | 4 +- Python/pystate.c | 10 +- Python/pytime.c | 102 +- Python/specialize.c | 5 + .../tier2_redundancy_eliminator_bytecodes.c | 272 +++ Python/tier2_redundancy_eliminator_cases.c.h | 1680 +++++++++++++++++ Tools/c-analyzer/cpython/_parser.py | 2 + Tools/c-analyzer/cpython/ignored.tsv | 2 +- Tools/cases_generator/README.md | 3 + Tools/cases_generator/analyzer.py | 6 +- .../cases_generator/interpreter_definition.md | 26 +- Tools/cases_generator/parsing.py | 26 +- Tools/cases_generator/stack.py | 4 +- .../tier2_abstract_generator.py | 235 +++ config.sub | 251 
++- configure | 383 ++-- configure.ac | 385 ++-- 99 files changed, 6249 insertions(+), 930 deletions(-) create mode 100644 Doc/c-api/time.rst create mode 100644 Include/cpython/pytime.h create mode 100644 Lib/test/test_capi/test_time.py create mode 100644 Lib/test/test_pydoc/__init__.py rename Lib/test/{ => test_pydoc}/pydoc_mod.py (100%) rename Lib/test/{ => test_pydoc}/pydocfodder.py (100%) rename Lib/test/{ => test_pydoc}/test_pydoc.py (98%) create mode 100644 Lib/test/test_winapi.py create mode 100644 Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst create mode 100644 Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst create mode 100644 Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst create mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst create mode 100644 Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst create mode 100644 Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst create mode 100644 Modules/_testcapi/time.c create mode 100644 Python/tier2_redundancy_eliminator_bytecodes.c create mode 100644 Python/tier2_redundancy_eliminator_cases.c.h create mode 100644 Tools/cases_generator/tier2_abstract_generator.py diff --git a/.editorconfig b/.editorconfig index 0169eed951cd3f..a6187d64f3ce46 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,6 +1,6 @@ root = true -[*.{py,c,cpp,h,rst,md,yml}] +[*.{py,c,cpp,h,js,rst,md,yml}] trim_trailing_whitespace = true insert_final_newline = true indent_style = space @@ -11,5 +11,5 @@ indent_size = 4 [*.rst] indent_size = 3 -[*.yml] +[*.{js,yml}] indent_size = 2 diff --git a/.gitattributes b/.gitattributes index 2a48df079e1aeb..07d877027b09f6 100644 --- a/.gitattributes +++ b/.gitattributes @@ -94,6 +94,7 @@ Programs/test_frozenmain.h generated Python/Python-ast.c generated Python/executor_cases.c.h generated Python/generated_cases.c.h generated +Python/tier2_redundancy_eliminator_bytecodes.c.h generated Python/opcode_targets.h generated Python/stdlib_module_names.h generated Tools/peg_generator/pegen/grammar_parser.py generated diff --git a/Doc/c-api/long.rst b/Doc/c-api/long.rst index f42e23db89ae39..f24282e76a33d1 100644 --- a/Doc/c-api/long.rst +++ b/Doc/c-api/long.rst @@ -113,6 +113,28 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. retrieved from the resulting value using :c:func:`PyLong_AsVoidPtr`. +.. c:function:: PyObject* PyLong_FromNativeBytes(const void* buffer, size_t n_bytes, int endianness) + + Create a Python integer from the value contained in the first *n_bytes* of + *buffer*, interpreted as a two's-complement signed number. + + *endianness* may be passed ``-1`` for the native endian that CPython was + compiled with, or else ``0`` for big endian and ``1`` for little. + + .. versionadded:: 3.13 + + +.. c:function:: PyObject* PyLong_FromUnsignedNativeBytes(const void* buffer, size_t n_bytes, int endianness) + + Create a Python integer from the value contained in the first *n_bytes* of + *buffer*, interpreted as an unsigned number. + + *endianness* may be passed ``-1`` for the native endian that CPython was + compiled with, or else ``0`` for big endian and ``1`` for little. + + .. versionadded:: 3.13 + + .. XXX alias PyLong_AS_LONG (for now) .. c:function:: long PyLong_AsLong(PyObject *obj) @@ -332,6 +354,54 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. Returns ``NULL`` on error. Use :c:func:`PyErr_Occurred` to disambiguate. 
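For reference, the two constructors documented above take a raw buffer, a byte count, and an endianness flag (``-1`` selects the build's native byte order). A minimal usage sketch, assuming a running interpreter; the wrapper helpers below are hypothetical and not part of this patch::

    #include <Python.h>
    #include <stdint.h>

    /* Hypothetical helper: wrap a C int64_t as a Python int, using the
       native endianness of the build (endianness == -1). */
    static PyObject *
    int64_to_pylong(int64_t value)
    {
        return PyLong_FromNativeBytes(&value, sizeof(value), -1);
    }

    /* Hypothetical helper: the unsigned variant always produces a
       non-negative Python int. */
    static PyObject *
    uint64_to_pylong(uint64_t value)
    {
        return PyLong_FromUnsignedNativeBytes(&value, sizeof(value), -1);
    }
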
+.. c:function:: Py_ssize_t PyLong_AsNativeBytes(PyObject *pylong, void* buffer, Py_ssize_t n_bytes, int endianness) + + Copy the Python integer value to a native *buffer* of size *n_bytes*:: + + int value; + Py_ssize_t bytes = PyLong_AsNativeBytes(v, &value, sizeof(value), -1); + if (bytes < 0) { + // Error occurred + return NULL; + } + else if (bytes <= (Py_ssize_t)sizeof(value)) { + // Success! + } + else { + // Overflow occurred, but 'value' contains truncated value + } + + *endianness* may be passed ``-1`` for the native endian that CPython was + compiled with, or ``0`` for big endian and ``1`` for little. + + Return ``-1`` with an exception raised if *pylong* cannot be interpreted as + an integer. Otherwise, return the size of the buffer required to store the + value. If this is equal to or less than *n_bytes*, the entire value was + copied. + + Unless an exception is raised, all *n_bytes* of the buffer will be written + with as much of the value as can fit. This allows the caller to ignore all + non-negative results if the intent is to match the typical behavior of a + C-style downcast. No exception is set for this case. + + Values are always copied as two's-complement, and sufficient buffer will be + requested to include a sign bit. For example, this may cause an value that + fits into 8 bytes when treated as unsigned to request 9 bytes, even though + all eight bytes were copied into the buffer. What has been omitted is the + zero sign bit, which is redundant when the intention is to treat the value as + unsigned. + + Passing zero to *n_bytes* will return the requested buffer size. + + .. note:: + + When the value does not fit in the provided buffer, the requested size + returned from the function may be larger than necessary. Passing 0 to this + function is not an accurate way to determine the bit length of a value. + + .. versionadded:: 3.13 + + .. c:function:: int PyUnstable_Long_IsCompact(const PyLongObject* op) Return 1 if *op* is compact, 0 otherwise. diff --git a/Doc/c-api/time.rst b/Doc/c-api/time.rst new file mode 100644 index 00000000000000..7791cdb1781055 --- /dev/null +++ b/Doc/c-api/time.rst @@ -0,0 +1,83 @@ +.. highlight:: c + +PyTime C API +============ + +.. versionadded:: 3.13 + +The clock C API provides access to system clocks. +It is similar to the Python :mod:`time` module. + +For C API related to the :mod:`datetime` module, see :ref:`datetimeobjects`. + + +Types +----- + +.. c:type:: PyTime_t + + A timestamp or duration in nanoseconds, represented as a signed 64-bit + integer. + + The reference point for timestamps depends on the clock used. For example, + :c:func:`PyTime_Time` returns timestamps relative to the UNIX epoch. + + The supported range is around [-292.3 years; +292.3 years]. + Using the Unix epoch (January 1st, 1970) as reference, the supported date + range is around [1677-09-21; 2262-04-11]. + The exact limits are exposed as constants: + +.. c:var:: PyTime_t PyTime_MIN + + Minimum value of :c:type:`PyTime_t`. + +.. c:var:: PyTime_t PyTime_MAX + + Maximum value of :c:type:`PyTime_t`. + + +Clock Functions +--------------- + +The following functions take a pointer to a :c:expr:`PyTime_t` that they +set to the value of a particular clock. +Details of each clock are given in the documentation of the corresponding +Python function. + +The functions return ``0`` on success, or ``-1`` (with an exception set) +on failure. 
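To make the calling convention concrete, a minimal sketch of timing one call with the new clock functions, assuming the GIL is held; ``do_work`` is a placeholder callback and the helper is not part of this patch::

    #include <Python.h>

    /* Hypothetical helper: measure the elapsed time of one call using the
       performance counter; returns -1.0 with an exception set on error. */
    static double
    time_one_call(void (*do_work)(void))
    {
        PyTime_t t0, t1;
        if (PyTime_PerfCounter(&t0) < 0) {
            return -1.0;  /* exception already set */
        }
        do_work();
        if (PyTime_PerfCounter(&t1) < 0) {
            return -1.0;
        }
        /* PyTime_t counts nanoseconds, so the difference converts
           directly to seconds. */
        return PyTime_AsSecondsDouble(t1 - t0);
    }
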
+ +On integer overflow, they set the :c:data:`PyExc_OverflowError` exception and +set ``*result`` to the value clamped to the ``[PyTime_MIN; PyTime_MAX]`` +range. +(On current systems, integer overflows are likely caused by misconfigured +system time.) + +As any other C API (unless otherwise specified), the functions must be called +with the :term:`GIL` held. + +.. c:function:: int PyTime_Monotonic(PyTime_t *result) + + Read the monotonic clock. + See :func:`time.monotonic` for important details on this clock. + +.. c:function:: int PyTime_PerfCounter(PyTime_t *result) + + Read the performance counter. + See :func:`time.perf_counter` for important details on this clock. + +.. c:function:: int PyTime_Time(PyTime_t *result) + + Read the “wall clock” time. + See :func:`time.time` for details important on this clock. + + +Conversion functions +-------------------- + +.. c:function:: double PyTime_AsSecondsDouble(PyTime_t t) + + Convert a timestamp to a number of seconds as a C :c:expr:`double`. + + The function cannot fail, but note that :c:expr:`double` has limited + accuracy for large values. diff --git a/Doc/c-api/unicode.rst b/Doc/c-api/unicode.rst index 5541eaa521803b..666ffe89605c56 100644 --- a/Doc/c-api/unicode.rst +++ b/Doc/c-api/unicode.rst @@ -854,7 +854,12 @@ wchar_t Support Copy the Unicode object contents into the :c:type:`wchar_t` buffer *wstr*. At most *size* :c:type:`wchar_t` characters are copied (excluding a possibly trailing null termination character). Return the number of :c:type:`wchar_t` characters - copied or ``-1`` in case of an error. Note that the resulting :c:expr:`wchar_t*` + copied or ``-1`` in case of an error. + + When *wstr* is ``NULL``, instead return the *size* that would be required + to store all of *unicode* including a terminating null. + + Note that the resulting :c:expr:`wchar_t*` string may or may not be null-terminated. It is the responsibility of the caller to make sure that the :c:expr:`wchar_t*` string is null-terminated in case this is required by the application. Also, note that the :c:expr:`wchar_t*` string diff --git a/Doc/c-api/utilities.rst b/Doc/c-api/utilities.rst index 48ae54acebe887..9d0abf440f791d 100644 --- a/Doc/c-api/utilities.rst +++ b/Doc/c-api/utilities.rst @@ -20,4 +20,5 @@ and parsing function arguments and constructing Python values from C values. hash.rst reflection.rst codec.rst + time.rst perfmaps.rst diff --git a/Doc/conf.py b/Doc/conf.py index c2d57696aeeaa3..677d139046e5d0 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -64,6 +64,10 @@ import patchlevel version, release = patchlevel.get_version_info() +rst_epilog = f""" +.. |python_version_literal| replace:: ``Python {version}`` +""" + # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: today = '' @@ -135,11 +139,14 @@ ('c:type', 'wchar_t'), ('c:type', '__int64'), ('c:type', 'unsigned __int64'), + ('c:type', 'double'), # Standard C structures ('c:struct', 'in6_addr'), ('c:struct', 'in_addr'), ('c:struct', 'stat'), ('c:struct', 'statvfs'), + ('c:struct', 'timeval'), + ('c:struct', 'timespec'), # Standard C macros ('c:macro', 'LLONG_MAX'), ('c:macro', 'LLONG_MIN'), @@ -269,12 +276,12 @@ ('py:meth', 'index'), # list.index, tuple.index, etc. ] -# gh-106948: Copy standard C types declared in the "c:type" domain to the -# "c:identifier" domain, since "c:function" markup looks for types in the -# "c:identifier" domain. 
Use list() to not iterate on items which are being -# added +# gh-106948: Copy standard C types declared in the "c:type" domain and C +# structures declared in the "c:struct" domain to the "c:identifier" domain, +# since "c:function" markup looks for types in the "c:identifier" domain. Use +# list() to not iterate on items which are being added for role, name in list(nitpick_ignore): - if role == 'c:type': + if role in ('c:type', 'c:struct'): nitpick_ignore.append(('c:identifier', name)) del role, name diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst index 885ee825c12296..88bc025c7c3fb4 100644 --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -1134,6 +1134,8 @@ Special read-only attributes * - .. attribute:: codeobject.co_qualname - The fully qualified function name + .. versionadded:: 3.11 + * - .. attribute:: codeobject.co_argcount - The total number of positional :term:`parameters ` (including positional-only parameters and parameters with default values) diff --git a/Doc/tools/static/changelog_search.js b/Doc/tools/static/changelog_search.js index c881a9bd4c84a7..0a77c0d71ae937 100644 --- a/Doc/tools/static/changelog_search.js +++ b/Doc/tools/static/changelog_search.js @@ -1,53 +1,59 @@ -$(document).ready(function() { - // add the search form and bind the events - $('h1').after([ - '

<p>Filter entries by content:', - '<input type="text" value="" id="searchbox" style="width: 50%">', - '<input type="submit" id="searchbox-submit" value="Filter"></p>' - ].join('\n')); +document.addEventListener("DOMContentLoaded", function () { + // add the search form and bind the events + document + .querySelector("h1") + .insertAdjacentHTML( + "afterend", + [ + "<p>Filter entries by content:", + '<input type="text" value="" id="searchbox" style="width: 50%">', + '<input type="submit" id="searchbox-submit" value="Filter"></p>
', + ].join("\n"), + ); - function dofilter() { - try { - var query = new RegExp($('#searchbox').val(), 'i'); + function doFilter() { + let query; + try { + query = new RegExp(document.querySelector("#searchbox").value, "i"); + } catch (e) { + return; // not a valid regex (yet) + } + // find headers for the versions (What's new in Python X.Y.Z?) + const h2s = document.querySelectorAll("#changelog h2"); + for (const h2 of h2s) { + let sections_found = 0; + // find headers for the sections (Core, Library, etc.) + const h3s = h2.parentNode.querySelectorAll("h3"); + for (const h3 of h3s) { + let entries_found = 0; + // find all the entries + const lis = h3.parentNode.querySelectorAll("li"); + for (let li of lis) { + // check if the query matches the entry + if (query.test(li.textContent)) { + li.style.display = "block"; + entries_found++; + } else { + li.style.display = "none"; + } } - catch (e) { - return; // not a valid regex (yet) + // if there are entries, show the section, otherwise hide it + if (entries_found > 0) { + h3.parentNode.style.display = "block"; + sections_found++; + } else { + h3.parentNode.style.display = "none"; } - // find headers for the versions (What's new in Python X.Y.Z?) - $('#changelog h2').each(function(index1, h2) { - var h2_parent = $(h2).parent(); - var sections_found = 0; - // find headers for the sections (Core, Library, etc.) - h2_parent.find('h3').each(function(index2, h3) { - var h3_parent = $(h3).parent(); - var entries_found = 0; - // find all the entries - h3_parent.find('li').each(function(index3, li) { - var li = $(li); - // check if the query matches the entry - if (query.test(li.text())) { - li.show(); - entries_found++; - } - else { - li.hide(); - } - }); - // if there are entries, show the section, otherwise hide it - if (entries_found > 0) { - h3_parent.show(); - sections_found++; - } - else { - h3_parent.hide(); - } - }); - if (sections_found > 0) - h2_parent.show(); - else - h2_parent.hide(); - }); + } + if (sections_found > 0) { + h2.parentNode.style.display = "block"; + } else { + h2.parentNode.style.display = "none"; + } } - $('#searchbox').keyup(dofilter); - $('#searchbox-submit').click(dofilter); + } + document.querySelector("#searchbox").addEventListener("keyup", doFilter); + document + .querySelector("#searchbox-submit") + .addEventListener("click", doFilter); }); diff --git a/Doc/using/mac.rst b/Doc/using/mac.rst index eb1413af2cbc3d..e99993238895f9 100644 --- a/Doc/using/mac.rst +++ b/Doc/using/mac.rst @@ -10,41 +10,46 @@ Using Python on a Mac Python on a Mac running macOS is in principle very similar to Python on any other Unix platform, but there are a number of additional features such as -the IDE and the Package Manager that are worth pointing out. +the integrated development environment (IDE) and the Package Manager that are +worth pointing out. + .. _getting-osx: +.. _getting-and-installing-macpython: -Getting and Installing MacPython -================================ +Getting and Installing Python +============================= macOS used to come with Python 2.7 pre-installed between versions 10.8 and `12.3 `_. -You are invited to install the most recent version of Python 3 from the Python -website (https://www.python.org). A current "universal binary" build of Python, -which runs natively on the Mac's new Intel and legacy PPC CPU's, is available -there. +You are invited to install the most recent version of Python 3 from the `Python +website `__. 
+A current "universal2 binary" build of Python, which runs natively on the Mac's +new Apple Silicon and legacy Intel processors, is available there. What you get after installing is a number of things: -* A :file:`Python 3.12` folder in your :file:`Applications` folder. In here +* A |python_version_literal| folder in your :file:`Applications` folder. In here you find IDLE, the development environment that is a standard part of official - Python distributions; and PythonLauncher, which handles double-clicking Python + Python distributions; and :program:`Python Launcher`, which handles double-clicking Python scripts from the Finder. * A framework :file:`/Library/Frameworks/Python.framework`, which includes the Python executable and libraries. The installer adds this location to your shell - path. To uninstall MacPython, you can simply remove these three things. A - symlink to the Python executable is placed in /usr/local/bin/. - -The Apple-provided build of Python is installed in -:file:`/System/Library/Frameworks/Python.framework` and :file:`/usr/bin/python`, -respectively. You should never modify or delete these, as they are -Apple-controlled and are used by Apple- or third-party software. Remember that -if you choose to install a newer Python version from python.org, you will have -two different but functional Python installations on your computer, so it will -be important that your paths and usages are consistent with what you want to do. - -IDLE includes a help menu that allows you to access Python documentation. If you + path. To uninstall Python, you can remove these three things. A + symlink to the Python executable is placed in :file:`/usr/local/bin/`. + +.. note:: + + On macOS 10.8-12.3, the Apple-provided build of Python is installed in + :file:`/System/Library/Frameworks/Python.framework` and :file:`/usr/bin/python`, + respectively. You should never modify or delete these, as they are + Apple-controlled and are used by Apple- or third-party software. Remember that + if you choose to install a newer Python version from python.org, you will have + two different but functional Python installations on your computer, so it will + be important that your paths and usages are consistent with what you want to do. + +IDLE includes a Help menu that allows you to access Python documentation. If you are completely new to Python you should start reading the tutorial introduction in that document. @@ -56,29 +61,29 @@ How to run a Python script -------------------------- Your best way to get started with Python on macOS is through the IDLE -integrated development environment, see section :ref:`ide` and use the Help menu +integrated development environment; see section :ref:`ide` and use the Help menu when the IDE is running. If you want to run Python scripts from the Terminal window command line or from the Finder you first need an editor to create your script. macOS comes with a -number of standard Unix command line editors, :program:`vim` and -:program:`emacs` among them. If you want a more Mac-like editor, -:program:`BBEdit` or :program:`TextWrangler` from Bare Bones Software (see -http://www.barebones.com/products/bbedit/index.html) are good choices, as is -:program:`TextMate` (see https://macromates.com/). Other editors include -:program:`Gvim` (https://macvim.org/macvim/) and :program:`Aquamacs` -(http://aquamacs.org/). +number of standard Unix command line editors, :program:`vim` +:program:`nano` among them. 
If you want a more Mac-like editor, +:program:`BBEdit` from Bare Bones Software (see +https://www.barebones.com/products/bbedit/index.html) are good choices, as is +:program:`TextMate` (see https://macromates.com). Other editors include +:program:`MacVim` (https://macvim.org) and :program:`Aquamacs` +(https://aquamacs.org). To run your script from the Terminal window you must make sure that :file:`/usr/local/bin` is in your shell search path. To run your script from the Finder you have two options: -* Drag it to :program:`PythonLauncher` +* Drag it to :program:`Python Launcher`. -* Select :program:`PythonLauncher` as the default application to open your - script (or any .py script) through the finder Info window and double-click it. - :program:`PythonLauncher` has various preferences to control how your script is +* Select :program:`Python Launcher` as the default application to open your + script (or any ``.py`` script) through the finder Info window and double-click it. + :program:`Python Launcher` has various preferences to control how your script is launched. Option-dragging allows you to change these for one invocation, or use its Preferences menu to change things globally. @@ -103,10 +108,11 @@ Python on macOS honors all standard Unix environment variables such as :envvar:`PYTHONPATH`, but setting these variables for programs started from the Finder is non-standard as the Finder does not read your :file:`.profile` or :file:`.cshrc` at startup. You need to create a file -:file:`~/.MacOSX/environment.plist`. See Apple's Technical Document QA1067 for -details. +:file:`~/.MacOSX/environment.plist`. See Apple's +`Technical Q&A QA1067 `__ +for details. -For more information on installation Python packages in MacPython, see section +For more information on installation Python packages, see section :ref:`mac-package-manager`. @@ -115,9 +121,9 @@ For more information on installation Python packages in MacPython, see section The IDE ======= -MacPython ships with the standard IDLE development environment. A good +Python ships with the standard IDLE development environment. A good introduction to using IDLE can be found at -http://www.hashcollision.org/hkn/python/idle_intro/index.html. +https://www.hashcollision.org/hkn/python/idle_intro/index.html. .. _mac-package-manager: @@ -130,8 +136,10 @@ This section has moved to the `Python Packaging User Guide`_. .. _Python Packaging User Guide: https://packaging.python.org/en/latest/tutorials/installing-packages/ -GUI Programming on the Mac -========================== +.. _gui-programming-on-the-mac: + +GUI Programming +=============== There are several options for building GUI applications on the Mac with Python. @@ -151,20 +159,25 @@ macOS. Packages and documentation are available from https://www.wxpython.org. macOS. More information can be found at https://riverbankcomputing.com/software/pyqt/intro. +*PySide* is another cross-platform Qt-based toolkit. More information at +https://www.qt.io/qt-for-python. + -Distributing Python Applications on the Mac -=========================================== +.. _distributing-python-applications-on-the-mac: + +Distributing Python Applications +================================ The standard tool for deploying standalone Python applications on the Mac is -:program:`py2app`. More information on installing and using py2app can be found -at https://pypi.org/project/py2app/. +:program:`py2app`. More information on installing and using :program:`py2app` +can be found at https://pypi.org/project/py2app/. 
Other Resources =============== -The MacPython mailing list is an excellent support resource for Python users and -developers on the Mac: +The Pythonmac-SIG mailing list is an excellent support resource for Python users +and developers on the Mac: https://www.python.org/community/sigs/current/pythonmac-sig/ diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst index 1b803278ae0d5b..b96720df0a2f2d 100644 --- a/Doc/whatsnew/3.13.rst +++ b/Doc/whatsnew/3.13.rst @@ -587,6 +587,7 @@ Tier 2 IR by Mark Shannon and Guido van Rossum. Tier 2 optimizer by Ken Jin.) + Deprecated ========== @@ -1516,6 +1517,21 @@ New Features * Add :c:func:`Py_HashPointer` function to hash a pointer. (Contributed by Victor Stinner in :gh:`111545`.) +* Add PyTime C API: + + * :c:type:`PyTime_t` type. + * :c:var:`PyTime_MIN` and :c:var:`PyTime_MAX` constants. + * :c:func:`PyTime_AsSecondsDouble` + :c:func:`PyTime_Monotonic`, :c:func:`PyTime_PerfCounter`, and + :c:func:`PyTime_Time` functions. + + (Contributed by Victor Stinner and Petr Viktorin in :gh:`110850`.) + +* Add :c:func:`PyLong_AsNativeBytes`, :c:func:`PyLong_FromNativeBytes` and + :c:func:`PyLong_FromUnsignedNativeBytes` functions to simplify converting + between native integer types and Python :class:`int` objects. + (Contributed by Steve Dower in :gh:`111140`.) + Porting to Python 3.13 ---------------------- @@ -1575,7 +1591,6 @@ Porting to Python 3.13 platforms, the ``HAVE_STDDEF_H`` macro is only defined on Windows. (Contributed by Victor Stinner in :gh:`108765`.) - Deprecated ---------- diff --git a/Include/Python.h b/Include/Python.h index 196751c3201e62..01fc45137a17bb 100644 --- a/Include/Python.h +++ b/Include/Python.h @@ -97,6 +97,7 @@ #include "weakrefobject.h" #include "structseq.h" #include "cpython/picklebufobject.h" +#include "cpython/pytime.h" #include "codecs.h" #include "pyerrors.h" #include "pythread.h" diff --git a/Include/cpython/longobject.h b/Include/cpython/longobject.h index fd1be29ed397d1..07251db6bcc203 100644 --- a/Include/cpython/longobject.h +++ b/Include/cpython/longobject.h @@ -4,6 +4,40 @@ PyAPI_FUNC(PyObject*) PyLong_FromUnicodeObject(PyObject *u, int base); +/* PyLong_AsNativeBytes: Copy the integer value to a native variable. + buffer points to the first byte of the variable. + n_bytes is the number of bytes available in the buffer. Pass 0 to request + the required size for the value. + endianness is -1 for native endian, 0 for big endian or 1 for little. + Big endian mode will write the most significant byte into the address + directly referenced by buffer; little endian will write the least significant + byte into that address. + + If an exception is raised, returns a negative value. + Otherwise, returns the number of bytes that are required to store the value. + To check that the full value is represented, ensure that the return value is + equal or less than n_bytes. + All n_bytes are guaranteed to be written (unless an exception occurs), and + so ignoring a positive return value is the equivalent of a downcast in C. + In cases where the full value could not be represented, the returned value + may be larger than necessary - this function is not an accurate way to + calculate the bit length of an integer object. + */ +PyAPI_FUNC(Py_ssize_t) PyLong_AsNativeBytes(PyObject* v, void* buffer, + Py_ssize_t n_bytes, int endianness); + +/* PyLong_FromNativeBytes: Create an int value from a native integer + n_bytes is the number of bytes to read from the buffer. Passing 0 will + always produce the zero int. 
+ PyLong_FromUnsignedNativeBytes always produces a non-negative int. + endianness is -1 for native endian, 0 for big endian or 1 for little. + + Returns the int object, or NULL with an exception set. */ +PyAPI_FUNC(PyObject*) PyLong_FromNativeBytes(const void* buffer, size_t n_bytes, + int endianness); +PyAPI_FUNC(PyObject*) PyLong_FromUnsignedNativeBytes(const void* buffer, + size_t n_bytes, int endianness); + PyAPI_FUNC(int) PyUnstable_Long_IsCompact(const PyLongObject* op); PyAPI_FUNC(Py_ssize_t) PyUnstable_Long_CompactValue(const PyLongObject* op); @@ -50,7 +84,7 @@ PyAPI_FUNC(PyObject *) _PyLong_FromByteArray( */ PyAPI_FUNC(int) _PyLong_AsByteArray(PyLongObject* v, unsigned char* bytes, size_t n, - int little_endian, int is_signed); + int little_endian, int is_signed, int with_exceptions); /* For use by the gcd function in mathmodule.c */ PyAPI_FUNC(PyObject *) _PyLong_GCD(PyObject *, PyObject *); diff --git a/Include/cpython/optimizer.h b/Include/cpython/optimizer.h index 3928eca583ba5b..f710ca76b2ba24 100644 --- a/Include/cpython/optimizer.h +++ b/Include/cpython/optimizer.h @@ -71,6 +71,8 @@ typedef struct { PyAPI_FUNC(int) PyUnstable_Replace_Executor(PyCodeObject *code, _Py_CODEUNIT *instr, _PyExecutorObject *executor); +_PyOptimizerObject *_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject* optimizer); + PyAPI_FUNC(void) PyUnstable_SetOptimizer(_PyOptimizerObject* optimizer); PyAPI_FUNC(_PyOptimizerObject *) PyUnstable_GetOptimizer(void); @@ -80,8 +82,6 @@ PyAPI_FUNC(_PyExecutorObject *) PyUnstable_GetExecutor(PyCodeObject *code, int o int _PyOptimizer_Optimize(struct _PyInterpreterFrame *frame, _Py_CODEUNIT *start, PyObject **stack_pointer); -extern _PyOptimizerObject _PyOptimizer_Default; - void _Py_ExecutorInit(_PyExecutorObject *, _PyBloomFilter *); void _Py_ExecutorClear(_PyExecutorObject *); void _Py_BloomFilter_Init(_PyBloomFilter *); @@ -96,7 +96,11 @@ PyAPI_FUNC(PyObject *)PyUnstable_Optimizer_NewUOpOptimizer(void); #define OPTIMIZER_BITS_IN_COUNTER 4 /* Minimum of 16 additional executions before retry */ -#define MINIMUM_TIER2_BACKOFF 4 +#define MIN_TIER2_BACKOFF 4 +#define MAX_TIER2_BACKOFF (15 - OPTIMIZER_BITS_IN_COUNTER) +#define OPTIMIZER_BITS_MASK ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1) +/* A value <= UINT16_MAX but large enough that when shifted is > UINT16_MAX */ +#define OPTIMIZER_UNREACHABLE_THRESHOLD UINT16_MAX #define _Py_MAX_ALLOWED_BUILTINS_MODIFICATIONS 3 #define _Py_MAX_ALLOWED_GLOBALS_MODIFICATIONS 6 diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h index 2354f0cce3396e..de7853914acb98 100644 --- a/Include/cpython/pystats.h +++ b/Include/cpython/pystats.h @@ -121,6 +121,9 @@ typedef struct _optimization_stats { uint64_t trace_length_hist[_Py_UOP_HIST_SIZE]; uint64_t trace_run_length_hist[_Py_UOP_HIST_SIZE]; uint64_t optimized_trace_length_hist[_Py_UOP_HIST_SIZE]; + uint64_t optimizer_attempts; + uint64_t optimizer_successes; + uint64_t optimizer_failure_reason_no_memory; } OptimizationStats; typedef struct _rare_event_stats { diff --git a/Include/cpython/pytime.h b/Include/cpython/pytime.h new file mode 100644 index 00000000000000..d8244700d614ce --- /dev/null +++ b/Include/cpython/pytime.h @@ -0,0 +1,23 @@ +// PyTime_t C API: see Doc/c-api/time.rst for the documentation. 
+ +#ifndef Py_LIMITED_API +#ifndef Py_PYTIME_H +#define Py_PYTIME_H +#ifdef __cplusplus +extern "C" { +#endif + +typedef int64_t PyTime_t; +#define PyTime_MIN INT64_MIN +#define PyTime_MAX INT64_MAX + +PyAPI_FUNC(double) PyTime_AsSecondsDouble(PyTime_t t); +PyAPI_FUNC(int) PyTime_Monotonic(PyTime_t *result); +PyAPI_FUNC(int) PyTime_PerfCounter(PyTime_t *result); +PyAPI_FUNC(int) PyTime_Time(PyTime_t *result); + +#ifdef __cplusplus +} +#endif +#endif /* Py_PYTIME_H */ +#endif /* Py_LIMITED_API */ diff --git a/Include/internal/pycore_global_objects_fini_generated.h b/Include/internal/pycore_global_objects_fini_generated.h index 932738c3049882..11755210d65432 100644 --- a/Include/internal/pycore_global_objects_fini_generated.h +++ b/Include/internal/pycore_global_objects_fini_generated.h @@ -883,6 +883,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(defaultaction)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(delete)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(depth)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(desired_access)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(detect_types)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(deterministic)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(device)); @@ -973,6 +974,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(groups)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(h)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(handle)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(handle_seq)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(hash_name)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(header)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(headers)); @@ -990,9 +992,12 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(indexgroup)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inf)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(infer_variance)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inherit_handle)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inheritable)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_bytes)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_owner)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_state)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_value)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initval)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inner_size)); @@ -1048,6 +1053,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(locals)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(logoption)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(loop)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(manual_reset)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mapping)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(match)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(max_length)); @@ -1064,6 +1070,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(metadata)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(method)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(microsecond)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(milliseconds)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(minute)); 
_PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mod)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mode)); @@ -1073,6 +1080,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(month)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mro)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(msg)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mutex)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mycmp)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n_arg)); @@ -1176,6 +1184,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sched_priority)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(scheduler)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(second)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(security_attributes)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(seek)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(seekable)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(selectors)); @@ -1263,6 +1272,7 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(values)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(version)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(volume)); + _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(wait_all)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(warnings)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(warnoptions)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(wbits)); diff --git a/Include/internal/pycore_global_strings.h b/Include/internal/pycore_global_strings.h index da62b4f0a951ff..576ac703ca1508 100644 --- a/Include/internal/pycore_global_strings.h +++ b/Include/internal/pycore_global_strings.h @@ -372,6 +372,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(defaultaction) STRUCT_FOR_ID(delete) STRUCT_FOR_ID(depth) + STRUCT_FOR_ID(desired_access) STRUCT_FOR_ID(detect_types) STRUCT_FOR_ID(deterministic) STRUCT_FOR_ID(device) @@ -462,6 +463,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(groups) STRUCT_FOR_ID(h) STRUCT_FOR_ID(handle) + STRUCT_FOR_ID(handle_seq) STRUCT_FOR_ID(hash_name) STRUCT_FOR_ID(header) STRUCT_FOR_ID(headers) @@ -479,9 +481,12 @@ struct _Py_global_strings { STRUCT_FOR_ID(indexgroup) STRUCT_FOR_ID(inf) STRUCT_FOR_ID(infer_variance) + STRUCT_FOR_ID(inherit_handle) STRUCT_FOR_ID(inheritable) STRUCT_FOR_ID(initial) STRUCT_FOR_ID(initial_bytes) + STRUCT_FOR_ID(initial_owner) + STRUCT_FOR_ID(initial_state) STRUCT_FOR_ID(initial_value) STRUCT_FOR_ID(initval) STRUCT_FOR_ID(inner_size) @@ -537,6 +542,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(locals) STRUCT_FOR_ID(logoption) STRUCT_FOR_ID(loop) + STRUCT_FOR_ID(manual_reset) STRUCT_FOR_ID(mapping) STRUCT_FOR_ID(match) STRUCT_FOR_ID(max_length) @@ -553,6 +559,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(metadata) STRUCT_FOR_ID(method) STRUCT_FOR_ID(microsecond) + STRUCT_FOR_ID(milliseconds) STRUCT_FOR_ID(minute) STRUCT_FOR_ID(mod) STRUCT_FOR_ID(mode) @@ -562,6 +569,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(month) STRUCT_FOR_ID(mro) STRUCT_FOR_ID(msg) + STRUCT_FOR_ID(mutex) STRUCT_FOR_ID(mycmp) STRUCT_FOR_ID(n) STRUCT_FOR_ID(n_arg) @@ -665,6 +673,7 @@ struct _Py_global_strings { STRUCT_FOR_ID(sched_priority) STRUCT_FOR_ID(scheduler) STRUCT_FOR_ID(second) + STRUCT_FOR_ID(security_attributes) STRUCT_FOR_ID(seek) STRUCT_FOR_ID(seekable) STRUCT_FOR_ID(selectors) @@ -752,6 +761,7 @@ struct _Py_global_strings { 
STRUCT_FOR_ID(values) STRUCT_FOR_ID(version) STRUCT_FOR_ID(volume) + STRUCT_FOR_ID(wait_all) STRUCT_FOR_ID(warnings) STRUCT_FOR_ID(warnoptions) STRUCT_FOR_ID(wbits) diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h index 31d88071e19d0c..c244d8966f238b 100644 --- a/Include/internal/pycore_interp.h +++ b/Include/internal/pycore_interp.h @@ -112,7 +112,7 @@ struct _is { /* The thread currently executing in the __main__ module, if any. */ PyThreadState *main; /* Used in Modules/_threadmodule.c. */ - long count; + Py_ssize_t count; /* Support for runtime thread stack size tuning. A value of 0 means using the platform's default stack size or the size specified by the THREAD_STACK_SIZE macro. */ @@ -239,8 +239,10 @@ struct _is { struct callable_cache callable_cache; _PyOptimizerObject *optimizer; _PyExecutorObject *executor_list_head; - uint16_t optimizer_resume_threshold; - uint16_t optimizer_backedge_threshold; + /* These values are shifted and offset to speed up check in JUMP_BACKWARD */ + uint32_t optimizer_resume_threshold; + uint32_t optimizer_backedge_threshold; + uint32_t next_func_version; _rare_events rare_events; PyDict_WatchCallback builtins_dict_watcher; diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h index 75d7f44025328e..6b60a6fbffdc5e 100644 --- a/Include/internal/pycore_opcode_metadata.h +++ b/Include/internal/pycore_opcode_metadata.h @@ -1094,7 +1094,7 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[268] = { [MATCH_KEYS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, [MATCH_MAPPING] = { true, INSTR_FMT_IX, 0 }, [MATCH_SEQUENCE] = { true, INSTR_FMT_IX, 0 }, - [NOP] = { true, INSTR_FMT_IX, 0 }, + [NOP] = { true, INSTR_FMT_IX, HAS_PURE_FLAG }, [POP_EXCEPT] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, [POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, [POP_JUMP_IF_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, @@ -1156,10 +1156,10 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[268] = { [LOAD_SUPER_METHOD] = { true, -1, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, [LOAD_ZERO_SUPER_ATTR] = { true, -1, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, [LOAD_ZERO_SUPER_METHOD] = { true, -1, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, - [POP_BLOCK] = { true, -1, 0 }, - [SETUP_CLEANUP] = { true, -1, HAS_ARG_FLAG }, - [SETUP_FINALLY] = { true, -1, HAS_ARG_FLAG }, - [SETUP_WITH] = { true, -1, HAS_ARG_FLAG }, + [POP_BLOCK] = { true, -1, HAS_PURE_FLAG }, + [SETUP_CLEANUP] = { true, -1, HAS_PURE_FLAG | HAS_ARG_FLAG }, + [SETUP_FINALLY] = { true, -1, HAS_PURE_FLAG | HAS_ARG_FLAG }, + [SETUP_WITH] = { true, -1, HAS_PURE_FLAG | HAS_ARG_FLAG }, [STORE_FAST_MAYBE_NULL] = { true, -1, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, }; #endif diff --git a/Include/internal/pycore_optimizer.h b/Include/internal/pycore_optimizer.h index e21412fc815540..eee71c700d4904 100644 --- a/Include/internal/pycore_optimizer.h +++ b/Include/internal/pycore_optimizer.h @@ -8,6 +8,13 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif +#include "pycore_uop_ids.h" + +// This is the length of the trace we project initially. 
+#define UOP_MAX_TRACE_LENGTH 512 + +#define TRACE_STACK_SIZE 5 + int _Py_uop_analyze_and_optimize(_PyInterpreterFrame *frame, _PyUOpInstruction *trace, int trace_len, int curr_stackentries, _PyBloomFilter *dependencies); diff --git a/Include/internal/pycore_runtime_init_generated.h b/Include/internal/pycore_runtime_init_generated.h index 68fbbcb4378e17..e682c97e7c0248 100644 --- a/Include/internal/pycore_runtime_init_generated.h +++ b/Include/internal/pycore_runtime_init_generated.h @@ -881,6 +881,7 @@ extern "C" { INIT_ID(defaultaction), \ INIT_ID(delete), \ INIT_ID(depth), \ + INIT_ID(desired_access), \ INIT_ID(detect_types), \ INIT_ID(deterministic), \ INIT_ID(device), \ @@ -971,6 +972,7 @@ extern "C" { INIT_ID(groups), \ INIT_ID(h), \ INIT_ID(handle), \ + INIT_ID(handle_seq), \ INIT_ID(hash_name), \ INIT_ID(header), \ INIT_ID(headers), \ @@ -988,9 +990,12 @@ extern "C" { INIT_ID(indexgroup), \ INIT_ID(inf), \ INIT_ID(infer_variance), \ + INIT_ID(inherit_handle), \ INIT_ID(inheritable), \ INIT_ID(initial), \ INIT_ID(initial_bytes), \ + INIT_ID(initial_owner), \ + INIT_ID(initial_state), \ INIT_ID(initial_value), \ INIT_ID(initval), \ INIT_ID(inner_size), \ @@ -1046,6 +1051,7 @@ extern "C" { INIT_ID(locals), \ INIT_ID(logoption), \ INIT_ID(loop), \ + INIT_ID(manual_reset), \ INIT_ID(mapping), \ INIT_ID(match), \ INIT_ID(max_length), \ @@ -1062,6 +1068,7 @@ extern "C" { INIT_ID(metadata), \ INIT_ID(method), \ INIT_ID(microsecond), \ + INIT_ID(milliseconds), \ INIT_ID(minute), \ INIT_ID(mod), \ INIT_ID(mode), \ @@ -1071,6 +1078,7 @@ extern "C" { INIT_ID(month), \ INIT_ID(mro), \ INIT_ID(msg), \ + INIT_ID(mutex), \ INIT_ID(mycmp), \ INIT_ID(n), \ INIT_ID(n_arg), \ @@ -1174,6 +1182,7 @@ extern "C" { INIT_ID(sched_priority), \ INIT_ID(scheduler), \ INIT_ID(second), \ + INIT_ID(security_attributes), \ INIT_ID(seek), \ INIT_ID(seekable), \ INIT_ID(selectors), \ @@ -1261,6 +1270,7 @@ extern "C" { INIT_ID(values), \ INIT_ID(version), \ INIT_ID(volume), \ + INIT_ID(wait_all), \ INIT_ID(warnings), \ INIT_ID(warnoptions), \ INIT_ID(wbits), \ diff --git a/Include/internal/pycore_time.h b/Include/internal/pycore_time.h index dabbd7b41556cd..1aad6ccea69ae3 100644 --- a/Include/internal/pycore_time.h +++ b/Include/internal/pycore_time.h @@ -1,34 +1,39 @@ -// The _PyTime_t API is written to use timestamp and timeout values stored in -// various formats and to read clocks. +// Internal PyTime_t C API: see Doc/c-api/time.rst for the documentation. // -// The _PyTime_t type is an integer to support directly common arithmetic -// operations like t1 + t2. +// The PyTime_t type is an integer to support directly common arithmetic +// operations such as t1 + t2. // -// The _PyTime_t API supports a resolution of 1 nanosecond. The _PyTime_t type -// is signed to support negative timestamps. The supported range is around -// [-292.3 years; +292.3 years]. Using the Unix epoch (January 1st, 1970), the -// supported date range is around [1677-09-21; 2262-04-11]. +// Time formats: // -// Formats: +// * Seconds. +// * Seconds as a floating point number (C double). +// * Milliseconds (10^-3 seconds). +// * Microseconds (10^-6 seconds). +// * 100 nanoseconds (10^-7 seconds), used on Windows. +// * Nanoseconds (10^-9 seconds). +// * timeval structure, 1 microsecond (10^-6 seconds). +// * timespec structure, 1 nanosecond (10^-9 seconds). 
// -// * seconds -// * seconds as a floating pointer number (C double) -// * milliseconds (10^-3 seconds) -// * microseconds (10^-6 seconds) -// * 100 nanoseconds (10^-7 seconds) -// * nanoseconds (10^-9 seconds) -// * timeval structure, 1 microsecond resolution (10^-6 seconds) -// * timespec structure, 1 nanosecond resolution (10^-9 seconds) +// Note that PyTime_t is now specified as int64_t, in nanoseconds. +// (If we need to change this, we'll need new public API with new names.) +// Previously, PyTime_t was configurable (in theory); some comments and code +// might still allude to that. // // Integer overflows are detected and raise OverflowError. Conversion to a -// resolution worse than 1 nanosecond is rounded correctly with the requested -// rounding mode. There are 4 rounding modes: floor (towards -inf), ceiling -// (towards +inf), half even and up (away from zero). +// resolution larger than 1 nanosecond is rounded correctly with the requested +// rounding mode. Available rounding modes: // -// Some functions clamp the result in the range [_PyTime_MIN; _PyTime_MAX], so -// the caller doesn't have to handle errors and doesn't need to hold the GIL. -// For example, _PyTime_Add(t1, t2) computes t1+t2 and clamp the result on -// overflow. +// * Round towards minus infinity (-inf). For example, used to read a clock. +// * Round towards infinity (+inf). For example, used for timeout to wait "at +// least" N seconds. +// * Round to nearest with ties going to nearest even integer. For example, used +// to round from a Python float. +// * Round away from zero. For example, used for timeout. +// +// Some functions clamp the result in the range [PyTime_MIN; PyTime_MAX]. The +// caller doesn't have to handle errors and so doesn't need to hold the GIL to +// handle exceptions. For example, _PyTime_Add(t1, t2) computes t1+t2 and +// clamps the result on overflow. // // Clocks: // @@ -36,10 +41,11 @@ // * Monotonic clock // * Performance counter // -// Operations like (t * k / q) with integers are implemented in a way to reduce -// the risk of integer overflow. Such operation is used to convert a clock -// value expressed in ticks with a frequency to _PyTime_t, like -// QueryPerformanceCounter() with QueryPerformanceFrequency(). +// Internally, operations like (t * k / q) with integers are implemented in a +// way to reduce the risk of integer overflow. Such operation is used to convert a +// clock value expressed in ticks with a frequency to PyTime_t, like +// QueryPerformanceCounter() with QueryPerformanceFrequency() on Windows. + #ifndef Py_INTERNAL_TIME_H #define Py_INTERNAL_TIME_H @@ -56,14 +62,7 @@ extern "C" { struct timeval; #endif -// _PyTime_t: Python timestamp with subsecond precision. It can be used to -// store a duration, and so indirectly a date (related to another date, like -// UNIX epoch). -typedef int64_t _PyTime_t; -// _PyTime_MIN nanoseconds is around -292.3 years -#define _PyTime_MIN INT64_MIN -// _PyTime_MAX nanoseconds is around +292.3 years -#define _PyTime_MAX INT64_MAX +typedef PyTime_t _PyTime_t; #define _SIZEOF_PYTIME_T 8 typedef enum { @@ -147,7 +146,7 @@ PyAPI_FUNC(_PyTime_t) _PyTime_FromSecondsDouble(double seconds, _PyTime_round_t PyAPI_FUNC(_PyTime_t) _PyTime_FromNanoseconds(_PyTime_t ns); // Create a timestamp from a number of microseconds. -// Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +// Clamp to [PyTime_MIN; PyTime_MAX] on overflow. extern _PyTime_t _PyTime_FromMicrosecondsClamp(_PyTime_t us); // Create a timestamp from nanoseconds (Python int). 
@@ -169,10 +168,6 @@ PyAPI_FUNC(int) _PyTime_FromMillisecondsObject(_PyTime_t *t, PyObject *obj, _PyTime_round_t round); -// Convert a timestamp to a number of seconds as a C double. -// Export for '_socket' shared extension. -PyAPI_FUNC(double) _PyTime_AsSecondsDouble(_PyTime_t t); - // Convert timestamp to a number of milliseconds (10^-3 seconds). // Export for '_ssl' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_AsMilliseconds(_PyTime_t t, @@ -183,9 +178,6 @@ PyAPI_FUNC(_PyTime_t) _PyTime_AsMilliseconds(_PyTime_t t, PyAPI_FUNC(_PyTime_t) _PyTime_AsMicroseconds(_PyTime_t t, _PyTime_round_t round); -// Convert timestamp to a number of nanoseconds (10^-9 seconds). -extern _PyTime_t _PyTime_AsNanoseconds(_PyTime_t t); - #ifdef MS_WINDOWS // Convert timestamp to a number of 100 nanoseconds (10^-7 seconds). extern _PyTime_t _PyTime_As100Nanoseconds(_PyTime_t t, @@ -250,7 +242,7 @@ PyAPI_FUNC(void) _PyTime_AsTimespec_clamp(_PyTime_t t, struct timespec *ts); #endif -// Compute t1 + t2. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +// Compute t1 + t2. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. extern _PyTime_t _PyTime_Add(_PyTime_t t1, _PyTime_t t2); // Structure used by time.get_clock_info() @@ -267,7 +259,8 @@ typedef struct { // On integer overflow, silently ignore the overflow and clamp the clock to // [_PyTime_MIN; _PyTime_MAX]. // -// Use _PyTime_GetSystemClockWithInfo() to check for failure. +// Use _PyTime_GetSystemClockWithInfo or the public PyTime_Time() to check +// for failure. // Export for '_random' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_GetSystemClock(void); @@ -287,7 +280,8 @@ extern int _PyTime_GetSystemClockWithInfo( // On integer overflow, silently ignore the overflow and clamp the clock to // [_PyTime_MIN; _PyTime_MAX]. // -// Use _PyTime_GetMonotonicClockWithInfo() to check for failure. +// Use _PyTime_GetMonotonicClockWithInfo or the public PyTime_Monotonic() +// to check for failure. // Export for '_random' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_GetMonotonicClock(void); @@ -322,10 +316,12 @@ PyAPI_FUNC(int) _PyTime_gmtime(time_t t, struct tm *tm); // On integer overflow, silently ignore the overflow and clamp the clock to // [_PyTime_MIN; _PyTime_MAX]. // -// Use _PyTime_GetPerfCounterWithInfo() to check for failure. +// Use _PyTime_GetPerfCounterWithInfo() or the public PyTime_PerfCounter +// to check for failure. // Export for '_lsprof' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_GetPerfCounter(void); + // Get the performance counter: clock with the highest available resolution to // measure a short duration. // @@ -336,6 +332,13 @@ extern int _PyTime_GetPerfCounterWithInfo( _PyTime_t *t, _Py_clock_info_t *info); +// Alias for backward compatibility +#define _PyTime_MIN PyTime_MIN +#define _PyTime_MAX PyTime_MAX +#define _PyTime_AsSecondsDouble PyTime_AsSecondsDouble + + +// --- _PyDeadline ----------------------------------------------------------- // Create a deadline. // Pseudo code: _PyTime_GetMonotonicClock() + timeout. 
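
To make the renaming above concrete, here is an illustrative sketch, not part of the patch, of how extension code might call the public PyTime_* declarations added earlier in this series (PyTime_t, PyTime_Monotonic(), PyTime_AsSecondsDouble()). It assumes the documented convention that the clock functions return 0 on success and -1 with an exception set on failure, while PyTime_AsSecondsDouble() itself cannot fail:

    /* Illustrative only: time a block of C code with the public API.
     * PyTime_t counts nanoseconds, so a difference converts directly to
     * seconds via PyTime_AsSecondsDouble(). */
    #include <Python.h>

    static PyObject *
    elapsed_seconds_example(PyObject *module, PyObject *Py_UNUSED(args))
    {
        PyTime_t t1, t2;
        if (PyTime_Monotonic(&t1) < 0) {
            return NULL;               /* exception already set */
        }
        /* ... the work being measured would run here ... */
        if (PyTime_Monotonic(&t2) < 0) {
            return NULL;
        }
        return PyFloat_FromDouble(PyTime_AsSecondsDouble(t2 - t1));
    }

The #define aliases at the end of the header keep the old internal _PyTime_* names working during the transition, so core code can migrate incrementally.
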
diff --git a/Include/internal/pycore_unicodeobject_generated.h b/Include/internal/pycore_unicodeobject_generated.h index c8458b4e36ccc9..739af0e73c23ff 100644 --- a/Include/internal/pycore_unicodeobject_generated.h +++ b/Include/internal/pycore_unicodeobject_generated.h @@ -957,6 +957,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(depth); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(desired_access); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(detect_types); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1227,6 +1230,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(handle); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(handle_seq); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(hash_name); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1278,6 +1284,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(infer_variance); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(inherit_handle); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(inheritable); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1287,6 +1296,12 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(initial_bytes); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(initial_owner); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(initial_state); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(initial_value); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1452,6 +1467,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(loop); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(manual_reset); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(mapping); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1500,6 +1518,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(microsecond); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(milliseconds); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(minute); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1527,6 +1548,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(msg); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(mutex); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(mycmp); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1836,6 
+1860,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(second); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(security_attributes); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(seek); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -2097,6 +2124,9 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(volume); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); + string = &_Py_ID(wait_all); + assert(_PyUnicode_CheckConsistency(string, 1)); + _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(warnings); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); diff --git a/Include/internal/pycore_uop_ids.h b/Include/internal/pycore_uop_ids.h index b2476e1c6e5c4b..9bb537d355055d 100644 --- a/Include/internal/pycore_uop_ids.h +++ b/Include/internal/pycore_uop_ids.h @@ -237,7 +237,8 @@ extern "C" { #define _CHECK_GLOBALS 384 #define _CHECK_BUILTINS 385 #define _INTERNAL_INCREMENT_OPT_COUNTER 386 -#define MAX_UOP_ID 386 +#define _CHECK_VALIDITY_AND_SET_IP 387 +#define MAX_UOP_ID 387 #ifdef __cplusplus } diff --git a/Include/internal/pycore_uop_metadata.h b/Include/internal/pycore_uop_metadata.h index 2b5b37e6b8d6a4..163a0320aa2298 100644 --- a/Include/internal/pycore_uop_metadata.h +++ b/Include/internal/pycore_uop_metadata.h @@ -16,7 +16,7 @@ extern const char * const _PyOpcode_uop_name[MAX_UOP_ID+1]; #ifdef NEED_OPCODE_METADATA const uint16_t _PyUop_Flags[MAX_UOP_ID+1] = { - [_NOP] = 0, + [_NOP] = HAS_PURE_FLAG, [_RESUME_CHECK] = HAS_DEOPT_FLAG, [_LOAD_FAST_CHECK] = HAS_ARG_FLAG | HAS_LOCAL_FLAG | HAS_ERROR_FLAG, [_LOAD_FAST] = HAS_ARG_FLAG | HAS_LOCAL_FLAG | HAS_PURE_FLAG, @@ -198,17 +198,18 @@ const uint16_t _PyUop_Flags[MAX_UOP_ID+1] = { [_GUARD_IS_NONE_POP] = HAS_DEOPT_FLAG, [_GUARD_IS_NOT_NONE_POP] = HAS_DEOPT_FLAG, [_JUMP_TO_TOP] = HAS_EVAL_BREAK_FLAG, - [_SET_IP] = HAS_ARG_FLAG | HAS_ESCAPES_FLAG, + [_SET_IP] = 0, [_SAVE_RETURN_OFFSET] = HAS_ARG_FLAG, [_EXIT_TRACE] = HAS_DEOPT_FLAG, [_CHECK_VALIDITY] = HAS_DEOPT_FLAG, - [_LOAD_CONST_INLINE] = 0, - [_LOAD_CONST_INLINE_BORROW] = 0, - [_LOAD_CONST_INLINE_WITH_NULL] = 0, - [_LOAD_CONST_INLINE_BORROW_WITH_NULL] = 0, + [_LOAD_CONST_INLINE] = HAS_PURE_FLAG, + [_LOAD_CONST_INLINE_BORROW] = HAS_PURE_FLAG, + [_LOAD_CONST_INLINE_WITH_NULL] = HAS_PURE_FLAG, + [_LOAD_CONST_INLINE_BORROW_WITH_NULL] = HAS_PURE_FLAG, [_CHECK_GLOBALS] = HAS_DEOPT_FLAG, [_CHECK_BUILTINS] = HAS_DEOPT_FLAG, [_INTERNAL_INCREMENT_OPT_COUNTER] = 0, + [_CHECK_VALIDITY_AND_SET_IP] = HAS_DEOPT_FLAG, }; const char *const _PyOpcode_uop_name[MAX_UOP_ID+1] = { @@ -264,6 +265,7 @@ const char *const _PyOpcode_uop_name[MAX_UOP_ID+1] = { [_CHECK_PEP_523] = "_CHECK_PEP_523", [_CHECK_STACK_SPACE] = "_CHECK_STACK_SPACE", [_CHECK_VALIDITY] = "_CHECK_VALIDITY", + [_CHECK_VALIDITY_AND_SET_IP] = "_CHECK_VALIDITY_AND_SET_IP", [_COMPARE_OP] = "_COMPARE_OP", [_COMPARE_OP_FLOAT] = "_COMPARE_OP_FLOAT", [_COMPARE_OP_INT] = "_COMPARE_OP_INT", diff --git a/Lib/logging/config.py b/Lib/logging/config.py index de06090942d965..ea37dd7544564a 100644 --- a/Lib/logging/config.py +++ b/Lib/logging/config.py @@ -667,10 +667,9 @@ def configure_formatter(self, config): except TypeError as te: if "'format'" not in str(te): raise - #Name of parameter changed from fmt to format. - #Retry with old name. 
- #This is so that code can be used with older Python versions - #(e.g. by Django) + # logging.Formatter and its subclasses expect the `fmt` + # parameter instead of `format`. Retry passing configuration + # with `fmt`. config['fmt'] = config.pop('format') config['()'] = factory result = self.configure_custom(config) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py index c6a66a1bc963c3..58d697fdecacc0 100644 --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -1011,8 +1011,20 @@ def _exhaustive_wait(handles, timeout): # returning the first signalled might create starvation issues.) L = list(handles) ready = [] + # Windows limits WaitForMultipleObjects at 64 handles, and we use a + # few for synchronisation, so we switch to batched waits at 60. + if len(L) > 60: + try: + res = _winapi.BatchedWaitForMultipleObjects(L, False, timeout) + except TimeoutError: + return [] + ready.extend(L[i] for i in res) + if res: + L = [h for i, h in enumerate(L) if i > res[0] & i not in res] + timeout = 0 while L: - res = _winapi.WaitForMultipleObjects(L, False, timeout) + short_L = L[:60] if len(L) > 60 else L + res = _winapi.WaitForMultipleObjects(short_L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): diff --git a/Lib/pydoc.py b/Lib/pydoc.py index 17f7346e5cc619..6d145abda9d4ab 100755 --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -552,7 +552,7 @@ def getdocloc(self, object, basedir=sysconfig.get_path('stdlib')): '_thread', 'zipimport') or (file.startswith(basedir) and not file.startswith(os.path.join(basedir, 'site-packages')))) and - object.__name__ not in ('xml.etree', 'test.pydoc_mod')): + object.__name__ not in ('xml.etree', 'test.test_pydoc.pydoc_mod')): if docloc.startswith(("http://", "https://")): docloc = "{}/{}.html".format(docloc.rstrip("/"), object.__name__.lower()) else: diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py index c0d3ca50f17d69..94ce85cac754ae 100644 --- a/Lib/test/_test_multiprocessing.py +++ b/Lib/test/_test_multiprocessing.py @@ -6113,6 +6113,24 @@ def test_spawn_sys_executable_none_allows_import(self): self.assertEqual(rc, 0) self.assertFalse(err, msg=err.decode('utf-8')) + def test_large_pool(self): + # + # gh-89240: Check that large pools are always okay + # + testfn = os_helper.TESTFN + self.addCleanup(os_helper.unlink, testfn) + with open(testfn, 'w', encoding='utf-8') as f: + f.write(textwrap.dedent('''\ + import multiprocessing + def f(x): return x*x + if __name__ == '__main__': + with multiprocessing.Pool(200) as p: + print(sum(p.map(f, range(1000)))) + ''')) + rc, out, err = script_helper.assert_python_ok(testfn) + self.assertEqual("332833500", out.decode('utf-8').strip()) + self.assertFalse(err, msg=err.decode('utf-8')) + # # Mixins diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py index ee890b5b1db4cd..4ac95e23a56b8f 100644 --- a/Lib/test/libregrtest/findtests.py +++ b/Lib/test/libregrtest/findtests.py @@ -23,6 +23,7 @@ "test_future_stmt", "test_gdb", "test_inspect", + "test_pydoc", "test_multiprocessing_fork", "test_multiprocessing_forkserver", "test_multiprocessing_spawn", diff --git a/Lib/test/test_capi/test_long.py b/Lib/test/test_capi/test_long.py index 8e3ef25d1ff86f..fc82cbfa66ea7a 100644 --- a/Lib/test/test_capi/test_long.py +++ b/Lib/test/test_capi/test_long.py @@ -1,5 +1,6 @@ import unittest import sys +import test.support as support from test.support import import_helper @@ -423,6 
+424,150 @@ def test_long_asvoidptr(self): self.assertRaises(OverflowError, asvoidptr, -2**1000) # CRASHES asvoidptr(NULL) + def test_long_asnativebytes(self): + import math + from _testcapi import ( + pylong_asnativebytes as asnativebytes, + SIZE_MAX, + ) + + # Abbreviate sizeof(Py_ssize_t) to SZ because we use it a lot + SZ = int(math.ceil(math.log(SIZE_MAX + 1) / math.log(2)) / 8) + MAX_SSIZE = 2 ** (SZ * 8 - 1) - 1 + MAX_USIZE = 2 ** (SZ * 8) - 1 + if support.verbose: + print(f"SIZEOF_SIZE={SZ}\n{MAX_SSIZE=:016X}\n{MAX_USIZE=:016X}") + + # These tests check that the requested buffer size is correct + for v, expect in [ + (0, SZ), + (512, SZ), + (-512, SZ), + (MAX_SSIZE, SZ), + (MAX_USIZE, SZ + 1), + (-MAX_SSIZE, SZ), + (-MAX_USIZE, SZ + 1), + (2**255-1, 32), + (-(2**255-1), 32), + (2**256-1, 33), + (-(2**256-1), 33), + ]: + with self.subTest(f"sizeof-{v:X}"): + buffer = bytearray(1) + self.assertEqual(expect, asnativebytes(v, buffer, 0, -1), + "PyLong_AsNativeBytes(v, NULL, 0, -1)") + # Also check via the __index__ path + self.assertEqual(expect, asnativebytes(Index(v), buffer, 0, -1), + "PyLong_AsNativeBytes(Index(v), NULL, 0, -1)") + + # We request as many bytes as `expect_be` contains, and always check + # the result (both big and little endian). We check the return value + # independently, since the buffer should always be filled correctly even + # if we need more bytes + for v, expect_be, expect_n in [ + (0, b'\x00', 1), + (0, b'\x00' * 2, 2), + (0, b'\x00' * 8, min(8, SZ)), + (1, b'\x01', 1), + (1, b'\x00' * 10 + b'\x01', min(11, SZ)), + (42, b'\x2a', 1), + (42, b'\x00' * 10 + b'\x2a', min(11, SZ)), + (-1, b'\xff', 1), + (-1, b'\xff' * 10, min(11, SZ)), + (-42, b'\xd6', 1), + (-42, b'\xff' * 10 + b'\xd6', min(11, SZ)), + # Extracts 255 into a single byte, but requests sizeof(Py_ssize_t) + (255, b'\xff', SZ), + (255, b'\x00\xff', 2), + (256, b'\x01\x00', 2), + # Extracts successfully (unsigned), but requests 9 bytes + (2**63, b'\x80' + b'\x00' * 7, 9), + # "Extracts", but requests 9 bytes + (-2**63, b'\x80' + b'\x00' * 7, 9), + (2**63, b'\x00\x80' + b'\x00' * 7, 9), + (-2**63, b'\xff\x80' + b'\x00' * 7, 9), + + (2**255-1, b'\x7f' + b'\xff' * 31, 32), + (-(2**255-1), b'\x80' + b'\x00' * 30 + b'\x01', 32), + # Request extra bytes, but result says we only needed 32 + (-(2**255-1), b'\xff\x80' + b'\x00' * 30 + b'\x01', 32), + (-(2**255-1), b'\xff\xff\x80' + b'\x00' * 30 + b'\x01', 32), + + # Extracting 256 bits of integer will request 33 bytes, but still + # copy as many bits as possible into the buffer. So we *can* copy + # into a 32-byte buffer, though negative number may be unrecoverable + (2**256-1, b'\xff' * 32, 33), + (2**256-1, b'\x00' + b'\xff' * 32, 33), + (-(2**256-1), b'\x00' * 31 + b'\x01', 33), + (-(2**256-1), b'\xff' + b'\x00' * 31 + b'\x01', 33), + (-(2**256-1), b'\xff\xff' + b'\x00' * 31 + b'\x01', 33), + + # The classic "Windows HRESULT as negative number" case + # HRESULT hr; + # PyLong_CopyBits(<-2147467259>, &hr, sizeof(HRESULT)) + # assert(hr == E_FAIL) + (-2147467259, b'\x80\x00\x40\x05', 4), + ]: + with self.subTest(f"{v:X}-{len(expect_be)}bytes"): + n = len(expect_be) + buffer = bytearray(n) + expect_le = expect_be[::-1] + + self.assertEqual(expect_n, asnativebytes(v, buffer, n, 0), + f"PyLong_AsNativeBytes(v, buffer, {n}, )") + self.assertEqual(expect_be, buffer[:n], "") + self.assertEqual(expect_n, asnativebytes(v, buffer, n, 1), + f"PyLong_AsNativeBytes(v, buffer, {n}, )") + self.assertEqual(expect_le, buffer[:n], "") + + # Check a few error conditions. 
These are validated in code, but are + # unspecified in docs, so if we make changes to the implementation, it's + # fine to just update these tests rather than preserve the behaviour. + with self.assertRaises(SystemError): + asnativebytes(1, buffer, 0, 2) + with self.assertRaises(TypeError): + asnativebytes('not a number', buffer, 0, -1) + + def test_long_fromnativebytes(self): + import math + from _testcapi import ( + pylong_fromnativebytes as fromnativebytes, + SIZE_MAX, + ) + + # Abbreviate sizeof(Py_ssize_t) to SZ because we use it a lot + SZ = int(math.ceil(math.log(SIZE_MAX + 1) / math.log(2)) / 8) + MAX_SSIZE = 2 ** (SZ * 8 - 1) - 1 + MAX_USIZE = 2 ** (SZ * 8) - 1 + + for v_be, expect_s, expect_u in [ + (b'\x00', 0, 0), + (b'\x01', 1, 1), + (b'\xff', -1, 255), + (b'\x00\xff', 255, 255), + (b'\xff\xff', -1, 65535), + ]: + with self.subTest(f"{expect_s}-{expect_u:X}-{len(v_be)}bytes"): + n = len(v_be) + v_le = v_be[::-1] + + self.assertEqual(expect_s, fromnativebytes(v_be, n, 0, 1), + f"PyLong_FromNativeBytes(buffer, {n}, )") + self.assertEqual(expect_s, fromnativebytes(v_le, n, 1, 1), + f"PyLong_FromNativeBytes(buffer, {n}, )") + self.assertEqual(expect_u, fromnativebytes(v_be, n, 0, 0), + f"PyLong_FromUnsignedNativeBytes(buffer, {n}, )") + self.assertEqual(expect_u, fromnativebytes(v_le, n, 1, 0), + f"PyLong_FromUnsignedNativeBytes(buffer, {n}, )") + + # Check native endian when the result would be the same either + # way and we can test it. + if v_be == v_le: + self.assertEqual(expect_s, fromnativebytes(v_be, n, -1, 1), + f"PyLong_FromNativeBytes(buffer, {n}, )") + self.assertEqual(expect_u, fromnativebytes(v_be, n, -1, 0), + f"PyLong_FromUnsignedNativeBytes(buffer, {n}, )") + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py index e6b1b554c9af10..b64aed10d2d653 100644 --- a/Lib/test/test_capi/test_opt.py +++ b/Lib/test/test_capi/test_opt.py @@ -3,6 +3,7 @@ import sys import textwrap import unittest +import gc import _testinternalcapi @@ -556,6 +557,214 @@ def testfunc(n): # too much already. 
self.assertEqual(count, 1) +class TestUopsOptimization(unittest.TestCase): + + def test_int_type_propagation(self): + def testfunc(loops): + num = 0 + while num < loops: + x = num + num + a = x + 1 + num += 1 + return a + + opt = _testinternalcapi.get_uop_optimizer() + res = None + with temporary_optimizer(opt): + res = testfunc(32) + + ex = get_first_executor(testfunc) + self.assertIsNotNone(ex) + self.assertEqual(res, 63) + binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] + guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + self.assertGreaterEqual(len(binop_count), 3) + self.assertLessEqual(len(guard_both_int_count), 1) + + def test_int_type_propagation_through_frame(self): + def double(x): + return x + x + def testfunc(loops): + num = 0 + while num < loops: + x = num + num + a = double(x) + num += 1 + return a + + opt = _testinternalcapi.get_uop_optimizer() + res = None + with temporary_optimizer(opt): + res = testfunc(32) + + ex = get_first_executor(testfunc) + self.assertIsNotNone(ex) + self.assertEqual(res, 124) + binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] + guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + self.assertGreaterEqual(len(binop_count), 3) + self.assertLessEqual(len(guard_both_int_count), 1) + + def test_int_type_propagation_from_frame(self): + def double(x): + return x + x + def testfunc(loops): + num = 0 + while num < loops: + a = double(num) + x = a + a + num += 1 + return x + + opt = _testinternalcapi.get_uop_optimizer() + res = None + with temporary_optimizer(opt): + res = testfunc(32) + + ex = get_first_executor(testfunc) + self.assertIsNotNone(ex) + self.assertEqual(res, 124) + binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] + guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + self.assertGreaterEqual(len(binop_count), 3) + self.assertLessEqual(len(guard_both_int_count), 1) + + def test_int_impure_region(self): + def testfunc(loops): + num = 0 + while num < loops: + x = num + num + y = 1 + x // 2 + a = x + y + num += 1 + return a + + opt = _testinternalcapi.get_uop_optimizer() + res = None + with temporary_optimizer(opt): + res = testfunc(64) + + ex = get_first_executor(testfunc) + self.assertIsNotNone(ex) + binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] + self.assertGreaterEqual(len(binop_count), 3) + + def test_call_py_exact_args(self): + def testfunc(n): + def dummy(x): + return x+1 + for i in range(n): + dummy(i) + + opt = _testinternalcapi.get_uop_optimizer() + with temporary_optimizer(opt): + testfunc(20) + + ex = get_first_executor(testfunc) + self.assertIsNotNone(ex) + uops = {opname for opname, _, _ in ex} + self.assertIn("_PUSH_FRAME", uops) + self.assertIn("_BINARY_OP_ADD_INT", uops) + self.assertNotIn("_CHECK_PEP_523", uops) + + def test_int_type_propagate_through_range(self): + def testfunc(n): + + for i in range(n): + x = i + i + return x + + opt = _testinternalcapi.get_uop_optimizer() + with temporary_optimizer(opt): + res = testfunc(20) + + ex = get_first_executor(testfunc) + self.assertEqual(res, 19 * 2) + self.assertIsNotNone(ex) + uops = {opname for opname, _, _ in ex} + self.assertNotIn("_GUARD_BOTH_INT", uops) + + def test_int_value_numbering(self): + def testfunc(n): + + y = 1 + for i in range(n): + x = y + z = x + a = z + b = a + res = x + z + a + b + return res + + opt = 
_testinternalcapi.get_uop_optimizer() + with temporary_optimizer(opt): + res = testfunc(20) + + ex = get_first_executor(testfunc) + self.assertEqual(res, 4) + self.assertIsNotNone(ex) + uops = {opname for opname, _, _ in ex} + self.assertIn("_GUARD_BOTH_INT", uops) + guard_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] + self.assertEqual(len(guard_count), 1) + + def test_comprehension(self): + def testfunc(n): + for _ in range(n): + return [i for i in range(n)] + + opt = _testinternalcapi.get_uop_optimizer() + with temporary_optimizer(opt): + testfunc(20) + + ex = get_first_executor(testfunc) + self.assertIsNotNone(ex) + uops = {opname for opname, _, _ in ex} + self.assertNotIn("_BINARY_OP_ADD_INT", uops) + + def test_call_py_exact_args_disappearing(self): + def dummy(x): + return x+1 + + def testfunc(n): + for i in range(n): + dummy(i) + + opt = _testinternalcapi.get_uop_optimizer() + # Trigger specialization + testfunc(8) + with temporary_optimizer(opt): + del dummy + gc.collect() + + def dummy(x): + return x + 2 + testfunc(10) + + ex = get_first_executor(testfunc) + # Honestly as long as it doesn't crash it's fine. + # Whether we get an executor or not is non-deterministic, + # because it's decided by when the function is freed. + # This test is a little implementation specific. + + def test_promote_globals_to_constants(self): + def testfunc(n): + for i in range(n): + x = range(i) + return x + + opt = _testinternalcapi.get_uop_optimizer() + with temporary_optimizer(opt): + testfunc(20) + + ex = get_first_executor(testfunc) + self.assertIsNotNone(ex) + uops = {opname for opname, _, _ in ex} + self.assertNotIn("_LOAD_GLOBAL_BUILTIN", uops) + self.assertIn("_LOAD_CONST_INLINE_BORROW_WITH_NULL", uops) + + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_capi/test_time.py b/Lib/test/test_capi/test_time.py new file mode 100644 index 00000000000000..10b7fbf2c372a3 --- /dev/null +++ b/Lib/test/test_capi/test_time.py @@ -0,0 +1,71 @@ +import time +import unittest +from test.support import import_helper +_testcapi = import_helper.import_module('_testcapi') + + +PyTime_MIN = _testcapi.PyTime_MIN +PyTime_MAX = _testcapi.PyTime_MAX +SEC_TO_NS = 10 ** 9 +DAY_TO_SEC = (24 * 60 * 60) +# Worst clock resolution: maximum delta between two clock reads. 
+CLOCK_RES = 0.050 + + +class CAPITest(unittest.TestCase): + def test_min_max(self): + # PyTime_t is just int64_t + self.assertEqual(PyTime_MIN, -2**63) + self.assertEqual(PyTime_MAX, 2**63 - 1) + + def check_clock(self, c_func, py_func): + t1 = c_func() + t2 = py_func() + self.assertAlmostEqual(t1, t2, delta=CLOCK_RES) + + def test_assecondsdouble(self): + # Test PyTime_AsSecondsDouble() + def ns_to_sec(ns): + if abs(ns) % SEC_TO_NS == 0: + return float(ns // SEC_TO_NS) + else: + return float(ns) / SEC_TO_NS + + seconds = ( + 0, + 1, + DAY_TO_SEC, + 365 * DAY_TO_SEC, + ) + values = { + PyTime_MIN, + PyTime_MIN + 1, + PyTime_MAX - 1, + PyTime_MAX, + } + for second in seconds: + ns = second * SEC_TO_NS + values.add(ns) + # test nanosecond before/after to test rounding + values.add(ns - 1) + values.add(ns + 1) + for ns in list(values): + if (-ns) > PyTime_MAX: + continue + values.add(-ns) + for ns in sorted(values): + with self.subTest(ns=ns): + self.assertEqual(_testcapi.PyTime_AsSecondsDouble(ns), + ns_to_sec(ns)) + + def test_monotonic(self): + # Test PyTime_Monotonic() + self.check_clock(_testcapi.PyTime_Monotonic, time.monotonic) + + def test_perf_counter(self): + # Test PyTime_PerfCounter() + self.check_clock(_testcapi.PyTime_PerfCounter, time.perf_counter) + + def test_time(self): + # Test PyTime_time() + self.check_clock(_testcapi.PyTime_Time, time.time) diff --git a/Lib/test/test_dataclasses/__init__.py b/Lib/test/test_dataclasses/__init__.py index 272d427875ae40..ede74b0dd15ccf 100644 --- a/Lib/test/test_dataclasses/__init__.py +++ b/Lib/test/test_dataclasses/__init__.py @@ -22,6 +22,8 @@ import typing # Needed for the string "typing.ClassVar[int]" to work as an annotation. import dataclasses # Needed for the string "dataclasses.InitVar[int]" to work as an annotation. +from test import support + # Just any custom exception we can catch. class CustomError(Exception): pass @@ -2216,6 +2218,7 @@ def assertDocStrEqual(self, a, b): # whitespace stripped. 
self.assertEqual(a.replace(' ', ''), b.replace(' ', '')) + @support.requires_docstrings def test_existing_docstring_not_overridden(self): @dataclass class C: diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py index 5d7dae8829574b..61060f3dc29fd4 100644 --- a/Lib/test/test_enum.py +++ b/Lib/test/test_enum.py @@ -4905,15 +4905,15 @@ class Color(enum.Enum) | value | | ---------------------------------------------------------------------- - | Methods inherited from enum.EnumType: + | Static methods inherited from enum.EnumType: | - | __contains__(value) from enum.EnumType + | __contains__(value) | - | __getitem__(name) from enum.EnumType + | __getitem__(name) | - | __iter__() from enum.EnumType + | __iter__() | - | __len__() from enum.EnumType + | __len__() | | ---------------------------------------------------------------------- | Readonly properties inherited from enum.EnumType: diff --git a/Lib/test/test_generated_cases.py b/Lib/test/test_generated_cases.py index ca1228ee7008a9..a7ad6c7320b4ee 100644 --- a/Lib/test/test_generated_cases.py +++ b/Lib/test/test_generated_cases.py @@ -33,6 +33,7 @@ def skip_if_different_mount_drives(): import parser from stack import Stack import tier1_generator + import tier2_abstract_generator def handle_stderr(): @@ -793,5 +794,157 @@ def test_annotated_op(self): self.run_cases_test(input, output) +class TestGeneratedAbstractCases(unittest.TestCase): + def setUp(self) -> None: + super().setUp() + self.maxDiff = None + + self.temp_dir = tempfile.gettempdir() + self.temp_input_filename = os.path.join(self.temp_dir, "input.txt") + self.temp_input2_filename = os.path.join(self.temp_dir, "input2.txt") + self.temp_output_filename = os.path.join(self.temp_dir, "output.txt") + + def tearDown(self) -> None: + for filename in [ + self.temp_input_filename, + self.temp_input2_filename, + self.temp_output_filename, + ]: + try: + os.remove(filename) + except: + pass + super().tearDown() + + def run_cases_test(self, input: str, input2: str, expected: str): + with open(self.temp_input_filename, "w+") as temp_input: + temp_input.write(parser.BEGIN_MARKER) + temp_input.write(input) + temp_input.write(parser.END_MARKER) + temp_input.flush() + + with open(self.temp_input2_filename, "w+") as temp_input: + temp_input.write(parser.BEGIN_MARKER) + temp_input.write(input2) + temp_input.write(parser.END_MARKER) + temp_input.flush() + + with handle_stderr(): + tier2_abstract_generator.generate_tier2_abstract_from_files( + [self.temp_input_filename, self.temp_input2_filename], + self.temp_output_filename + ) + + with open(self.temp_output_filename) as temp_output: + lines = temp_output.readlines() + while lines and lines[0].startswith(("// ", "#", " #", "\n")): + lines.pop(0) + while lines and lines[-1].startswith(("#", "\n")): + lines.pop(-1) + actual = "".join(lines) + self.assertEqual(actual.strip(), expected.strip()) + + def test_overridden_abstract(self): + input = """ + pure op(OP, (--)) { + spam(); + } + """ + input2 = """ + pure op(OP, (--)) { + eggs(); + } + """ + output = """ + case OP: { + eggs(); + break; + } + """ + self.run_cases_test(input, input2, output) + + def test_overridden_abstract_args(self): + input = """ + pure op(OP, (arg1 -- out)) { + spam(); + } + op(OP2, (arg1 -- out)) { + eggs(); + } + """ + input2 = """ + op(OP, (arg1 -- out)) { + eggs(); + } + """ + output = """ + case OP: { + _Py_UOpsSymType *arg1; + _Py_UOpsSymType *out; + arg1 = stack_pointer[-1]; + eggs(); + stack_pointer[-1] = out; + break; + } + + case OP2: { + _Py_UOpsSymType *out; + out 
= sym_new_unknown(ctx); + if (out == NULL) goto out_of_space; + stack_pointer[-1] = out; + break; + } + """ + self.run_cases_test(input, input2, output) + + def test_no_overridden_case(self): + input = """ + pure op(OP, (arg1 -- out)) { + spam(); + } + + pure op(OP2, (arg1 -- out)) { + } + + """ + input2 = """ + pure op(OP2, (arg1 -- out)) { + } + """ + output = """ + case OP: { + _Py_UOpsSymType *out; + out = sym_new_unknown(ctx); + if (out == NULL) goto out_of_space; + stack_pointer[-1] = out; + break; + } + + case OP2: { + _Py_UOpsSymType *arg1; + _Py_UOpsSymType *out; + arg1 = stack_pointer[-1]; + stack_pointer[-1] = out; + break; + } + """ + self.run_cases_test(input, input2, output) + + def test_missing_override_failure(self): + input = """ + pure op(OP, (arg1 -- out)) { + spam(); + } + """ + input2 = """ + pure op(OTHER, (arg1 -- out)) { + } + """ + output = """ + """ + with self.assertRaisesRegex(AssertionError, "All abstract uops"): + self.run_cases_test(input, input2, output) + + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_pydoc/__init__.py b/Lib/test/test_pydoc/__init__.py new file mode 100644 index 00000000000000..f2a39a3fe29c7f --- /dev/null +++ b/Lib/test/test_pydoc/__init__.py @@ -0,0 +1,6 @@ +import os +from test import support + + +def load_tests(*args): + return support.load_package_tests(os.path.dirname(__file__), *args) diff --git a/Lib/test/pydoc_mod.py b/Lib/test/test_pydoc/pydoc_mod.py similarity index 100% rename from Lib/test/pydoc_mod.py rename to Lib/test/test_pydoc/pydoc_mod.py diff --git a/Lib/test/pydocfodder.py b/Lib/test/test_pydoc/pydocfodder.py similarity index 100% rename from Lib/test/pydocfodder.py rename to Lib/test/test_pydoc/pydocfodder.py diff --git a/Lib/test/test_pydoc.py b/Lib/test/test_pydoc/test_pydoc.py similarity index 98% rename from Lib/test/test_pydoc.py rename to Lib/test/test_pydoc/test_pydoc.py index f3c26624c624f5..0dd24e6d347364 100644 --- a/Lib/test/test_pydoc.py +++ b/Lib/test/test_pydoc/test_pydoc.py @@ -34,8 +34,8 @@ captured_stderr, is_emscripten, is_wasi, requires_docstrings, MISSING_C_DOCSTRINGS) from test.support.os_helper import (TESTFN, rmtree, unlink) -from test import pydoc_mod -from test import pydocfodder +from test.test_pydoc import pydoc_mod +from test.test_pydoc import pydocfodder class nonascii: @@ -52,7 +52,7 @@ class nonascii: expected_text_pattern = """ NAME - test.pydoc_mod - This is a test module for test_pydoc + test.test_pydoc.pydoc_mod - This is a test module for test_pydoc %s CLASSES builtins.object @@ -125,7 +125,7 @@ class C(builtins.object) DATA __xyz__ = 'X, Y and Z' - c_alias = test.pydoc_mod.C[int] + c_alias = test.test_pydoc.pydoc_mod.C[int] list_alias1 = typing.List[int] list_alias2 = list[int] type_union1 = typing.Union[int, str] @@ -148,7 +148,7 @@ class C(builtins.object) for s in expected_data_docstrings) html2text_of_expected = """ -test.pydoc_mod (version 1.2.3.4) +test.test_pydoc.pydoc_mod (version 1.2.3.4) This is a test module for test_pydoc Modules @@ -213,7 +213,7 @@ class C(builtins.object) Data __xyz__ = 'X, Y and Z' - c_alias = test.pydoc_mod.C[int] + c_alias = test.test_pydoc.pydoc_mod.C[int] list_alias1 = typing.List[int] list_alias2 = list[int] type_union1 = typing.Union[int, str] @@ -342,7 +342,7 @@ def get_pydoc_link(module): "Returns a documentation web link of a module" abspath = os.path.abspath dirname = os.path.dirname - basedir = dirname(dirname(abspath(__file__))) + basedir = dirname(dirname(dirname(abspath(__file__)))) doc = pydoc.TextDoc() loc = 
doc.getdocloc(module, basedir=basedir) return loc @@ -489,7 +489,7 @@ def test_not_here(self): @requires_docstrings def test_not_ascii(self): - result = run_pydoc('test.test_pydoc.nonascii', PYTHONIOENCODING='ascii') + result = run_pydoc('test.test_pydoc.test_pydoc.nonascii', PYTHONIOENCODING='ascii') encoded = nonascii.__doc__.encode('ascii', 'backslashreplace') self.assertIn(encoded, result) @@ -669,9 +669,9 @@ def test_help_output_redirect(self): buf = StringIO() helper = pydoc.Helper(output=buf) unused, doc_loc = get_pydoc_text(pydoc_mod) - module = "test.pydoc_mod" + module = "test.test_pydoc.pydoc_mod" help_header = """ - Help on module test.pydoc_mod in test: + Help on module test.test_pydoc.pydoc_mod in test.test_pydoc: """.lstrip() help_header = textwrap.dedent(help_header) @@ -1142,7 +1142,6 @@ class TestDescriptions(unittest.TestCase): def test_module(self): # Check that pydocfodder module can be described - from test import pydocfodder doc = pydoc.render_doc(pydocfodder) self.assertIn("pydocfodder", doc) @@ -1425,10 +1424,10 @@ def smeth(*args, **kwargs): self.assertEqual(self._get_summary_line(C.meth), "meth" + unbound) self.assertEqual(self._get_summary_line(C().meth), - "meth" + bound + " method of test.test_pydoc.C instance") + "meth" + bound + " method of test.test_pydoc.test_pydoc.C instance") C.cmeth.__func__.__text_signature__ = text_signature self.assertEqual(self._get_summary_line(C.cmeth), - "cmeth" + bound + " class method of test.test_pydoc.C") + "cmeth" + bound + " class method of test.test_pydoc.test_pydoc.C") C.smeth.__text_signature__ = text_signature self.assertEqual(self._get_summary_line(C.smeth), "smeth" + unbound) @@ -1465,7 +1464,7 @@ def cm(cls, x): 'cm(...)\n' ' A class method\n') self.assertEqual(self._get_summary_lines(X.cm), """\ -cm(x) class method of test.test_pydoc.X +cm(x) class method of test.test_pydoc.test_pydoc.X A class method """) self.assertIn(""" @@ -1647,19 +1646,19 @@ def test_text_doc_routines_in_class(self, cls=pydocfodder.B): lines = self.getsection(result, f' | Methods {where}:', ' | ' + '-'*70) self.assertIn(' | A_method_alias = A_method(self)', lines) self.assertIn(' | B_method_alias = B_method(self)', lines) - self.assertIn(' | A_staticmethod(x, y) from test.pydocfodder.A', lines) + self.assertIn(' | A_staticmethod(x, y) from test.test_pydoc.pydocfodder.A', lines) self.assertIn(' | A_staticmethod_alias = A_staticmethod(x, y)', lines) - self.assertIn(' | global_func(x, y) from test.pydocfodder', lines) + self.assertIn(' | global_func(x, y) from test.test_pydoc.pydocfodder', lines) self.assertIn(' | global_func_alias = global_func(x, y)', lines) - self.assertIn(' | global_func2_alias = global_func2(x, y) from test.pydocfodder', lines) + self.assertIn(' | global_func2_alias = global_func2(x, y) from test.test_pydoc.pydocfodder', lines) self.assertIn(' | __repr__(self, /) from builtins.object', lines) self.assertIn(' | object_repr = __repr__(self, /)', lines) lines = self.getsection(result, f' | Static methods {where}:', ' | ' + '-'*70) - self.assertIn(' | A_classmethod_ref = A_classmethod(x) class method of test.pydocfodder.A', lines) - note = '' if cls is pydocfodder.B else ' class method of test.pydocfodder.B' + self.assertIn(' | A_classmethod_ref = A_classmethod(x) class method of test.test_pydoc.pydocfodder.A', lines) + note = '' if cls is pydocfodder.B else ' class method of test.test_pydoc.pydocfodder.B' self.assertIn(' | B_classmethod_ref = B_classmethod(x)' + note, lines) - self.assertIn(' | A_method_ref = A_method() method 
of test.pydocfodder.A instance', lines) + self.assertIn(' | A_method_ref = A_method() method of test.test_pydoc.pydocfodder.A instance', lines) self.assertIn(' | get(key, default=None, /) method of builtins.dict instance', lines) self.assertIn(' | dict_get = get(key, default=None, /) method of builtins.dict instance', lines) @@ -1675,19 +1674,19 @@ def test_html_doc_routines_in_class(self, cls=pydocfodder.B): lines = self.getsection(result, f'Methods {where}:', '-'*70) self.assertIn('A_method_alias = A_method(self)', lines) self.assertIn('B_method_alias = B_method(self)', lines) - self.assertIn('A_staticmethod(x, y) from test.pydocfodder.A', lines) + self.assertIn('A_staticmethod(x, y) from test.test_pydoc.pydocfodder.A', lines) self.assertIn('A_staticmethod_alias = A_staticmethod(x, y)', lines) - self.assertIn('global_func(x, y) from test.pydocfodder', lines) + self.assertIn('global_func(x, y) from test.test_pydoc.pydocfodder', lines) self.assertIn('global_func_alias = global_func(x, y)', lines) - self.assertIn('global_func2_alias = global_func2(x, y) from test.pydocfodder', lines) + self.assertIn('global_func2_alias = global_func2(x, y) from test.test_pydoc.pydocfodder', lines) self.assertIn('__repr__(self, /) from builtins.object', lines) self.assertIn('object_repr = __repr__(self, /)', lines) lines = self.getsection(result, f'Static methods {where}:', '-'*70) - self.assertIn('A_classmethod_ref = A_classmethod(x) class method of test.pydocfodder.A', lines) - note = '' if cls is pydocfodder.B else ' class method of test.pydocfodder.B' + self.assertIn('A_classmethod_ref = A_classmethod(x) class method of test.test_pydoc.pydocfodder.A', lines) + note = '' if cls is pydocfodder.B else ' class method of test.test_pydoc.pydocfodder.B' self.assertIn('B_classmethod_ref = B_classmethod(x)' + note, lines) - self.assertIn('A_method_ref = A_method() method of test.pydocfodder.A instance', lines) + self.assertIn('A_method_ref = A_method() method of test.test_pydoc.pydocfodder.A instance', lines) lines = self.getsection(result, f'Class methods {where}:', '-'*70) self.assertIn('B_classmethod(x)', lines) diff --git a/Lib/test/test_queue.py b/Lib/test/test_queue.py index d308a212999429..92d670ca6f8f5b 100644 --- a/Lib/test/test_queue.py +++ b/Lib/test/test_queue.py @@ -403,11 +403,11 @@ def _shutdown_all_methods_in_many_threads(self, immediate): for thread in ps[1:]: thread.join() - @unittest.skipIf(sys.platform == "win32", "test times out (gh-115258)") + @unittest.skip("test times out (gh-115258)") def test_shutdown_all_methods_in_many_threads(self): return self._shutdown_all_methods_in_many_threads(False) - @unittest.skipIf(sys.platform == "win32", "test times out (gh-115258)") + @unittest.skip("test times out (gh-115258)") def test_shutdown_immediate_all_methods_in_many_threads(self): return self._shutdown_all_methods_in_many_threads(True) diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py index 3b5640abdb6b89..a0aeea515afbd6 100644 --- a/Lib/test/test_time.py +++ b/Lib/test/test_time.py @@ -43,8 +43,8 @@ class _PyTime(enum.IntEnum): ROUND_UP = 3 # _PyTime_t is int64_t -_PyTime_MIN = -2 ** 63 -_PyTime_MAX = 2 ** 63 - 1 +PyTime_MIN = -2 ** 63 +PyTime_MAX = 2 ** 63 - 1 # Rounding modes supported by PyTime ROUNDING_MODES = ( @@ -934,7 +934,7 @@ def test_FromSecondsObject(self): _PyTime_FromSecondsObject(float('nan'), time_rnd) def test_AsSecondsDouble(self): - from _testinternalcapi import _PyTime_AsSecondsDouble + from _testcapi import PyTime_AsSecondsDouble def float_converter(ns): if abs(ns) 
% SEC_TO_NS == 0: @@ -942,15 +942,10 @@ def float_converter(ns): else: return float(ns) / SEC_TO_NS - self.check_int_rounding(lambda ns, rnd: _PyTime_AsSecondsDouble(ns), + self.check_int_rounding(lambda ns, rnd: PyTime_AsSecondsDouble(ns), float_converter, NS_TO_SEC) - # test nan - for time_rnd, _ in ROUNDING_MODES: - with self.assertRaises(TypeError): - _PyTime_AsSecondsDouble(float('nan')) - def create_decimal_converter(self, denominator): denom = decimal.Decimal(denominator) @@ -1009,7 +1004,7 @@ def test_AsTimeval_clamp(self): tv_sec_max = self.time_t_max tv_sec_min = self.time_t_min - for t in (_PyTime_MIN, _PyTime_MAX): + for t in (PyTime_MIN, PyTime_MAX): ts = _PyTime_AsTimeval_clamp(t, _PyTime.ROUND_CEILING) with decimal.localcontext() as context: context.rounding = decimal.ROUND_CEILING @@ -1028,7 +1023,7 @@ def test_AsTimeval_clamp(self): def test_AsTimespec_clamp(self): from _testinternalcapi import _PyTime_AsTimespec_clamp - for t in (_PyTime_MIN, _PyTime_MAX): + for t in (PyTime_MIN, PyTime_MAX): ts = _PyTime_AsTimespec_clamp(t) tv_sec, tv_nsec = divmod(t, NS_TO_SEC) if self.time_t_max < tv_sec: diff --git a/Lib/test/test_winapi.py b/Lib/test/test_winapi.py new file mode 100644 index 00000000000000..014aeea7239e2b --- /dev/null +++ b/Lib/test/test_winapi.py @@ -0,0 +1,94 @@ +# Test the Windows-only _winapi module + +import random +import threading +import time +import unittest +from test.support import import_helper + +_winapi = import_helper.import_module('_winapi', required_on=['win']) + +MAXIMUM_WAIT_OBJECTS = 64 +MAXIMUM_BATCHED_WAIT_OBJECTS = (MAXIMUM_WAIT_OBJECTS - 1) ** 2 + +class WinAPIBatchedWaitForMultipleObjectsTests(unittest.TestCase): + def _events_waitall_test(self, n): + evts = [_winapi.CreateEventW(0, False, False, None) for _ in range(n)] + + with self.assertRaises(TimeoutError): + _winapi.BatchedWaitForMultipleObjects(evts, True, 100) + + # Ensure no errors raised when all are triggered + for e in evts: + _winapi.SetEvent(e) + try: + _winapi.BatchedWaitForMultipleObjects(evts, True, 100) + except TimeoutError: + self.fail("expected wait to complete immediately") + + # Choose 8 events to set, distributed throughout the list, to make sure + # we don't always have them in the first chunk + chosen = [i * (len(evts) // 8) for i in range(8)] + + # Replace events with invalid handles to make sure we fail + for i in chosen: + old_evt = evts[i] + evts[i] = -1 + with self.assertRaises(OSError): + _winapi.BatchedWaitForMultipleObjects(evts, True, 100) + evts[i] = old_evt + + + def _events_waitany_test(self, n): + evts = [_winapi.CreateEventW(0, False, False, None) for _ in range(n)] + + with self.assertRaises(TimeoutError): + _winapi.BatchedWaitForMultipleObjects(evts, False, 100) + + # Choose 8 events to set, distributed throughout the list, to make sure + # we don't always have them in the first chunk + chosen = [i * (len(evts) // 8) for i in range(8)] + + # Trigger one by one. They are auto-reset events, so will only trigger once + for i in chosen: + with self.subTest(f"trigger event {i} of {len(evts)}"): + _winapi.SetEvent(evts[i]) + triggered = _winapi.BatchedWaitForMultipleObjects(evts, False, 10000) + self.assertSetEqual(set(triggered), {i}) + + # Trigger all at once. 
This may require multiple calls + for i in chosen: + _winapi.SetEvent(evts[i]) + triggered = set() + while len(triggered) < len(chosen): + triggered.update(_winapi.BatchedWaitForMultipleObjects(evts, False, 10000)) + self.assertSetEqual(triggered, set(chosen)) + + # Replace events with invalid handles to make sure we fail + for i in chosen: + with self.subTest(f"corrupt event {i} of {len(evts)}"): + old_evt = evts[i] + evts[i] = -1 + with self.assertRaises(OSError): + _winapi.BatchedWaitForMultipleObjects(evts, False, 100) + evts[i] = old_evt + + + def test_few_events_waitall(self): + self._events_waitall_test(16) + + def test_many_events_waitall(self): + self._events_waitall_test(256) + + def test_max_events_waitall(self): + self._events_waitall_test(MAXIMUM_BATCHED_WAIT_OBJECTS) + + + def test_few_events_waitany(self): + self._events_waitany_test(16) + + def test_many_events_waitany(self): + self._events_waitany_test(256) + + def test_max_events_waitany(self): + self._events_waitany_test(MAXIMUM_BATCHED_WAIT_OBJECTS) diff --git a/Makefile.pre.in b/Makefile.pre.in index 4dabe328ce0362..d3b18acad61ce5 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -184,6 +184,8 @@ PYTHONFRAMEWORK= @PYTHONFRAMEWORK@ PYTHONFRAMEWORKDIR= @PYTHONFRAMEWORKDIR@ PYTHONFRAMEWORKPREFIX= @PYTHONFRAMEWORKPREFIX@ PYTHONFRAMEWORKINSTALLDIR= @PYTHONFRAMEWORKINSTALLDIR@ +PYTHONFRAMEWORKINSTALLNAMEPREFIX= @PYTHONFRAMEWORKINSTALLNAMEPREFIX@ +RESSRCDIR= @RESSRCDIR@ # Deployment target selected during configure, to be checked # by distutils. The export statement is needed to ensure that the # deployment target is active during build. @@ -866,7 +868,7 @@ libpython3.so: libpython$(LDVERSION).so $(BLDSHARED) $(NO_AS_NEEDED) -o $@ -Wl,-h$@ $^ libpython$(LDVERSION).dylib: $(LIBRARY_OBJS) - $(CC) -dynamiclib -Wl,-single_module $(PY_CORE_LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(prefix)/lib/libpython$(LDVERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(DTRACE_OBJS) $(SHLIBS) $(LIBC) $(LIBM); \ + $(CC) -dynamiclib $(PY_CORE_LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(PYTHONFRAMEWORKINSTALLNAMEPREFIX)/lib/libpython$(LDVERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(DTRACE_OBJS) $(SHLIBS) $(LIBC) $(LIBM); \ libpython$(VERSION).sl: $(LIBRARY_OBJS) @@ -891,14 +893,13 @@ $(BUILDPYTHON)-gdb.py: $(SRC_GDB_HOOKS) # This rule is here for OPENSTEP/Rhapsody/MacOSX. It builds a temporary # minimal framework (not including the Lib directory and such) in the current # directory. 
-RESSRCDIR=Mac/Resources/framework $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK): \ $(LIBRARY) \ $(RESSRCDIR)/Info.plist $(INSTALL) -d -m $(DIRMODE) $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION) $(CC) -o $(LDLIBRARY) $(PY_CORE_LDFLAGS) -dynamiclib \ - -all_load $(LIBRARY) -Wl,-single_module \ - -install_name $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK) \ + -all_load $(LIBRARY) \ + -install_name $(DESTDIR)$(PYTHONFRAMEWORKINSTALLNAMEPREFIX)/$(PYTHONFRAMEWORK) \ -compatibility_version $(VERSION) \ -current_version $(VERSION) \ -framework CoreFoundation $(LIBS); @@ -1862,6 +1863,10 @@ regen-cases: -o $(srcdir)/Python/generated_cases.c.h.new $(srcdir)/Python/bytecodes.c $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/tier2_generator.py \ -o $(srcdir)/Python/executor_cases.c.h.new $(srcdir)/Python/bytecodes.c + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/tier2_abstract_generator.py \ + -o $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h.new \ + $(srcdir)/Python/tier2_redundancy_eliminator_bytecodes.c \ + $(srcdir)/Python/bytecodes.c $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/opcode_metadata_generator.py \ -o $(srcdir)/Include/internal/pycore_opcode_metadata.h.new $(srcdir)/Python/bytecodes.c $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/uop_metadata_generator.py -o \ @@ -1873,6 +1878,7 @@ regen-cases: $(UPDATE_FILE) $(srcdir)/Include/internal/pycore_opcode_metadata.h $(srcdir)/Include/internal/pycore_opcode_metadata.h.new $(UPDATE_FILE) $(srcdir)/Include/internal/pycore_uop_metadata.h $(srcdir)/Include/internal/pycore_uop_metadata.h.new $(UPDATE_FILE) $(srcdir)/Python/executor_cases.c.h $(srcdir)/Python/executor_cases.c.h.new + $(UPDATE_FILE) $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h.new $(UPDATE_FILE) $(srcdir)/Lib/_opcode_metadata.py $(srcdir)/Lib/_opcode_metadata.py.new Python/compile.o: $(srcdir)/Include/internal/pycore_opcode_metadata.h @@ -1894,7 +1900,8 @@ Python/optimizer.o: \ Python/optimizer_analysis.o: \ $(srcdir)/Include/internal/pycore_opcode_metadata.h \ - $(srcdir)/Include/internal/pycore_optimizer.h + $(srcdir)/Include/internal/pycore_optimizer.h \ + $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h Python/frozen.o: $(FROZEN_FILES_OUT) @@ -2000,7 +2007,7 @@ multissltest: all # which can lead to two parallel `./python setup.py build` processes that # step on each others toes. .PHONY: install -install: @FRAMEWORKINSTALLFIRST@ commoninstall bininstall maninstall @FRAMEWORKINSTALLLAST@ +install: @FRAMEWORKINSTALLFIRST@ @INSTALLTARGETS@ @FRAMEWORKINSTALLLAST@ if test "x$(ENSUREPIP)" != "xno" ; then \ case $(ENSUREPIP) in \ upgrade) ensurepip="--upgrade" ;; \ @@ -2306,6 +2313,7 @@ TESTSUBDIRS= idlelib/idle_test \ test/test_module \ test/test_pathlib \ test/test_peg_generator \ + test/test_pydoc \ test/test_sqlite3 \ test/test_tkinter \ test/test_tomllib \ diff --git a/Misc/NEWS.d/3.12.0a1.rst b/Misc/NEWS.d/3.12.0a1.rst index f192bf086ed259..4739e0fb89a4a8 100644 --- a/Misc/NEWS.d/3.12.0a1.rst +++ b/Misc/NEWS.d/3.12.0a1.rst @@ -2722,7 +2722,7 @@ on future on an error - e.g. TimeoutError or KeyboardInterrupt. Fix a :mod:`sqlite3` regression where ``*args`` and ``**kwds`` were incorrectly relayed from :py:func:`~sqlite3.connect` to the :class:`~sqlite3.Connection` factory. The regression was introduced in -3.11a1 with PR 24421 (:gh:`85128`). Patch by Erlend E. Aasland.` +3.11a1 with PR 24421 (:gh:`85128`). Patch by Erlend E. Aasland. .. 
@@ -2988,7 +2988,7 @@ Kumar Aditya. .. section: Library Fix crash in :class:`struct.Struct` when it was not completely initialized -by initializing it in :meth:`~object.__new__``. Patch by Kumar Aditya. +by initializing it in :meth:`~object.__new__`. Patch by Kumar Aditya. .. diff --git a/Misc/NEWS.d/3.12.0b1.rst b/Misc/NEWS.d/3.12.0b1.rst index 21f2c748f40548..d9804be764c9a9 100644 --- a/Misc/NEWS.d/3.12.0b1.rst +++ b/Misc/NEWS.d/3.12.0b1.rst @@ -563,10 +563,10 @@ Complex function calls are now faster and consume no C stack space. .. nonce: fvgsCl .. section: Core and Builtins -``len()`` for 0-dimensional :class:`memoryview`` objects (such as +``len()`` for 0-dimensional :class:`memoryview` objects (such as ``memoryview(ctypes.c_uint8(42))``) now raises a :exc:`TypeError`. Previously this returned ``1``, which was not consistent with ``mem_0d[0]`` -raising an :exc:`IndexError``. +raising an :exc:`IndexError`. .. diff --git a/Misc/NEWS.d/3.13.0a1.rst b/Misc/NEWS.d/3.13.0a1.rst index d385b6a4504f97..16715bee5a8e49 100644 --- a/Misc/NEWS.d/3.13.0a1.rst +++ b/Misc/NEWS.d/3.13.0a1.rst @@ -4380,7 +4380,7 @@ Patch by Victor Stinner. .. nonce: I6MQhb .. section: Library -:pep:`594`: Remove the :mod:`!cgi`` and :mod:`!cgitb` modules, deprecated in +:pep:`594`: Remove the :mod:`!cgi` and :mod:`!cgitb` modules, deprecated in Python 3.11. Patch by Victor Stinner. .. diff --git a/Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst b/Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst new file mode 100644 index 00000000000000..5e4acfba8a6949 --- /dev/null +++ b/Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst @@ -0,0 +1,2 @@ +configure and Makefile were refactored to accomodate framework builds on +Apple platforms other than macOS. diff --git a/Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst b/Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst new file mode 100644 index 00000000000000..998d4426dd53f9 --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst @@ -0,0 +1,9 @@ +Add PyTime C API: + +* :c:type:`PyTime_t` type. +* :c:var:`PyTime_MIN` and :c:var:`PyTime_MAX` constants. +* :c:func:`PyTime_AsSecondsDouble`, + :c:func:`PyTime_Monotonic`, :c:func:`PyTime_PerfCounter`, and + :c:func:`PyTime_Time` functions. + +Patch by Victor Stinner. diff --git a/Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst b/Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst new file mode 100644 index 00000000000000..a8aa191b5eb3ba --- /dev/null +++ b/Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst @@ -0,0 +1,2 @@ +Adds :c:func:`PyLong_AsNativeBytes`, :c:func:`PyLong_FromNativeBytes` and +:c:func:`PyLong_FromUnsignedNativeBytes` functions. diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst b/Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst new file mode 100644 index 00000000000000..beb82dbcd3cccd --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst @@ -0,0 +1 @@ +Implement the foundations of the Tier 2 redundancy eliminator. 
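For the PyTime functions announced in the C API NEWS entry above (PyTime_t, PyTime_AsSecondsDouble, PyTime_Monotonic, PyTime_PerfCounter, PyTime_Time), the following is a minimal usage sketch, not part of the patch itself. It assumes only what is visible later in this series in Modules/_testcapi/time.c: the clock functions return 0 on success and a negative value with an exception set, and PyTime_t holds a nanosecond count. The helper name time_callable_seconds is illustrative.

    #include <Python.h>

    /* Time a zero-argument callable and return the elapsed time in seconds. */
    static PyObject *
    time_callable_seconds(PyObject *callable)
    {
        PyTime_t t0, t1;
        if (PyTime_PerfCounter(&t0) < 0) {
            return NULL;                    /* exception already set */
        }
        PyObject *res = PyObject_CallNoArgs(callable);
        if (res == NULL) {
            return NULL;
        }
        Py_DECREF(res);
        if (PyTime_PerfCounter(&t1) < 0) {
            return NULL;
        }
        /* PyTime_t counts nanoseconds here; convert to float seconds. */
        return PyFloat_FromDouble(PyTime_AsSecondsDouble(t1 - t0));
    }

Likewise, a hedged sketch for the PyLong_AsNativeBytes / PyLong_FromNativeBytes pair from the other C API NEWS entry, following the conventions visible in the Objects/longobject.c hunk later in this series: the endianness argument is 1 for little-endian, 0 for big-endian, negative for native order, and PyLong_AsNativeBytes returns the number of bytes the value actually needs (a result larger than the buffer means the value did not fit, with only the low bytes written). The helper name roundtrip_int64 is illustrative, not part of the patch.

    #include <Python.h>
    #include <stdint.h>

    /* Copy a Python int into an int64_t and rebuild it, or raise OverflowError. */
    static PyObject *
    roundtrip_int64(PyObject *obj)
    {
        int64_t buf = 0;
        Py_ssize_t needed = PyLong_AsNativeBytes(obj, &buf, (Py_ssize_t)sizeof(buf), -1);
        if (needed < 0) {
            return NULL;                    /* exception already set */
        }
        if (needed > (Py_ssize_t)sizeof(buf)) {
            /* The value required more than 8 bytes; only the low bytes were copied. */
            PyErr_SetString(PyExc_OverflowError, "int does not fit in 8 bytes");
            return NULL;
        }
        /* Signed reconstruction; PyLong_FromUnsignedNativeBytes is the unsigned variant. */
        return PyLong_FromNativeBytes(&buf, sizeof(buf), -1);
    }
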
diff --git a/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst b/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst index 73d3d001f07f3f..3e6eef183ad524 100644 --- a/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst +++ b/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst @@ -1,4 +1,4 @@ Most exceptions are now ignored when attempting to set the ``__orig_class__`` attribute on objects returned when calling :mod:`typing` generic aliases (including generic aliases created using :data:`typing.Annotated`). -Previously only :exc:`AttributeError`` was ignored. Patch by Dave Shawley. +Previously only :exc:`AttributeError` was ignored. Patch by Dave Shawley. diff --git a/Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst b/Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst new file mode 100644 index 00000000000000..8ffe328b16598a --- /dev/null +++ b/Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst @@ -0,0 +1 @@ +Allows :mod:`multiprocessing` to create pools of greater than 62 processes. diff --git a/Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst b/Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst new file mode 100644 index 00000000000000..a679391857dcb3 --- /dev/null +++ b/Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst @@ -0,0 +1 @@ +Fixes ``py.exe`` launcher failing when run as users without user profiles. diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index 8a65a9cffb1b9d..e98775a4808765 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -162,7 +162,7 @@ @MODULE__XXTESTFUZZ_TRUE@_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c @MODULE__TESTBUFFER_TRUE@_testbuffer _testbuffer.c @MODULE__TESTINTERNALCAPI_TRUE@_testinternalcapi _testinternalcapi.c _testinternalcapi/test_lock.c _testinternalcapi/pytime.c _testinternalcapi/set.c _testinternalcapi/test_critical_sections.c -@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/bytearray.c _testcapi/bytes.c _testcapi/unicode.c _testcapi/dict.c _testcapi/set.c _testcapi/list.c _testcapi/tuple.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/complex.c _testcapi/numbers.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/file.c _testcapi/codec.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c _testcapi/sys.c _testcapi/hash.c +@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/bytearray.c _testcapi/bytes.c _testcapi/unicode.c _testcapi/dict.c _testcapi/set.c _testcapi/list.c _testcapi/tuple.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/complex.c _testcapi/numbers.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/file.c _testcapi/codec.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c _testcapi/sys.c _testcapi/hash.c _testcapi/time.c @MODULE__TESTCLINIC_TRUE@_testclinic _testclinic.c 
@MODULE__TESTCLINIC_LIMITED_TRUE@_testclinic_limited _testclinic_limited.c diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c index d794af8de2b8f0..a3239ec0f52960 100644 --- a/Modules/_io/textio.c +++ b/Modules/_io/textio.c @@ -2393,7 +2393,7 @@ textiowrapper_parse_cookie(cookie_type *cookie, PyObject *cookieObj) return -1; if (_PyLong_AsByteArray(cookieLong, buffer, sizeof(buffer), - PY_LITTLE_ENDIAN, 0) < 0) { + PY_LITTLE_ENDIAN, 0, 1) < 0) { Py_DECREF(cookieLong); return -1; } diff --git a/Modules/_pickle.c b/Modules/_pickle.c index f210c0ca205991..0d83261168185d 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -2162,7 +2162,8 @@ save_long(PicklerObject *self, PyObject *obj) pdata = (unsigned char *)PyBytes_AS_STRING(repr); i = _PyLong_AsByteArray((PyLongObject *)obj, pdata, nbytes, - 1 /* little endian */ , 1 /* signed */ ); + 1 /* little endian */ , 1 /* signed */ , + 1 /* with exceptions */); if (i < 0) goto error; /* If the int is negative, this may be a byte more than diff --git a/Modules/_randommodule.c b/Modules/_randommodule.c index 4403e1d132c057..4463157d62248d 100644 --- a/Modules/_randommodule.c +++ b/Modules/_randommodule.c @@ -262,7 +262,7 @@ random_seed_urandom(RandomObject *self) static void random_seed_time_pid(RandomObject *self) { - _PyTime_t now; + PyTime_t now; uint32_t key[5]; now = _PyTime_GetSystemClock(); @@ -342,7 +342,8 @@ random_seed(RandomObject *self, PyObject *arg) res = _PyLong_AsByteArray((PyLongObject *)n, (unsigned char *)key, keyused * 4, PY_LITTLE_ENDIAN, - 0); /* unsigned */ + 0, /* unsigned */ + 1); /* with exceptions */ if (res == -1) { goto Done; } diff --git a/Modules/_sqlite/util.c b/Modules/_sqlite/util.c index 833a666301d8ff..9e8613ef67916e 100644 --- a/Modules/_sqlite/util.c +++ b/Modules/_sqlite/util.c @@ -162,7 +162,7 @@ _pysqlite_long_as_int64(PyObject * py_val) sqlite_int64 int64val; if (_PyLong_AsByteArray((PyLongObject *)py_val, (unsigned char *)&int64val, sizeof(int64val), - IS_LITTLE_ENDIAN, 1 /* signed */) >= 0) { + IS_LITTLE_ENDIAN, 1 /* signed */, 0) >= 0) { return int64val; } } diff --git a/Modules/_struct.c b/Modules/_struct.c index bd16fa89f18945..fa2cd37e003e0a 100644 --- a/Modules/_struct.c +++ b/Modules/_struct.c @@ -1000,9 +1000,10 @@ bp_longlong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f) (unsigned char *)p, 8, 0, /* little_endian */ - 1 /* signed */); + 1, /* signed */ + 0 /* !with_exceptions */); Py_DECREF(v); - if (res == -1 && PyErr_Occurred()) { + if (res < 0) { PyErr_Format(state->StructError, "'%c' format requires %lld <= number <= %lld", f->format, @@ -1024,9 +1025,10 @@ bp_ulonglong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f (unsigned char *)p, 8, 0, /* little_endian */ - 0 /* signed */); + 0, /* signed */ + 0 /* !with_exceptions */); Py_DECREF(v); - if (res == -1 && PyErr_Occurred()) { + if (res < 0) { PyErr_Format(state->StructError, "'%c' format requires 0 <= number <= %llu", f->format, @@ -1260,9 +1262,10 @@ lp_longlong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f) (unsigned char *)p, 8, 1, /* little_endian */ - 1 /* signed */); + 1, /* signed */ + 0 /* !with_exceptions */); Py_DECREF(v); - if (res == -1 && PyErr_Occurred()) { + if (res < 0) { PyErr_Format(state->StructError, "'%c' format requires %lld <= number <= %lld", f->format, @@ -1284,9 +1287,10 @@ lp_ulonglong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f (unsigned char *)p, 8, 1, /* little_endian */ - 0 /* signed */); + 0, /* signed */ + 0 /* 
!with_exceptions */); Py_DECREF(v); - if (res == -1 && PyErr_Occurred()) { + if (res < 0) { PyErr_Format(state->StructError, "'%c' format requires 0 <= number <= %llu", f->format, diff --git a/Modules/_testcapi/long.c b/Modules/_testcapi/long.c index 32ad8d32ab8523..dc21cf9f475228 100644 --- a/Modules/_testcapi/long.c +++ b/Modules/_testcapi/long.c @@ -776,6 +776,51 @@ pylong_asvoidptr(PyObject *module, PyObject *arg) return Py_NewRef((PyObject *)value); } +static PyObject * +pylong_asnativebytes(PyObject *module, PyObject *args) +{ + PyObject *v; + Py_buffer buffer; + Py_ssize_t n, endianness; + if (!PyArg_ParseTuple(args, "Ow*nn", &v, &buffer, &n, &endianness)) { + return NULL; + } + if (buffer.readonly) { + PyErr_SetString(PyExc_TypeError, "buffer must be writable"); + PyBuffer_Release(&buffer); + return NULL; + } + if (buffer.len < n) { + PyErr_SetString(PyExc_ValueError, "buffer must be at least 'n' bytes"); + PyBuffer_Release(&buffer); + return NULL; + } + Py_ssize_t res = PyLong_AsNativeBytes(v, buffer.buf, n, (int)endianness); + PyBuffer_Release(&buffer); + return res >= 0 ? PyLong_FromSsize_t(res) : NULL; +} + +static PyObject * +pylong_fromnativebytes(PyObject *module, PyObject *args) +{ + Py_buffer buffer; + Py_ssize_t n, endianness, signed_; + if (!PyArg_ParseTuple(args, "y*nnn", &buffer, &n, &endianness, &signed_)) { + return NULL; + } + if (buffer.len < n) { + PyErr_SetString(PyExc_ValueError, "buffer must be at least 'n' bytes"); + PyBuffer_Release(&buffer); + return NULL; + } + PyObject *res = signed_ + ? PyLong_FromNativeBytes(buffer.buf, n, (int)endianness) + : PyLong_FromUnsignedNativeBytes(buffer.buf, n, (int)endianness); + PyBuffer_Release(&buffer); + return res; +} + + static PyMethodDef test_methods[] = { _TESTCAPI_TEST_LONG_AND_OVERFLOW_METHODDEF _TESTCAPI_TEST_LONG_API_METHODDEF @@ -804,6 +849,8 @@ static PyMethodDef test_methods[] = { {"pylong_as_size_t", pylong_as_size_t, METH_O}, {"pylong_asdouble", pylong_asdouble, METH_O}, {"pylong_asvoidptr", pylong_asvoidptr, METH_O}, + {"pylong_asnativebytes", pylong_asnativebytes, METH_VARARGS}, + {"pylong_fromnativebytes", pylong_fromnativebytes, METH_VARARGS}, {NULL}, }; @@ -813,6 +860,5 @@ _PyTestCapi_Init_Long(PyObject *mod) if (PyModule_AddFunctions(mod, test_methods) < 0) { return -1; } - return 0; } diff --git a/Modules/_testcapi/parts.h b/Modules/_testcapi/parts.h index 29817edd69b134..e8cfb2423500d4 100644 --- a/Modules/_testcapi/parts.h +++ b/Modules/_testcapi/parts.h @@ -59,6 +59,7 @@ int _PyTestCapi_Init_Immortal(PyObject *module); int _PyTestCapi_Init_GC(PyObject *module); int _PyTestCapi_Init_Sys(PyObject *module); int _PyTestCapi_Init_Hash(PyObject *module); +int _PyTestCapi_Init_Time(PyObject *module); int _PyTestCapi_Init_VectorcallLimited(PyObject *module); int _PyTestCapi_Init_HeaptypeRelative(PyObject *module); diff --git a/Modules/_testcapi/time.c b/Modules/_testcapi/time.c new file mode 100644 index 00000000000000..57eb9135d30029 --- /dev/null +++ b/Modules/_testcapi/time.c @@ -0,0 +1,104 @@ +#include "parts.h" + + +static int +pytime_from_nanoseconds(PyTime_t *tp, PyObject *obj) +{ + if (!PyLong_Check(obj)) { + PyErr_Format(PyExc_TypeError, "expect int, got %s", + Py_TYPE(obj)->tp_name); + return -1; + } + + long long nsec = PyLong_AsLongLong(obj); + if (nsec == -1 && PyErr_Occurred()) { + return -1; + } + + Py_BUILD_ASSERT(sizeof(long long) == sizeof(PyTime_t)); + *tp = (PyTime_t)nsec; + return 0; +} + + +static PyObject * +test_pytime_assecondsdouble(PyObject *Py_UNUSED(self), PyObject *args) +{ + 
PyObject *obj; + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + PyTime_t ts; + if (pytime_from_nanoseconds(&ts, obj) < 0) { + return NULL; + } + double d = PyTime_AsSecondsDouble(ts); + return PyFloat_FromDouble(d); +} + + +static PyObject* +pytime_as_float(PyTime_t t) +{ + return PyFloat_FromDouble(PyTime_AsSecondsDouble(t)); +} + + + +static PyObject* +test_pytime_monotonic(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args)) +{ + PyTime_t t; + if (PyTime_Monotonic(&t) < 0) { + return NULL; + } + return pytime_as_float(t); +} + + +static PyObject* +test_pytime_perf_counter(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args)) +{ + PyTime_t t; + if (PyTime_PerfCounter(&t) < 0) { + return NULL; + } + return pytime_as_float(t); +} + + +static PyObject* +test_pytime_time(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args)) +{ + PyTime_t t; + if (PyTime_Time(&t) < 0) { + printf("ERR! %d\n", (int)t); + return NULL; + } + return pytime_as_float(t); +} + + +static PyMethodDef test_methods[] = { + {"PyTime_AsSecondsDouble", test_pytime_assecondsdouble, METH_VARARGS}, + {"PyTime_Monotonic", test_pytime_monotonic, METH_NOARGS}, + {"PyTime_PerfCounter", test_pytime_perf_counter, METH_NOARGS}, + {"PyTime_Time", test_pytime_time, METH_NOARGS}, + {NULL}, +}; + +int +_PyTestCapi_Init_Time(PyObject *m) +{ + if (PyModule_AddFunctions(m, test_methods) < 0) { + return -1; + } + Py_BUILD_ASSERT(sizeof(long long) == sizeof(PyTime_t)); + if (PyModule_AddObject(m, "PyTime_MIN", PyLong_FromLongLong(PyTime_MIN)) < 0) { + return 1; + } + if (PyModule_AddObject(m, "PyTime_MAX", PyLong_FromLongLong(PyTime_MAX)) < 0) { + return 1; + } + return 0; +} diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index e67de3eeb6e17e..b03f871b089c8a 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -4107,6 +4107,9 @@ PyInit__testcapi(void) if (_PyTestCapi_Init_Hash(m) < 0) { return NULL; } + if (_PyTestCapi_Init_Time(m) < 0) { + return NULL; + } PyState_AddModule(m, &_testcapimodule); return m; diff --git a/Modules/_testinternalcapi/pytime.c b/Modules/_testinternalcapi/pytime.c index 2b5f9eb0ef2851..f0f758ea032df8 100644 --- a/Modules/_testinternalcapi/pytime.c +++ b/Modules/_testinternalcapi/pytime.c @@ -52,21 +52,6 @@ test_pytime_fromsecondsobject(PyObject *self, PyObject *args) return _PyTime_AsNanosecondsObject(ts); } -static PyObject * -test_pytime_assecondsdouble(PyObject *self, PyObject *args) -{ - PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) { - return NULL; - } - _PyTime_t ts; - if (_PyTime_FromNanosecondsObject(&ts, obj) < 0) { - return NULL; - } - double d = _PyTime_AsSecondsDouble(ts); - return PyFloat_FromDouble(d); -} - static PyObject * test_PyTime_AsTimeval(PyObject *self, PyObject *args) { @@ -254,7 +239,6 @@ test_pytime_object_to_timespec(PyObject *self, PyObject *args) static PyMethodDef TestMethods[] = { {"_PyTime_AsMicroseconds", test_PyTime_AsMicroseconds, METH_VARARGS}, {"_PyTime_AsMilliseconds", test_PyTime_AsMilliseconds, METH_VARARGS}, - {"_PyTime_AsSecondsDouble", test_pytime_assecondsdouble, METH_VARARGS}, #ifdef HAVE_CLOCK_GETTIME {"_PyTime_AsTimespec", test_PyTime_AsTimespec, METH_VARARGS}, {"_PyTime_AsTimespec_clamp", test_PyTime_AsTimespec_clamp, METH_VARARGS}, diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c index df02b023012fbd..d7840eaf45e8d6 100644 --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -1244,7 +1244,7 @@ thread_run(void *boot_raw) _PyThreadState_Bind(tstate); PyEval_AcquireThread(tstate); 
- tstate->interp->threads.count++; + _Py_atomic_add_ssize(&tstate->interp->threads.count, 1); PyObject *res = PyObject_Call(boot->func, boot->args, boot->kwargs); if (res == NULL) { @@ -1262,7 +1262,7 @@ thread_run(void *boot_raw) thread_bootstate_free(boot, 1); - tstate->interp->threads.count--; + _Py_atomic_add_ssize(&tstate->interp->threads.count, -1); PyThreadState_Clear(tstate); _PyThreadState_DeleteCurrent(tstate); @@ -1539,7 +1539,7 @@ static PyObject * thread__count(PyObject *self, PyObject *Py_UNUSED(ignored)) { PyInterpreterState *interp = _PyInterpreterState_GET(); - return PyLong_FromLong(interp->threads.count); + return PyLong_FromSsize_t(_Py_atomic_load_ssize(&interp->threads.count)); } PyDoc_STRVAR(_count_doc, diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c index f6181168a85ae1..e3789867dc085f 100644 --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -926,7 +926,8 @@ AsObj(PyObject *value) (unsigned char *)(void *)&wideValue, sizeof(wideValue), PY_LITTLE_ENDIAN, - /* signed */ 1) == 0) { + /* signed */ 1, + /* with_exceptions */ 1) == 0) { return Tcl_NewWideIntObj(wideValue); } PyErr_Clear(); diff --git a/Modules/_winapi.c b/Modules/_winapi.c index 5e5eb123c4ccff..83a4ccd4802ae0 100644 --- a/Modules/_winapi.c +++ b/Modules/_winapi.c @@ -438,6 +438,39 @@ _winapi_ConnectNamedPipe_impl(PyObject *module, HANDLE handle, Py_RETURN_NONE; } +/*[clinic input] +_winapi.CreateEventW -> HANDLE + + security_attributes: LPSECURITY_ATTRIBUTES + manual_reset: BOOL + initial_state: BOOL + name: LPCWSTR(accept={str, NoneType}) +[clinic start generated code]*/ + +static HANDLE +_winapi_CreateEventW_impl(PyObject *module, + LPSECURITY_ATTRIBUTES security_attributes, + BOOL manual_reset, BOOL initial_state, + LPCWSTR name) +/*[clinic end generated code: output=2d4c7d5852ecb298 input=4187cee28ac763f8]*/ +{ + HANDLE handle; + + if (PySys_Audit("_winapi.CreateEventW", "bbu", manual_reset, initial_state, name) < 0) { + return INVALID_HANDLE_VALUE; + } + + Py_BEGIN_ALLOW_THREADS + handle = CreateEventW(security_attributes, manual_reset, initial_state, name); + Py_END_ALLOW_THREADS + + if (handle == INVALID_HANDLE_VALUE) { + PyErr_SetFromWindowsErr(0); + } + + return handle; +} + /*[clinic input] _winapi.CreateFile -> HANDLE @@ -674,6 +707,37 @@ _winapi_CreateJunction_impl(PyObject *module, LPCWSTR src_path, Py_RETURN_NONE; } +/*[clinic input] +_winapi.CreateMutexW -> HANDLE + + security_attributes: LPSECURITY_ATTRIBUTES + initial_owner: BOOL + name: LPCWSTR(accept={str, NoneType}) +[clinic start generated code]*/ + +static HANDLE +_winapi_CreateMutexW_impl(PyObject *module, + LPSECURITY_ATTRIBUTES security_attributes, + BOOL initial_owner, LPCWSTR name) +/*[clinic end generated code: output=31b9ee8fc37e49a5 input=7d54b921e723254a]*/ +{ + HANDLE handle; + + if (PySys_Audit("_winapi.CreateMutexW", "bu", initial_owner, name) < 0) { + return INVALID_HANDLE_VALUE; + } + + Py_BEGIN_ALLOW_THREADS + handle = CreateMutexW(security_attributes, initial_owner, name); + Py_END_ALLOW_THREADS + + if (handle == INVALID_HANDLE_VALUE) { + PyErr_SetFromWindowsErr(0); + } + + return handle; +} + /*[clinic input] _winapi.CreateNamedPipe -> HANDLE @@ -1590,6 +1654,67 @@ _winapi_UnmapViewOfFile_impl(PyObject *module, LPCVOID address) Py_RETURN_NONE; } +/*[clinic input] +_winapi.OpenEventW -> HANDLE + + desired_access: DWORD + inherit_handle: BOOL + name: LPCWSTR +[clinic start generated code]*/ + +static HANDLE +_winapi_OpenEventW_impl(PyObject *module, DWORD desired_access, + BOOL inherit_handle, LPCWSTR name) 
+/*[clinic end generated code: output=c4a45e95545a4bd2 input=dec26598748d35aa]*/ +{ + HANDLE handle; + + if (PySys_Audit("_winapi.OpenEventW", "Iu", desired_access, name) < 0) { + return INVALID_HANDLE_VALUE; + } + + Py_BEGIN_ALLOW_THREADS + handle = OpenEventW(desired_access, inherit_handle, name); + Py_END_ALLOW_THREADS + + if (handle == INVALID_HANDLE_VALUE) { + PyErr_SetFromWindowsErr(0); + } + + return handle; +} + + +/*[clinic input] +_winapi.OpenMutexW -> HANDLE + + desired_access: DWORD + inherit_handle: BOOL + name: LPCWSTR +[clinic start generated code]*/ + +static HANDLE +_winapi_OpenMutexW_impl(PyObject *module, DWORD desired_access, + BOOL inherit_handle, LPCWSTR name) +/*[clinic end generated code: output=dda39d7844397bf0 input=f3a7b466c5307712]*/ +{ + HANDLE handle; + + if (PySys_Audit("_winapi.OpenMutexW", "Iu", desired_access, name) < 0) { + return INVALID_HANDLE_VALUE; + } + + Py_BEGIN_ALLOW_THREADS + handle = OpenMutexW(desired_access, inherit_handle, name); + Py_END_ALLOW_THREADS + + if (handle == INVALID_HANDLE_VALUE) { + PyErr_SetFromWindowsErr(0); + } + + return handle; +} + /*[clinic input] _winapi.OpenFileMapping -> HANDLE @@ -1820,6 +1945,75 @@ _winapi_ReadFile_impl(PyObject *module, HANDLE handle, DWORD size, return Py_BuildValue("NI", buf, err); } +/*[clinic input] +_winapi.ReleaseMutex + + mutex: HANDLE +[clinic start generated code]*/ + +static PyObject * +_winapi_ReleaseMutex_impl(PyObject *module, HANDLE mutex) +/*[clinic end generated code: output=5b9001a72dd8af37 input=49e9d20de3559d84]*/ +{ + int err = 0; + + Py_BEGIN_ALLOW_THREADS + if (!ReleaseMutex(mutex)) { + err = GetLastError(); + } + Py_END_ALLOW_THREADS + if (err) { + return PyErr_SetFromWindowsErr(err); + } + Py_RETURN_NONE; +} + +/*[clinic input] +_winapi.ResetEvent + + event: HANDLE +[clinic start generated code]*/ + +static PyObject * +_winapi_ResetEvent_impl(PyObject *module, HANDLE event) +/*[clinic end generated code: output=81c8501d57c0530d input=e2d42d990322e87a]*/ +{ + int err = 0; + + Py_BEGIN_ALLOW_THREADS + if (!ResetEvent(event)) { + err = GetLastError(); + } + Py_END_ALLOW_THREADS + if (err) { + return PyErr_SetFromWindowsErr(err); + } + Py_RETURN_NONE; +} + +/*[clinic input] +_winapi.SetEvent + + event: HANDLE +[clinic start generated code]*/ + +static PyObject * +_winapi_SetEvent_impl(PyObject *module, HANDLE event) +/*[clinic end generated code: output=c18ba09eb9aa774d input=e660e830a37c09f8]*/ +{ + int err = 0; + + Py_BEGIN_ALLOW_THREADS + if (!SetEvent(event)) { + err = GetLastError(); + } + Py_END_ALLOW_THREADS + if (err) { + return PyErr_SetFromWindowsErr(err); + } + Py_RETURN_NONE; +} + /*[clinic input] _winapi.SetNamedPipeHandleState @@ -1942,6 +2136,310 @@ _winapi_WaitNamedPipe_impl(PyObject *module, LPCTSTR name, DWORD timeout) Py_RETURN_NONE; } + +typedef struct { + HANDLE handles[MAXIMUM_WAIT_OBJECTS]; + HANDLE cancel_event; + DWORD handle_base; + DWORD handle_count; + HANDLE thread; + volatile DWORD result; +} BatchedWaitData; + +static DWORD WINAPI +_batched_WaitForMultipleObjects_thread(LPVOID param) +{ + BatchedWaitData *data = (BatchedWaitData *)param; + data->result = WaitForMultipleObjects( + data->handle_count, + data->handles, + FALSE, + INFINITE + ); + if (data->result == WAIT_FAILED) { + DWORD err = GetLastError(); + SetEvent(data->cancel_event); + return err; + } else if (data->result >= WAIT_ABANDONED_0 && data->result < WAIT_ABANDONED_0 + MAXIMUM_WAIT_OBJECTS) { + data->result = WAIT_FAILED; + SetEvent(data->cancel_event); + return ERROR_ABANDONED_WAIT_0; 
+ } + return 0; +} + +/*[clinic input] +_winapi.BatchedWaitForMultipleObjects + + handle_seq: object + wait_all: BOOL + milliseconds: DWORD(c_default='INFINITE') = _winapi.INFINITE + +Supports a larger number of handles than WaitForMultipleObjects + +Note that the handles may be waited on other threads, which could cause +issues for objects like mutexes that become associated with the thread +that was waiting for them. Objects may also be left signalled, even if +the wait fails. + +It is recommended to use WaitForMultipleObjects whenever possible, and +only switch to BatchedWaitForMultipleObjects for scenarios where you +control all the handles involved, such as your own thread pool or +files, and all wait objects are left unmodified by a wait (for example, +manual reset events, threads, and files/pipes). + +Overlapped handles returned from this module use manual reset events. +[clinic start generated code]*/ + +static PyObject * +_winapi_BatchedWaitForMultipleObjects_impl(PyObject *module, + PyObject *handle_seq, + BOOL wait_all, DWORD milliseconds) +/*[clinic end generated code: output=d21c1a4ad0a252fd input=7e196f29005dc77b]*/ +{ + Py_ssize_t thread_count = 0, handle_count = 0, i, j; + Py_ssize_t nhandles; + BatchedWaitData *thread_data[MAXIMUM_WAIT_OBJECTS]; + HANDLE handles[MAXIMUM_WAIT_OBJECTS]; + HANDLE sigint_event = NULL; + HANDLE cancel_event = NULL; + DWORD result; + + const Py_ssize_t _MAXIMUM_TOTAL_OBJECTS = (MAXIMUM_WAIT_OBJECTS - 1) * (MAXIMUM_WAIT_OBJECTS - 1); + + if (!PySequence_Check(handle_seq)) { + PyErr_Format(PyExc_TypeError, + "sequence type expected, got '%s'", + Py_TYPE(handle_seq)->tp_name); + return NULL; + } + nhandles = PySequence_Length(handle_seq); + if (nhandles == -1) { + return NULL; + } + if (nhandles == 0) { + return wait_all ? Py_NewRef(Py_None) : PyList_New(0); + } + + /* If this is the main thread then make the wait interruptible + by Ctrl-C. When waiting for *all* handles, it is only checked + in between batches. */ + if (_PyOS_IsMainThread()) { + sigint_event = _PyOS_SigintEvent(); + assert(sigint_event != NULL); + } + + if (nhandles < 0 || nhandles > _MAXIMUM_TOTAL_OBJECTS) { + PyErr_Format(PyExc_ValueError, + "need at most %zd handles, got a sequence of length %zd", + _MAXIMUM_TOTAL_OBJECTS, nhandles); + return NULL; + } + + if (!wait_all) { + cancel_event = CreateEventW(NULL, TRUE, FALSE, NULL); + if (!cancel_event) { + PyErr_SetExcFromWindowsErr(PyExc_OSError, 0); + return NULL; + } + } + + i = 0; + while (i < nhandles) { + BatchedWaitData *data = (BatchedWaitData*)PyMem_Malloc(sizeof(BatchedWaitData)); + if (!data) { + goto error; + } + thread_data[thread_count++] = data; + data->thread = NULL; + data->cancel_event = cancel_event; + data->handle_base = Py_SAFE_DOWNCAST(i, Py_ssize_t, DWORD); + data->handle_count = Py_SAFE_DOWNCAST(nhandles - i, Py_ssize_t, DWORD); + if (data->handle_count > MAXIMUM_WAIT_OBJECTS - 1) { + data->handle_count = MAXIMUM_WAIT_OBJECTS - 1; + } + for (j = 0; j < data->handle_count; ++i, ++j) { + PyObject *v = PySequence_GetItem(handle_seq, i); + if (!v || !PyArg_Parse(v, F_HANDLE, &data->handles[j])) { + Py_XDECREF(v); + goto error; + } + Py_DECREF(v); + } + if (!wait_all) { + data->handles[data->handle_count++] = cancel_event; + } + } + + DWORD err = 0; + + /* We need to use different strategies when waiting for ALL handles + as opposed to ANY handle. This is because there is no way to + (safely) interrupt a thread that is waiting for all handles in a + group. So for ALL handles, we loop over each set and wait. 
For + ANY handle, we use threads and wait on them. */ + if (wait_all) { + Py_BEGIN_ALLOW_THREADS + long long deadline = 0; + if (milliseconds != INFINITE) { + deadline = (long long)GetTickCount64() + milliseconds; + } + + for (i = 0; !err && i < thread_count; ++i) { + DWORD timeout = milliseconds; + if (deadline) { + long long time_to_deadline = deadline - GetTickCount64(); + if (time_to_deadline <= 0) { + err = WAIT_TIMEOUT; + break; + } else if (time_to_deadline < UINT_MAX) { + timeout = (DWORD)time_to_deadline; + } + } + result = WaitForMultipleObjects(thread_data[i]->handle_count, + thread_data[i]->handles, TRUE, timeout); + // ABANDONED is not possible here because we own all the handles + if (result == WAIT_FAILED) { + err = GetLastError(); + } else if (result == WAIT_TIMEOUT) { + err = WAIT_TIMEOUT; + } + + if (!err && sigint_event) { + result = WaitForSingleObject(sigint_event, 0); + if (result == WAIT_OBJECT_0) { + err = ERROR_CONTROL_C_EXIT; + } else if (result == WAIT_FAILED) { + err = GetLastError(); + } + } + } + + CloseHandle(cancel_event); + + Py_END_ALLOW_THREADS + } else { + Py_BEGIN_ALLOW_THREADS + + for (i = 0; i < thread_count; ++i) { + BatchedWaitData *data = thread_data[i]; + data->thread = CreateThread( + NULL, + 1, // smallest possible initial stack + _batched_WaitForMultipleObjects_thread, + (LPVOID)data, + CREATE_SUSPENDED, + NULL + ); + if (!data->thread) { + err = GetLastError(); + break; + } + handles[handle_count++] = data->thread; + } + Py_END_ALLOW_THREADS + + if (err) { + PyErr_SetExcFromWindowsErr(PyExc_OSError, err); + goto error; + } + if (handle_count > MAXIMUM_WAIT_OBJECTS - 1) { + // basically an assert, but stronger + PyErr_SetString(PyExc_SystemError, "allocated too many wait objects"); + goto error; + } + + Py_BEGIN_ALLOW_THREADS + + // Once we start resuming threads, can no longer "goto error" + for (i = 0; i < thread_count; ++i) { + ResumeThread(thread_data[i]->thread); + } + if (sigint_event) { + handles[handle_count++] = sigint_event; + } + result = WaitForMultipleObjects((DWORD)handle_count, handles, wait_all, milliseconds); + // ABANDONED is not possible here because we own all the handles + if (result == WAIT_FAILED) { + err = GetLastError(); + } else if (result == WAIT_TIMEOUT) { + err = WAIT_TIMEOUT; + } else if (sigint_event && result == WAIT_OBJECT_0 + handle_count) { + err = ERROR_CONTROL_C_EXIT; + } + + SetEvent(cancel_event); + + // Wait for all threads to finish before we start freeing their memory + if (sigint_event) { + handle_count -= 1; + } + WaitForMultipleObjects((DWORD)handle_count, handles, TRUE, INFINITE); + + for (i = 0; i < thread_count; ++i) { + if (!err && thread_data[i]->result == WAIT_FAILED) { + if (!GetExitCodeThread(thread_data[i]->thread, &err)) { + err = GetLastError(); + } + } + CloseHandle(thread_data[i]->thread); + } + + CloseHandle(cancel_event); + + Py_END_ALLOW_THREADS + + } + + PyObject *triggered_indices; + if (sigint_event != NULL && err == ERROR_CONTROL_C_EXIT) { + errno = EINTR; + PyErr_SetFromErrno(PyExc_OSError); + triggered_indices = NULL; + } else if (err) { + PyErr_SetExcFromWindowsErr(PyExc_OSError, err); + triggered_indices = NULL; + } else if (wait_all) { + triggered_indices = Py_NewRef(Py_None); + } else { + triggered_indices = PyList_New(0); + if (triggered_indices) { + for (i = 0; i < thread_count; ++i) { + Py_ssize_t triggered = (Py_ssize_t)thread_data[i]->result - WAIT_OBJECT_0; + if (triggered >= 0 && triggered < thread_data[i]->handle_count - 1) { + PyObject *v = 
PyLong_FromSsize_t(thread_data[i]->handle_base + triggered); + if (!v || PyList_Append(triggered_indices, v) < 0) { + Py_XDECREF(v); + Py_CLEAR(triggered_indices); + break; + } + Py_DECREF(v); + } + } + } + } + + for (i = 0; i < thread_count; ++i) { + PyMem_Free((void *)thread_data[i]); + } + + return triggered_indices; + +error: + // We should only enter here before any threads start running. + // Once we start resuming threads, different cleanup is required + CloseHandle(cancel_event); + while (--thread_count >= 0) { + HANDLE t = thread_data[thread_count]->thread; + if (t) { + TerminateThread(t, WAIT_ABANDONED_0); + CloseHandle(t); + } + PyMem_Free((void *)thread_data[thread_count]); + } + return NULL; +} + /*[clinic input] _winapi.WaitForMultipleObjects @@ -2335,8 +2833,10 @@ _winapi_CopyFile2_impl(PyObject *module, LPCWSTR existing_file_name, static PyMethodDef winapi_functions[] = { _WINAPI_CLOSEHANDLE_METHODDEF _WINAPI_CONNECTNAMEDPIPE_METHODDEF + _WINAPI_CREATEEVENTW_METHODDEF _WINAPI_CREATEFILE_METHODDEF _WINAPI_CREATEFILEMAPPING_METHODDEF + _WINAPI_CREATEMUTEXW_METHODDEF _WINAPI_CREATENAMEDPIPE_METHODDEF _WINAPI_CREATEPIPE_METHODDEF _WINAPI_CREATEPROCESS_METHODDEF @@ -2350,17 +2850,23 @@ static PyMethodDef winapi_functions[] = { _WINAPI_GETSTDHANDLE_METHODDEF _WINAPI_GETVERSION_METHODDEF _WINAPI_MAPVIEWOFFILE_METHODDEF + _WINAPI_OPENEVENTW_METHODDEF _WINAPI_OPENFILEMAPPING_METHODDEF + _WINAPI_OPENMUTEXW_METHODDEF _WINAPI_OPENPROCESS_METHODDEF _WINAPI_PEEKNAMEDPIPE_METHODDEF _WINAPI_LCMAPSTRINGEX_METHODDEF _WINAPI_READFILE_METHODDEF + _WINAPI_RELEASEMUTEX_METHODDEF + _WINAPI_RESETEVENT_METHODDEF + _WINAPI_SETEVENT_METHODDEF _WINAPI_SETNAMEDPIPEHANDLESTATE_METHODDEF _WINAPI_TERMINATEPROCESS_METHODDEF _WINAPI_UNMAPVIEWOFFILE_METHODDEF _WINAPI_VIRTUALQUERYSIZE_METHODDEF _WINAPI_WAITNAMEDPIPE_METHODDEF _WINAPI_WAITFORMULTIPLEOBJECTS_METHODDEF + _WINAPI_BATCHEDWAITFORMULTIPLEOBJECTS_METHODDEF _WINAPI_WAITFORSINGLEOBJECT_METHODDEF _WINAPI_WRITEFILE_METHODDEF _WINAPI_GETACP_METHODDEF diff --git a/Modules/cjkcodecs/multibytecodec.c b/Modules/cjkcodecs/multibytecodec.c index 5d3c16a98423ba..2125da437963d2 100644 --- a/Modules/cjkcodecs/multibytecodec.c +++ b/Modules/cjkcodecs/multibytecodec.c @@ -973,7 +973,8 @@ _multibytecodec_MultibyteIncrementalEncoder_setstate_impl(MultibyteIncrementalEn if (_PyLong_AsByteArray(statelong, statebytes, sizeof(statebytes), 1 /* little-endian */ , - 0 /* unsigned */ ) < 0) { + 0 /* unsigned */ , + 1 /* with_exceptions */) < 0) { goto errorexit; } @@ -1255,7 +1256,8 @@ _multibytecodec_MultibyteIncrementalDecoder_setstate_impl(MultibyteIncrementalDe if (_PyLong_AsByteArray(statelong, statebytes, sizeof(statebytes), 1 /* little-endian */ , - 0 /* unsigned */ ) < 0) { + 0 /* unsigned */ , + 1 /* with_exceptions */) < 0) { return NULL; } diff --git a/Modules/clinic/_winapi.c.h b/Modules/clinic/_winapi.c.h index d1052f38919dde..468457e624c691 100644 --- a/Modules/clinic/_winapi.c.h +++ b/Modules/clinic/_winapi.c.h @@ -151,6 +151,76 @@ _winapi_ConnectNamedPipe(PyObject *module, PyObject *const *args, Py_ssize_t nar return return_value; } +PyDoc_STRVAR(_winapi_CreateEventW__doc__, +"CreateEventW($module, /, security_attributes, manual_reset,\n" +" initial_state, name)\n" +"--\n" +"\n"); + +#define _WINAPI_CREATEEVENTW_METHODDEF \ + {"CreateEventW", _PyCFunction_CAST(_winapi_CreateEventW), METH_FASTCALL|METH_KEYWORDS, _winapi_CreateEventW__doc__}, + +static HANDLE +_winapi_CreateEventW_impl(PyObject *module, + LPSECURITY_ATTRIBUTES security_attributes, + BOOL 
manual_reset, BOOL initial_state, + LPCWSTR name); + +static PyObject * +_winapi_CreateEventW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 4 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(security_attributes), &_Py_ID(manual_reset), &_Py_ID(initial_state), &_Py_ID(name), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"security_attributes", "manual_reset", "initial_state", "name", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "" F_POINTER "iiO&:CreateEventW", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + LPSECURITY_ATTRIBUTES security_attributes; + BOOL manual_reset; + BOOL initial_state; + LPCWSTR name = NULL; + HANDLE _return_value; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + &security_attributes, &manual_reset, &initial_state, _PyUnicode_WideCharString_Opt_Converter, &name)) { + goto exit; + } + _return_value = _winapi_CreateEventW_impl(module, security_attributes, manual_reset, initial_state, name); + if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { + goto exit; + } + if (_return_value == NULL) { + Py_RETURN_NONE; + } + return_value = HANDLE_TO_PYNUM(_return_value); + +exit: + /* Cleanup for name */ + PyMem_Free((void *)name); + + return return_value; +} + PyDoc_STRVAR(_winapi_CreateFile__doc__, "CreateFile($module, file_name, desired_access, share_mode,\n" " security_attributes, creation_disposition,\n" @@ -297,6 +367,73 @@ _winapi_CreateJunction(PyObject *module, PyObject *const *args, Py_ssize_t nargs return return_value; } +PyDoc_STRVAR(_winapi_CreateMutexW__doc__, +"CreateMutexW($module, /, security_attributes, initial_owner, name)\n" +"--\n" +"\n"); + +#define _WINAPI_CREATEMUTEXW_METHODDEF \ + {"CreateMutexW", _PyCFunction_CAST(_winapi_CreateMutexW), METH_FASTCALL|METH_KEYWORDS, _winapi_CreateMutexW__doc__}, + +static HANDLE +_winapi_CreateMutexW_impl(PyObject *module, + LPSECURITY_ATTRIBUTES security_attributes, + BOOL initial_owner, LPCWSTR name); + +static PyObject * +_winapi_CreateMutexW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 3 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(security_attributes), &_Py_ID(initial_owner), &_Py_ID(name), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"security_attributes", "initial_owner", "name", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "" F_POINTER "iO&:CreateMutexW", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + LPSECURITY_ATTRIBUTES security_attributes; + BOOL initial_owner; + LPCWSTR name = NULL; + HANDLE _return_value; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + 
&security_attributes, &initial_owner, _PyUnicode_WideCharString_Opt_Converter, &name)) { + goto exit; + } + _return_value = _winapi_CreateMutexW_impl(module, security_attributes, initial_owner, name); + if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { + goto exit; + } + if (_return_value == NULL) { + Py_RETURN_NONE; + } + return_value = HANDLE_TO_PYNUM(_return_value); + +exit: + /* Cleanup for name */ + PyMem_Free((void *)name); + + return return_value; +} + PyDoc_STRVAR(_winapi_CreateNamedPipe__doc__, "CreateNamedPipe($module, name, open_mode, pipe_mode, max_instances,\n" " out_buffer_size, in_buffer_size, default_timeout,\n" @@ -771,6 +908,138 @@ _winapi_UnmapViewOfFile(PyObject *module, PyObject *arg) return return_value; } +PyDoc_STRVAR(_winapi_OpenEventW__doc__, +"OpenEventW($module, /, desired_access, inherit_handle, name)\n" +"--\n" +"\n"); + +#define _WINAPI_OPENEVENTW_METHODDEF \ + {"OpenEventW", _PyCFunction_CAST(_winapi_OpenEventW), METH_FASTCALL|METH_KEYWORDS, _winapi_OpenEventW__doc__}, + +static HANDLE +_winapi_OpenEventW_impl(PyObject *module, DWORD desired_access, + BOOL inherit_handle, LPCWSTR name); + +static PyObject * +_winapi_OpenEventW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 3 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(desired_access), &_Py_ID(inherit_handle), &_Py_ID(name), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"desired_access", "inherit_handle", "name", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "kiO&:OpenEventW", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + DWORD desired_access; + BOOL inherit_handle; + LPCWSTR name = NULL; + HANDLE _return_value; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + &desired_access, &inherit_handle, _PyUnicode_WideCharString_Converter, &name)) { + goto exit; + } + _return_value = _winapi_OpenEventW_impl(module, desired_access, inherit_handle, name); + if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { + goto exit; + } + if (_return_value == NULL) { + Py_RETURN_NONE; + } + return_value = HANDLE_TO_PYNUM(_return_value); + +exit: + /* Cleanup for name */ + PyMem_Free((void *)name); + + return return_value; +} + +PyDoc_STRVAR(_winapi_OpenMutexW__doc__, +"OpenMutexW($module, /, desired_access, inherit_handle, name)\n" +"--\n" +"\n"); + +#define _WINAPI_OPENMUTEXW_METHODDEF \ + {"OpenMutexW", _PyCFunction_CAST(_winapi_OpenMutexW), METH_FASTCALL|METH_KEYWORDS, _winapi_OpenMutexW__doc__}, + +static HANDLE +_winapi_OpenMutexW_impl(PyObject *module, DWORD desired_access, + BOOL inherit_handle, LPCWSTR name); + +static PyObject * +_winapi_OpenMutexW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 3 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(desired_access), 
&_Py_ID(inherit_handle), &_Py_ID(name), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"desired_access", "inherit_handle", "name", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "kiO&:OpenMutexW", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + DWORD desired_access; + BOOL inherit_handle; + LPCWSTR name = NULL; + HANDLE _return_value; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + &desired_access, &inherit_handle, _PyUnicode_WideCharString_Converter, &name)) { + goto exit; + } + _return_value = _winapi_OpenMutexW_impl(module, desired_access, inherit_handle, name); + if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { + goto exit; + } + if (_return_value == NULL) { + Py_RETURN_NONE; + } + return_value = HANDLE_TO_PYNUM(_return_value); + +exit: + /* Cleanup for name */ + PyMem_Free((void *)name); + + return return_value; +} + PyDoc_STRVAR(_winapi_OpenFileMapping__doc__, "OpenFileMapping($module, desired_access, inherit_handle, name, /)\n" "--\n" @@ -991,6 +1260,162 @@ _winapi_ReadFile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyOb return return_value; } +PyDoc_STRVAR(_winapi_ReleaseMutex__doc__, +"ReleaseMutex($module, /, mutex)\n" +"--\n" +"\n"); + +#define _WINAPI_RELEASEMUTEX_METHODDEF \ + {"ReleaseMutex", _PyCFunction_CAST(_winapi_ReleaseMutex), METH_FASTCALL|METH_KEYWORDS, _winapi_ReleaseMutex__doc__}, + +static PyObject * +_winapi_ReleaseMutex_impl(PyObject *module, HANDLE mutex); + +static PyObject * +_winapi_ReleaseMutex(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(mutex), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"mutex", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "" F_HANDLE ":ReleaseMutex", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + HANDLE mutex; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + &mutex)) { + goto exit; + } + return_value = _winapi_ReleaseMutex_impl(module, mutex); + +exit: + return return_value; +} + +PyDoc_STRVAR(_winapi_ResetEvent__doc__, +"ResetEvent($module, /, event)\n" +"--\n" +"\n"); + +#define _WINAPI_RESETEVENT_METHODDEF \ + {"ResetEvent", _PyCFunction_CAST(_winapi_ResetEvent), METH_FASTCALL|METH_KEYWORDS, _winapi_ResetEvent__doc__}, + +static PyObject * +_winapi_ResetEvent_impl(PyObject *module, HANDLE event); + +static PyObject * +_winapi_ResetEvent(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(event), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + 
#else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"event", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "" F_HANDLE ":ResetEvent", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + HANDLE event; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + &event)) { + goto exit; + } + return_value = _winapi_ResetEvent_impl(module, event); + +exit: + return return_value; +} + +PyDoc_STRVAR(_winapi_SetEvent__doc__, +"SetEvent($module, /, event)\n" +"--\n" +"\n"); + +#define _WINAPI_SETEVENT_METHODDEF \ + {"SetEvent", _PyCFunction_CAST(_winapi_SetEvent), METH_FASTCALL|METH_KEYWORDS, _winapi_SetEvent__doc__}, + +static PyObject * +_winapi_SetEvent_impl(PyObject *module, HANDLE event); + +static PyObject * +_winapi_SetEvent(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(event), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"event", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "" F_HANDLE ":SetEvent", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + HANDLE event; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + &event)) { + goto exit; + } + return_value = _winapi_SetEvent_impl(module, event); + +exit: + return return_value; +} + PyDoc_STRVAR(_winapi_SetNamedPipeHandleState__doc__, "SetNamedPipeHandleState($module, named_pipe, mode,\n" " max_collection_count, collect_data_timeout, /)\n" @@ -1114,6 +1539,77 @@ _winapi_WaitNamedPipe(PyObject *module, PyObject *const *args, Py_ssize_t nargs) return return_value; } +PyDoc_STRVAR(_winapi_BatchedWaitForMultipleObjects__doc__, +"BatchedWaitForMultipleObjects($module, /, handle_seq, wait_all,\n" +" milliseconds=_winapi.INFINITE)\n" +"--\n" +"\n" +"Supports a larger number of handles than WaitForMultipleObjects\n" +"\n" +"Note that the handles may be waited on other threads, which could cause\n" +"issues for objects like mutexes that become associated with the thread\n" +"that was waiting for them. 
Objects may also be left signalled, even if\n" +"the wait fails.\n" +"\n" +"It is recommended to use WaitForMultipleObjects whenever possible, and\n" +"only switch to BatchedWaitForMultipleObjects for scenarios where you\n" +"control all the handles involved, such as your own thread pool or\n" +"files, and all wait objects are left unmodified by a wait (for example,\n" +"manual reset events, threads, and files/pipes).\n" +"\n" +"Overlapped handles returned from this module use manual reset events."); + +#define _WINAPI_BATCHEDWAITFORMULTIPLEOBJECTS_METHODDEF \ + {"BatchedWaitForMultipleObjects", _PyCFunction_CAST(_winapi_BatchedWaitForMultipleObjects), METH_FASTCALL|METH_KEYWORDS, _winapi_BatchedWaitForMultipleObjects__doc__}, + +static PyObject * +_winapi_BatchedWaitForMultipleObjects_impl(PyObject *module, + PyObject *handle_seq, + BOOL wait_all, DWORD milliseconds); + +static PyObject * +_winapi_BatchedWaitForMultipleObjects(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 3 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(handle_seq), &_Py_ID(wait_all), &_Py_ID(milliseconds), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"handle_seq", "wait_all", "milliseconds", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .format = "Oi|k:BatchedWaitForMultipleObjects", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + PyObject *handle_seq; + BOOL wait_all; + DWORD milliseconds = INFINITE; + + if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, + &handle_seq, &wait_all, &milliseconds)) { + goto exit; + } + return_value = _winapi_BatchedWaitForMultipleObjects_impl(module, handle_seq, wait_all, milliseconds); + +exit: + return return_value; +} + PyDoc_STRVAR(_winapi_WaitForMultipleObjects__doc__, "WaitForMultipleObjects($module, handle_seq, wait_flag,\n" " milliseconds=_winapi.INFINITE, /)\n" @@ -1482,4 +1978,4 @@ _winapi_CopyFile2(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyO return return_value; } -/*[clinic end generated code: output=2350d4f2275d3a6f input=a9049054013a1b77]*/ +/*[clinic end generated code: output=1f5bbcfa8d1847c5 input=a9049054013a1b77]*/ diff --git a/Objects/exceptions.c b/Objects/exceptions.c index cff55d05163b6b..3df3a9b3b1a253 100644 --- a/Objects/exceptions.c +++ b/Objects/exceptions.c @@ -3539,7 +3539,6 @@ SimpleExtendsException(PyExc_Warning, ResourceWarning, #undef EOPNOTSUPP #undef EPROTONOSUPPORT #undef EPROTOTYPE -#undef ETIMEDOUT #undef EWOULDBLOCK #if defined(WSAEALREADY) && !defined(EALREADY) @@ -3560,9 +3559,6 @@ SimpleExtendsException(PyExc_Warning, ResourceWarning, #if defined(WSAESHUTDOWN) && !defined(ESHUTDOWN) #define ESHUTDOWN WSAESHUTDOWN #endif -#if defined(WSAETIMEDOUT) && !defined(ETIMEDOUT) -#define ETIMEDOUT WSAETIMEDOUT -#endif #if defined(WSAEWOULDBLOCK) && !defined(EWOULDBLOCK) #define EWOULDBLOCK WSAEWOULDBLOCK #endif @@ -3747,6 +3743,9 @@ _PyExc_InitState(PyInterpreterState *interp) #endif ADD_ERRNO(ProcessLookupError, ESRCH); ADD_ERRNO(TimeoutError, ETIMEDOUT); +#ifdef WSAETIMEDOUT + ADD_ERRNO(TimeoutError, WSAETIMEDOUT); +#endif return 
_PyStatus_OK(); diff --git a/Objects/longobject.c b/Objects/longobject.c index e655ba19e8f1c1..932111f58425f2 100644 --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -928,7 +928,8 @@ _PyLong_FromByteArray(const unsigned char* bytes, size_t n, int _PyLong_AsByteArray(PyLongObject* v, unsigned char* bytes, size_t n, - int little_endian, int is_signed) + int little_endian, int is_signed, + int with_exceptions) { Py_ssize_t i; /* index into v->long_value.ob_digit */ Py_ssize_t ndigits; /* number of digits */ @@ -945,8 +946,10 @@ _PyLong_AsByteArray(PyLongObject* v, ndigits = _PyLong_DigitCount(v); if (_PyLong_IsNegative(v)) { if (!is_signed) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative int to unsigned"); + if (with_exceptions) { + PyErr_SetString(PyExc_OverflowError, + "can't convert negative int to unsigned"); + } return -1; } do_twos_comp = 1; @@ -967,7 +970,12 @@ _PyLong_AsByteArray(PyLongObject* v, /* Copy over all the Python digits. It's crucial that every Python digit except for the MSD contribute exactly PyLong_SHIFT bits to the total, so first assert that the int is - normalized. */ + normalized. + NOTE: PyLong_AsNativeBytes() assumes that this function will fill in 'n' + bytes even if it eventually fails to convert the whole number. Make sure + you account for that if you are changing this algorithm to return without + doing that. + */ assert(ndigits == 0 || v->long_value.ob_digit[ndigits - 1] != 0); j = 0; accum = 0; @@ -1052,11 +1060,203 @@ _PyLong_AsByteArray(PyLongObject* v, return 0; Overflow: - PyErr_SetString(PyExc_OverflowError, "int too big to convert"); + if (with_exceptions) { + PyErr_SetString(PyExc_OverflowError, "int too big to convert"); + } return -1; } +// Refactored out for readability, not reuse +static inline int +_fits_in_n_bits(Py_ssize_t v, Py_ssize_t n) +{ + if (n >= (Py_ssize_t)sizeof(Py_ssize_t) * 8) { + return 1; + } + // If all bits above n are the same, we fit. + // (Use n-1 if we require the sign bit to be consistent.) + Py_ssize_t v_extended = v >> ((int)n - 1); + return v_extended == 0 || v_extended == -1; +} + +static inline int +_resolve_endianness(int *endianness) +{ + if (*endianness < 0) { + *endianness = PY_LITTLE_ENDIAN; + } + if (*endianness != 0 && *endianness != 1) { + PyErr_SetString(PyExc_SystemError, "invalid 'endianness' value"); + return -1; + } + return 0; +} + +Py_ssize_t +PyLong_AsNativeBytes(PyObject* vv, void* buffer, Py_ssize_t n, int endianness) +{ + PyLongObject *v; + union { + Py_ssize_t v; + unsigned char b[sizeof(Py_ssize_t)]; + } cv; + int do_decref = 0; + Py_ssize_t res = 0; + + if (vv == NULL || n < 0) { + PyErr_BadInternalCall(); + return -1; + } + + int little_endian = endianness; + if (_resolve_endianness(&little_endian) < 0) { + return -1; + } + + if (PyLong_Check(vv)) { + v = (PyLongObject *)vv; + } + else { + v = (PyLongObject *)_PyNumber_Index(vv); + if (v == NULL) { + return -1; + } + do_decref = 1; + } + + if (_PyLong_IsCompact(v)) { + res = 0; + cv.v = _PyLong_CompactValue(v); + /* Most paths result in res = sizeof(compact value). Only the case + * where 0 < n < sizeof(compact value) do we need to check and adjust + * our return value. */ + res = sizeof(cv.b); + if (n <= 0) { + // nothing to do! 
+ } + else if (n <= sizeof(cv.b)) { +#if PY_LITTLE_ENDIAN + if (little_endian) { + memcpy(buffer, cv.b, n); + } + else { + for (Py_ssize_t i = 0; i < n; ++i) { + ((unsigned char*)buffer)[n - i - 1] = cv.b[i]; + } + } +#else + if (little_endian) { + for (Py_ssize_t i = 0; i < n; ++i) { + ((unsigned char*)buffer)[i] = cv.b[sizeof(cv.b) - i - 1]; + } + } + else { + memcpy(buffer, &cv.b[sizeof(cv.b) - n], n); + } +#endif + + /* If we fit, return the requested number of bytes */ + if (_fits_in_n_bits(cv.v, n * 8)) { + res = n; + } + } + else { + unsigned char fill = cv.v < 0 ? 0xFF : 0x00; +#if PY_LITTLE_ENDIAN + if (little_endian) { + memcpy(buffer, cv.b, sizeof(cv.b)); + memset((char *)buffer + sizeof(cv.b), fill, n - sizeof(cv.b)); + } + else { + unsigned char *b = (unsigned char *)buffer; + for (Py_ssize_t i = 0; i < n - (int)sizeof(cv.b); ++i) { + *b++ = fill; + } + for (Py_ssize_t i = sizeof(cv.b); i > 0; --i) { + *b++ = cv.b[i - 1]; + } + } +#else + if (little_endian) { + unsigned char *b = (unsigned char *)buffer; + for (Py_ssize_t i = sizeof(cv.b); i > 0; --i) { + *b++ = cv.b[i - 1]; + } + for (Py_ssize_t i = 0; i < n - sizeof(cv.b); ++i) { + *b++ = fill; + } + } + else { + memset(buffer, fill, n - sizeof(cv.b)); + memcpy((char *)buffer + n - sizeof(cv.b), cv.b, sizeof(cv.b)); + } +#endif + } + } + else { + if (n > 0) { + _PyLong_AsByteArray(v, buffer, (size_t)n, little_endian, 1, 0); + } + + // More efficient calculation for number of bytes required? + size_t nb = _PyLong_NumBits((PyObject *)v); + /* Normally this would be((nb - 1) / 8) + 1 to avoid rounding up + * multiples of 8 to the next byte, but we add an implied bit for + * the sign and it cancels out. */ + size_t n_needed = (nb / 8) + 1; + res = (Py_ssize_t)n_needed; + if ((size_t)res != n_needed) { + PyErr_SetString(PyExc_OverflowError, + "value too large to convert"); + res = -1; + } + } + + if (do_decref) { + Py_DECREF(v); + } + + return res; +} + + +PyObject * +PyLong_FromNativeBytes(const void* buffer, size_t n, int endianness) +{ + if (!buffer) { + PyErr_BadInternalCall(); + return NULL; + } + + int little_endian = endianness; + if (_resolve_endianness(&little_endian) < 0) { + return NULL; + } + + return _PyLong_FromByteArray((const unsigned char *)buffer, n, + little_endian, 1); +} + + +PyObject * +PyLong_FromUnsignedNativeBytes(const void* buffer, size_t n, int endianness) +{ + if (!buffer) { + PyErr_BadInternalCall(); + return NULL; + } + + int little_endian = endianness; + if (_resolve_endianness(&little_endian) < 0) { + return NULL; + } + + return _PyLong_FromByteArray((const unsigned char *)buffer, n, + little_endian, 0); +} + + /* Create a new int object from a C pointer */ PyObject * @@ -1231,7 +1431,7 @@ PyLong_AsLongLong(PyObject *vv) } else { res = _PyLong_AsByteArray((PyLongObject *)v, (unsigned char *)&bytes, - SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 1); + SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 1, 1); } if (do_decref) { Py_DECREF(v); @@ -1270,7 +1470,7 @@ PyLong_AsUnsignedLongLong(PyObject *vv) } else { res = _PyLong_AsByteArray((PyLongObject *)vv, (unsigned char *)&bytes, - SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 0); + SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 0, 1); } /* Plan 9 can't handle long long in ? 
: expressions */ @@ -6068,7 +6268,7 @@ int_to_bytes_impl(PyObject *self, Py_ssize_t length, PyObject *byteorder, if (_PyLong_AsByteArray((PyLongObject *)self, (unsigned char *)PyBytes_AS_STRING(bytes), - length, little_endian, is_signed) < 0) { + length, little_endian, is_signed, 1) < 0) { Py_DECREF(bytes); return NULL; } diff --git a/PC/errmap.h b/PC/errmap.h index a7489ab75c6561..a064ecb80b1ed9 100644 --- a/PC/errmap.h +++ b/PC/errmap.h @@ -129,6 +129,9 @@ winerror_to_errno(int winerror) case ERROR_NO_UNICODE_TRANSLATION: // 1113 return EILSEQ; + case WAIT_TIMEOUT: // 258 + return ETIMEDOUT; + case ERROR_INVALID_FUNCTION: // 1 case ERROR_INVALID_ACCESS: // 12 case ERROR_INVALID_DATA: // 13 diff --git a/PC/launcher2.c b/PC/launcher2.c index e426eccd700044..90b0fdebd3bdfb 100644 --- a/PC/launcher2.c +++ b/PC/launcher2.c @@ -1594,6 +1594,7 @@ _registryReadLegacyEnvironment(const SearchInfo *search, HKEY root, EnvironmentI int count = swprintf_s(realTag, tagLength + 4, L"%s-32", env->tag); if (count == -1) { + debug(L"# Failed to generate 32bit tag\n"); free(realTag); return RC_INTERNAL_ERROR; } @@ -1749,10 +1750,18 @@ appxSearch(const SearchInfo *search, EnvironmentInfo **result, const wchar_t *pa exeName = search->windowed ? L"pythonw.exe" : L"python.exe"; } - if (FAILED(SHGetFolderPathW(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, buffer)) || - !join(buffer, MAXLEN, L"Microsoft\\WindowsApps") || + // Failure to get LocalAppData may just mean we're running as a user who + // doesn't have a profile directory. + // In this case, return "not found", but don't fail. + // Chances are they can't launch Store installs anyway. + if (FAILED(SHGetFolderPathW(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, buffer))) { + return RC_NO_PYTHON; + } + + if (!join(buffer, MAXLEN, L"Microsoft\\WindowsApps") || !join(buffer, MAXLEN, packageFamilyName) || !join(buffer, MAXLEN, exeName)) { + debug(L"# Failed to construct App Execution Alias path\n"); return RC_INTERNAL_ERROR; } @@ -1982,6 +1991,7 @@ collectEnvironments(const SearchInfo *search, EnvironmentInfo **result) EnvironmentInfo *env = NULL; if (!result) { + debug(L"# collectEnvironments() was passed a NULL result\n"); return RC_INTERNAL_ERROR; } *result = NULL; @@ -2276,6 +2286,7 @@ int selectEnvironment(const SearchInfo *search, EnvironmentInfo *root, EnvironmentInfo **best) { if (!best) { + debug(L"# selectEnvironment() was passed a NULL best\n"); return RC_INTERNAL_ERROR; } if (!root) { diff --git a/PCbuild/_testcapi.vcxproj b/PCbuild/_testcapi.vcxproj index 6911aacab29b97..66df0a61b5b5a6 100644 --- a/PCbuild/_testcapi.vcxproj +++ b/PCbuild/_testcapi.vcxproj @@ -125,6 +125,7 @@ + diff --git a/PCbuild/_testcapi.vcxproj.filters b/PCbuild/_testcapi.vcxproj.filters index 6059959bb9a040..651eb1d6ba0b7f 100644 --- a/PCbuild/_testcapi.vcxproj.filters +++ b/PCbuild/_testcapi.vcxproj.filters @@ -105,6 +105,9 @@ Source Files + + Source Files + Source Files diff --git a/Python/bytecodes.c b/Python/bytecodes.c index 197dff4b9888ce..96b97ca4be6d93 100644 --- a/Python/bytecodes.c +++ b/Python/bytecodes.c @@ -133,7 +133,7 @@ dummy_func( switch (opcode) { // BEGIN BYTECODES // - inst(NOP, (--)) { + pure inst(NOP, (--)) { } family(RESUME, 0) = { @@ -411,12 +411,12 @@ dummy_func( // BINARY_OP_INPLACE_ADD_UNICODE, // See comments at that opcode. 
}; - op(_GUARD_BOTH_INT, (left, right -- left: &PYLONG_TYPE, right: &PYLONG_TYPE)) { + op(_GUARD_BOTH_INT, (left, right -- left, right)) { DEOPT_IF(!PyLong_CheckExact(left)); DEOPT_IF(!PyLong_CheckExact(right)); } - pure op(_BINARY_OP_MULTIPLY_INT, (left, right -- res: &PYLONG_TYPE)) { + pure op(_BINARY_OP_MULTIPLY_INT, (left, right -- res)) { STAT_INC(BINARY_OP, hit); res = _PyLong_Multiply((PyLongObject *)left, (PyLongObject *)right); _Py_DECREF_SPECIALIZED(right, (destructor)PyObject_Free); @@ -424,7 +424,7 @@ dummy_func( ERROR_IF(res == NULL, error); } - pure op(_BINARY_OP_ADD_INT, (left, right -- res: &PYLONG_TYPE)) { + pure op(_BINARY_OP_ADD_INT, (left, right -- res)) { STAT_INC(BINARY_OP, hit); res = _PyLong_Add((PyLongObject *)left, (PyLongObject *)right); _Py_DECREF_SPECIALIZED(right, (destructor)PyObject_Free); @@ -432,7 +432,7 @@ dummy_func( ERROR_IF(res == NULL, error); } - pure op(_BINARY_OP_SUBTRACT_INT, (left, right -- res: &PYLONG_TYPE)) { + pure op(_BINARY_OP_SUBTRACT_INT, (left, right -- res)) { STAT_INC(BINARY_OP, hit); res = _PyLong_Subtract((PyLongObject *)left, (PyLongObject *)right); _Py_DECREF_SPECIALIZED(right, (destructor)PyObject_Free); @@ -447,12 +447,12 @@ dummy_func( macro(BINARY_OP_SUBTRACT_INT) = _GUARD_BOTH_INT + unused/1 + _BINARY_OP_SUBTRACT_INT; - op(_GUARD_BOTH_FLOAT, (left, right -- left: &PYFLOAT_TYPE, right: &PYFLOAT_TYPE)) { + op(_GUARD_BOTH_FLOAT, (left, right -- left, right)) { DEOPT_IF(!PyFloat_CheckExact(left)); DEOPT_IF(!PyFloat_CheckExact(right)); } - pure op(_BINARY_OP_MULTIPLY_FLOAT, (left, right -- res: &PYFLOAT_TYPE)) { + pure op(_BINARY_OP_MULTIPLY_FLOAT, (left, right -- res)) { STAT_INC(BINARY_OP, hit); double dres = ((PyFloatObject *)left)->ob_fval * @@ -460,7 +460,7 @@ dummy_func( DECREF_INPUTS_AND_REUSE_FLOAT(left, right, dres, res); } - pure op(_BINARY_OP_ADD_FLOAT, (left, right -- res: &PYFLOAT_TYPE)) { + pure op(_BINARY_OP_ADD_FLOAT, (left, right -- res)) { STAT_INC(BINARY_OP, hit); double dres = ((PyFloatObject *)left)->ob_fval + @@ -468,7 +468,7 @@ dummy_func( DECREF_INPUTS_AND_REUSE_FLOAT(left, right, dres, res); } - pure op(_BINARY_OP_SUBTRACT_FLOAT, (left, right -- res: &PYFLOAT_TYPE)) { + pure op(_BINARY_OP_SUBTRACT_FLOAT, (left, right -- res)) { STAT_INC(BINARY_OP, hit); double dres = ((PyFloatObject *)left)->ob_fval - @@ -483,12 +483,12 @@ dummy_func( macro(BINARY_OP_SUBTRACT_FLOAT) = _GUARD_BOTH_FLOAT + unused/1 + _BINARY_OP_SUBTRACT_FLOAT; - op(_GUARD_BOTH_UNICODE, (left, right -- left: &PYUNICODE_TYPE, right: &PYUNICODE_TYPE)) { + op(_GUARD_BOTH_UNICODE, (left, right -- left, right)) { DEOPT_IF(!PyUnicode_CheckExact(left)); DEOPT_IF(!PyUnicode_CheckExact(right)); } - pure op(_BINARY_OP_ADD_UNICODE, (left, right -- res: &PYUNICODE_TYPE)) { + pure op(_BINARY_OP_ADD_UNICODE, (left, right -- res)) { STAT_INC(BINARY_OP, hit); res = PyUnicode_Concat(left, right); _Py_DECREF_SPECIALIZED(left, _PyUnicode_ExactDealloc); @@ -1877,7 +1877,7 @@ dummy_func( something was returned by a descriptor protocol). Set the second element of the stack to NULL, to signal CALL that it's not a method call. - NULL | meth | arg1 | ... | argN + meth | NULL | arg1 | ... 
| argN */ DECREF_INPUTS(); ERROR_IF(attr == NULL, error); @@ -1901,7 +1901,7 @@ dummy_func( LOAD_ATTR, }; - op(_GUARD_TYPE_VERSION, (type_version/2, owner -- owner: &(GUARD_TYPE_VERSION_TYPE + type_version))) { + op(_GUARD_TYPE_VERSION, (type_version/2, owner -- owner)) { PyTypeObject *tp = Py_TYPE(owner); assert(type_version != 0); DEOPT_IF(tp->tp_version_tag != type_version); @@ -2082,7 +2082,7 @@ dummy_func( DISPATCH_INLINED(new_frame); } - op(_GUARD_DORV_VALUES, (owner -- owner: &GUARD_DORV_VALUES_TYPE)) { + op(_GUARD_DORV_VALUES, (owner -- owner)) { assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); DEOPT_IF(!_PyDictOrValues_IsValues(dorv)); @@ -2318,13 +2318,16 @@ dummy_func( assert(oparg <= INSTR_OFFSET()); JUMPBY(-oparg); #if ENABLE_SPECIALIZATION - this_instr[1].cache += (1 << OPTIMIZER_BITS_IN_COUNTER); + uint16_t counter = this_instr[1].cache; + this_instr[1].cache = counter + (1 << OPTIMIZER_BITS_IN_COUNTER); /* We are using unsigned values, but we really want signed values, so - * do the 2s complement comparison manually */ - uint16_t ucounter = this_instr[1].cache + (1 << 15); - uint16_t threshold = tstate->interp->optimizer_backedge_threshold + (1 << 15); + * do the 2s complement adjustment manually */ + uint32_t offset_counter = counter ^ (1 << 15); + uint32_t threshold = tstate->interp->optimizer_backedge_threshold; + assert((threshold & OPTIMIZER_BITS_MASK) == 0); + // Use '>=' not '>' so that the optimizer/backoff bits do not effect the result. // Double-check that the opcode isn't instrumented or something: - if (ucounter > threshold && this_instr->op.code == JUMP_BACKWARD) { + if (offset_counter >= threshold && this_instr->op.code == JUMP_BACKWARD) { OPT_STAT_INC(attempts); _Py_CODEUNIT *start = this_instr; /* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */ @@ -2338,18 +2341,18 @@ dummy_func( // Rewind and enter the executor: assert(start->op.code == ENTER_EXECUTOR); next_instr = start; - this_instr[1].cache &= ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); + this_instr[1].cache &= OPTIMIZER_BITS_MASK; } else { - int backoff = this_instr[1].cache & ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); - if (backoff < MINIMUM_TIER2_BACKOFF) { - backoff = MINIMUM_TIER2_BACKOFF; + int backoff = this_instr[1].cache & OPTIMIZER_BITS_MASK; + backoff++; + if (backoff < MIN_TIER2_BACKOFF) { + backoff = MIN_TIER2_BACKOFF; } - else if (backoff < 15 - OPTIMIZER_BITS_IN_COUNTER) { - backoff++; + else if (backoff > MAX_TIER2_BACKOFF) { + backoff = MAX_TIER2_BACKOFF; } - assert(backoff <= 15 - OPTIMIZER_BITS_IN_COUNTER); - this_instr[1].cache = ((1 << 16) - ((1 << OPTIMIZER_BITS_IN_COUNTER) << backoff)) | backoff; + this_instr[1].cache = ((UINT16_MAX << OPTIMIZER_BITS_IN_COUNTER) << backoff) | backoff; } } #endif /* ENABLE_SPECIALIZATION */ @@ -2711,7 +2714,7 @@ dummy_func( DEOPT_IF(r->len <= 0); } - op(_ITER_NEXT_RANGE, (iter -- iter, next: &PYLONG_TYPE)) { + op(_ITER_NEXT_RANGE, (iter -- iter, next)) { _PyRangeIterObject *r = (_PyRangeIterObject *)iter; assert(Py_TYPE(r) == &PyRangeIter_Type); assert(r->len > 0); @@ -2869,13 +2872,13 @@ dummy_func( exc_info->exc_value = Py_NewRef(new_exc); } - op(_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, (owner -- owner: &GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_TYPE)) { + op(_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, (owner -- owner)) { assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); 
DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv)); } - op(_GUARD_KEYS_VERSION, (keys_version/2, owner -- owner: &(GUARD_KEYS_VERSION_TYPE + keys_version))) { + op(_GUARD_KEYS_VERSION, (keys_version/2, owner -- owner)) { PyTypeObject *owner_cls = Py_TYPE(owner); PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != keys_version); @@ -3090,7 +3093,7 @@ dummy_func( macro(CALL) = _SPECIALIZE_CALL + unused/2 + _CALL; - op(_CHECK_CALL_BOUND_METHOD_EXACT_ARGS, (callable, null, unused[oparg] -- callable: &PYMETHOD_TYPE, null: &NULL_TYPE, unused[oparg])) { + op(_CHECK_CALL_BOUND_METHOD_EXACT_ARGS, (callable, null, unused[oparg] -- callable, null, unused[oparg])) { DEOPT_IF(null != NULL); DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type); } @@ -3108,7 +3111,7 @@ dummy_func( DEOPT_IF(tstate->interp->eval_frame); } - op(_CHECK_FUNCTION_EXACT_ARGS, (func_version/2, callable, self_or_null, unused[oparg] -- callable: &(PYFUNCTION_TYPE_VERSION_TYPE + func_version), self_or_null, unused[oparg])) { + op(_CHECK_FUNCTION_EXACT_ARGS, (func_version/2, callable, self_or_null, unused[oparg] -- callable, self_or_null, unused[oparg])) { DEOPT_IF(!PyFunction_Check(callable)); PyFunctionObject *func = (PyFunctionObject *)callable; DEOPT_IF(func->func_version != func_version); @@ -4034,10 +4037,9 @@ dummy_func( CHECK_EVAL_BREAKER(); } - op(_SET_IP, (--)) { + op(_SET_IP, (instr_ptr/4 --)) { TIER_TWO_ONLY - // TODO: Put the code pointer in `operand` to avoid indirection via `frame` - frame->instr_ptr = _PyCode_CODE(_PyFrame_GetCode(frame)) + oparg; + frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; } op(_SAVE_RETURN_OFFSET, (--)) { @@ -4059,23 +4061,23 @@ dummy_func( DEOPT_IF(!current_executor->vm_data.valid); } - op(_LOAD_CONST_INLINE, (ptr/4 -- value)) { + pure op(_LOAD_CONST_INLINE, (ptr/4 -- value)) { TIER_TWO_ONLY value = Py_NewRef(ptr); } - op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) { + pure op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) { TIER_TWO_ONLY value = ptr; } - op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) { + pure op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) { TIER_TWO_ONLY value = Py_NewRef(ptr); null = NULL; } - op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) { + pure op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) { TIER_TWO_ONLY value = ptr; null = NULL; @@ -4097,6 +4099,11 @@ dummy_func( exe->count++; } + op(_CHECK_VALIDITY_AND_SET_IP, (instr_ptr/4 --)) { + TIER_TWO_ONLY + DEOPT_IF(!current_executor->vm_data.valid); + frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; + } // END BYTECODES // diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h index 2d914b82dbf88f..58d238320276f4 100644 --- a/Python/executor_cases.c.h +++ b/Python/executor_cases.c.h @@ -1598,7 +1598,7 @@ something was returned by a descriptor protocol). Set the second element of the stack to NULL, to signal CALL that it's not a method call. - NULL | meth | arg1 | ... | argN + meth | NULL | arg1 | ... 
| argN */ Py_DECREF(owner); if (attr == NULL) goto pop_1_error_tier_two; @@ -3360,10 +3360,9 @@ } case _SET_IP: { - oparg = CURRENT_OPARG(); + PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND(); TIER_TWO_ONLY - // TODO: Put the code pointer in `operand` to avoid indirection via `frame` - frame->instr_ptr = _PyCode_CODE(_PyFrame_GetCode(frame)) + oparg; + frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; break; } @@ -3459,4 +3458,12 @@ break; } + case _CHECK_VALIDITY_AND_SET_IP: { + PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND(); + TIER_TWO_ONLY + if (!current_executor->vm_data.valid) goto deoptimize; + frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; + break; + } + #undef TIER_TWO diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h index e5244147d499af..a49223e4db5318 100644 --- a/Python/generated_cases.c.h +++ b/Python/generated_cases.c.h @@ -3263,13 +3263,16 @@ assert(oparg <= INSTR_OFFSET()); JUMPBY(-oparg); #if ENABLE_SPECIALIZATION - this_instr[1].cache += (1 << OPTIMIZER_BITS_IN_COUNTER); + uint16_t counter = this_instr[1].cache; + this_instr[1].cache = counter + (1 << OPTIMIZER_BITS_IN_COUNTER); /* We are using unsigned values, but we really want signed values, so - * do the 2s complement comparison manually */ - uint16_t ucounter = this_instr[1].cache + (1 << 15); - uint16_t threshold = tstate->interp->optimizer_backedge_threshold + (1 << 15); + * do the 2s complement adjustment manually */ + uint32_t offset_counter = counter ^ (1 << 15); + uint32_t threshold = tstate->interp->optimizer_backedge_threshold; + assert((threshold & OPTIMIZER_BITS_MASK) == 0); + // Use '>=' not '>' so that the optimizer/backoff bits do not effect the result. // Double-check that the opcode isn't instrumented or something: - if (ucounter > threshold && this_instr->op.code == JUMP_BACKWARD) { + if (offset_counter >= threshold && this_instr->op.code == JUMP_BACKWARD) { OPT_STAT_INC(attempts); _Py_CODEUNIT *start = this_instr; /* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */ @@ -3283,18 +3286,18 @@ // Rewind and enter the executor: assert(start->op.code == ENTER_EXECUTOR); next_instr = start; - this_instr[1].cache &= ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); + this_instr[1].cache &= OPTIMIZER_BITS_MASK; } else { - int backoff = this_instr[1].cache & ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); - if (backoff < MINIMUM_TIER2_BACKOFF) { - backoff = MINIMUM_TIER2_BACKOFF; + int backoff = this_instr[1].cache & OPTIMIZER_BITS_MASK; + backoff++; + if (backoff < MIN_TIER2_BACKOFF) { + backoff = MIN_TIER2_BACKOFF; } - else if (backoff < 15 - OPTIMIZER_BITS_IN_COUNTER) { - backoff++; + else if (backoff > MAX_TIER2_BACKOFF) { + backoff = MAX_TIER2_BACKOFF; } - assert(backoff <= 15 - OPTIMIZER_BITS_IN_COUNTER); - this_instr[1].cache = ((1 << 16) - ((1 << OPTIMIZER_BITS_IN_COUNTER) << backoff)) | backoff; + this_instr[1].cache = ((UINT16_MAX << OPTIMIZER_BITS_IN_COUNTER) << backoff) | backoff; } } #endif /* ENABLE_SPECIALIZATION */ @@ -3420,7 +3423,7 @@ something was returned by a descriptor protocol). Set the second element of the stack to NULL, to signal CALL that it's not a method call. - NULL | meth | arg1 | ... | argN + meth | NULL | arg1 | ... 
| argN */ Py_DECREF(owner); if (attr == NULL) goto pop_1_error; diff --git a/Python/optimizer.c b/Python/optimizer.c index ad9ac382d300ef..efa19680c9b1f3 100644 --- a/Python/optimizer.c +++ b/Python/optimizer.c @@ -17,8 +17,6 @@ #include "pycore_uop_metadata.h" // Uop tables #undef NEED_OPCODE_METADATA -#define UOP_MAX_TRACE_LENGTH 512 - #define MAX_EXECUTORS_SIZE 256 @@ -111,6 +109,9 @@ never_optimize( _PyExecutorObject **exec, int Py_UNUSED(stack_entries)) { + /* Although it should be benign for this to be called, + * it shouldn't happen, so fail in debug builds. */ + assert(0 && "never optimize should never be called"); return 0; } @@ -122,13 +123,19 @@ PyTypeObject _PyDefaultOptimizer_Type = { .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION, }; -_PyOptimizerObject _PyOptimizer_Default = { +static _PyOptimizerObject _PyOptimizer_Default = { PyObject_HEAD_INIT(&_PyDefaultOptimizer_Type) .optimize = never_optimize, - .resume_threshold = INT16_MAX, - .backedge_threshold = INT16_MAX, + .resume_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD, + .backedge_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD, }; +static uint32_t +shift_and_offset_threshold(uint16_t threshold) +{ + return (threshold << OPTIMIZER_BITS_IN_COUNTER) + (1 << 15); +} + _PyOptimizerObject * PyUnstable_GetOptimizer(void) { @@ -136,24 +143,33 @@ PyUnstable_GetOptimizer(void) if (interp->optimizer == &_PyOptimizer_Default) { return NULL; } - assert(interp->optimizer_backedge_threshold == interp->optimizer->backedge_threshold); - assert(interp->optimizer_resume_threshold == interp->optimizer->resume_threshold); + assert(interp->optimizer_backedge_threshold == + shift_and_offset_threshold(interp->optimizer->backedge_threshold)); + assert(interp->optimizer_resume_threshold == + shift_and_offset_threshold(interp->optimizer->resume_threshold)); Py_INCREF(interp->optimizer); return interp->optimizer; } -void -PyUnstable_SetOptimizer(_PyOptimizerObject *optimizer) +_PyOptimizerObject * +_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject *optimizer) { - PyInterpreterState *interp = _PyInterpreterState_GET(); if (optimizer == NULL) { optimizer = &_PyOptimizer_Default; } _PyOptimizerObject *old = interp->optimizer; Py_INCREF(optimizer); interp->optimizer = optimizer; - interp->optimizer_backedge_threshold = optimizer->backedge_threshold; - interp->optimizer_resume_threshold = optimizer->resume_threshold; + interp->optimizer_backedge_threshold = shift_and_offset_threshold(optimizer->backedge_threshold); + interp->optimizer_resume_threshold = shift_and_offset_threshold(optimizer->resume_threshold); + return old; +} + +void +PyUnstable_SetOptimizer(_PyOptimizerObject *optimizer) +{ + PyInterpreterState *interp = _PyInterpreterState_GET(); + _PyOptimizerObject *old = _Py_SetOptimizer(interp, optimizer); Py_DECREF(old); } @@ -308,8 +324,6 @@ BRANCH_TO_GUARD[4][2] = { [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP, }; -#define TRACE_STACK_SIZE 5 - #define CONFIDENCE_RANGE 1000 #define CONFIDENCE_CUTOFF 333 @@ -323,10 +337,11 @@ BRANCH_TO_GUARD[4][2] = { #define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ DPRINTF(2, \ - " ADD_TO_TRACE(%s, %d, %" PRIu64 ")\n", \ + " ADD_TO_TRACE(%s, %d, %" PRIu64 ", %d)\n", \ _PyUOpName(OPCODE), \ (OPARG), \ - (uint64_t)(OPERAND)); \ + (uint64_t)(OPERAND), \ + TARGET); \ assert(trace_length < max_length); \ trace[trace_length].opcode = (OPCODE); \ trace[trace_length].oparg = (OPARG); \ @@ -417,9 +432,8 @@ translate_bytecode_to_trace( top: // Jump here after 
_PUSH_FRAME or likely branches for (;;) { target = INSTR_IP(instr, code); - RESERVE_RAW(3, "epilogue"); // Always need space for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE - ADD_TO_TRACE(_SET_IP, target, 0, target); - ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target); + RESERVE_RAW(2, "epilogue"); // Always need space for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE + ADD_TO_TRACE(_CHECK_VALIDITY_AND_SET_IP, 0, (uintptr_t)instr, target); uint32_t opcode = instr->op.code; uint32_t oparg = instr->op.arg; @@ -825,11 +839,13 @@ uop_optimize( char *uop_optimize = Py_GETENV("PYTHONUOPSOPTIMIZE"); if (uop_optimize == NULL || *uop_optimize > '0') { err = _Py_uop_analyze_and_optimize(frame, buffer, - UOP_MAX_TRACE_LENGTH, curr_stackentries, &dependencies); + UOP_MAX_TRACE_LENGTH, + curr_stackentries, &dependencies); if (err <= 0) { return err; } } + assert(err == 1); _PyExecutorObject *executor = make_executor_from_uops(buffer, &dependencies); if (executor == NULL) { return -1; @@ -861,10 +877,10 @@ PyUnstable_Optimizer_NewUOpOptimizer(void) return NULL; } opt->optimize = uop_optimize; - opt->resume_threshold = INT16_MAX; - // Need at least 3 iterations to settle specializations. - // A few lower bits of the counter are reserved for other flags. - opt->backedge_threshold = 16 << OPTIMIZER_BITS_IN_COUNTER; + opt->resume_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD; + // Need a few iterations to settle specializations, + // and to ammortize the cost of optimization. + opt->backedge_threshold = 16; return (PyObject *)opt; } @@ -951,7 +967,7 @@ PyUnstable_Optimizer_NewCounter(void) return NULL; } opt->base.optimize = counter_optimize; - opt->base.resume_threshold = INT16_MAX; + opt->base.resume_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD; opt->base.backedge_threshold = 0; opt->count = 0; return (PyObject *)opt; diff --git a/Python/optimizer_analysis.c b/Python/optimizer_analysis.c index b14e6950b4a06b..49974520de924d 100644 --- a/Python/optimizer_analysis.c +++ b/Python/optimizer_analysis.c @@ -1,3 +1,14 @@ +/* + * This file contains the support code for CPython's uops redundancy eliminator. + * It also performs some simple optimizations. + * It performs a traditional data-flow analysis[1] over the trace of uops. + * Using the information gained, it chooses to emit, or skip certain instructions + * if possible. + * + * [1] For information on data-flow analysis, please see + * https://clang.llvm.org/docs/DataFlowAnalysisIntro.html + * + * */ #include "Python.h" #include "opcode.h" #include "pycore_dict.h" @@ -9,10 +20,355 @@ #include "pycore_dict.h" #include "pycore_long.h" #include "cpython/optimizer.h" +#include "pycore_optimizer.h" +#include "pycore_object.h" +#include "pycore_dict.h" +#include "pycore_function.h" +#include "pycore_uop_metadata.h" +#include "pycore_uop_ids.h" +#include "pycore_range.h" + +#include #include #include #include -#include "pycore_optimizer.h" + +// Holds locals, stack, locals, stack ... 
co_consts (in that order) +#define MAX_ABSTRACT_INTERP_SIZE 4096 + +#define OVERALLOCATE_FACTOR 5 + +#define TY_ARENA_SIZE (UOP_MAX_TRACE_LENGTH * OVERALLOCATE_FACTOR) + +// Need extras for root frame and for overflow frame (see TRACE_STACK_PUSH()) +#define MAX_ABSTRACT_FRAME_DEPTH (TRACE_STACK_SIZE + 2) + +#ifdef Py_DEBUG + static const char *const DEBUG_ENV = "PYTHON_OPT_DEBUG"; + static inline int get_lltrace(void) { + char *uop_debug = Py_GETENV(DEBUG_ENV); + int lltrace = 0; + if (uop_debug != NULL && *uop_debug >= '0') { + lltrace = *uop_debug - '0'; // TODO: Parse an int and all that + } + return lltrace; + } + #define DPRINTF(level, ...) \ + if (get_lltrace() >= (level)) { printf(__VA_ARGS__); } +#else + #define DPRINTF(level, ...) +#endif + + +// Flags for below. +#define KNOWN 1 << 0 +#define TRUE_CONST 1 << 1 +#define IS_NULL 1 << 2 +#define NOT_NULL 1 << 3 + +typedef struct { + int flags; + PyTypeObject *typ; + // constant propagated value (might be NULL) + PyObject *const_val; +} _Py_UOpsSymType; + + +typedef struct _Py_UOpsAbstractFrame { + // Max stacklen + int stack_len; + int locals_len; + + _Py_UOpsSymType **stack_pointer; + _Py_UOpsSymType **stack; + _Py_UOpsSymType **locals; +} _Py_UOpsAbstractFrame; + + +typedef struct ty_arena { + int ty_curr_number; + int ty_max_number; + _Py_UOpsSymType arena[TY_ARENA_SIZE]; +} ty_arena; + +// Tier 2 types meta interpreter +typedef struct _Py_UOpsAbstractInterpContext { + PyObject_HEAD + // The current "executing" frame. + _Py_UOpsAbstractFrame *frame; + _Py_UOpsAbstractFrame frames[MAX_ABSTRACT_FRAME_DEPTH]; + int curr_frame_depth; + + // Arena for the symbolic types. + ty_arena t_arena; + + _Py_UOpsSymType **n_consumed; + _Py_UOpsSymType **limit; + _Py_UOpsSymType *locals_and_stack[MAX_ABSTRACT_INTERP_SIZE]; +} _Py_UOpsAbstractInterpContext; + +static inline _Py_UOpsSymType* sym_new_unknown(_Py_UOpsAbstractInterpContext *ctx); + +// 0 on success, -1 on error. 
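/*
 * Roughly, the flag bits above combine as follows (this just restates what
 * the sym_* helpers later in this file compute, it is not extra state):
 *
 *   0                               nothing known about the value yet
 *   NOT_NULL                        a real object, but of unknown type
 *   KNOWN | NOT_NULL  (+ ->typ)     a real object of a known type
 *   KNOWN | IS_NULL                 definitely the NULL "no value" marker
 *   KNOWN | NOT_NULL | TRUE_CONST   a concrete constant; ->const_val holds
 *                                   a strong reference to it
 *
 * If IS_NULL and NOT_NULL ever end up both set, sym_is_null() and
 * sym_is_not_null() both return false, so the value is treated as unknown.
 */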
+static _Py_UOpsAbstractFrame * +ctx_frame_new( + _Py_UOpsAbstractInterpContext *ctx, + PyCodeObject *co, + _Py_UOpsSymType **localsplus_start, + int n_locals_already_filled, + int curr_stackentries +) +{ + assert(ctx->curr_frame_depth < MAX_ABSTRACT_FRAME_DEPTH); + _Py_UOpsAbstractFrame *frame = &ctx->frames[ctx->curr_frame_depth]; + + frame->stack_len = co->co_stacksize; + frame->locals_len = co->co_nlocalsplus; + + frame->locals = localsplus_start; + frame->stack = frame->locals + co->co_nlocalsplus; + frame->stack_pointer = frame->stack + curr_stackentries; + ctx->n_consumed = localsplus_start + (co->co_nlocalsplus + co->co_stacksize); + if (ctx->n_consumed >= ctx->limit) { + return NULL; + } + + + // Initialize with the initial state of all local variables + for (int i = n_locals_already_filled; i < co->co_nlocalsplus; i++) { + _Py_UOpsSymType *local = sym_new_unknown(ctx); + if (local == NULL) { + return NULL; + } + frame->locals[i] = local; + } + + + // Initialize the stack as well + for (int i = 0; i < curr_stackentries; i++) { + _Py_UOpsSymType *stackvar = sym_new_unknown(ctx); + if (stackvar == NULL) { + return NULL; + } + frame->stack[i] = stackvar; + } + + return frame; +} + +static void +abstractcontext_fini(_Py_UOpsAbstractInterpContext *ctx) +{ + if (ctx == NULL) { + return; + } + ctx->curr_frame_depth = 0; + int tys = ctx->t_arena.ty_curr_number; + for (int i = 0; i < tys; i++) { + Py_CLEAR(ctx->t_arena.arena[i].const_val); + } +} + +static int +abstractcontext_init( + _Py_UOpsAbstractInterpContext *ctx, + PyCodeObject *co, + int curr_stacklen, + int ir_entries +) +{ + ctx->limit = ctx->locals_and_stack + MAX_ABSTRACT_INTERP_SIZE; + ctx->n_consumed = ctx->locals_and_stack; +#ifdef Py_DEBUG // Aids debugging a little. There should never be NULL in the abstract interpreter. + for (int i = 0 ; i < MAX_ABSTRACT_INTERP_SIZE; i++) { + ctx->locals_and_stack[i] = NULL; + } +#endif + + // Setup the arena for sym expressions. + ctx->t_arena.ty_curr_number = 0; + ctx->t_arena.ty_max_number = TY_ARENA_SIZE; + + // Frame setup + ctx->curr_frame_depth = 0; + _Py_UOpsAbstractFrame *frame = ctx_frame_new(ctx, co, ctx->n_consumed, 0, curr_stacklen); + if (frame == NULL) { + return -1; + } + ctx->curr_frame_depth++; + ctx->frame = frame; + return 0; +} + + +static int +ctx_frame_pop( + _Py_UOpsAbstractInterpContext *ctx +) +{ + _Py_UOpsAbstractFrame *frame = ctx->frame; + + ctx->n_consumed = frame->locals; + ctx->curr_frame_depth--; + assert(ctx->curr_frame_depth >= 1); + ctx->frame = &ctx->frames[ctx->curr_frame_depth - 1]; + + return 0; +} + + +// Takes a borrowed reference to const_val, turns that into a strong reference. 
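/*
 * A small worked example of the layout the frame helpers above maintain;
 * the sizes are made up purely for illustration. For a code object with
 * co_nlocalsplus == 3 and co_stacksize == 2, entered with
 * curr_stackentries == 1, ctx_frame_new() carves this out of
 * ctx->locals_and_stack:
 *
 *   [ L0 | L1 | L2 | S0 | S1 | ...next frame... ]
 *
 *   frame->locals points at L0, frame->stack at S0,
 *   frame->stack_pointer at S1 (stack + 1, the next slot to be pushed),
 *   and ctx->n_consumed just past S1 (locals + 3 + 2).
 *
 * A nested ctx_frame_new() for an inlined call allocates from n_consumed
 * onwards, and ctx_frame_pop() rewinds n_consumed back to the popped
 * frame's locals, so the flat arena is reused without any freeing.
 */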
+static _Py_UOpsSymType* +sym_new(_Py_UOpsAbstractInterpContext *ctx, + PyObject *const_val) +{ + _Py_UOpsSymType *self = &ctx->t_arena.arena[ctx->t_arena.ty_curr_number]; + if (ctx->t_arena.ty_curr_number >= ctx->t_arena.ty_max_number) { + OPT_STAT_INC(optimizer_failure_reason_no_memory); + DPRINTF(1, "out of space for symbolic expression type\n"); + return NULL; + } + ctx->t_arena.ty_curr_number++; + self->const_val = NULL; + self->typ = NULL; + self->flags = 0; + + if (const_val != NULL) { + self->const_val = Py_NewRef(const_val); + } + + return self; +} + +static inline void +sym_set_flag(_Py_UOpsSymType *sym, int flag) +{ + sym->flags |= flag; +} + +static inline void +sym_clear_flag(_Py_UOpsSymType *sym, int flag) +{ + sym->flags &= (~flag); +} + +static inline bool +sym_has_flag(_Py_UOpsSymType *sym, int flag) +{ + return (sym->flags & flag) != 0; +} + +static inline bool +sym_is_known(_Py_UOpsSymType *sym) +{ + return sym_has_flag(sym, KNOWN); +} + +static inline bool +sym_is_not_null(_Py_UOpsSymType *sym) +{ + return (sym->flags & (IS_NULL | NOT_NULL)) == NOT_NULL; +} + +static inline bool +sym_is_null(_Py_UOpsSymType *sym) +{ + return (sym->flags & (IS_NULL | NOT_NULL)) == IS_NULL; +} + +static inline void +sym_set_type(_Py_UOpsSymType *sym, PyTypeObject *tp) +{ + assert(PyType_Check(tp)); + sym->typ = tp; + sym_set_flag(sym, KNOWN); + sym_set_flag(sym, NOT_NULL); +} + +static inline void +sym_set_null(_Py_UOpsSymType *sym) +{ + sym_set_flag(sym, IS_NULL); + sym_set_flag(sym, KNOWN); +} + + +static inline _Py_UOpsSymType* +sym_new_unknown(_Py_UOpsAbstractInterpContext *ctx) +{ + return sym_new(ctx,NULL); +} + +static inline _Py_UOpsSymType* +sym_new_known_notnull(_Py_UOpsAbstractInterpContext *ctx) +{ + _Py_UOpsSymType *res = sym_new_unknown(ctx); + if (res == NULL) { + return NULL; + } + sym_set_flag(res, NOT_NULL); + return res; +} + +static inline _Py_UOpsSymType* +sym_new_known_type(_Py_UOpsAbstractInterpContext *ctx, + PyTypeObject *typ) +{ + _Py_UOpsSymType *res = sym_new(ctx,NULL); + if (res == NULL) { + return NULL; + } + sym_set_type(res, typ); + return res; +} + +// Takes a borrowed reference to const_val. 
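// A minimal sketch of how an abstract-interpreter case can use the helpers
// above. _SOME_INT_UOP is a hypothetical uop, and ctx, stack_pointer and
// out_of_space are assumed to come from the surrounding generated-cases
// context; this is illustrative only, not part of the generated code.
//
//    case _SOME_INT_UOP: {
//        // The result is known to be an int, but its value is not, so
//        // record only the type; sym_set_type() inside
//        // sym_new_known_type() also marks it KNOWN and NOT_NULL.
//        _Py_UOpsSymType *res = sym_new_known_type(ctx, &PyLong_Type);
//        if (res == NULL) {
//            goto out_of_space;   // symbol arena exhausted, stop optimizing
//        }
//        stack_pointer[0] = res;
//        stack_pointer += 1;
//        break;
//    }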
+static inline _Py_UOpsSymType* +sym_new_const(_Py_UOpsAbstractInterpContext *ctx, PyObject *const_val) +{ + assert(const_val != NULL); + _Py_UOpsSymType *temp = sym_new( + ctx, + const_val + ); + if (temp == NULL) { + return NULL; + } + sym_set_type(temp, Py_TYPE(const_val)); + sym_set_flag(temp, TRUE_CONST); + sym_set_flag(temp, KNOWN); + sym_set_flag(temp, NOT_NULL); + return temp; +} + +static _Py_UOpsSymType* +sym_new_null(_Py_UOpsAbstractInterpContext *ctx) +{ + _Py_UOpsSymType *null_sym = sym_new_unknown(ctx); + if (null_sym == NULL) { + return NULL; + } + sym_set_null(null_sym); + return null_sym; +} + + +static inline bool +sym_matches_type(_Py_UOpsSymType *sym, PyTypeObject *typ) +{ + assert(typ == NULL || PyType_Check(typ)); + if (!sym_has_flag(sym, KNOWN)) { + return false; + } + return sym->typ == typ; +} + + +static inline bool +op_is_end(uint32_t opcode) +{ + return opcode == _EXIT_TRACE || opcode == _JUMP_TO_TOP; +} static int get_mutations(PyObject* dict) { @@ -199,12 +555,163 @@ remove_globals(_PyInterpreterFrame *frame, _PyUOpInstruction *buffer, builtins = func->func_builtins; break; } + default: + if (op_is_end(opcode)) { + return 1; + } + break; + } + } + return 0; +} + + + +#define STACK_LEVEL() ((int)(stack_pointer - ctx->frame->stack)) + +#define GETLOCAL(idx) ((ctx->frame->locals[idx])) + +#define REPLACE_OP(INST, OP, ARG, OPERAND) \ + INST->opcode = OP; \ + INST->oparg = ARG; \ + INST->operand = OPERAND; + +#define _LOAD_ATTR_NOT_NULL \ + do { \ + attr = sym_new_known_notnull(ctx); \ + if (attr == NULL) { \ + goto error; \ + } \ + null = sym_new_null(ctx); \ + if (null == NULL) { \ + goto error; \ + } \ + } while (0); + + +/* 1 for success, 0 for not ready, cannot error at the moment. */ +static int +uop_redundancy_eliminator( + PyCodeObject *co, + _PyUOpInstruction *trace, + int trace_len, + int curr_stacklen +) +{ + + _Py_UOpsAbstractInterpContext context; + _Py_UOpsAbstractInterpContext *ctx = &context; + + if (abstractcontext_init( + ctx, + co, curr_stacklen, + trace_len) < 0) { + goto out_of_space; + } + + for (_PyUOpInstruction *this_instr = trace; + this_instr < trace + trace_len && !op_is_end(this_instr->opcode); + this_instr++) { + + int oparg = this_instr->oparg; + uint32_t opcode = this_instr->opcode; + + _Py_UOpsSymType **stack_pointer = ctx->frame->stack_pointer; + + DPRINTF(3, "Abstract interpreting %s:%d ", + _PyOpcode_uop_name[opcode], + oparg); + switch (opcode) { +#include "tier2_redundancy_eliminator_cases.c.h" + + default: + DPRINTF(1, "Unknown opcode in abstract interpreter\n"); + Py_UNREACHABLE(); + } + assert(ctx->frame != NULL); + DPRINTF(3, " stack_level %d\n", STACK_LEVEL()); + ctx->frame->stack_pointer = stack_pointer; + assert(STACK_LEVEL() >= 0); + } + + abstractcontext_fini(ctx); + return 1; + +out_of_space: + DPRINTF(1, "Out of space in abstract interpreter\n"); + abstractcontext_fini(ctx); + return 0; + +error: + DPRINTF(1, "Encountered error in abstract interpreter\n"); + abstractcontext_fini(ctx); + return 0; +} + + +static void +remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size) +{ + /* Remove _SET_IP and _CHECK_VALIDITY where possible. + * _SET_IP is needed if the following instruction escapes or + * could error. _CHECK_VALIDITY is needed if the previous + * instruction could have escaped. 
*/ + int last_set_ip = -1; + bool may_have_escaped = false; + for (int pc = 0; pc < buffer_size; pc++) { + int opcode = buffer[pc].opcode; + switch (opcode) { + case _SET_IP: + buffer[pc].opcode = NOP; + last_set_ip = pc; + break; + case _CHECK_VALIDITY: + if (may_have_escaped) { + may_have_escaped = false; + } + else { + buffer[pc].opcode = NOP; + } + break; + case _CHECK_VALIDITY_AND_SET_IP: + if (may_have_escaped) { + may_have_escaped = false; + buffer[pc].opcode = _CHECK_VALIDITY; + } + else { + buffer[pc].opcode = NOP; + } + last_set_ip = pc; + break; case _JUMP_TO_TOP: case _EXIT_TRACE: - return 1; + return; + default: + { + bool needs_ip = false; + if (_PyUop_Flags[opcode] & HAS_ESCAPES_FLAG) { + needs_ip = true; + may_have_escaped = true; + } + if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) { + needs_ip = true; + } + if (opcode == _PUSH_FRAME) { + needs_ip = true; + } + if (needs_ip && last_set_ip >= 0) { + if (buffer[last_set_ip].opcode == _CHECK_VALIDITY) { + buffer[last_set_ip].opcode = _CHECK_VALIDITY_AND_SET_IP; + } + else { + assert(buffer[last_set_ip].opcode == _NOP); + buffer[last_set_ip].opcode = _SET_IP; + } + last_set_ip = -1; + } + } } } - return 0; } static void @@ -250,44 +757,9 @@ peephole_opt(_PyInterpreterFrame *frame, _PyUOpInstruction *buffer, int buffer_s } } -static void -remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size) -{ - int last_set_ip = -1; - bool maybe_invalid = false; - for (int pc = 0; pc < buffer_size; pc++) { - int opcode = buffer[pc].opcode; - if (opcode == _SET_IP) { - buffer[pc].opcode = NOP; - last_set_ip = pc; - } - else if (opcode == _CHECK_VALIDITY) { - if (maybe_invalid) { - maybe_invalid = false; - } - else { - buffer[pc].opcode = NOP; - } - } - else if (opcode == _JUMP_TO_TOP || opcode == _EXIT_TRACE) { - break; - } - else { - if (_PyUop_Flags[opcode] & HAS_ESCAPES_FLAG) { - maybe_invalid = true; - if (last_set_ip >= 0) { - buffer[last_set_ip].opcode = _SET_IP; - } - } - if ((_PyUop_Flags[opcode] & HAS_ERROR_FLAG) || opcode == _PUSH_FRAME) { - if (last_set_ip >= 0) { - buffer[last_set_ip].opcode = _SET_IP; - } - } - } - } -} - +// 0 - failure, no error raised, just fall back to Tier 1 +// -1 - failure, and raise error +// 1 - optimizer success int _Py_uop_analyze_and_optimize( _PyInterpreterFrame *frame, @@ -297,11 +769,33 @@ _Py_uop_analyze_and_optimize( _PyBloomFilter *dependencies ) { + OPT_STAT_INC(optimizer_attempts); + int err = remove_globals(frame, buffer, buffer_size, dependencies); - if (err <= 0) { - return err; + if (err == 0) { + goto not_ready; + } + if (err < 0) { + goto error; } + peephole_opt(frame, buffer, buffer_size); + + err = uop_redundancy_eliminator( + (PyCodeObject *)frame->f_executable, buffer, + buffer_size, curr_stacklen); + + if (err == 0) { + goto not_ready; + } + assert(err == 1); + remove_unneeded_uops(buffer, buffer_size); + + OPT_STAT_INC(optimizer_successes); return 1; +not_ready: + return 0; +error: + return -1; } diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c index 230018068d751c..7e4c07bb657d19 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c @@ -1627,8 +1627,8 @@ finalize_modules(PyThreadState *tstate) // Invalidate all executors and turn off tier 2 optimizer _Py_Executors_InvalidateAll(interp); - Py_XDECREF(interp->optimizer); - interp->optimizer = &_PyOptimizer_Default; + _PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL); + Py_XDECREF(old); // Stop watching __builtin__ modifications PyDict_Unwatch(0, interp->builtins); diff --git a/Python/pystate.c 
b/Python/pystate.c index 937c43033b068d..996f465825215f 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -625,9 +625,7 @@ init_interpreter(PyInterpreterState *interp, } interp->sys_profile_initialized = false; interp->sys_trace_initialized = false; - interp->optimizer = &_PyOptimizer_Default; - interp->optimizer_backedge_threshold = _PyOptimizer_Default.backedge_threshold; - interp->optimizer_resume_threshold = _PyOptimizer_Default.backedge_threshold; + (void)_Py_SetOptimizer(interp, NULL); interp->next_func_version = 1; interp->executor_list_head = NULL; if (interp != &runtime->_main_interpreter) { @@ -780,10 +778,8 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate) tstate->_status.cleared = 0; } - Py_CLEAR(interp->optimizer); - interp->optimizer = &_PyOptimizer_Default; - interp->optimizer_backedge_threshold = _PyOptimizer_Default.backedge_threshold; - interp->optimizer_resume_threshold = _PyOptimizer_Default.backedge_threshold; + _PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL); + Py_DECREF(old); /* It is possible that any of the objects below have a finalizer that runs Python code or otherwise relies on a thread state diff --git a/Python/pytime.c b/Python/pytime.c index 77cb95f8feb179..fb0ed85c541e68 100644 --- a/Python/pytime.c +++ b/Python/pytime.c @@ -50,7 +50,7 @@ # error "time_t is not a two's complement integer type" #endif -#if _PyTime_MIN + _PyTime_MAX != -1 +#if PyTime_MIN + PyTime_MAX != -1 # error "_PyTime_t is not a two's complement integer type" #endif @@ -124,16 +124,16 @@ pytime_as_nanoseconds(_PyTime_t t) } -// Compute t1 + t2. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +// Compute t1 + t2. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. static inline int pytime_add(_PyTime_t *t1, _PyTime_t t2) { - if (t2 > 0 && *t1 > _PyTime_MAX - t2) { - *t1 = _PyTime_MAX; + if (t2 > 0 && *t1 > PyTime_MAX - t2) { + *t1 = PyTime_MAX; return -1; } - else if (t2 < 0 && *t1 < _PyTime_MIN - t2) { - *t1 = _PyTime_MIN; + else if (t2 < 0 && *t1 < PyTime_MIN - t2) { + *t1 = PyTime_MIN; return -1; } else { @@ -156,7 +156,7 @@ pytime_mul_check_overflow(_PyTime_t a, _PyTime_t b) { if (b != 0) { assert(b > 0); - return ((a < _PyTime_MIN / b) || (_PyTime_MAX / b < a)); + return ((a < PyTime_MIN / b) || (PyTime_MAX / b < a)); } else { return 0; @@ -164,13 +164,13 @@ pytime_mul_check_overflow(_PyTime_t a, _PyTime_t b) } -// Compute t * k. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +// Compute t * k. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. static inline int pytime_mul(_PyTime_t *t, _PyTime_t k) { assert(k >= 0); if (pytime_mul_check_overflow(*t, k)) { - *t = (*t >= 0) ? _PyTime_MAX : _PyTime_MIN; + *t = (*t >= 0) ? PyTime_MAX : PyTime_MIN; return -1; } else { @@ -180,7 +180,7 @@ pytime_mul(_PyTime_t *t, _PyTime_t k) } -// Compute t * k. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. +// Compute t * k. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. static inline _PyTime_t _PyTime_Mul(_PyTime_t t, _PyTime_t k) { @@ -459,12 +459,12 @@ _PyTime_FromSeconds(int seconds) /* ensure that integer overflow cannot happen, int type should have 32 bits, whereas _PyTime_t type has at least 64 bits (SEC_TO_NS takes 30 bits). 
*/ - static_assert(INT_MAX <= _PyTime_MAX / SEC_TO_NS, "_PyTime_t overflow"); - static_assert(INT_MIN >= _PyTime_MIN / SEC_TO_NS, "_PyTime_t underflow"); + static_assert(INT_MAX <= PyTime_MAX / SEC_TO_NS, "_PyTime_t overflow"); + static_assert(INT_MIN >= PyTime_MIN / SEC_TO_NS, "_PyTime_t underflow"); _PyTime_t t = (_PyTime_t)seconds; - assert((t >= 0 && t <= _PyTime_MAX / SEC_TO_NS) - || (t < 0 && t >= _PyTime_MIN / SEC_TO_NS)); + assert((t >= 0 && t <= PyTime_MAX / SEC_TO_NS) + || (t < 0 && t >= PyTime_MIN / SEC_TO_NS)); t *= SEC_TO_NS; return pytime_from_nanoseconds(t); } @@ -587,7 +587,7 @@ pytime_from_double(_PyTime_t *tp, double value, _PyTime_round_t round, d = pytime_round(d, round); /* See comments in pytime_double_to_denominator */ - if (!((double)_PyTime_MIN <= d && d < -(double)_PyTime_MIN)) { + if (!((double)PyTime_MIN <= d && d < -(double)PyTime_MIN)) { pytime_time_t_overflow(); return -1; } @@ -649,12 +649,12 @@ _PyTime_FromMillisecondsObject(_PyTime_t *tp, PyObject *obj, _PyTime_round_t rou double -_PyTime_AsSecondsDouble(_PyTime_t t) +PyTime_AsSecondsDouble(PyTime_t t) { /* volatile avoids optimization changing how numbers are rounded */ volatile double d; - _PyTime_t ns = pytime_as_nanoseconds(t); + PyTime_t ns = pytime_as_nanoseconds(t); if (ns % SEC_TO_NS == 0) { /* Divide using integers to avoid rounding issues on the integer part. 1e-9 cannot be stored exactly in IEEE 64-bit. */ @@ -695,7 +695,7 @@ pytime_divide_round_up(const _PyTime_t t, const _PyTime_t k) assert(k > 1); if (t >= 0) { // Don't use (t + k - 1) / k to avoid integer overflow - // if t is equal to _PyTime_MAX + // if t is equal to PyTime_MAX _PyTime_t q = t / k; if (t % k) { q += 1; @@ -704,7 +704,7 @@ pytime_divide_round_up(const _PyTime_t t, const _PyTime_t k) } else { // Don't use (t - (k - 1)) / k to avoid integer overflow - // if t is equals to _PyTime_MIN. + // if t is equals to PyTime_MIN. _PyTime_t q = t / k; if (t % k) { q -= 1; @@ -759,7 +759,7 @@ pytime_divide(const _PyTime_t t, const _PyTime_t k, // Compute (t / k, t % k) in (pq, pr). // Make sure that 0 <= pr < k. // Return 0 on success. -// Return -1 on underflow and store (_PyTime_MIN, 0) in (pq, pr). +// Return -1 on underflow and store (PyTime_MIN, 0) in (pq, pr). static int pytime_divmod(const _PyTime_t t, const _PyTime_t k, _PyTime_t *pq, _PyTime_t *pr) @@ -768,8 +768,8 @@ pytime_divmod(const _PyTime_t t, const _PyTime_t k, _PyTime_t q = t / k; _PyTime_t r = t % k; if (r < 0) { - if (q == _PyTime_MIN) { - *pq = _PyTime_MIN; + if (q == PyTime_MIN) { + *pq = PyTime_MIN; *pr = 0; return -1; } @@ -784,13 +784,6 @@ pytime_divmod(const _PyTime_t t, const _PyTime_t k, } -_PyTime_t -_PyTime_AsNanoseconds(_PyTime_t t) -{ - return pytime_as_nanoseconds(t); -} - - #ifdef MS_WINDOWS _PyTime_t _PyTime_As100Nanoseconds(_PyTime_t t, _PyTime_round_t round) @@ -926,6 +919,7 @@ _PyTime_AsTimespec(_PyTime_t t, struct timespec *ts) #endif +// N.B. If raise_exc=0, this may be called without the GIL. static int py_get_system_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) { @@ -1050,6 +1044,18 @@ _PyTime_GetSystemClock(void) } +int +PyTime_Time(PyTime_t *result) +{ + if (py_get_system_clock(result, NULL, 1) < 0) { + // If clock_gettime(CLOCK_REALTIME) or gettimeofday() fails: + // silently ignore the failure and return 0. + *result = 0; + return -1; + } + return 1; +} + int _PyTime_GetSystemClockWithInfo(_PyTime_t *t, _Py_clock_info_t *info) { @@ -1092,6 +1098,7 @@ py_mach_timebase_info(_PyTimeFraction *base, int raise) #endif +// N.B. 
If raise_exc=0, this may be called without the GIL. static int py_get_monotonic_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) { @@ -1102,13 +1109,13 @@ py_get_monotonic_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) static_assert(sizeof(ticks) <= sizeof(_PyTime_t), "ULONGLONG is larger than _PyTime_t"); _PyTime_t t; - if (ticks <= (ULONGLONG)_PyTime_MAX) { + if (ticks <= (ULONGLONG)PyTime_MAX) { t = (_PyTime_t)ticks; } else { // GetTickCount64() maximum is larger than _PyTime_t maximum: // ULONGLONG is unsigned, whereas _PyTime_t is signed. - t = _PyTime_MAX; + t = PyTime_MAX; } int res = pytime_mul(&t, MS_TO_NS); @@ -1151,7 +1158,7 @@ py_get_monotonic_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) uint64_t uticks = mach_absolute_time(); // unsigned => signed - assert(uticks <= (uint64_t)_PyTime_MAX); + assert(uticks <= (uint64_t)PyTime_MAX); _PyTime_t ticks = (_PyTime_t)uticks; _PyTime_t ns = _PyTimeFraction_Mul(ticks, &base); @@ -1229,6 +1236,17 @@ _PyTime_GetMonotonicClock(void) } +int +PyTime_Monotonic(PyTime_t *result) +{ + if (py_get_monotonic_clock(result, NULL, 1) < 0) { + *result = 0; + return -1; + } + return 0; +} + + int _PyTime_GetMonotonicClockWithInfo(_PyTime_t *tp, _Py_clock_info_t *info) { @@ -1268,6 +1286,7 @@ py_win_perf_counter_frequency(_PyTimeFraction *base, int raise) } +// N.B. If raise_exc=0, this may be called without the GIL. static int py_get_win_perf_counter(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) { @@ -1335,6 +1354,25 @@ _PyTime_GetPerfCounter(void) } +int +PyTime_PerfCounter(PyTime_t *result) +{ + int res; +#ifdef MS_WINDOWS + res = py_get_win_perf_counter(result, NULL, 1); +#else + res = py_get_monotonic_clock(result, NULL, 1); +#endif + if (res < 0) { + // If py_win_perf_counter_frequency() or py_get_monotonic_clock() + // fails: silently ignore the failure and return 0. + *result = 0; + return -1; + } + return 0; +} + + int _PyTime_localtime(time_t t, struct tm *tm) { diff --git a/Python/specialize.c b/Python/specialize.c index 0c7279a7871f94..7642589508f184 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -240,6 +240,11 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) print_histogram(out, "Trace run length", stats->trace_run_length_hist); print_histogram(out, "Optimized trace length", stats->optimized_trace_length_hist); + fprintf(out, "Optimization optimizer attempts: %" PRIu64 "\n", stats->optimizer_attempts); + fprintf(out, "Optimization optimizer successes: %" PRIu64 "\n", stats->optimizer_successes); + fprintf(out, "Optimization optimizer failure no memory: %" PRIu64 "\n", + stats->optimizer_failure_reason_no_memory); + const char* const* names; for (int i = 0; i < 512; i++) { if (i < 256) { diff --git a/Python/tier2_redundancy_eliminator_bytecodes.c b/Python/tier2_redundancy_eliminator_bytecodes.c new file mode 100644 index 00000000000000..3272b187f20d0e --- /dev/null +++ b/Python/tier2_redundancy_eliminator_bytecodes.c @@ -0,0 +1,272 @@ +#include "Python.h" +#include "pycore_uops.h" +#include "pycore_uop_ids.h" + +#define op(name, ...) 
/* NAME is ignored */ + +typedef struct _Py_UOpsSymType _Py_UOpsSymType; +typedef struct _Py_UOpsAbstractInterpContext _Py_UOpsAbstractInterpContext; +typedef struct _Py_UOpsAbstractFrame _Py_UOpsAbstractFrame; + +static int +dummy_func(void) { + + PyCodeObject *code; + int oparg; + _Py_UOpsSymType *flag; + _Py_UOpsSymType *left; + _Py_UOpsSymType *right; + _Py_UOpsSymType *value; + _Py_UOpsSymType *res; + _Py_UOpsSymType *iter; + _Py_UOpsSymType *top; + _Py_UOpsSymType *bottom; + _Py_UOpsAbstractFrame *frame; + _Py_UOpsAbstractInterpContext *ctx; + _PyUOpInstruction *this_instr; + _PyBloomFilter *dependencies; + int modified; + +// BEGIN BYTECODES // + + op(_LOAD_FAST_CHECK, (-- value)) { + value = GETLOCAL(oparg); + // We guarantee this will error - just bail and don't optimize it. + if (sym_is_null(value)) { + goto out_of_space; + } + } + + op(_LOAD_FAST, (-- value)) { + value = GETLOCAL(oparg); + } + + op(_LOAD_FAST_AND_CLEAR, (-- value)) { + value = GETLOCAL(oparg); + _Py_UOpsSymType *temp = sym_new_null(ctx); + if (temp == NULL) { + goto out_of_space; + } + GETLOCAL(oparg) = temp; + } + + op(_STORE_FAST, (value --)) { + GETLOCAL(oparg) = value; + } + + op(_PUSH_NULL, (-- res)) { + res = sym_new_null(ctx); + if (res == NULL) { + goto out_of_space; + }; + } + + op(_GUARD_BOTH_INT, (left, right -- left, right)) { + if (sym_matches_type(left, &PyLong_Type) && + sym_matches_type(right, &PyLong_Type)) { + REPLACE_OP(this_instr, _NOP, 0, 0); + } + sym_set_type(left, &PyLong_Type); + sym_set_type(right, &PyLong_Type); + } + + op(_GUARD_BOTH_FLOAT, (left, right -- left, right)) { + if (sym_matches_type(left, &PyFloat_Type) && + sym_matches_type(right, &PyFloat_Type)) { + REPLACE_OP(this_instr, _NOP, 0 ,0); + } + sym_set_type(left, &PyFloat_Type); + sym_set_type(right, &PyFloat_Type); + } + + + op(_BINARY_OP_ADD_INT, (left, right -- res)) { + // TODO constant propagation + (void)left; + (void)right; + res = sym_new_known_type(ctx, &PyLong_Type); + if (res == NULL) { + goto out_of_space; + } + } + + op(_LOAD_CONST, (-- value)) { + // There should be no LOAD_CONST. It should be all + // replaced by peephole_opt. 
+ Py_UNREACHABLE(); + } + + op(_LOAD_CONST_INLINE, (ptr/4 -- value)) { + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + } + + op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) { + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + } + + op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) { + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + null = sym_new_null(ctx); + if (null == NULL) { + goto out_of_space; + } + } + + op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) { + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + null = sym_new_null(ctx); + if (null == NULL) { + goto out_of_space; + } + } + + + op(_COPY, (bottom, unused[oparg-1] -- bottom, unused[oparg-1], top)) { + assert(oparg > 0); + top = bottom; + } + + op(_SWAP, (bottom, unused[oparg-2], top -- + top, unused[oparg-2], bottom)) { + } + + op(_LOAD_ATTR_INSTANCE_VALUE, (index/1, owner -- attr, null if (oparg & 1))) { + _LOAD_ATTR_NOT_NULL + (void)index; + (void)owner; + } + + op(_LOAD_ATTR_MODULE, (index/1, owner -- attr, null if (oparg & 1))) { + _LOAD_ATTR_NOT_NULL + (void)index; + (void)owner; + } + + op(_LOAD_ATTR_WITH_HINT, (hint/1, owner -- attr, null if (oparg & 1))) { + _LOAD_ATTR_NOT_NULL + (void)hint; + (void)owner; + } + + op(_LOAD_ATTR_SLOT, (index/1, owner -- attr, null if (oparg & 1))) { + _LOAD_ATTR_NOT_NULL + (void)index; + (void)owner; + } + + op(_LOAD_ATTR_CLASS, (descr/4, owner -- attr, null if (oparg & 1))) { + _LOAD_ATTR_NOT_NULL + (void)descr; + (void)owner; + } + + op(_CHECK_FUNCTION_EXACT_ARGS, (func_version/2, callable, self_or_null, unused[oparg] -- callable, self_or_null, unused[oparg])) { + sym_set_type(callable, &PyFunction_Type); + (void)self_or_null; + (void)func_version; + } + + op(_CHECK_CALL_BOUND_METHOD_EXACT_ARGS, (callable, null, unused[oparg] -- callable, null, unused[oparg])) { + sym_set_null(null); + sym_set_type(callable, &PyMethod_Type); + } + + op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame: _Py_UOpsAbstractFrame *)) { + int argcount = oparg; + + (void)callable; + + PyFunctionObject *func = (PyFunctionObject *)(this_instr + 2)->operand; + if (func == NULL) { + goto error; + } + PyCodeObject *co = (PyCodeObject *)func->func_code; + + assert(self_or_null != NULL); + assert(args != NULL); + if (sym_is_not_null(self_or_null)) { + // Bound method fiddling, same as _INIT_CALL_PY_EXACT_ARGS in VM + args--; + argcount++; + } + + _Py_UOpsSymType **localsplus_start = ctx->n_consumed; + int n_locals_already_filled = 0; + // Can determine statically, so we interleave the new locals + // and make the current stack the new locals. + // This also sets up for true call inlining. 
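        // A rough worked example of the interleaving below (the numbers are
        // assumed for illustration): suppose oparg == 2 and self_or_null is
        // known and not NULL (a bound method's self). After the fix-up above,
        // args points at the caller's stack slots [self, arg0, arg1] and
        // argcount == 3. Reusing that slice as localsplus_start with
        // n_locals_already_filled == 3 makes the callee frame's locals[0..2]
        // exactly those caller stack symbols, and ctx_frame_new() only has to
        // create fresh "unknown" symbols for the remaining locals.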
+ if (sym_is_known(self_or_null)) { + localsplus_start = args; + n_locals_already_filled = argcount; + } + new_frame = ctx_frame_new(ctx, co, localsplus_start, n_locals_already_filled, 0); + if (new_frame == NULL){ + goto out_of_space; + } + } + + op(_POP_FRAME, (retval -- res)) { + SYNC_SP(); + ctx->frame->stack_pointer = stack_pointer; + ctx_frame_pop(ctx); + stack_pointer = ctx->frame->stack_pointer; + res = retval; + } + + op(_PUSH_FRAME, (new_frame: _Py_UOpsAbstractFrame * -- unused if (0))) { + SYNC_SP(); + ctx->frame->stack_pointer = stack_pointer; + ctx->frame = new_frame; + ctx->curr_frame_depth++; + stack_pointer = new_frame->stack_pointer; + } + + op(_UNPACK_SEQUENCE, (seq -- values[oparg])) { + /* This has to be done manually */ + (void)seq; + for (int i = 0; i < oparg; i++) { + values[i] = sym_new_unknown(ctx); + if (values[i] == NULL) { + goto out_of_space; + } + } + } + + op(_UNPACK_EX, (seq -- values[oparg & 0xFF], unused, unused[oparg >> 8])) { + /* This has to be done manually */ + (void)seq; + int totalargs = (oparg & 0xFF) + (oparg >> 8) + 1; + for (int i = 0; i < totalargs; i++) { + values[i] = sym_new_unknown(ctx); + if (values[i] == NULL) { + goto out_of_space; + } + } + } + + op(_ITER_NEXT_RANGE, (iter -- iter, next)) { + next = sym_new_known_type(ctx, &PyLong_Type); + if (next == NULL) { + goto out_of_space; + } + (void)iter; + } + + + + +// END BYTECODES // + +} \ No newline at end of file diff --git a/Python/tier2_redundancy_eliminator_cases.c.h b/Python/tier2_redundancy_eliminator_cases.c.h new file mode 100644 index 00000000000000..c2b7bbaf1c4481 --- /dev/null +++ b/Python/tier2_redundancy_eliminator_cases.c.h @@ -0,0 +1,1680 @@ +// This file is generated by Tools/cases_generator/tier2_abstract_generator.py +// from: +// Python/tier2_redundancy_eliminator_bytecodes.c +// Do not edit! + + case _NOP: { + break; + } + + case _RESUME_CHECK: { + break; + } + + /* _INSTRUMENTED_RESUME is not a viable micro-op for tier 2 */ + + case _LOAD_FAST_CHECK: { + _Py_UOpsSymType *value; + value = GETLOCAL(oparg); + // We guarantee this will error - just bail and don't optimize it. + if (sym_is_null(value)) { + goto out_of_space; + } + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _LOAD_FAST: { + _Py_UOpsSymType *value; + value = GETLOCAL(oparg); + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _LOAD_FAST_AND_CLEAR: { + _Py_UOpsSymType *value; + value = GETLOCAL(oparg); + _Py_UOpsSymType *temp = sym_new_null(ctx); + if (temp == NULL) { + goto out_of_space; + } + GETLOCAL(oparg) = temp; + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _LOAD_CONST: { + _Py_UOpsSymType *value; + // There should be no LOAD_CONST. It should be all + // replaced by peephole_opt. 
+ Py_UNREACHABLE(); + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _STORE_FAST: { + _Py_UOpsSymType *value; + value = stack_pointer[-1]; + GETLOCAL(oparg) = value; + stack_pointer += -1; + break; + } + + case _POP_TOP: { + stack_pointer += -1; + break; + } + + case _PUSH_NULL: { + _Py_UOpsSymType *res; + res = sym_new_null(ctx); + if (res == NULL) { + goto out_of_space; + }; + stack_pointer[0] = res; + stack_pointer += 1; + break; + } + + case _END_SEND: { + _Py_UOpsSymType *value; + value = sym_new_unknown(ctx); + if (value == NULL) goto out_of_space; + stack_pointer[-2] = value; + stack_pointer += -1; + break; + } + + case _UNARY_NEGATIVE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _UNARY_NOT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _TO_BOOL: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _TO_BOOL_BOOL: { + break; + } + + case _TO_BOOL_INT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _TO_BOOL_LIST: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _TO_BOOL_NONE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _TO_BOOL_STR: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _TO_BOOL_ALWAYS_TRUE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _UNARY_INVERT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _GUARD_BOTH_INT: { + _Py_UOpsSymType *right; + _Py_UOpsSymType *left; + right = stack_pointer[-1]; + left = stack_pointer[-2]; + if (sym_matches_type(left, &PyLong_Type) && + sym_matches_type(right, &PyLong_Type)) { + REPLACE_OP(this_instr, _NOP, 0, 0); + } + sym_set_type(left, &PyLong_Type); + sym_set_type(right, &PyLong_Type); + break; + } + + case _BINARY_OP_MULTIPLY_INT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_OP_ADD_INT: { + _Py_UOpsSymType *right; + _Py_UOpsSymType *left; + _Py_UOpsSymType *res; + right = stack_pointer[-1]; + left = stack_pointer[-2]; + // TODO constant propagation + (void)left; + (void)right; + res = sym_new_known_type(ctx, &PyLong_Type); + if (res == NULL) { + goto out_of_space; + } + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_OP_SUBTRACT_INT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _GUARD_BOTH_FLOAT: { + _Py_UOpsSymType *right; + _Py_UOpsSymType *left; + right = stack_pointer[-1]; + left = stack_pointer[-2]; + if (sym_matches_type(left, &PyFloat_Type) && + sym_matches_type(right, &PyFloat_Type)) { + REPLACE_OP(this_instr, _NOP, 0 ,0); + } + sym_set_type(left, &PyFloat_Type); + sym_set_type(right, 
&PyFloat_Type); + break; + } + + case _BINARY_OP_MULTIPLY_FLOAT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_OP_ADD_FLOAT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_OP_SUBTRACT_FLOAT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _GUARD_BOTH_UNICODE: { + break; + } + + case _BINARY_OP_ADD_UNICODE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_SUBSCR: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_SLICE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-3] = res; + stack_pointer += -2; + break; + } + + case _STORE_SLICE: { + stack_pointer += -4; + break; + } + + case _BINARY_SUBSCR_LIST_INT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_SUBSCR_STR_INT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_SUBSCR_TUPLE_INT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _BINARY_SUBSCR_DICT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + /* _BINARY_SUBSCR_GETITEM is not a viable micro-op for tier 2 */ + + case _LIST_APPEND: { + stack_pointer += -1; + break; + } + + case _SET_ADD: { + stack_pointer += -1; + break; + } + + case _STORE_SUBSCR: { + stack_pointer += -3; + break; + } + + case _STORE_SUBSCR_LIST_INT: { + stack_pointer += -3; + break; + } + + case _STORE_SUBSCR_DICT: { + stack_pointer += -3; + break; + } + + case _DELETE_SUBSCR: { + stack_pointer += -2; + break; + } + + case _CALL_INTRINSIC_1: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _CALL_INTRINSIC_2: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _POP_FRAME: { + _Py_UOpsSymType *retval; + _Py_UOpsSymType *res; + retval = stack_pointer[-1]; + stack_pointer += -1; + ctx->frame->stack_pointer = stack_pointer; + ctx_frame_pop(ctx); + stack_pointer = ctx->frame->stack_pointer; + res = retval; + stack_pointer[0] = res; + stack_pointer += 1; + break; + } + + /* _INSTRUMENTED_RETURN_VALUE is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_RETURN_CONST is not a viable micro-op for tier 2 */ + + case _GET_AITER: { + _Py_UOpsSymType *iter; + iter = sym_new_unknown(ctx); + if (iter == NULL) goto out_of_space; + stack_pointer[-1] = iter; + break; + } + + case _GET_ANEXT: { + _Py_UOpsSymType *awaitable; + awaitable = sym_new_unknown(ctx); + if (awaitable == NULL) 
goto out_of_space; + stack_pointer[0] = awaitable; + stack_pointer += 1; + break; + } + + case _GET_AWAITABLE: { + _Py_UOpsSymType *iter; + iter = sym_new_unknown(ctx); + if (iter == NULL) goto out_of_space; + stack_pointer[-1] = iter; + break; + } + + /* _SEND is not a viable micro-op for tier 2 */ + + /* _SEND_GEN is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_YIELD_VALUE is not a viable micro-op for tier 2 */ + + case _POP_EXCEPT: { + stack_pointer += -1; + break; + } + + case _LOAD_ASSERTION_ERROR: { + _Py_UOpsSymType *value; + value = sym_new_unknown(ctx); + if (value == NULL) goto out_of_space; + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _LOAD_BUILD_CLASS: { + _Py_UOpsSymType *bc; + bc = sym_new_unknown(ctx); + if (bc == NULL) goto out_of_space; + stack_pointer[0] = bc; + stack_pointer += 1; + break; + } + + case _STORE_NAME: { + stack_pointer += -1; + break; + } + + case _DELETE_NAME: { + break; + } + + case _UNPACK_SEQUENCE: { + _Py_UOpsSymType *seq; + _Py_UOpsSymType **values; + seq = stack_pointer[-1]; + values = &stack_pointer[-1]; + /* This has to be done manually */ + (void)seq; + for (int i = 0; i < oparg; i++) { + values[i] = sym_new_unknown(ctx); + if (values[i] == NULL) { + goto out_of_space; + } + } + stack_pointer += -1 + oparg; + break; + } + + case _UNPACK_SEQUENCE_TWO_TUPLE: { + _Py_UOpsSymType **values; + values = &stack_pointer[-1]; + for (int _i = oparg; --_i >= 0;) { + values[_i] = sym_new_unknown(ctx); + if (values[_i] == NULL) goto out_of_space; + } + stack_pointer += -1 + oparg; + break; + } + + case _UNPACK_SEQUENCE_TUPLE: { + _Py_UOpsSymType **values; + values = &stack_pointer[-1]; + for (int _i = oparg; --_i >= 0;) { + values[_i] = sym_new_unknown(ctx); + if (values[_i] == NULL) goto out_of_space; + } + stack_pointer += -1 + oparg; + break; + } + + case _UNPACK_SEQUENCE_LIST: { + _Py_UOpsSymType **values; + values = &stack_pointer[-1]; + for (int _i = oparg; --_i >= 0;) { + values[_i] = sym_new_unknown(ctx); + if (values[_i] == NULL) goto out_of_space; + } + stack_pointer += -1 + oparg; + break; + } + + case _UNPACK_EX: { + _Py_UOpsSymType *seq; + _Py_UOpsSymType **values; + seq = stack_pointer[-1]; + values = &stack_pointer[-1]; + /* This has to be done manually */ + (void)seq; + int totalargs = (oparg & 0xFF) + (oparg >> 8) + 1; + for (int i = 0; i < totalargs; i++) { + values[i] = sym_new_unknown(ctx); + if (values[i] == NULL) { + goto out_of_space; + } + } + stack_pointer += (oparg >> 8) + (oparg & 0xFF); + break; + } + + case _STORE_ATTR: { + stack_pointer += -2; + break; + } + + case _DELETE_ATTR: { + stack_pointer += -1; + break; + } + + case _STORE_GLOBAL: { + stack_pointer += -1; + break; + } + + case _DELETE_GLOBAL: { + break; + } + + case _LOAD_LOCALS: { + _Py_UOpsSymType *locals; + locals = sym_new_unknown(ctx); + if (locals == NULL) goto out_of_space; + stack_pointer[0] = locals; + stack_pointer += 1; + break; + } + + case _LOAD_FROM_DICT_OR_GLOBALS: { + _Py_UOpsSymType *v; + v = sym_new_unknown(ctx); + if (v == NULL) goto out_of_space; + stack_pointer[-1] = v; + break; + } + + case _LOAD_NAME: { + _Py_UOpsSymType *v; + v = sym_new_unknown(ctx); + if (v == NULL) goto out_of_space; + stack_pointer[0] = v; + stack_pointer += 1; + break; + } + + case _LOAD_GLOBAL: { + _Py_UOpsSymType *res; + _Py_UOpsSymType *null = NULL; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + null = sym_new_null(ctx); + if (null == NULL) goto out_of_space; + stack_pointer[0] = res; + if (oparg & 1) 
stack_pointer[1] = null; + stack_pointer += 1 + (oparg & 1); + break; + } + + case _GUARD_GLOBALS_VERSION: { + break; + } + + case _GUARD_BUILTINS_VERSION: { + break; + } + + case _LOAD_GLOBAL_MODULE: { + _Py_UOpsSymType *res; + _Py_UOpsSymType *null = NULL; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + null = sym_new_null(ctx); + if (null == NULL) goto out_of_space; + stack_pointer[0] = res; + if (oparg & 1) stack_pointer[1] = null; + stack_pointer += 1 + (oparg & 1); + break; + } + + case _LOAD_GLOBAL_BUILTINS: { + _Py_UOpsSymType *res; + _Py_UOpsSymType *null = NULL; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + null = sym_new_null(ctx); + if (null == NULL) goto out_of_space; + stack_pointer[0] = res; + if (oparg & 1) stack_pointer[1] = null; + stack_pointer += 1 + (oparg & 1); + break; + } + + case _DELETE_FAST: { + break; + } + + case _MAKE_CELL: { + break; + } + + case _DELETE_DEREF: { + break; + } + + case _LOAD_FROM_DICT_OR_DEREF: { + _Py_UOpsSymType *value; + value = sym_new_unknown(ctx); + if (value == NULL) goto out_of_space; + stack_pointer[-1] = value; + break; + } + + case _LOAD_DEREF: { + _Py_UOpsSymType *value; + value = sym_new_unknown(ctx); + if (value == NULL) goto out_of_space; + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _STORE_DEREF: { + stack_pointer += -1; + break; + } + + case _COPY_FREE_VARS: { + break; + } + + case _BUILD_STRING: { + _Py_UOpsSymType *str; + str = sym_new_unknown(ctx); + if (str == NULL) goto out_of_space; + stack_pointer[-oparg] = str; + stack_pointer += 1 - oparg; + break; + } + + case _BUILD_TUPLE: { + _Py_UOpsSymType *tup; + tup = sym_new_unknown(ctx); + if (tup == NULL) goto out_of_space; + stack_pointer[-oparg] = tup; + stack_pointer += 1 - oparg; + break; + } + + case _BUILD_LIST: { + _Py_UOpsSymType *list; + list = sym_new_unknown(ctx); + if (list == NULL) goto out_of_space; + stack_pointer[-oparg] = list; + stack_pointer += 1 - oparg; + break; + } + + case _LIST_EXTEND: { + stack_pointer += -1; + break; + } + + case _SET_UPDATE: { + stack_pointer += -1; + break; + } + + case _BUILD_SET: { + _Py_UOpsSymType *set; + set = sym_new_unknown(ctx); + if (set == NULL) goto out_of_space; + stack_pointer[-oparg] = set; + stack_pointer += 1 - oparg; + break; + } + + case _BUILD_MAP: { + _Py_UOpsSymType *map; + map = sym_new_unknown(ctx); + if (map == NULL) goto out_of_space; + stack_pointer[-oparg*2] = map; + stack_pointer += 1 - oparg*2; + break; + } + + case _SETUP_ANNOTATIONS: { + break; + } + + case _BUILD_CONST_KEY_MAP: { + _Py_UOpsSymType *map; + map = sym_new_unknown(ctx); + if (map == NULL) goto out_of_space; + stack_pointer[-1 - oparg] = map; + stack_pointer += -oparg; + break; + } + + case _DICT_UPDATE: { + stack_pointer += -1; + break; + } + + case _DICT_MERGE: { + stack_pointer += -1; + break; + } + + case _MAP_ADD: { + stack_pointer += -2; + break; + } + + /* _INSTRUMENTED_LOAD_SUPER_ATTR is not a viable micro-op for tier 2 */ + + case _LOAD_SUPER_ATTR_ATTR: { + _Py_UOpsSymType *attr; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + stack_pointer[-3] = attr; + stack_pointer += -2 + ((0) ? 
1 : 0); + break; + } + + case _LOAD_SUPER_ATTR_METHOD: { + _Py_UOpsSymType *attr; + _Py_UOpsSymType *self_or_null; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + self_or_null = sym_new_unknown(ctx); + if (self_or_null == NULL) goto out_of_space; + stack_pointer[-3] = attr; + stack_pointer[-2] = self_or_null; + stack_pointer += -1; + break; + } + + case _LOAD_ATTR: { + _Py_UOpsSymType *attr; + _Py_UOpsSymType *self_or_null = NULL; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + self_or_null = sym_new_unknown(ctx); + if (self_or_null == NULL) goto out_of_space; + stack_pointer[-1] = attr; + if (oparg & 1) stack_pointer[0] = self_or_null; + stack_pointer += (oparg & 1); + break; + } + + case _GUARD_TYPE_VERSION: { + break; + } + + case _CHECK_MANAGED_OBJECT_HAS_VALUES: { + break; + } + + case _LOAD_ATTR_INSTANCE_VALUE: { + _Py_UOpsSymType *owner; + _Py_UOpsSymType *attr; + _Py_UOpsSymType *null = NULL; + owner = stack_pointer[-1]; + uint16_t index = (uint16_t)this_instr->operand; + _LOAD_ATTR_NOT_NULL + (void)index; + (void)owner; + stack_pointer[-1] = attr; + if (oparg & 1) stack_pointer[0] = null; + stack_pointer += (oparg & 1); + break; + } + + case _CHECK_ATTR_MODULE: { + break; + } + + case _LOAD_ATTR_MODULE: { + _Py_UOpsSymType *owner; + _Py_UOpsSymType *attr; + _Py_UOpsSymType *null = NULL; + owner = stack_pointer[-1]; + uint16_t index = (uint16_t)this_instr->operand; + _LOAD_ATTR_NOT_NULL + (void)index; + (void)owner; + stack_pointer[-1] = attr; + if (oparg & 1) stack_pointer[0] = null; + stack_pointer += (oparg & 1); + break; + } + + case _CHECK_ATTR_WITH_HINT: { + break; + } + + case _LOAD_ATTR_WITH_HINT: { + _Py_UOpsSymType *owner; + _Py_UOpsSymType *attr; + _Py_UOpsSymType *null = NULL; + owner = stack_pointer[-1]; + uint16_t hint = (uint16_t)this_instr->operand; + _LOAD_ATTR_NOT_NULL + (void)hint; + (void)owner; + stack_pointer[-1] = attr; + if (oparg & 1) stack_pointer[0] = null; + stack_pointer += (oparg & 1); + break; + } + + case _LOAD_ATTR_SLOT: { + _Py_UOpsSymType *owner; + _Py_UOpsSymType *attr; + _Py_UOpsSymType *null = NULL; + owner = stack_pointer[-1]; + uint16_t index = (uint16_t)this_instr->operand; + _LOAD_ATTR_NOT_NULL + (void)index; + (void)owner; + stack_pointer[-1] = attr; + if (oparg & 1) stack_pointer[0] = null; + stack_pointer += (oparg & 1); + break; + } + + case _CHECK_ATTR_CLASS: { + break; + } + + case _LOAD_ATTR_CLASS: { + _Py_UOpsSymType *owner; + _Py_UOpsSymType *attr; + _Py_UOpsSymType *null = NULL; + owner = stack_pointer[-1]; + PyObject *descr = (PyObject *)this_instr->operand; + _LOAD_ATTR_NOT_NULL + (void)descr; + (void)owner; + stack_pointer[-1] = attr; + if (oparg & 1) stack_pointer[0] = null; + stack_pointer += (oparg & 1); + break; + } + + /* _LOAD_ATTR_PROPERTY is not a viable micro-op for tier 2 */ + + /* _LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN is not a viable micro-op for tier 2 */ + + case _GUARD_DORV_VALUES: { + break; + } + + case _STORE_ATTR_INSTANCE_VALUE: { + stack_pointer += -2; + break; + } + + /* _STORE_ATTR_WITH_HINT is not a viable micro-op for tier 2 */ + + case _STORE_ATTR_SLOT: { + stack_pointer += -2; + break; + } + + case _COMPARE_OP: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _COMPARE_OP_FLOAT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case 
_COMPARE_OP_INT: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _COMPARE_OP_STR: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _IS_OP: { + _Py_UOpsSymType *b; + b = sym_new_unknown(ctx); + if (b == NULL) goto out_of_space; + stack_pointer[-2] = b; + stack_pointer += -1; + break; + } + + case _CONTAINS_OP: { + _Py_UOpsSymType *b; + b = sym_new_unknown(ctx); + if (b == NULL) goto out_of_space; + stack_pointer[-2] = b; + stack_pointer += -1; + break; + } + + case _CHECK_EG_MATCH: { + _Py_UOpsSymType *rest; + _Py_UOpsSymType *match; + rest = sym_new_unknown(ctx); + if (rest == NULL) goto out_of_space; + match = sym_new_unknown(ctx); + if (match == NULL) goto out_of_space; + stack_pointer[-2] = rest; + stack_pointer[-1] = match; + break; + } + + case _CHECK_EXC_MATCH: { + _Py_UOpsSymType *b; + b = sym_new_unknown(ctx); + if (b == NULL) goto out_of_space; + stack_pointer[-1] = b; + break; + } + + /* _JUMP_BACKWARD is not a viable micro-op for tier 2 */ + + /* _POP_JUMP_IF_FALSE is not a viable micro-op for tier 2 */ + + /* _POP_JUMP_IF_TRUE is not a viable micro-op for tier 2 */ + + case _IS_NONE: { + _Py_UOpsSymType *b; + b = sym_new_unknown(ctx); + if (b == NULL) goto out_of_space; + stack_pointer[-1] = b; + break; + } + + case _GET_LEN: { + _Py_UOpsSymType *len_o; + len_o = sym_new_unknown(ctx); + if (len_o == NULL) goto out_of_space; + stack_pointer[0] = len_o; + stack_pointer += 1; + break; + } + + case _MATCH_CLASS: { + _Py_UOpsSymType *attrs; + attrs = sym_new_unknown(ctx); + if (attrs == NULL) goto out_of_space; + stack_pointer[-3] = attrs; + stack_pointer += -2; + break; + } + + case _MATCH_MAPPING: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[0] = res; + stack_pointer += 1; + break; + } + + case _MATCH_SEQUENCE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[0] = res; + stack_pointer += 1; + break; + } + + case _MATCH_KEYS: { + _Py_UOpsSymType *values_or_none; + values_or_none = sym_new_unknown(ctx); + if (values_or_none == NULL) goto out_of_space; + stack_pointer[0] = values_or_none; + stack_pointer += 1; + break; + } + + case _GET_ITER: { + _Py_UOpsSymType *iter; + iter = sym_new_unknown(ctx); + if (iter == NULL) goto out_of_space; + stack_pointer[-1] = iter; + break; + } + + case _GET_YIELD_FROM_ITER: { + _Py_UOpsSymType *iter; + iter = sym_new_unknown(ctx); + if (iter == NULL) goto out_of_space; + stack_pointer[-1] = iter; + break; + } + + /* _FOR_ITER is not a viable micro-op for tier 2 */ + + case _FOR_ITER_TIER_TWO: { + _Py_UOpsSymType *next; + next = sym_new_unknown(ctx); + if (next == NULL) goto out_of_space; + stack_pointer[0] = next; + stack_pointer += 1; + break; + } + + /* _INSTRUMENTED_FOR_ITER is not a viable micro-op for tier 2 */ + + case _ITER_CHECK_LIST: { + break; + } + + /* _ITER_JUMP_LIST is not a viable micro-op for tier 2 */ + + case _GUARD_NOT_EXHAUSTED_LIST: { + break; + } + + case _ITER_NEXT_LIST: { + _Py_UOpsSymType *next; + next = sym_new_unknown(ctx); + if (next == NULL) goto out_of_space; + stack_pointer[0] = next; + stack_pointer += 1; + break; + } + + case _ITER_CHECK_TUPLE: { + break; + } + + /* _ITER_JUMP_TUPLE is not a viable micro-op for tier 2 */ + + case _GUARD_NOT_EXHAUSTED_TUPLE: { + 
break; + } + + case _ITER_NEXT_TUPLE: { + _Py_UOpsSymType *next; + next = sym_new_unknown(ctx); + if (next == NULL) goto out_of_space; + stack_pointer[0] = next; + stack_pointer += 1; + break; + } + + case _ITER_CHECK_RANGE: { + break; + } + + /* _ITER_JUMP_RANGE is not a viable micro-op for tier 2 */ + + case _GUARD_NOT_EXHAUSTED_RANGE: { + break; + } + + case _ITER_NEXT_RANGE: { + _Py_UOpsSymType *iter; + _Py_UOpsSymType *next; + iter = stack_pointer[-1]; + next = sym_new_known_type(ctx, &PyLong_Type); + if (next == NULL) { + goto out_of_space; + } + (void)iter; + stack_pointer[0] = next; + stack_pointer += 1; + break; + } + + /* _FOR_ITER_GEN is not a viable micro-op for tier 2 */ + + case _BEFORE_ASYNC_WITH: { + _Py_UOpsSymType *exit; + _Py_UOpsSymType *res; + exit = sym_new_unknown(ctx); + if (exit == NULL) goto out_of_space; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = exit; + stack_pointer[0] = res; + stack_pointer += 1; + break; + } + + case _BEFORE_WITH: { + _Py_UOpsSymType *exit; + _Py_UOpsSymType *res; + exit = sym_new_unknown(ctx); + if (exit == NULL) goto out_of_space; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = exit; + stack_pointer[0] = res; + stack_pointer += 1; + break; + } + + case _WITH_EXCEPT_START: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[0] = res; + stack_pointer += 1; + break; + } + + case _PUSH_EXC_INFO: { + _Py_UOpsSymType *prev_exc; + _Py_UOpsSymType *new_exc; + prev_exc = sym_new_unknown(ctx); + if (prev_exc == NULL) goto out_of_space; + new_exc = sym_new_unknown(ctx); + if (new_exc == NULL) goto out_of_space; + stack_pointer[-1] = prev_exc; + stack_pointer[0] = new_exc; + stack_pointer += 1; + break; + } + + case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT: { + break; + } + + case _GUARD_KEYS_VERSION: { + break; + } + + case _LOAD_ATTR_METHOD_WITH_VALUES: { + _Py_UOpsSymType *attr; + _Py_UOpsSymType *self = NULL; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + self = sym_new_unknown(ctx); + if (self == NULL) goto out_of_space; + stack_pointer[-1] = attr; + if (1) stack_pointer[0] = self; + stack_pointer += ((1) ? 1 : 0); + break; + } + + case _LOAD_ATTR_METHOD_NO_DICT: { + _Py_UOpsSymType *attr; + _Py_UOpsSymType *self = NULL; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + self = sym_new_unknown(ctx); + if (self == NULL) goto out_of_space; + stack_pointer[-1] = attr; + if (1) stack_pointer[0] = self; + stack_pointer += ((1) ? 1 : 0); + break; + } + + case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: { + _Py_UOpsSymType *attr; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + stack_pointer[-1] = attr; + stack_pointer += ((0) ? 1 : 0); + break; + } + + case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: { + _Py_UOpsSymType *attr; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + stack_pointer[-1] = attr; + stack_pointer += ((0) ? 1 : 0); + break; + } + + case _CHECK_ATTR_METHOD_LAZY_DICT: { + break; + } + + case _LOAD_ATTR_METHOD_LAZY_DICT: { + _Py_UOpsSymType *attr; + _Py_UOpsSymType *self = NULL; + attr = sym_new_unknown(ctx); + if (attr == NULL) goto out_of_space; + self = sym_new_unknown(ctx); + if (self == NULL) goto out_of_space; + stack_pointer[-1] = attr; + if (1) stack_pointer[0] = self; + stack_pointer += ((1) ? 
1 : 0); + break; + } + + /* _INSTRUMENTED_CALL is not a viable micro-op for tier 2 */ + + /* _CALL is not a viable micro-op for tier 2 */ + + case _CHECK_CALL_BOUND_METHOD_EXACT_ARGS: { + _Py_UOpsSymType *null; + _Py_UOpsSymType *callable; + null = stack_pointer[-1 - oparg]; + callable = stack_pointer[-2 - oparg]; + sym_set_null(null); + sym_set_type(callable, &PyMethod_Type); + break; + } + + case _INIT_CALL_BOUND_METHOD_EXACT_ARGS: { + _Py_UOpsSymType *func; + _Py_UOpsSymType *self; + func = sym_new_unknown(ctx); + if (func == NULL) goto out_of_space; + self = sym_new_unknown(ctx); + if (self == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = func; + stack_pointer[-1 - oparg] = self; + break; + } + + case _CHECK_PEP_523: { + break; + } + + case _CHECK_FUNCTION_EXACT_ARGS: { + _Py_UOpsSymType *self_or_null; + _Py_UOpsSymType *callable; + self_or_null = stack_pointer[-1 - oparg]; + callable = stack_pointer[-2 - oparg]; + uint32_t func_version = (uint32_t)this_instr->operand; + sym_set_type(callable, &PyFunction_Type); + (void)self_or_null; + (void)func_version; + break; + } + + case _CHECK_STACK_SPACE: { + break; + } + + case _INIT_CALL_PY_EXACT_ARGS: { + _Py_UOpsSymType **args; + _Py_UOpsSymType *self_or_null; + _Py_UOpsSymType *callable; + _Py_UOpsAbstractFrame *new_frame; + args = &stack_pointer[-oparg]; + self_or_null = stack_pointer[-1 - oparg]; + callable = stack_pointer[-2 - oparg]; + int argcount = oparg; + (void)callable; + PyFunctionObject *func = (PyFunctionObject *)(this_instr + 2)->operand; + if (func == NULL) { + goto error; + } + PyCodeObject *co = (PyCodeObject *)func->func_code; + assert(self_or_null != NULL); + assert(args != NULL); + if (sym_is_not_null(self_or_null)) { + // Bound method fiddling, same as _INIT_CALL_PY_EXACT_ARGS in VM + args--; + argcount++; + } + _Py_UOpsSymType **localsplus_start = ctx->n_consumed; + int n_locals_already_filled = 0; + // Can determine statically, so we interleave the new locals + // and make the current stack the new locals. + // This also sets up for true call inlining. + if (sym_is_known(self_or_null)) { + localsplus_start = args; + n_locals_already_filled = argcount; + } + new_frame = ctx_frame_new(ctx, co, localsplus_start, n_locals_already_filled, 0); + if (new_frame == NULL){ + goto out_of_space; + } + stack_pointer[-2 - oparg] = (_Py_UOpsSymType *)new_frame; + stack_pointer += -1 - oparg; + break; + } + + case _PUSH_FRAME: { + _Py_UOpsAbstractFrame *new_frame; + new_frame = (_Py_UOpsAbstractFrame *)stack_pointer[-1]; + stack_pointer += -1; + ctx->frame->stack_pointer = stack_pointer; + ctx->frame = new_frame; + ctx->curr_frame_depth++; + stack_pointer = new_frame->stack_pointer; + stack_pointer += ((0) ? 
1 : 0); + break; + } + + /* _CALL_PY_WITH_DEFAULTS is not a viable micro-op for tier 2 */ + + case _CALL_TYPE_1: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_STR_1: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_TUPLE_1: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + /* _CALL_ALLOC_AND_ENTER_INIT is not a viable micro-op for tier 2 */ + + case _EXIT_INIT_CHECK: { + stack_pointer += -1; + break; + } + + case _CALL_BUILTIN_CLASS: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_BUILTIN_O: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_BUILTIN_FAST: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_BUILTIN_FAST_WITH_KEYWORDS: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_LEN: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_ISINSTANCE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_METHOD_DESCRIPTOR_O: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_METHOD_DESCRIPTOR_NOARGS: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + case _CALL_METHOD_DESCRIPTOR_FAST: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2 - oparg] = res; + stack_pointer += -1 - oparg; + break; + } + + /* _INSTRUMENTED_CALL_KW is not a viable micro-op for tier 2 */ + + /* _CALL_KW is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_CALL_FUNCTION_EX is not a viable micro-op for tier 2 */ + + /* _CALL_FUNCTION_EX is not a viable micro-op for tier 2 */ + + case _MAKE_FUNCTION: { + _Py_UOpsSymType *func; + func = sym_new_unknown(ctx); + if (func == NULL) goto out_of_space; + stack_pointer[-1] = func; + break; + } + + case _SET_FUNCTION_ATTRIBUTE: { + _Py_UOpsSymType *func; + func = sym_new_unknown(ctx); + if (func == NULL) goto out_of_space; + stack_pointer[-2] = func; + stack_pointer += -1; + break; + } + + case _BUILD_SLICE: { + _Py_UOpsSymType 
*slice; + slice = sym_new_unknown(ctx); + if (slice == NULL) goto out_of_space; + stack_pointer[-2 - ((oparg == 3) ? 1 : 0)] = slice; + stack_pointer += -1 - ((oparg == 3) ? 1 : 0); + break; + } + + case _CONVERT_VALUE: { + _Py_UOpsSymType *result; + result = sym_new_unknown(ctx); + if (result == NULL) goto out_of_space; + stack_pointer[-1] = result; + break; + } + + case _FORMAT_SIMPLE: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-1] = res; + break; + } + + case _FORMAT_WITH_SPEC: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _COPY: { + _Py_UOpsSymType *bottom; + _Py_UOpsSymType *top; + bottom = stack_pointer[-1 - (oparg-1)]; + assert(oparg > 0); + top = bottom; + stack_pointer[0] = top; + stack_pointer += 1; + break; + } + + case _BINARY_OP: { + _Py_UOpsSymType *res; + res = sym_new_unknown(ctx); + if (res == NULL) goto out_of_space; + stack_pointer[-2] = res; + stack_pointer += -1; + break; + } + + case _SWAP: { + _Py_UOpsSymType *top; + _Py_UOpsSymType *bottom; + top = stack_pointer[-1]; + bottom = stack_pointer[-2 - (oparg-2)]; + stack_pointer[-2 - (oparg-2)] = top; + stack_pointer[-1] = bottom; + break; + } + + /* _INSTRUMENTED_INSTRUCTION is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_JUMP_FORWARD is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_JUMP_BACKWARD is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_POP_JUMP_IF_TRUE is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_POP_JUMP_IF_FALSE is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_POP_JUMP_IF_NONE is not a viable micro-op for tier 2 */ + + /* _INSTRUMENTED_POP_JUMP_IF_NOT_NONE is not a viable micro-op for tier 2 */ + + case _GUARD_IS_TRUE_POP: { + stack_pointer += -1; + break; + } + + case _GUARD_IS_FALSE_POP: { + stack_pointer += -1; + break; + } + + case _GUARD_IS_NONE_POP: { + stack_pointer += -1; + break; + } + + case _GUARD_IS_NOT_NONE_POP: { + stack_pointer += -1; + break; + } + + case _JUMP_TO_TOP: { + break; + } + + case _SET_IP: { + break; + } + + case _SAVE_RETURN_OFFSET: { + break; + } + + case _EXIT_TRACE: { + break; + } + + case _CHECK_VALIDITY: { + break; + } + + case _LOAD_CONST_INLINE: { + _Py_UOpsSymType *value; + PyObject *ptr = (PyObject *)this_instr->operand; + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _LOAD_CONST_INLINE_BORROW: { + _Py_UOpsSymType *value; + PyObject *ptr = (PyObject *)this_instr->operand; + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + stack_pointer[0] = value; + stack_pointer += 1; + break; + } + + case _LOAD_CONST_INLINE_WITH_NULL: { + _Py_UOpsSymType *value; + _Py_UOpsSymType *null; + PyObject *ptr = (PyObject *)this_instr->operand; + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + null = sym_new_null(ctx); + if (null == NULL) { + goto out_of_space; + } + stack_pointer[0] = value; + stack_pointer[1] = null; + stack_pointer += 2; + break; + } + + case _LOAD_CONST_INLINE_BORROW_WITH_NULL: { + _Py_UOpsSymType *value; + _Py_UOpsSymType *null; + PyObject *ptr = (PyObject *)this_instr->operand; + value = sym_new_const(ctx, ptr); + if (value == NULL) { + goto out_of_space; + } + null = sym_new_null(ctx); + if (null == NULL) { + goto out_of_space; + } + stack_pointer[0] = 
value; + stack_pointer[1] = null; + stack_pointer += 2; + break; + } + + case _CHECK_GLOBALS: { + break; + } + + case _CHECK_BUILTINS: { + break; + } + + case _INTERNAL_INCREMENT_OPT_COUNTER: { + stack_pointer += -1; + break; + } + + case _CHECK_VALIDITY_AND_SET_IP: { + break; + } + diff --git a/Tools/c-analyzer/cpython/_parser.py b/Tools/c-analyzer/cpython/_parser.py index 444063d2148934..be89a26058e8e8 100644 --- a/Tools/c-analyzer/cpython/_parser.py +++ b/Tools/c-analyzer/cpython/_parser.py @@ -83,9 +83,11 @@ def clean_lines(text): Python/frozen_modules/*.h Python/generated_cases.c.h Python/executor_cases.c.h +Python/tier2_redundancy_eliminator_cases.c.h # not actually source Python/bytecodes.c +Python/tier2_redundancy_eliminator_bytecodes.c # mimalloc Objects/mimalloc/*.c diff --git a/Tools/c-analyzer/cpython/ignored.tsv b/Tools/c-analyzer/cpython/ignored.tsv index c75aff8c1723c1..14bcd85b9eae59 100644 --- a/Tools/c-analyzer/cpython/ignored.tsv +++ b/Tools/c-analyzer/cpython/ignored.tsv @@ -734,6 +734,6 @@ Modules/expat/xmlrole.c - error - ## other Modules/_io/_iomodule.c - _PyIO_Module - Modules/_sqlite/module.c - _sqlite3module - -Python/optimizer_analysis.c - _Py_PartitionRootNode_Type - +Python/optimizer_analysis.c - _Py_UOpsAbstractFrame_Type - Python/optimizer_analysis.c - _Py_UOpsAbstractInterpContext_Type - Modules/clinic/md5module.c.h _md5_md5 _keywords - diff --git a/Tools/cases_generator/README.md b/Tools/cases_generator/README.md index 7fec8a882336cd..d35a868b42ea9e 100644 --- a/Tools/cases_generator/README.md +++ b/Tools/cases_generator/README.md @@ -13,6 +13,9 @@ What's currently here: - `parser.py` helper for interactions with `parsing.py` - `tierN_generator.py`: a couple of driver scripts to read `Python/bytecodes.c` and write `Python/generated_cases.c.h` (and several other files) +- `tier2_abstract_generator.py`: reads `Python/bytecodes.c` and + `Python/tier2_redundancy_eliminator_bytecodes.c` and writes + `Python/tier2_redundancy_eliminator_cases.c.h` - `stack.py`: code to handle generalized stack effects - `cwriter.py`: code which understands tokens and how to format C code; main class: `CWriter` diff --git a/Tools/cases_generator/analyzer.py b/Tools/cases_generator/analyzer.py index b80fa66e2a159a..3497b7fcdf35d3 100644 --- a/Tools/cases_generator/analyzer.py +++ b/Tools/cases_generator/analyzer.py @@ -24,7 +24,6 @@ class Properties: pure: bool passthrough: bool - guard: bool def dump(self, indent: str) -> None: print(indent, end="") @@ -51,7 +50,6 @@ def from_list(properties: list["Properties"]) -> "Properties": has_free=any(p.has_free for p in properties), pure=all(p.pure for p in properties), passthrough=all(p.passthrough for p in properties), - guard=all(p.guard for p in properties), ) @@ -73,7 +71,6 @@ def from_list(properties: list["Properties"]) -> "Properties": has_free=False, pure=False, passthrough=False, - guard=False, ) @@ -273,7 +270,7 @@ def override_error( def convert_stack_item(item: parser.StackEffect) -> StackItem: return StackItem( - item.name, item.type, item.cond, (item.size or "1"), type_prop=item.type_prop + item.name, item.type, item.cond, (item.size or "1") ) @@ -473,7 +470,6 @@ def compute_properties(op: parser.InstDef) -> Properties: has_free=has_free, pure="pure" in op.annotations, passthrough=passthrough, - guard=passthrough and deopts, ) diff --git a/Tools/cases_generator/interpreter_definition.md b/Tools/cases_generator/interpreter_definition.md index e87aff43762b11..9b5733562f77b4 100644 --- 
a/Tools/cases_generator/interpreter_definition.md +++ b/Tools/cases_generator/interpreter_definition.md @@ -109,10 +109,7 @@ and a piece of C code describing its semantics:: NAME [":" type] [ "if" "(" C-expression ")" ] type: - NAME ["*"] | type_prop - - type_prop: - "&" "(" NAME ["+" NAME] ")" + NAME ["*"] stream: NAME "/" size @@ -142,26 +139,7 @@ The following definitions may occur: The optional `type` in an `object` is the C type. It defaults to `PyObject *`. The objects before the "--" are the objects on top of the stack at the start of the instruction. Those after the "--" are the objects on top of the stack at the -end of the instruction. When prefixed by a `&`, the `type` production rule follows the -`type_prop` production rule. This indicates the type of the value is of that specific type -after the operation. In this case, the type may also contain 64-bit refinement information -that is fetched from a previously defined operand in the instruction header, such as -a type version tag. This follows the format `type + refinement`. The list of possible types -and their refinements are below. They obey the following predicates: - - -* `PYLONG_TYPE`: `Py_TYPE(val) == &PyLong_Type` -* `PYFLOAT_TYPE`: `Py_TYPE(val) == &PyFloat_Type` -* `PYUNICODE_TYPE`: `Py_TYPE(val) == &PYUNICODE_TYPE` -* `NULL_TYPE`: `val == NULL` -* `GUARD_TYPE_VERSION_TYPE`: `type->tp_version_tag == auxillary` -* `GUARD_DORV_VALUES_TYPE`: `_PyDictOrValues_IsValues(obj)` -* `GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_TYPE`: - `_PyDictOrValues_IsValues(obj) || _PyObject_MakeInstanceAttributesFromDict(obj, dorv)` -* `GUARD_KEYS_VERSION_TYPE`: `owner_heap_type->ht_cached_keys->dk_version == auxillary` -* `PYMETHOD_TYPE`: `Py_TYPE(val) == &PyMethod_Type` -* `PYFUNCTION_TYPE_VERSION_TYPE`: - `PyFunction_Check(callable) && func->func_version == auxillary && code->co_argcount == oparg + (self_or_null != NULL)` +end of the instruction. 
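For instance, under this simplified grammar the stack-effect headers added elsewhere in this patch (in `Python/tier2_redundancy_eliminator_bytecodes.c`) cover the cache (`index/1`), conditional (`null if (oparg & 1)`), array (`args[oparg]`), and typed-output (`new_frame: _Py_UOpsAbstractFrame *`) forms that remain after the `type_prop` syntax is removed:

    op(_LOAD_ATTR_INSTANCE_VALUE, (index/1, owner -- attr, null if (oparg & 1)))
    op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame: _Py_UOpsAbstractFrame *))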
An `inst` without `stack_effect` is a transitional form to allow the original C code diff --git a/Tools/cases_generator/parsing.py b/Tools/cases_generator/parsing.py index 307919cb37ce1e..a8961f28babea1 100644 --- a/Tools/cases_generator/parsing.py +++ b/Tools/cases_generator/parsing.py @@ -75,11 +75,6 @@ class StackEffect(Node): size: str = "" # Optional `[size]` # Note: size cannot be combined with type or cond - # Optional `(type, refinement)` - type_prop: None | tuple[str, None | str] = field( - default_factory=lambda: None, init=True, compare=False, hash=False - ) - def __repr__(self) -> str: items = [self.name, self.type, self.cond, self.size] while items and items[-1] == "": @@ -260,25 +255,14 @@ def cache_effect(self) -> CacheEffect | None: @contextual def stack_effect(self) -> StackEffect | None: - # IDENTIFIER [':' [IDENTIFIER [TIMES]] ['&' '(' IDENTIFIER ['+' IDENTIFIER] ')']] ['if' '(' expression ')'] + # IDENTIFIER [':' IDENTIFIER [TIMES]] ['if' '(' expression ')'] # | IDENTIFIER '[' expression ']' if tkn := self.expect(lx.IDENTIFIER): type_text = "" - type_prop = None if self.expect(lx.COLON): - if i := self.expect(lx.IDENTIFIER): - type_text = i.text.strip() - if self.expect(lx.TIMES): - type_text += " *" - if self.expect(lx.AND): - consumed_bracket = self.expect(lx.LPAREN) is not None - type_prop_text = self.require(lx.IDENTIFIER).text.strip() - refinement = None - if self.expect(lx.PLUS): - refinement = self.require(lx.IDENTIFIER).text.strip() - type_prop = (type_prop_text, refinement) - if consumed_bracket: - self.require(lx.RPAREN) + type_text = self.require(lx.IDENTIFIER).text.strip() + if self.expect(lx.TIMES): + type_text += " *" cond_text = "" if self.expect(lx.IF): self.require(lx.LPAREN) @@ -295,7 +279,7 @@ def stack_effect(self) -> StackEffect | None: self.require(lx.RBRACKET) type_text = "PyObject **" size_text = size.text.strip() - return StackEffect(tkn.text, type_text, cond_text, size_text, type_prop) + return StackEffect(tkn.text, type_text, cond_text, size_text) return None @contextual diff --git a/Tools/cases_generator/stack.py b/Tools/cases_generator/stack.py index f62ece43c1be7f..97a301142d59c7 100644 --- a/Tools/cases_generator/stack.py +++ b/Tools/cases_generator/stack.py @@ -168,11 +168,11 @@ def push(self, var: StackItem) -> str: self.top_offset.push(var) return "" - def flush(self, out: CWriter) -> None: + def flush(self, out: CWriter, cast_type: str = "PyObject *") -> None: out.start_line() for var in self.variables: if not var.peek: - cast = "(PyObject *)" if var.type else "" + cast = f"({cast_type})" if var.type else "" if var.name not in UNUSED and not var.is_array(): if var.condition: out.emit(f"if ({var.condition}) ") diff --git a/Tools/cases_generator/tier2_abstract_generator.py b/Tools/cases_generator/tier2_abstract_generator.py new file mode 100644 index 00000000000000..cc29b1660d26ed --- /dev/null +++ b/Tools/cases_generator/tier2_abstract_generator.py @@ -0,0 +1,235 @@ +"""Generate the cases for the tier 2 redundancy eliminator/abstract interpreter. +Reads the instruction definitions from bytecodes.c. and tier2_redundancy_eliminator.bytecodes.c +Writes the cases to tier2_redundancy_eliminator_cases.c.h, which is #included in Python/optimizer_analysis.c. 
+""" + +import argparse +import os.path +import sys + +from analyzer import ( + Analysis, + Instruction, + Uop, + Part, + analyze_files, + Skip, + StackItem, + analysis_error, +) +from generators_common import ( + DEFAULT_INPUT, + ROOT, + write_header, + emit_tokens, + emit_to, + replace_sync_sp, +) +from cwriter import CWriter +from typing import TextIO, Iterator +from lexer import Token +from stack import StackOffset, Stack, SizeMismatch, UNUSED + +DEFAULT_OUTPUT = ROOT / "Python/tier2_redundancy_eliminator_cases.c.h" +DEFAULT_ABSTRACT_INPUT = ROOT / "Python/tier2_redundancy_eliminator_bytecodes.c" + + +def validate_uop(override: Uop, uop: Uop) -> None: + # To do + pass + + +def type_name(var: StackItem) -> str: + if var.is_array(): + return f"_Py_UOpsSymType **" + if var.type: + return var.type + return f"_Py_UOpsSymType *" + + +def declare_variables(uop: Uop, out: CWriter, skip_inputs: bool) -> None: + variables = {"unused"} + if not skip_inputs: + for var in reversed(uop.stack.inputs): + if var.name not in variables: + variables.add(var.name) + if var.condition: + out.emit(f"{type_name(var)}{var.name} = NULL;\n") + else: + out.emit(f"{type_name(var)}{var.name};\n") + for var in uop.stack.outputs: + if var.peek: + continue + if var.name not in variables: + variables.add(var.name) + if var.condition: + out.emit(f"{type_name(var)}{var.name} = NULL;\n") + else: + out.emit(f"{type_name(var)}{var.name};\n") + + +def decref_inputs( + out: CWriter, + tkn: Token, + tkn_iter: Iterator[Token], + uop: Uop, + stack: Stack, + inst: Instruction | None, +) -> None: + next(tkn_iter) + next(tkn_iter) + next(tkn_iter) + out.emit_at("", tkn) + + +def emit_default(out: CWriter, uop: Uop) -> None: + for i, var in enumerate(uop.stack.outputs): + if var.name != "unused" and not var.peek: + if var.is_array(): + out.emit(f"for (int _i = {var.size}; --_i >= 0;) {{\n") + out.emit(f"{var.name}[_i] = sym_new_unknown(ctx);\n") + out.emit(f"if ({var.name}[_i] == NULL) goto out_of_space;\n") + out.emit("}\n") + elif var.name == "null": + out.emit(f"{var.name} = sym_new_null(ctx);\n") + out.emit(f"if ({var.name} == NULL) goto out_of_space;\n") + else: + out.emit(f"{var.name} = sym_new_unknown(ctx);\n") + out.emit(f"if ({var.name} == NULL) goto out_of_space;\n") + + +def write_uop( + override: Uop | None, + uop: Uop, + out: CWriter, + stack: Stack, + debug: bool, + skip_inputs: bool, +) -> None: + try: + prototype = override if override else uop + is_override = override is not None + out.start_line() + for var in reversed(prototype.stack.inputs): + res = stack.pop(var) + if not skip_inputs: + out.emit(res) + if not prototype.properties.stores_sp: + for i, var in enumerate(prototype.stack.outputs): + res = stack.push(var) + if not var.peek or is_override: + out.emit(res) + if debug: + args = [] + for var in prototype.stack.inputs: + if not var.peek or is_override: + args.append(var.name) + out.emit(f'DEBUG_PRINTF({", ".join(args)});\n') + if override: + for cache in uop.caches: + if cache.name != "unused": + if cache.size == 4: + type = cast = "PyObject *" + else: + type = f"uint{cache.size*16}_t " + cast = f"uint{cache.size*16}_t" + out.emit(f"{type}{cache.name} = ({cast})this_instr->operand;\n") + if override: + replacement_funcs = { + "DECREF_INPUTS": decref_inputs, + "SYNC_SP": replace_sync_sp, + } + emit_tokens(out, override, stack, None, replacement_funcs) + else: + emit_default(out, uop) + + if prototype.properties.stores_sp: + for i, var in enumerate(prototype.stack.outputs): + if not var.peek or is_override: + 
out.emit(stack.push(var)) + out.start_line() + stack.flush(out, cast_type="_Py_UOpsSymType *") + except SizeMismatch as ex: + raise analysis_error(ex.args[0], uop.body[0]) + + +SKIPS = ("_EXTENDED_ARG",) + + +def generate_abstract_interpreter( + filenames: list[str], + abstract: Analysis, + base: Analysis, + outfile: TextIO, + debug: bool, +) -> None: + write_header(__file__, filenames, outfile) + out = CWriter(outfile, 2, False) + out.emit("\n") + base_uop_names = set([uop.name for uop in base.uops.values()]) + for abstract_uop_name in abstract.uops: + assert abstract_uop_name in base_uop_names,\ + f"All abstract uops should override base uops, but {abstract_uop_name} is not." + + for uop in base.uops.values(): + override: Uop | None = None + if uop.name in abstract.uops: + override = abstract.uops[uop.name] + validate_uop(override, uop) + if uop.properties.tier_one_only: + continue + if uop.is_super(): + continue + if not uop.is_viable(): + out.emit(f"/* {uop.name} is not a viable micro-op for tier 2 */\n\n") + continue + out.emit(f"case {uop.name}: {{\n") + if override: + declare_variables(override, out, skip_inputs=False) + else: + declare_variables(uop, out, skip_inputs=True) + stack = Stack() + write_uop(override, uop, out, stack, debug, skip_inputs=(override is None)) + out.start_line() + out.emit("break;\n") + out.emit("}") + out.emit("\n\n") + + +def generate_tier2_abstract_from_files( + filenames: list[str], outfilename: str, debug: bool=False +) -> None: + assert len(filenames) == 2, "Need a base file and an abstract cases file." + base = analyze_files([filenames[0]]) + abstract = analyze_files([filenames[1]]) + with open(outfilename, "w") as outfile: + generate_abstract_interpreter(filenames, abstract, base, outfile, debug) + + +arg_parser = argparse.ArgumentParser( + description="Generate the code for the tier 2 interpreter.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, +) + +arg_parser.add_argument( + "-o", "--output", type=str, help="Generated code", default=DEFAULT_OUTPUT +) + + +arg_parser.add_argument("input", nargs=1, help="Abstract interpreter definition file") + +arg_parser.add_argument( + "base", nargs=argparse.REMAINDER, help="The base instruction definition file(s)" +) + +arg_parser.add_argument("-d", "--debug", help="Insert debug calls", action="store_true") + +if __name__ == "__main__": + args = arg_parser.parse_args() + if len(args.base) == 0: + args.input.append(DEFAULT_INPUT) + args.input.append(DEFAULT_ABSTRACT_INPUT) + abstract = analyze_files(args.input) + base = analyze_files(args.base) + with open(args.output, "w") as outfile: + generate_abstract_interpreter(args.input, abstract, base, outfile, args.debug) diff --git a/config.sub b/config.sub index d74fb6deac942a..2c6a07ab3c34ea 100755 --- a/config.sub +++ b/config.sub @@ -1,14 +1,14 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2021 Free Software Foundation, Inc. +# Copyright 1992-2024 Free Software Foundation, Inc. # shellcheck disable=SC2006,SC2268 # see below for rationale -timestamp='2021-08-14' +timestamp='2024-01-01' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or +# the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but @@ -76,13 +76,13 @@ Report bugs and patches to ." 
version="\ GNU config.sub ($timestamp) -Copyright 1992-2021 Free Software Foundation, Inc. +Copyright 1992-2024 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" -Try \`$me --help' for more information." +Try '$me --help' for more information." # Parse command line while test $# -gt 0 ; do @@ -130,7 +130,7 @@ IFS=$saved_IFS # Separate into logical components for further validation case $1 in *-*-*-*-*) - echo Invalid configuration \`"$1"\': more than four components >&2 + echo "Invalid configuration '$1': more than four components" >&2 exit 1 ;; *-*-*-*) @@ -145,7 +145,8 @@ case $1 in nto-qnx* | linux-* | uclinux-uclibc* \ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ - | storm-chaos* | os2-emx* | rtmk-nova*) + | storm-chaos* | os2-emx* | rtmk-nova* | managarm-* \ + | windows-* ) basic_machine=$field1 basic_os=$maybe_os ;; @@ -943,7 +944,7 @@ $basic_machine EOF IFS=$saved_IFS ;; - # We use `pc' rather than `unknown' + # We use 'pc' rather than 'unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) @@ -1020,6 +1021,11 @@ case $cpu-$vendor in ;; # Here we normalize CPU types with a missing or matching vendor + armh-unknown | armh-alt) + cpu=armv7l + vendor=alt + basic_os=${basic_os:-linux-gnueabihf} + ;; dpx20-unknown | dpx20-bull) cpu=rs6000 vendor=bull @@ -1070,7 +1076,7 @@ case $cpu-$vendor in pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) cpu=i586 ;; - pentiumpro-* | p6-* | 6x86-* | athlon-* | athalon_*-*) + pentiumpro-* | p6-* | 6x86-* | athlon-* | athlon_*-*) cpu=i686 ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) @@ -1121,7 +1127,7 @@ case $cpu-$vendor in xscale-* | xscalee[bl]-*) cpu=`echo "$cpu" | sed 's/^xscale/arm/'` ;; - arm64-*) + arm64-* | aarch64le-*) cpu=aarch64 ;; @@ -1175,7 +1181,7 @@ case $cpu-$vendor in case $cpu in 1750a | 580 \ | a29k \ - | aarch64 | aarch64_be \ + | aarch64 | aarch64_be | aarch64c | arm64ec \ | abacus \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \ @@ -1194,50 +1200,29 @@ case $cpu-$vendor in | d10v | d30v | dlx | dsp16xx \ | e2k | elxsi | epiphany \ | f30[01] | f700 | fido | fr30 | frv | ft32 | fx80 \ + | javascript \ | h8300 | h8500 \ | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i*86 | i860 | i960 | ia16 | ia64 \ | ip2k | iq2000 \ | k1om \ + | kvx \ | le32 | le64 \ | lm32 \ - | loongarch32 | loongarch64 | loongarchx32 \ + | loongarch32 | loongarch64 \ | m32c | m32r | m32rle \ | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \ | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \ | m88110 | m88k | maxq | mb | mcore | mep | metag \ | microblaze | microblazeel \ - | mips | mipsbe | mipseb | mipsel | mipsle \ - | mips16 \ - | mips64 | mips64eb | mips64el \ - | mips64octeon | mips64octeonel \ - | mips64orion | mips64orionel \ - | mips64r5900 | mips64r5900el \ - | mips64vr | mips64vrel \ - | mips64vr4100 | mips64vr4100el \ - | mips64vr4300 | mips64vr4300el \ - | mips64vr5000 | mips64vr5000el \ - | mips64vr5900 | mips64vr5900el \ - | mipsisa32 | mipsisa32el \ - | mipsisa32r2 | mipsisa32r2el \ - | mipsisa32r3 | mipsisa32r3el \ - | mipsisa32r5 | mipsisa32r5el \ - | mipsisa32r6 | mipsisa32r6el \ - | mipsisa64 | mipsisa64el \ - | mipsisa64r2 | mipsisa64r2el 
\ - | mipsisa64r3 | mipsisa64r3el \ - | mipsisa64r5 | mipsisa64r5el \ - | mipsisa64r6 | mipsisa64r6el \ - | mipsisa64sb1 | mipsisa64sb1el \ - | mipsisa64sr71k | mipsisa64sr71kel \ - | mipsr5900 | mipsr5900el \ - | mipstx39 | mipstx39el \ + | mips* \ | mmix \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ + | nanomips* \ | nds32 | nds32le | nds32be \ | nfp \ | nios | nios2 | nios2eb | nios2el \ @@ -1269,6 +1254,7 @@ case $cpu-$vendor in | ubicom32 \ | v70 | v850 | v850e | v850e1 | v850es | v850e2 | v850e2v3 \ | vax \ + | vc4 \ | visium \ | w65 \ | wasm32 | wasm64 \ @@ -1280,7 +1266,7 @@ case $cpu-$vendor in ;; *) - echo Invalid configuration \`"$1"\': machine \`"$cpu-$vendor"\' not recognized 1>&2 + echo "Invalid configuration '$1': machine '$cpu-$vendor' not recognized" 1>&2 exit 1 ;; esac @@ -1301,11 +1287,12 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if test x$basic_os != x +if test x"$basic_os" != x then -# First recognize some ad-hoc caes, or perhaps split kernel-os, or else just +# First recognize some ad-hoc cases, or perhaps split kernel-os, or else just # set os. +obj= case $basic_os in gnu/linux*) kernel=linux @@ -1336,6 +1323,10 @@ EOF kernel=linux os=`echo "$basic_os" | sed -e 's|linux|gnu|'` ;; + managarm*) + kernel=managarm + os=`echo "$basic_os" | sed -e 's|managarm|mlibc|'` + ;; *) kernel= os=$basic_os @@ -1501,10 +1492,16 @@ case $os in os=eabi ;; *) - os=elf + os= + obj=elf ;; esac ;; + aout* | coff* | elf* | pe*) + # These are machine code file formats, not OSes + obj=$os + os= + ;; *) # No normalization, but not necessarily accepted, that comes below. ;; @@ -1523,12 +1520,15 @@ else # system, and we'll never get to this point. kernel= +obj= case $cpu-$vendor in score-*) - os=elf + os= + obj=elf ;; spu-*) - os=elf + os= + obj=elf ;; *-acorn) os=riscix1.2 @@ -1538,28 +1538,35 @@ case $cpu-$vendor in os=gnu ;; arm*-semi) - os=aout + os= + obj=aout ;; c4x-* | tic4x-*) - os=coff + os= + obj=coff ;; c8051-*) - os=elf + os= + obj=elf ;; clipper-intergraph) os=clix ;; hexagon-*) - os=elf + os= + obj=elf ;; tic54x-*) - os=coff + os= + obj=coff ;; tic55x-*) - os=coff + os= + obj=coff ;; tic6x-*) - os=coff + os= + obj=coff ;; # This must come before the *-dec entry. pdp10-*) @@ -1581,19 +1588,24 @@ case $cpu-$vendor in os=sunos3 ;; m68*-cisco) - os=aout + os= + obj=aout ;; mep-*) - os=elf + os= + obj=elf ;; mips*-cisco) - os=elf + os= + obj=elf ;; - mips*-*) - os=elf + mips*-*|nanomips*-*) + os= + obj=elf ;; or32-*) - os=coff + os= + obj=coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=sysv3 @@ -1602,7 +1614,8 @@ case $cpu-$vendor in os=sunos4.1.1 ;; pru-*) - os=elf + os= + obj=elf ;; *-be) os=beos @@ -1683,10 +1696,12 @@ case $cpu-$vendor in os=uxpv ;; *-rom68k) - os=coff + os= + obj=coff ;; *-*bug) - os=coff + os= + obj=coff ;; *-apple) os=macos @@ -1704,10 +1719,11 @@ esac fi -# Now, validate our (potentially fixed-up) OS. +# Now, validate our (potentially fixed-up) individual pieces (OS, OBJ). + case $os in # Sometimes we do "kernel-libc", so those need to count as OSes. - musl* | newlib* | relibc* | uclibc*) + llvm* | musl* | newlib* | relibc* | uclibc*) ;; # Likewise for "kernel-abi" eabi* | gnueabi*) @@ -1715,6 +1731,9 @@ case $os in # VxWorks passes extra cpu info in the 4th filed. simlinux | simwindows | spe) ;; + # See `case $cpu-$os` validation below + ghcjs) + ;; # Now accept the basic system types. # The portable systems comes first. # Each alternative MUST end in a * to match a version number. 
@@ -1723,7 +1742,7 @@ case $os in | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ | sym* | plan9* | psp* | sim* | xray* | os68k* | v88r* \ | hiux* | abug | nacl* | netware* | windows* \ - | os9* | macos* | osx* | ios* \ + | os9* | macos* | osx* | ios* | tvos* | watchos* \ | mpw* | magic* | mmixware* | mon960* | lnews* \ | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ | aos* | aros* | cloudabi* | sortix* | twizzler* \ @@ -1732,11 +1751,11 @@ case $os in | mirbsd* | netbsd* | dicos* | openedition* | ose* \ | bitrig* | openbsd* | secbsd* | solidbsd* | libertybsd* | os108* \ | ekkobsd* | freebsd* | riscix* | lynxos* | os400* \ - | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ - | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ + | bosx* | nextstep* | cxux* | oabi* \ + | ptx* | ecoff* | winnt* | domain* | vsta* \ | udi* | lites* | ieee* | go32* | aux* | hcos* \ | chorusrdb* | cegcc* | glidix* | serenity* \ - | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ + | cygwin* | msys* | moss* | proelf* | rtems* \ | midipix* | mingw32* | mingw64* | mint* \ | uxpv* | beos* | mpeix* | udk* | moxiebox* \ | interix* | uwin* | mks* | rhapsody* | darwin* \ @@ -1748,49 +1767,117 @@ case $os in | skyos* | haiku* | rdos* | toppers* | drops* | es* \ | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \ - | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx* | zephyr*) + | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx* | zephyr* \ + | fiwix* | mlibc* | cos* | mbr* | ironclad* ) ;; # This one is extra strict with allowed versions sco3.2v2 | sco3.2v[4-9]* | sco5v6*) # Don't forget version if it is 3.2v4 or newer. ;; + # This refers to builds using the UEFI calling convention + # (which depends on the architecture) and PE file format. + # Note that this is both a different calling convention and + # different file format than that of GNU-EFI + # (x86_64-w64-mingw32). + uefi) + ;; none) ;; + kernel* | msvc* ) + # Restricted further below + ;; + '') + if test x"$obj" = x + then + echo "Invalid configuration '$1': Blank OS only allowed with explicit machine code file format" 1>&2 + fi + ;; *) - echo Invalid configuration \`"$1"\': OS \`"$os"\' not recognized 1>&2 + echo "Invalid configuration '$1': OS '$os' not recognized" 1>&2 + exit 1 + ;; +esac + +case $obj in + aout* | coff* | elf* | pe*) + ;; + '') + # empty is fine + ;; + *) + echo "Invalid configuration '$1': Machine code format '$obj' not recognized" 1>&2 + exit 1 + ;; +esac + +# Here we handle the constraint that a (synthetic) cpu and os are +# valid only in combination with each other and nowhere else. +case $cpu-$os in + # The "javascript-unknown-ghcjs" triple is used by GHC; we + # accept it here in order to tolerate that, but reject any + # variations. + javascript-ghcjs) + ;; + javascript-* | *-ghcjs) + echo "Invalid configuration '$1': cpu '$cpu' is not valid with os '$os$obj'" 1>&2 exit 1 ;; esac # As a final step for OS-related things, validate the OS-kernel combination # (given a valid OS), if there is a kernel. 
-case $kernel-$os in - linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* \ - | linux-musl* | linux-relibc* | linux-uclibc* ) +case $kernel-$os-$obj in + linux-gnu*- | linux-android*- | linux-dietlibc*- | linux-llvm*- \ + | linux-mlibc*- | linux-musl*- | linux-newlib*- \ + | linux-relibc*- | linux-uclibc*- ) + ;; + uclinux-uclibc*- ) + ;; + managarm-mlibc*- | managarm-kernel*- ) ;; - uclinux-uclibc* ) + windows*-msvc*-) ;; - -dietlibc* | -newlib* | -musl* | -relibc* | -uclibc* ) + -dietlibc*- | -llvm*- | -mlibc*- | -musl*- | -newlib*- | -relibc*- \ + | -uclibc*- ) # These are just libc implementations, not actual OSes, and thus # require a kernel. - echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 + echo "Invalid configuration '$1': libc '$os' needs explicit kernel." 1>&2 exit 1 ;; - kfreebsd*-gnu* | kopensolaris*-gnu*) + -kernel*- ) + echo "Invalid configuration '$1': '$os' needs explicit kernel." 1>&2 + exit 1 ;; - vxworks-simlinux | vxworks-simwindows | vxworks-spe) + *-kernel*- ) + echo "Invalid configuration '$1': '$kernel' does not support '$os'." 1>&2 + exit 1 ;; - nto-qnx*) + *-msvc*- ) + echo "Invalid configuration '$1': '$os' needs 'windows'." 1>&2 + exit 1 ;; - os2-emx) + kfreebsd*-gnu*- | kopensolaris*-gnu*-) + ;; + vxworks-simlinux- | vxworks-simwindows- | vxworks-spe-) + ;; + nto-qnx*-) + ;; + os2-emx-) ;; - *-eabi* | *-gnueabi*) + *-eabi*- | *-gnueabi*-) ;; - -*) + none--*) + # None (no kernel, i.e. freestanding / bare metal), + # can be paired with an machine code file format + ;; + -*-) # Blank kernel with real OS is always fine. ;; - *-*) - echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 + --*) + # Blank kernel and OS with real machine code file format is always fine. + ;; + *-*-*) + echo "Invalid configuration '$1': Kernel '$kernel' not known to work with OS '$os'." 1>&2 exit 1 ;; esac @@ -1873,7 +1960,7 @@ case $vendor in ;; esac -echo "$cpu-$vendor-${kernel:+$kernel-}$os" +echo "$cpu-$vendor${kernel:+-$kernel}${os:+-$os}${obj:+-$obj}" exit # Local variables: diff --git a/configure b/configure index 705a778cafced3..ba2d49df7c65fe 100755 --- a/configure +++ b/configure @@ -972,7 +972,7 @@ HAS_XCRUN EXPORT_MACOSX_DEPLOYMENT_TARGET CONFIGURE_MACOSX_DEPLOYMENT_TARGET _PYTHON_HOST_PLATFORM -MACHDEP +INSTALLTARGETS FRAMEWORKINSTALLAPPSPREFIX FRAMEWORKUNIXTOOLSPREFIX FRAMEWORKPYTHONW @@ -980,6 +980,8 @@ FRAMEWORKALTINSTALLLAST FRAMEWORKALTINSTALLFIRST FRAMEWORKINSTALLLAST FRAMEWORKINSTALLFIRST +RESSRCDIR +PYTHONFRAMEWORKINSTALLNAMEPREFIX PYTHONFRAMEWORKINSTALLDIR PYTHONFRAMEWORKPREFIX PYTHONFRAMEWORKDIR @@ -989,6 +991,7 @@ LIPO_INTEL64_FLAGS LIPO_32BIT_FLAGS ARCH_RUN_32BIT UNIVERSALSDK +MACHDEP PKG_CONFIG_LIBDIR PKG_CONFIG_PATH PKG_CONFIG @@ -4004,6 +4007,77 @@ if test "$with_pkg_config" = yes -a -z "$PKG_CONFIG"; then as_fn_error $? "pkg-config is required" "$LINENO" 5] fi +# Set name for machine-dependent library files + +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking MACHDEP" >&5 +printf %s "checking MACHDEP... " >&6; } +if test -z "$MACHDEP" +then + # avoid using uname for cross builds + if test "$cross_compiling" = yes; then + # ac_sys_system and ac_sys_release are used for setting + # a lot of different things including 'define_xopen_source' + # in the case statement below. 
+ case "$host" in + *-*-linux-android*) + ac_sys_system=Linux-android + ;; + *-*-linux*) + ac_sys_system=Linux + ;; + *-*-cygwin*) + ac_sys_system=Cygwin + ;; + *-*-vxworks*) + ac_sys_system=VxWorks + ;; + *-*-emscripten) + ac_sys_system=Emscripten + ;; + *-*-wasi) + ac_sys_system=WASI + ;; + *) + # for now, limit cross builds to known configurations + MACHDEP="unknown" + as_fn_error $? "cross build not supported for $host" "$LINENO" 5 + esac + ac_sys_release= + else + ac_sys_system=`uname -s` + if test "$ac_sys_system" = "AIX" \ + -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then + ac_sys_release=`uname -v` + else + ac_sys_release=`uname -r` + fi + fi + ac_md_system=`echo $ac_sys_system | + tr -d '/ ' | tr '[A-Z]' '[a-z]'` + ac_md_release=`echo $ac_sys_release | + tr -d '/ ' | sed 's/^[A-Z]\.//' | sed 's/\..*//'` + MACHDEP="$ac_md_system$ac_md_release" + + case $MACHDEP in + aix*) MACHDEP="aix";; + linux*) MACHDEP="linux";; + cygwin*) MACHDEP="cygwin";; + darwin*) MACHDEP="darwin";; + '') MACHDEP="unknown";; + esac + + if test "$ac_sys_system" = "SunOS"; then + # For Solaris, there isn't an OS version specific macro defined + # in most compilers, so we define one here. + SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\(0-9\)$!.0\1!g' | tr -d '.'` + +printf "%s\n" "#define Py_SUNOS_VERSION $SUNOS_VERSION" >>confdefs.h + + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: \"$MACHDEP\"" >&5 +printf "%s\n" "\"$MACHDEP\"" >&6; } + { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for --enable-universalsdk" >&5 printf %s "checking for --enable-universalsdk... " >&6; } # Check whether --enable-universalsdk was given. @@ -4127,11 +4201,15 @@ then : PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= + PYTHONFRAMEWORKINSTALLNAMEPREFIX= + RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= + INSTALLTARGETS="commoninstall bininstall maninstall" + if test "x${prefix}" = "xNONE"; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -4144,65 +4222,76 @@ then : PYTHONFRAMEWORKINSTALLDIR=$PYTHONFRAMEWORKPREFIX/$PYTHONFRAMEWORKDIR FRAMEWORKINSTALLFIRST="frameworkinstallstructure" FRAMEWORKALTINSTALLFIRST="frameworkinstallstructure " - FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" - FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" - FRAMEWORKPYTHONW="frameworkpythonw" - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - if test "x${prefix}" = "xNONE" ; then - FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" + case $ac_sys_system in #( + Darwin) : + FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" + FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" + FRAMEWORKPYTHONW="frameworkpythonw" + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + INSTALLTARGETS="commoninstall bininstall maninstall" - else - FRAMEWORKUNIXTOOLSPREFIX="${prefix}" - fi + if test "x${prefix}" = "xNONE" ; then + FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" - case "${enableval}" in - /System*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - if test "${prefix}" = "NONE" ; then - # See below - FRAMEWORKUNIXTOOLSPREFIX="/usr" - fi - ;; + else + FRAMEWORKUNIXTOOLSPREFIX="${prefix}" + fi - /Library*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; + case "${enableval}" in + /System*) + 
FRAMEWORKINSTALLAPPSPREFIX="/Applications" + if test "${prefix}" = "NONE" ; then + # See below + FRAMEWORKUNIXTOOLSPREFIX="/usr" + fi + ;; + + /Library*) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; + + */Library/Frameworks) + MDIR="`dirname "${enableval}"`" + MDIR="`dirname "${MDIR}"`" + FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" + + if test "${prefix}" = "NONE"; then + # User hasn't specified the + # --prefix option, but wants to install + # the framework in a non-default location, + # ensure that the compatibility links get + # installed relative to that prefix as well + # instead of in /usr/local. + FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" + fi + ;; - */Library/Frameworks) - MDIR="`dirname "${enableval}"`" - MDIR="`dirname "${MDIR}"`" - FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" - - if test "${prefix}" = "NONE"; then - # User hasn't specified the - # --prefix option, but wants to install - # the framework in a non-default location, - # ensure that the compatibility links get - # installed relative to that prefix as well - # instead of in /usr/local. - FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" - fi - ;; - - *) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; - esac + *) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; + esac - prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION + prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION + PYTHONFRAMEWORKINSTALLNAMEPREFIX=${prefix} + RESSRCDIR=Mac/Resources/framework - # Add files for Mac specific code to the list of output - # files: - ac_config_files="$ac_config_files Mac/Makefile" + # Add files for Mac specific code to the list of output + # files: + ac_config_files="$ac_config_files Mac/Makefile" - ac_config_files="$ac_config_files Mac/PythonLauncher/Makefile" + ac_config_files="$ac_config_files Mac/PythonLauncher/Makefile" - ac_config_files="$ac_config_files Mac/Resources/framework/Info.plist" + ac_config_files="$ac_config_files Mac/Resources/framework/Info.plist" - ac_config_files="$ac_config_files Mac/Resources/app/Info.plist" + ac_config_files="$ac_config_files Mac/Resources/app/Info.plist" - esac + ;; + *) + as_fn_error $? "Unknown platform for framework build" "$LINENO" 5 + ;; + esac + esac else $as_nop @@ -4210,11 +4299,14 @@ else $as_nop PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= + PYTHONFRAMEWORKINSTALLNAMEPREFIX= + RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= + INSTALLTARGETS="commoninstall bininstall maninstall" if test "x${prefix}" = "xNONE" ; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -4239,79 +4331,11 @@ fi -printf "%s\n" "#define _PYTHONFRAMEWORK \"${PYTHONFRAMEWORK}\"" >>confdefs.h -# Set name for machine-dependent library files -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking MACHDEP" >&5 -printf %s "checking MACHDEP... " >&6; } -if test -z "$MACHDEP" -then - # avoid using uname for cross builds - if test "$cross_compiling" = yes; then - # ac_sys_system and ac_sys_release are used for setting - # a lot of different things including 'define_xopen_source' - # in the case statement below. - case "$host" in - *-*-linux-android*) - ac_sys_system=Linux-android - ;; - *-*-linux*) - ac_sys_system=Linux - ;; - *-*-cygwin*) - ac_sys_system=Cygwin - ;; - *-*-vxworks*) - ac_sys_system=VxWorks - ;; - *-*-emscripten) - ac_sys_system=Emscripten - ;; - *-*-wasi) - ac_sys_system=WASI - ;; - *) - # for now, limit cross builds to known configurations - MACHDEP="unknown" - as_fn_error $? 
"cross build not supported for $host" "$LINENO" 5 - esac - ac_sys_release= - else - ac_sys_system=`uname -s` - if test "$ac_sys_system" = "AIX" \ - -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then - ac_sys_release=`uname -v` - else - ac_sys_release=`uname -r` - fi - fi - ac_md_system=`echo $ac_sys_system | - tr -d '/ ' | tr '[A-Z]' '[a-z]'` - ac_md_release=`echo $ac_sys_release | - tr -d '/ ' | sed 's/^[A-Z]\.//' | sed 's/\..*//'` - MACHDEP="$ac_md_system$ac_md_release" - - case $MACHDEP in - aix*) MACHDEP="aix";; - linux*) MACHDEP="linux";; - cygwin*) MACHDEP="cygwin";; - darwin*) MACHDEP="darwin";; - '') MACHDEP="unknown";; - esac - - if test "$ac_sys_system" = "SunOS"; then - # For Solaris, there isn't an OS version specific macro defined - # in most compilers, so we define one here. - SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\(0-9\)$!.0\1!g' | tr -d '.'` +printf "%s\n" "#define _PYTHONFRAMEWORK \"${PYTHONFRAMEWORK}\"" >>confdefs.h -printf "%s\n" "#define Py_SUNOS_VERSION $SUNOS_VERSION" >>confdefs.h - - fi -fi -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: \"$MACHDEP\"" >&5 -printf "%s\n" "\"$MACHDEP\"" >&6; } if test "$cross_compiling" = yes; then @@ -4319,27 +4343,27 @@ if test "$cross_compiling" = yes; then *-*-linux*) case "$host_cpu" in arm*) - _host_cpu=arm + _host_ident=arm ;; *) - _host_cpu=$host_cpu + _host_ident=$host_cpu esac ;; *-*-cygwin*) - _host_cpu= + _host_ident= ;; *-*-vxworks*) - _host_cpu=$host_cpu + _host_ident=$host_cpu ;; wasm32-*-* | wasm64-*-*) - _host_cpu=$host_cpu + _host_ident=$host_cpu ;; *) # for now, limit cross builds to known configurations MACHDEP="unknown" as_fn_error $? "cross build not supported for $host" "$LINENO" 5 esac - _PYTHON_HOST_PLATFORM="$MACHDEP${_host_cpu:+-$_host_cpu}" + _PYTHON_HOST_PLATFORM="$MACHDEP${_host_ident:+-$_host_ident}" fi # Some systems cannot stand _XOPEN_SOURCE being defined at all; they @@ -6769,8 +6793,6 @@ case $ac_sys_system in #( ;; esac -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MULTIARCH" >&5 -printf "%s\n" "$MULTIARCH" >&6; } if test x$PLATFORM_TRIPLET != x && test x$MULTIARCH != x; then if test x$PLATFORM_TRIPLET != x$MULTIARCH; then @@ -6780,6 +6802,14 @@ elif test x$PLATFORM_TRIPLET != x && test x$MULTIARCH = x; then MULTIARCH=$PLATFORM_TRIPLET fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MULTIARCH" >&5 +printf "%s\n" "$MULTIARCH" >&6; } + +case $ac_sys_system in #( + *) : + SOABI_PLATFORM=$PLATFORM_TRIPLET + ;; +esac if test x$MULTIARCH != x; then MULTIARCH_CPPFLAGS="-DMULTIARCH=\\\"$MULTIARCH\\\"" @@ -7271,7 +7301,7 @@ fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking LDLIBRARY" >&5 printf %s "checking LDLIBRARY... " >&6; } -# MacOSX framework builds need more magic. LDLIBRARY is the dynamic +# Apple framework builds need more magic. LDLIBRARY is the dynamic # library that we build, but we do not want to link against it (we # will find it with a -framework option). For this reason there is an # extra variable BLDLIBRARY against which Python and the extension @@ -7279,9 +7309,14 @@ printf %s "checking LDLIBRARY... " >&6; } # LDLIBRARY, but empty for MacOSX framework builds. if test "$enable_framework" then - LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)' - RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}} + case $ac_sys_system in + Darwin) + LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)';; + *) + as_fn_error $? 
"Unknown platform for framework build" "$LINENO" 5;; + esac BLDLIBRARY='' + RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}} else BLDLIBRARY='$(LDLIBRARY)' fi @@ -7294,64 +7329,64 @@ printf "%s\n" "#define Py_ENABLE_SHARED 1" >>confdefs.h case $ac_sys_system in CYGWIN*) - LDLIBRARY='libpython$(LDVERSION).dll.a' - DLLLIBRARY='libpython$(LDVERSION).dll' - ;; + LDLIBRARY='libpython$(LDVERSION).dll.a' + DLLLIBRARY='libpython$(LDVERSION).dll' + ;; SunOS*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*|VxWorks*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; hp*|HP*) - case `uname -m` in - ia64) - LDLIBRARY='libpython$(LDVERSION).so' - ;; - *) - LDLIBRARY='libpython$(LDVERSION).sl' - ;; - esac - BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} - ;; + case `uname -m` in + ia64) + LDLIBRARY='libpython$(LDVERSION).so' + ;; + *) + LDLIBRARY='libpython$(LDVERSION).sl' + ;; + esac + BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} + ;; Darwin*) - LDLIBRARY='libpython$(LDVERSION).dylib' - BLDLIBRARY='-L. -lpython$(LDVERSION)' - RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).dylib' + BLDLIBRARY='-L. -lpython$(LDVERSION)' + RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} + ;; AIX*) - LDLIBRARY='libpython$(LDVERSION).so' - RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).so' + RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} + ;; esac else # shared is disabled PY_ENABLE_SHARED=0 case $ac_sys_system in CYGWIN*) - BLDLIBRARY='$(LIBRARY)' - LDLIBRARY='libpython$(LDVERSION).dll.a' - ;; + BLDLIBRARY='$(LIBRARY)' + LDLIBRARY='libpython$(LDVERSION).dll.a' + ;; esac fi if test "$cross_compiling" = yes; then - RUNSHARED= + RUNSHARED= fi @@ -23898,7 +23933,7 @@ printf %s "checking ABIFLAGS... " >&6; } printf "%s\n" "$ABIFLAGS" >&6; } { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking SOABI" >&5 printf %s "checking SOABI... 
" >&6; } -SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} +SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${SOABI_PLATFORM:+-$SOABI_PLATFORM} { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $SOABI" >&5 printf "%s\n" "$SOABI" >&6; } @@ -23907,7 +23942,7 @@ printf "%s\n" "$SOABI" >&6; } if test "$Py_DEBUG" = 'true'; then # Similar to SOABI but remove "d" flag from ABIFLAGS - ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} + ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${SOABI_PLATFORM:+-$SOABI_PLATFORM} printf "%s\n" "#define ALT_SOABI \"${ALT_SOABI}\"" >>confdefs.h diff --git a/configure.ac b/configure.ac index dee7ed552b370f..b39af7422c4c7c 100644 --- a/configure.ac +++ b/configure.ac @@ -307,6 +307,74 @@ if test "$with_pkg_config" = yes -a -z "$PKG_CONFIG"; then AC_MSG_ERROR([pkg-config is required])] fi +# Set name for machine-dependent library files +AC_ARG_VAR([MACHDEP], [name for machine-dependent library files]) +AC_MSG_CHECKING([MACHDEP]) +if test -z "$MACHDEP" +then + # avoid using uname for cross builds + if test "$cross_compiling" = yes; then + # ac_sys_system and ac_sys_release are used for setting + # a lot of different things including 'define_xopen_source' + # in the case statement below. + case "$host" in + *-*-linux-android*) + ac_sys_system=Linux-android + ;; + *-*-linux*) + ac_sys_system=Linux + ;; + *-*-cygwin*) + ac_sys_system=Cygwin + ;; + *-*-vxworks*) + ac_sys_system=VxWorks + ;; + *-*-emscripten) + ac_sys_system=Emscripten + ;; + *-*-wasi) + ac_sys_system=WASI + ;; + *) + # for now, limit cross builds to known configurations + MACHDEP="unknown" + AC_MSG_ERROR([cross build not supported for $host]) + esac + ac_sys_release= + else + ac_sys_system=`uname -s` + if test "$ac_sys_system" = "AIX" \ + -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then + ac_sys_release=`uname -v` + else + ac_sys_release=`uname -r` + fi + fi + ac_md_system=`echo $ac_sys_system | + tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'` + ac_md_release=`echo $ac_sys_release | + tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'` + MACHDEP="$ac_md_system$ac_md_release" + + case $MACHDEP in + aix*) MACHDEP="aix";; + linux*) MACHDEP="linux";; + cygwin*) MACHDEP="cygwin";; + darwin*) MACHDEP="darwin";; + '') MACHDEP="unknown";; + esac + + if test "$ac_sys_system" = "SunOS"; then + # For Solaris, there isn't an OS version specific macro defined + # in most compilers, so we define one here. 
+ SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\([0-9]\)$!.0\1!g' | tr -d '.'` + AC_DEFINE_UNQUOTED([Py_SUNOS_VERSION], [$SUNOS_VERSION], + [The version of SunOS/Solaris as reported by `uname -r' without the dot.]) + fi +fi +AC_MSG_RESULT(["$MACHDEP"]) + AC_MSG_CHECKING([for --enable-universalsdk]) AC_ARG_ENABLE([universalsdk], AS_HELP_STRING([--enable-universalsdk@<:@=SDKDIR@:>@], @@ -424,11 +492,15 @@ AC_ARG_ENABLE([framework], PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= + PYTHONFRAMEWORKINSTALLNAMEPREFIX= + RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= + INSTALLTARGETS="commoninstall bininstall maninstall" + if test "x${prefix}" = "xNONE"; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -441,71 +513,85 @@ AC_ARG_ENABLE([framework], PYTHONFRAMEWORKINSTALLDIR=$PYTHONFRAMEWORKPREFIX/$PYTHONFRAMEWORKDIR FRAMEWORKINSTALLFIRST="frameworkinstallstructure" FRAMEWORKALTINSTALLFIRST="frameworkinstallstructure " - FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" - FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" - FRAMEWORKPYTHONW="frameworkpythonw" - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - if test "x${prefix}" = "xNONE" ; then - FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" + case $ac_sys_system in #( + Darwin) : + FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" + FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" + FRAMEWORKPYTHONW="frameworkpythonw" + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + INSTALLTARGETS="commoninstall bininstall maninstall" - else - FRAMEWORKUNIXTOOLSPREFIX="${prefix}" - fi + if test "x${prefix}" = "xNONE" ; then + FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" - case "${enableval}" in - /System*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - if test "${prefix}" = "NONE" ; then - # See below - FRAMEWORKUNIXTOOLSPREFIX="/usr" - fi - ;; + else + FRAMEWORKUNIXTOOLSPREFIX="${prefix}" + fi - /Library*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; + case "${enableval}" in + /System*) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + if test "${prefix}" = "NONE" ; then + # See below + FRAMEWORKUNIXTOOLSPREFIX="/usr" + fi + ;; + + /Library*) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; + + */Library/Frameworks) + MDIR="`dirname "${enableval}"`" + MDIR="`dirname "${MDIR}"`" + FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" + + if test "${prefix}" = "NONE"; then + # User hasn't specified the + # --prefix option, but wants to install + # the framework in a non-default location, + # ensure that the compatibility links get + # installed relative to that prefix as well + # instead of in /usr/local. + FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" + fi + ;; - */Library/Frameworks) - MDIR="`dirname "${enableval}"`" - MDIR="`dirname "${MDIR}"`" - FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" - - if test "${prefix}" = "NONE"; then - # User hasn't specified the - # --prefix option, but wants to install - # the framework in a non-default location, - # ensure that the compatibility links get - # installed relative to that prefix as well - # instead of in /usr/local. 
- FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" - fi - ;; + *) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; + esac - *) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; + prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION + PYTHONFRAMEWORKINSTALLNAMEPREFIX=${prefix} + RESSRCDIR=Mac/Resources/framework + + # Add files for Mac specific code to the list of output + # files: + AC_CONFIG_FILES([Mac/Makefile]) + AC_CONFIG_FILES([Mac/PythonLauncher/Makefile]) + AC_CONFIG_FILES([Mac/Resources/framework/Info.plist]) + AC_CONFIG_FILES([Mac/Resources/app/Info.plist]) + ;; + *) + AC_MSG_ERROR([Unknown platform for framework build]) + ;; + esac esac - - prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION - - # Add files for Mac specific code to the list of output - # files: - AC_CONFIG_FILES([Mac/Makefile]) - AC_CONFIG_FILES([Mac/PythonLauncher/Makefile]) - AC_CONFIG_FILES([Mac/Resources/framework/Info.plist]) - AC_CONFIG_FILES([Mac/Resources/app/Info.plist]) - esac ],[ PYTHONFRAMEWORK= PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= + PYTHONFRAMEWORKINSTALLNAMEPREFIX= + RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= + INSTALLTARGETS="commoninstall bininstall maninstall" if test "x${prefix}" = "xNONE" ; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -519,6 +605,8 @@ AC_SUBST([PYTHONFRAMEWORKIDENTIFIER]) AC_SUBST([PYTHONFRAMEWORKDIR]) AC_SUBST([PYTHONFRAMEWORKPREFIX]) AC_SUBST([PYTHONFRAMEWORKINSTALLDIR]) +AC_SUBST([PYTHONFRAMEWORKINSTALLNAMEPREFIX]) +AC_SUBST([RESSRCDIR]) AC_SUBST([FRAMEWORKINSTALLFIRST]) AC_SUBST([FRAMEWORKINSTALLLAST]) AC_SUBST([FRAMEWORKALTINSTALLFIRST]) @@ -526,105 +614,38 @@ AC_SUBST([FRAMEWORKALTINSTALLLAST]) AC_SUBST([FRAMEWORKPYTHONW]) AC_SUBST([FRAMEWORKUNIXTOOLSPREFIX]) AC_SUBST([FRAMEWORKINSTALLAPPSPREFIX]) +AC_SUBST([INSTALLTARGETS]) AC_DEFINE_UNQUOTED([_PYTHONFRAMEWORK], ["${PYTHONFRAMEWORK}"], [framework name]) -# Set name for machine-dependent library files -AC_ARG_VAR([MACHDEP], [name for machine-dependent library files]) -AC_MSG_CHECKING([MACHDEP]) -if test -z "$MACHDEP" -then - # avoid using uname for cross builds - if test "$cross_compiling" = yes; then - # ac_sys_system and ac_sys_release are used for setting - # a lot of different things including 'define_xopen_source' - # in the case statement below. 
- case "$host" in - *-*-linux-android*) - ac_sys_system=Linux-android - ;; - *-*-linux*) - ac_sys_system=Linux - ;; - *-*-cygwin*) - ac_sys_system=Cygwin - ;; - *-*-vxworks*) - ac_sys_system=VxWorks - ;; - *-*-emscripten) - ac_sys_system=Emscripten - ;; - *-*-wasi) - ac_sys_system=WASI - ;; - *) - # for now, limit cross builds to known configurations - MACHDEP="unknown" - AC_MSG_ERROR([cross build not supported for $host]) - esac - ac_sys_release= - else - ac_sys_system=`uname -s` - if test "$ac_sys_system" = "AIX" \ - -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then - ac_sys_release=`uname -v` - else - ac_sys_release=`uname -r` - fi - fi - ac_md_system=`echo $ac_sys_system | - tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'` - ac_md_release=`echo $ac_sys_release | - tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'` - MACHDEP="$ac_md_system$ac_md_release" - - case $MACHDEP in - aix*) MACHDEP="aix";; - linux*) MACHDEP="linux";; - cygwin*) MACHDEP="cygwin";; - darwin*) MACHDEP="darwin";; - '') MACHDEP="unknown";; - esac - - if test "$ac_sys_system" = "SunOS"; then - # For Solaris, there isn't an OS version specific macro defined - # in most compilers, so we define one here. - SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\([0-9]\)$!.0\1!g' | tr -d '.'` - AC_DEFINE_UNQUOTED([Py_SUNOS_VERSION], [$SUNOS_VERSION], - [The version of SunOS/Solaris as reported by `uname -r' without the dot.]) - fi -fi -AC_MSG_RESULT(["$MACHDEP"]) - AC_SUBST([_PYTHON_HOST_PLATFORM]) if test "$cross_compiling" = yes; then case "$host" in *-*-linux*) case "$host_cpu" in arm*) - _host_cpu=arm + _host_ident=arm ;; *) - _host_cpu=$host_cpu + _host_ident=$host_cpu esac ;; *-*-cygwin*) - _host_cpu= + _host_ident= ;; *-*-vxworks*) - _host_cpu=$host_cpu + _host_ident=$host_cpu ;; wasm32-*-* | wasm64-*-*) - _host_cpu=$host_cpu + _host_ident=$host_cpu ;; *) # for now, limit cross builds to known configurations MACHDEP="unknown" AC_MSG_ERROR([cross build not supported for $host]) esac - _PYTHON_HOST_PLATFORM="$MACHDEP${_host_cpu:+-$_host_cpu}" + _PYTHON_HOST_PLATFORM="$MACHDEP${_host_ident:+-$_host_ident}" fi # Some systems cannot stand _XOPEN_SOURCE being defined at all; they @@ -935,6 +956,14 @@ else fi rm -f conftest.out +dnl On some platforms, using a true "triplet" for MULTIARCH would be redundant. +dnl For example, `arm64-apple-darwin` is redundant, because there isn't a +dnl non-Apple Darwin. Including the CPU architecture can also be potentially +dnl redundant - on macOS, for example, it's possible to do a single compile +dnl pass that includes multiple architectures, so it would be misleading for +dnl MULTIARCH (and thus the sysconfigdata module name) to include a single CPU +dnl architecture. PLATFORM_TRIPLET will be a pair or single value for these +dnl platforms. AC_MSG_CHECKING([for multiarch]) AS_CASE([$ac_sys_system], [Darwin*], [MULTIARCH=""], @@ -942,7 +971,6 @@ AS_CASE([$ac_sys_system], [MULTIARCH=$($CC --print-multiarch 2>/dev/null)] ) AC_SUBST([MULTIARCH]) -AC_MSG_RESULT([$MULTIARCH]) if test x$PLATFORM_TRIPLET != x && test x$MULTIARCH != x; then if test x$PLATFORM_TRIPLET != x$MULTIARCH; then @@ -952,6 +980,16 @@ elif test x$PLATFORM_TRIPLET != x && test x$MULTIARCH = x; then MULTIARCH=$PLATFORM_TRIPLET fi AC_SUBST([PLATFORM_TRIPLET]) +AC_MSG_RESULT([$MULTIARCH]) + +dnl Even if we *do* include the CPU architecture in the MULTIARCH value, some +dnl platforms don't need the CPU architecture in the SOABI tag. 
These platforms +dnl will have multiple sysconfig modules (one for each CPU architecture), but +dnl use a single "fat" binary at runtime. SOABI_PLATFORM is the component of +dnl the PLATFORM_TRIPLET that will be used in binary module extensions. +AS_CASE([$ac_sys_system], + [SOABI_PLATFORM=$PLATFORM_TRIPLET] +) if test x$MULTIARCH != x; then MULTIARCH_CPPFLAGS="-DMULTIARCH=\\\"$MULTIARCH\\\"" @@ -1294,7 +1332,7 @@ fi AC_MSG_CHECKING([LDLIBRARY]) -# MacOSX framework builds need more magic. LDLIBRARY is the dynamic +# Apple framework builds need more magic. LDLIBRARY is the dynamic # library that we build, but we do not want to link against it (we # will find it with a -framework option). For this reason there is an # extra variable BLDLIBRARY against which Python and the extension @@ -1302,9 +1340,14 @@ AC_MSG_CHECKING([LDLIBRARY]) # LDLIBRARY, but empty for MacOSX framework builds. if test "$enable_framework" then - LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)' - RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}} + case $ac_sys_system in + Darwin) + LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)';; + *) + AC_MSG_ERROR([Unknown platform for framework build]);; + esac BLDLIBRARY='' + RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}} else BLDLIBRARY='$(LDLIBRARY)' fi @@ -1316,64 +1359,64 @@ if test $enable_shared = "yes"; then [Defined if Python is built as a shared library.]) case $ac_sys_system in CYGWIN*) - LDLIBRARY='libpython$(LDVERSION).dll.a' - DLLLIBRARY='libpython$(LDVERSION).dll' - ;; + LDLIBRARY='libpython$(LDVERSION).dll.a' + DLLLIBRARY='libpython$(LDVERSION).dll' + ;; SunOS*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*|VxWorks*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; hp*|HP*) - case `uname -m` in - ia64) - LDLIBRARY='libpython$(LDVERSION).so' - ;; - *) - LDLIBRARY='libpython$(LDVERSION).sl' - ;; - esac - BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} - ;; + case `uname -m` in + ia64) + LDLIBRARY='libpython$(LDVERSION).so' + ;; + *) + LDLIBRARY='libpython$(LDVERSION).sl' + ;; + esac + BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} + ;; Darwin*) - LDLIBRARY='libpython$(LDVERSION).dylib' - BLDLIBRARY='-L. 
-lpython$(LDVERSION)' - RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).dylib' + BLDLIBRARY='-L. -lpython$(LDVERSION)' + RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} + ;; AIX*) - LDLIBRARY='libpython$(LDVERSION).so' - RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).so' + RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} + ;; esac else # shared is disabled PY_ENABLE_SHARED=0 case $ac_sys_system in CYGWIN*) - BLDLIBRARY='$(LIBRARY)' - LDLIBRARY='libpython$(LDVERSION).dll.a' - ;; + BLDLIBRARY='$(LIBRARY)' + LDLIBRARY='libpython$(LDVERSION).dll.a' + ;; esac fi if test "$cross_compiling" = yes; then - RUNSHARED= + RUNSHARED= fi AC_ARG_VAR([HOSTRUNNER], [Program to run CPython for the host platform]) @@ -5824,7 +5867,7 @@ AC_SUBST([SOABI]) AC_MSG_CHECKING([ABIFLAGS]) AC_MSG_RESULT([$ABIFLAGS]) AC_MSG_CHECKING([SOABI]) -SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} +SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${SOABI_PLATFORM:+-$SOABI_PLATFORM} AC_MSG_RESULT([$SOABI]) # Release build, debug build (Py_DEBUG), and trace refs build (Py_TRACE_REFS) @@ -5832,7 +5875,7 @@ AC_MSG_RESULT([$SOABI]) if test "$Py_DEBUG" = 'true'; then # Similar to SOABI but remove "d" flag from ABIFLAGS AC_SUBST([ALT_SOABI]) - ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} + ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${SOABI_PLATFORM:+-$SOABI_PLATFORM} AC_DEFINE_UNQUOTED([ALT_SOABI], ["${ALT_SOABI}"], [Alternative SOABI used in debug build to load C extensions built in release mode]) fi From 23e61d39975758f5c4a71cb67acd438072738faa Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Wed, 14 Feb 2024 13:57:16 -0600 Subject: [PATCH 27/31] Adjust formatting to better match PEP 7 --- Include/internal/pycore_code.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/Include/internal/pycore_code.h b/Include/internal/pycore_code.h index a465238d39faca..85c8c01d3690f4 100644 --- a/Include/internal/pycore_code.h +++ b/Include/internal/pycore_code.h @@ -286,9 +286,13 @@ extern int _PyStaticCode_Init(PyCodeObject *co); #define GC_STAT_ADD(gen, name, n) do { if (_Py_stats) _Py_stats->gc_stats[(gen)].name += (n); } while (0) #define OPT_STAT_INC(name) do { if (_Py_stats) _Py_stats->optimization_stats.name++; } while (0) #define UOP_STAT_INC(opname, name) do { if (_Py_stats) { assert(opname < 512); _Py_stats->optimization_stats.opcode[opname].name++; } } while (0) -#define UOP_PAIR_INC(uopcode, lastuop) do { if (lastuop && _Py_stats) { \ - _Py_stats->optimization_stats.opcode[lastuop].pair_count[uopcode]++; } \ - lastuop = uopcode; } while (0) +#define UOP_PAIR_INC(uopcode, lastuop) \ + do { \ + if (lastuop && _Py_stats) { \ + _Py_stats->optimization_stats.opcode[lastuop].pair_count[uopcode]++; \ + } \ + lastuop = uopcode; \ + } while (0) #define OPT_UNSUPPORTED_OPCODE(opname) do { if (_Py_stats) _Py_stats->optimization_stats.unsupported_opcode[opname]++; } while (0) #define OPT_HIST(length, name) \ do { \ From 9c92a0a6cc738936b7a96f85c9a9da0463d05748 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Wed, 14 Feb 2024 15:07:01 -0600 Subject: [PATCH 28/31] Revert "Catch up with main" This reverts commit 03db7a5e950639cbbaea38ad4521f55ab39b1495. 
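
For reference, the UOP_PAIR_INC macro touched in the previous patch counts how
often one uop is immediately followed by another: when stats are enabled it
bumps opcode[lastuop].pair_count[uopcode], and it always stores the current uop
into lastuop (even when _Py_stats is NULL), so a caller only has to initialize
lastuop once. The standalone sketch below shows only that counting pattern; the
record_pair helper, the flat pair_count table, and the sample trace are
illustrative and are not part of this patch series.

    /*
     * Sketch of the pair-counting pattern behind UOP_PAIR_INC.
     * Only the counting logic mirrors the macro: skip the very first uop,
     * bump the (last, current) pair, then remember the current uop.
     */
    #include <stdio.h>

    #define MAX_UOP 512

    static unsigned long long pair_count[MAX_UOP][MAX_UOP];

    static void
    record_pair(int *lastuop, int uopcode)
    {
        if (*lastuop) {              /* 0 means "no previous uop yet" */
            pair_count[*lastuop][uopcode]++;
        }
        *lastuop = uopcode;          /* updated unconditionally, like the macro */
    }

    int
    main(void)
    {
        int trace[] = {3, 7, 7, 3, 7};   /* made-up uop indices */
        int lastuop = 0;
        for (size_t i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
            record_pair(&lastuop, trace[i]);
        }
        printf("3->7: %llu, 7->7: %llu, 7->3: %llu\n",
               pair_count[3][7], pair_count[7][7], pair_count[7][3]);
        return 0;
    }

Compiled with any C99 compiler, the sketch prints 3->7: 2, 7->7: 1, 7->3: 1.
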
--- .editorconfig | 4 +- .gitattributes | 1 - Doc/c-api/long.rst | 70 - Doc/c-api/time.rst | 83 - Doc/c-api/unicode.rst | 7 +- Doc/c-api/utilities.rst | 1 - Doc/conf.py | 17 +- Doc/reference/datamodel.rst | 2 - Doc/tools/static/changelog_search.js | 102 +- Doc/using/mac.rst | 105 +- Doc/whatsnew/3.13.rst | 17 +- Include/Python.h | 1 - Include/cpython/longobject.h | 36 +- Include/cpython/optimizer.h | 10 +- Include/cpython/pystats.h | 3 - Include/cpython/pytime.h | 23 - .../pycore_global_objects_fini_generated.h | 10 - Include/internal/pycore_global_strings.h | 10 - Include/internal/pycore_interp.h | 8 +- Include/internal/pycore_opcode_metadata.h | 10 +- Include/internal/pycore_optimizer.h | 7 - .../internal/pycore_runtime_init_generated.h | 10 - Include/internal/pycore_time.h | 99 +- .../internal/pycore_unicodeobject_generated.h | 30 - Include/internal/pycore_uop_ids.h | 3 +- Include/internal/pycore_uop_metadata.h | 14 +- Lib/logging/config.py | 7 +- Lib/multiprocessing/connection.py | 14 +- Lib/pydoc.py | 2 +- Lib/test/_test_multiprocessing.py | 18 - Lib/test/libregrtest/findtests.py | 1 - Lib/test/{test_pydoc => }/pydoc_mod.py | 0 Lib/test/{test_pydoc => }/pydocfodder.py | 0 Lib/test/test_capi/test_long.py | 145 -- Lib/test/test_capi/test_opt.py | 209 -- Lib/test/test_capi/test_time.py | 71 - Lib/test/test_dataclasses/__init__.py | 3 - Lib/test/test_enum.py | 10 +- Lib/test/test_generated_cases.py | 153 -- Lib/test/{test_pydoc => }/test_pydoc.py | 51 +- Lib/test/test_pydoc/__init__.py | 6 - Lib/test/test_queue.py | 4 +- Lib/test/test_time.py | 17 +- Lib/test/test_winapi.py | 94 - Makefile.pre.in | 20 +- Misc/NEWS.d/3.12.0a1.rst | 4 +- Misc/NEWS.d/3.12.0b1.rst | 4 +- Misc/NEWS.d/3.13.0a1.rst | 2 +- ...-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst | 2 - ...-11-16-02-07-48.gh-issue-110850.DQGNfF.rst | 9 - ...-02-05-17-11-15.gh-issue-111140.WMEjid.rst | 2 - ...-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst | 1 - ...-02-09-07-20-16.gh-issue-115165.yfJLXA.rst | 2 +- ...3-08-11-18-21-38.gh-issue-89240.dtSOLG.rst | 1 - ...-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst | 1 - Modules/Setup.stdlib.in | 2 +- Modules/_io/textio.c | 2 +- Modules/_pickle.c | 3 +- Modules/_randommodule.c | 5 +- Modules/_sqlite/util.c | 2 +- Modules/_struct.c | 20 +- Modules/_testcapi/long.c | 48 +- Modules/_testcapi/parts.h | 1 - Modules/_testcapi/time.c | 104 - Modules/_testcapimodule.c | 3 - Modules/_testinternalcapi/pytime.c | 16 + Modules/_threadmodule.c | 6 +- Modules/_tkinter.c | 3 +- Modules/_winapi.c | 506 ----- Modules/cjkcodecs/multibytecodec.c | 6 +- Modules/clinic/_winapi.c.h | 498 +---- Objects/exceptions.c | 7 +- Objects/longobject.c | 216 +-- PC/errmap.h | 3 - PC/launcher2.c | 15 +- PCbuild/_testcapi.vcxproj | 1 - PCbuild/_testcapi.vcxproj.filters | 3 - Python/bytecodes.c | 85 +- Python/executor_cases.c.h | 15 +- Python/generated_cases.c.h | 31 +- Python/optimizer.c | 66 +- Python/optimizer_analysis.c | 580 +----- Python/pylifecycle.c | 4 +- Python/pystate.c | 10 +- Python/pytime.c | 102 +- Python/specialize.c | 5 - .../tier2_redundancy_eliminator_bytecodes.c | 272 --- Python/tier2_redundancy_eliminator_cases.c.h | 1680 ----------------- Tools/c-analyzer/cpython/_parser.py | 2 - Tools/c-analyzer/cpython/ignored.tsv | 2 +- Tools/cases_generator/README.md | 3 - Tools/cases_generator/analyzer.py | 6 +- .../cases_generator/interpreter_definition.md | 26 +- Tools/cases_generator/parsing.py | 26 +- Tools/cases_generator/stack.py | 4 +- .../tier2_abstract_generator.py | 235 --- config.sub | 251 +-- configure | 383 ++-- 
configure.ac | 385 ++-- 99 files changed, 930 insertions(+), 6249 deletions(-) delete mode 100644 Doc/c-api/time.rst delete mode 100644 Include/cpython/pytime.h rename Lib/test/{test_pydoc => }/pydoc_mod.py (100%) rename Lib/test/{test_pydoc => }/pydocfodder.py (100%) delete mode 100644 Lib/test/test_capi/test_time.py rename Lib/test/{test_pydoc => }/test_pydoc.py (98%) delete mode 100644 Lib/test/test_pydoc/__init__.py delete mode 100644 Lib/test/test_winapi.py delete mode 100644 Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst delete mode 100644 Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst delete mode 100644 Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst delete mode 100644 Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst delete mode 100644 Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst delete mode 100644 Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst delete mode 100644 Modules/_testcapi/time.c delete mode 100644 Python/tier2_redundancy_eliminator_bytecodes.c delete mode 100644 Python/tier2_redundancy_eliminator_cases.c.h delete mode 100644 Tools/cases_generator/tier2_abstract_generator.py diff --git a/.editorconfig b/.editorconfig index a6187d64f3ce46..0169eed951cd3f 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,6 +1,6 @@ root = true -[*.{py,c,cpp,h,js,rst,md,yml}] +[*.{py,c,cpp,h,rst,md,yml}] trim_trailing_whitespace = true insert_final_newline = true indent_style = space @@ -11,5 +11,5 @@ indent_size = 4 [*.rst] indent_size = 3 -[*.{js,yml}] +[*.yml] indent_size = 2 diff --git a/.gitattributes b/.gitattributes index 07d877027b09f6..2a48df079e1aeb 100644 --- a/.gitattributes +++ b/.gitattributes @@ -94,7 +94,6 @@ Programs/test_frozenmain.h generated Python/Python-ast.c generated Python/executor_cases.c.h generated Python/generated_cases.c.h generated -Python/tier2_redundancy_eliminator_bytecodes.c.h generated Python/opcode_targets.h generated Python/stdlib_module_names.h generated Tools/peg_generator/pegen/grammar_parser.py generated diff --git a/Doc/c-api/long.rst b/Doc/c-api/long.rst index f24282e76a33d1..f42e23db89ae39 100644 --- a/Doc/c-api/long.rst +++ b/Doc/c-api/long.rst @@ -113,28 +113,6 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. retrieved from the resulting value using :c:func:`PyLong_AsVoidPtr`. -.. c:function:: PyObject* PyLong_FromNativeBytes(const void* buffer, size_t n_bytes, int endianness) - - Create a Python integer from the value contained in the first *n_bytes* of - *buffer*, interpreted as a two's-complement signed number. - - *endianness* may be passed ``-1`` for the native endian that CPython was - compiled with, or else ``0`` for big endian and ``1`` for little. - - .. versionadded:: 3.13 - - -.. c:function:: PyObject* PyLong_FromUnsignedNativeBytes(const void* buffer, size_t n_bytes, int endianness) - - Create a Python integer from the value contained in the first *n_bytes* of - *buffer*, interpreted as an unsigned number. - - *endianness* may be passed ``-1`` for the native endian that CPython was - compiled with, or else ``0`` for big endian and ``1`` for little. - - .. versionadded:: 3.13 - - .. XXX alias PyLong_AS_LONG (for now) .. c:function:: long PyLong_AsLong(PyObject *obj) @@ -354,54 +332,6 @@ distinguished from a number. Use :c:func:`PyErr_Occurred` to disambiguate. Returns ``NULL`` on error. Use :c:func:`PyErr_Occurred` to disambiguate. -.. 
c:function:: Py_ssize_t PyLong_AsNativeBytes(PyObject *pylong, void* buffer, Py_ssize_t n_bytes, int endianness) - - Copy the Python integer value to a native *buffer* of size *n_bytes*:: - - int value; - Py_ssize_t bytes = PyLong_AsNativeBytes(v, &value, sizeof(value), -1); - if (bytes < 0) { - // Error occurred - return NULL; - } - else if (bytes <= (Py_ssize_t)sizeof(value)) { - // Success! - } - else { - // Overflow occurred, but 'value' contains truncated value - } - - *endianness* may be passed ``-1`` for the native endian that CPython was - compiled with, or ``0`` for big endian and ``1`` for little. - - Return ``-1`` with an exception raised if *pylong* cannot be interpreted as - an integer. Otherwise, return the size of the buffer required to store the - value. If this is equal to or less than *n_bytes*, the entire value was - copied. - - Unless an exception is raised, all *n_bytes* of the buffer will be written - with as much of the value as can fit. This allows the caller to ignore all - non-negative results if the intent is to match the typical behavior of a - C-style downcast. No exception is set for this case. - - Values are always copied as two's-complement, and sufficient buffer will be - requested to include a sign bit. For example, this may cause an value that - fits into 8 bytes when treated as unsigned to request 9 bytes, even though - all eight bytes were copied into the buffer. What has been omitted is the - zero sign bit, which is redundant when the intention is to treat the value as - unsigned. - - Passing zero to *n_bytes* will return the requested buffer size. - - .. note:: - - When the value does not fit in the provided buffer, the requested size - returned from the function may be larger than necessary. Passing 0 to this - function is not an accurate way to determine the bit length of a value. - - .. versionadded:: 3.13 - - .. c:function:: int PyUnstable_Long_IsCompact(const PyLongObject* op) Return 1 if *op* is compact, 0 otherwise. diff --git a/Doc/c-api/time.rst b/Doc/c-api/time.rst deleted file mode 100644 index 7791cdb1781055..00000000000000 --- a/Doc/c-api/time.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. highlight:: c - -PyTime C API -============ - -.. versionadded:: 3.13 - -The clock C API provides access to system clocks. -It is similar to the Python :mod:`time` module. - -For C API related to the :mod:`datetime` module, see :ref:`datetimeobjects`. - - -Types ------ - -.. c:type:: PyTime_t - - A timestamp or duration in nanoseconds, represented as a signed 64-bit - integer. - - The reference point for timestamps depends on the clock used. For example, - :c:func:`PyTime_Time` returns timestamps relative to the UNIX epoch. - - The supported range is around [-292.3 years; +292.3 years]. - Using the Unix epoch (January 1st, 1970) as reference, the supported date - range is around [1677-09-21; 2262-04-11]. - The exact limits are exposed as constants: - -.. c:var:: PyTime_t PyTime_MIN - - Minimum value of :c:type:`PyTime_t`. - -.. c:var:: PyTime_t PyTime_MAX - - Maximum value of :c:type:`PyTime_t`. - - -Clock Functions ---------------- - -The following functions take a pointer to a :c:expr:`PyTime_t` that they -set to the value of a particular clock. -Details of each clock are given in the documentation of the corresponding -Python function. - -The functions return ``0`` on success, or ``-1`` (with an exception set) -on failure. 
- -On integer overflow, they set the :c:data:`PyExc_OverflowError` exception and -set ``*result`` to the value clamped to the ``[PyTime_MIN; PyTime_MAX]`` -range. -(On current systems, integer overflows are likely caused by misconfigured -system time.) - -As any other C API (unless otherwise specified), the functions must be called -with the :term:`GIL` held. - -.. c:function:: int PyTime_Monotonic(PyTime_t *result) - - Read the monotonic clock. - See :func:`time.monotonic` for important details on this clock. - -.. c:function:: int PyTime_PerfCounter(PyTime_t *result) - - Read the performance counter. - See :func:`time.perf_counter` for important details on this clock. - -.. c:function:: int PyTime_Time(PyTime_t *result) - - Read the “wall clock” time. - See :func:`time.time` for details important on this clock. - - -Conversion functions --------------------- - -.. c:function:: double PyTime_AsSecondsDouble(PyTime_t t) - - Convert a timestamp to a number of seconds as a C :c:expr:`double`. - - The function cannot fail, but note that :c:expr:`double` has limited - accuracy for large values. diff --git a/Doc/c-api/unicode.rst b/Doc/c-api/unicode.rst index 666ffe89605c56..5541eaa521803b 100644 --- a/Doc/c-api/unicode.rst +++ b/Doc/c-api/unicode.rst @@ -854,12 +854,7 @@ wchar_t Support Copy the Unicode object contents into the :c:type:`wchar_t` buffer *wstr*. At most *size* :c:type:`wchar_t` characters are copied (excluding a possibly trailing null termination character). Return the number of :c:type:`wchar_t` characters - copied or ``-1`` in case of an error. - - When *wstr* is ``NULL``, instead return the *size* that would be required - to store all of *unicode* including a terminating null. - - Note that the resulting :c:expr:`wchar_t*` + copied or ``-1`` in case of an error. Note that the resulting :c:expr:`wchar_t*` string may or may not be null-terminated. It is the responsibility of the caller to make sure that the :c:expr:`wchar_t*` string is null-terminated in case this is required by the application. Also, note that the :c:expr:`wchar_t*` string diff --git a/Doc/c-api/utilities.rst b/Doc/c-api/utilities.rst index 9d0abf440f791d..48ae54acebe887 100644 --- a/Doc/c-api/utilities.rst +++ b/Doc/c-api/utilities.rst @@ -20,5 +20,4 @@ and parsing function arguments and constructing Python values from C values. hash.rst reflection.rst codec.rst - time.rst perfmaps.rst diff --git a/Doc/conf.py b/Doc/conf.py index 677d139046e5d0..c2d57696aeeaa3 100644 --- a/Doc/conf.py +++ b/Doc/conf.py @@ -64,10 +64,6 @@ import patchlevel version, release = patchlevel.get_version_info() -rst_epilog = f""" -.. |python_version_literal| replace:: ``Python {version}`` -""" - # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: today = '' @@ -139,14 +135,11 @@ ('c:type', 'wchar_t'), ('c:type', '__int64'), ('c:type', 'unsigned __int64'), - ('c:type', 'double'), # Standard C structures ('c:struct', 'in6_addr'), ('c:struct', 'in_addr'), ('c:struct', 'stat'), ('c:struct', 'statvfs'), - ('c:struct', 'timeval'), - ('c:struct', 'timespec'), # Standard C macros ('c:macro', 'LLONG_MAX'), ('c:macro', 'LLONG_MIN'), @@ -276,12 +269,12 @@ ('py:meth', 'index'), # list.index, tuple.index, etc. ] -# gh-106948: Copy standard C types declared in the "c:type" domain and C -# structures declared in the "c:struct" domain to the "c:identifier" domain, -# since "c:function" markup looks for types in the "c:identifier" domain. 
Use -# list() to not iterate on items which are being added +# gh-106948: Copy standard C types declared in the "c:type" domain to the +# "c:identifier" domain, since "c:function" markup looks for types in the +# "c:identifier" domain. Use list() to not iterate on items which are being +# added for role, name in list(nitpick_ignore): - if role in ('c:type', 'c:struct'): + if role == 'c:type': nitpick_ignore.append(('c:identifier', name)) del role, name diff --git a/Doc/reference/datamodel.rst b/Doc/reference/datamodel.rst index 88bc025c7c3fb4..885ee825c12296 100644 --- a/Doc/reference/datamodel.rst +++ b/Doc/reference/datamodel.rst @@ -1134,8 +1134,6 @@ Special read-only attributes * - .. attribute:: codeobject.co_qualname - The fully qualified function name - .. versionadded:: 3.11 - * - .. attribute:: codeobject.co_argcount - The total number of positional :term:`parameters ` (including positional-only parameters and parameters with default values) diff --git a/Doc/tools/static/changelog_search.js b/Doc/tools/static/changelog_search.js index 0a77c0d71ae937..c881a9bd4c84a7 100644 --- a/Doc/tools/static/changelog_search.js +++ b/Doc/tools/static/changelog_search.js @@ -1,59 +1,53 @@ -document.addEventListener("DOMContentLoaded", function () { - // add the search form and bind the events - document - .querySelector("h1") - .insertAdjacentHTML( - "afterend", - [ - "

<p>Filter entries by content:",
-                '<input type="text" value="" id="searchbox" style="width: 50%">',
-                '<input type="submit" id="searchbox-submit" value="Filter"></p>',
-            ].join("\n"),
-        );
+$(document).ready(function() {
+    // add the search form and bind the events
+    $('h1').after([
+        '<p>Filter entries by content:',
+        '<input type="text" value="" id="searchbox" style="width: 50%">',
+        '<input type="submit" id="searchbox-submit" value="Filter"></p>
' + ].join('\n')); - function doFilter() { - let query; - try { - query = new RegExp(document.querySelector("#searchbox").value, "i"); - } catch (e) { - return; // not a valid regex (yet) - } - // find headers for the versions (What's new in Python X.Y.Z?) - const h2s = document.querySelectorAll("#changelog h2"); - for (const h2 of h2s) { - let sections_found = 0; - // find headers for the sections (Core, Library, etc.) - const h3s = h2.parentNode.querySelectorAll("h3"); - for (const h3 of h3s) { - let entries_found = 0; - // find all the entries - const lis = h3.parentNode.querySelectorAll("li"); - for (let li of lis) { - // check if the query matches the entry - if (query.test(li.textContent)) { - li.style.display = "block"; - entries_found++; - } else { - li.style.display = "none"; - } + function dofilter() { + try { + var query = new RegExp($('#searchbox').val(), 'i'); } - // if there are entries, show the section, otherwise hide it - if (entries_found > 0) { - h3.parentNode.style.display = "block"; - sections_found++; - } else { - h3.parentNode.style.display = "none"; + catch (e) { + return; // not a valid regex (yet) } - } - if (sections_found > 0) { - h2.parentNode.style.display = "block"; - } else { - h2.parentNode.style.display = "none"; - } + // find headers for the versions (What's new in Python X.Y.Z?) + $('#changelog h2').each(function(index1, h2) { + var h2_parent = $(h2).parent(); + var sections_found = 0; + // find headers for the sections (Core, Library, etc.) + h2_parent.find('h3').each(function(index2, h3) { + var h3_parent = $(h3).parent(); + var entries_found = 0; + // find all the entries + h3_parent.find('li').each(function(index3, li) { + var li = $(li); + // check if the query matches the entry + if (query.test(li.text())) { + li.show(); + entries_found++; + } + else { + li.hide(); + } + }); + // if there are entries, show the section, otherwise hide it + if (entries_found > 0) { + h3_parent.show(); + sections_found++; + } + else { + h3_parent.hide(); + } + }); + if (sections_found > 0) + h2_parent.show(); + else + h2_parent.hide(); + }); } - } - document.querySelector("#searchbox").addEventListener("keyup", doFilter); - document - .querySelector("#searchbox-submit") - .addEventListener("click", doFilter); + $('#searchbox').keyup(dofilter); + $('#searchbox-submit').click(dofilter); }); diff --git a/Doc/using/mac.rst b/Doc/using/mac.rst index e99993238895f9..eb1413af2cbc3d 100644 --- a/Doc/using/mac.rst +++ b/Doc/using/mac.rst @@ -10,46 +10,41 @@ Using Python on a Mac Python on a Mac running macOS is in principle very similar to Python on any other Unix platform, but there are a number of additional features such as -the integrated development environment (IDE) and the Package Manager that are -worth pointing out. - +the IDE and the Package Manager that are worth pointing out. .. _getting-osx: -.. _getting-and-installing-macpython: -Getting and Installing Python -============================= +Getting and Installing MacPython +================================ macOS used to come with Python 2.7 pre-installed between versions 10.8 and `12.3 `_. -You are invited to install the most recent version of Python 3 from the `Python -website `__. -A current "universal2 binary" build of Python, which runs natively on the Mac's -new Apple Silicon and legacy Intel processors, is available there. +You are invited to install the most recent version of Python 3 from the Python +website (https://www.python.org). 
A current "universal binary" build of Python, +which runs natively on the Mac's new Intel and legacy PPC CPU's, is available +there. What you get after installing is a number of things: -* A |python_version_literal| folder in your :file:`Applications` folder. In here +* A :file:`Python 3.12` folder in your :file:`Applications` folder. In here you find IDLE, the development environment that is a standard part of official - Python distributions; and :program:`Python Launcher`, which handles double-clicking Python + Python distributions; and PythonLauncher, which handles double-clicking Python scripts from the Finder. * A framework :file:`/Library/Frameworks/Python.framework`, which includes the Python executable and libraries. The installer adds this location to your shell - path. To uninstall Python, you can remove these three things. A - symlink to the Python executable is placed in :file:`/usr/local/bin/`. - -.. note:: - - On macOS 10.8-12.3, the Apple-provided build of Python is installed in - :file:`/System/Library/Frameworks/Python.framework` and :file:`/usr/bin/python`, - respectively. You should never modify or delete these, as they are - Apple-controlled and are used by Apple- or third-party software. Remember that - if you choose to install a newer Python version from python.org, you will have - two different but functional Python installations on your computer, so it will - be important that your paths and usages are consistent with what you want to do. - -IDLE includes a Help menu that allows you to access Python documentation. If you + path. To uninstall MacPython, you can simply remove these three things. A + symlink to the Python executable is placed in /usr/local/bin/. + +The Apple-provided build of Python is installed in +:file:`/System/Library/Frameworks/Python.framework` and :file:`/usr/bin/python`, +respectively. You should never modify or delete these, as they are +Apple-controlled and are used by Apple- or third-party software. Remember that +if you choose to install a newer Python version from python.org, you will have +two different but functional Python installations on your computer, so it will +be important that your paths and usages are consistent with what you want to do. + +IDLE includes a help menu that allows you to access Python documentation. If you are completely new to Python you should start reading the tutorial introduction in that document. @@ -61,29 +56,29 @@ How to run a Python script -------------------------- Your best way to get started with Python on macOS is through the IDLE -integrated development environment; see section :ref:`ide` and use the Help menu +integrated development environment, see section :ref:`ide` and use the Help menu when the IDE is running. If you want to run Python scripts from the Terminal window command line or from the Finder you first need an editor to create your script. macOS comes with a -number of standard Unix command line editors, :program:`vim` -:program:`nano` among them. If you want a more Mac-like editor, -:program:`BBEdit` from Bare Bones Software (see -https://www.barebones.com/products/bbedit/index.html) are good choices, as is -:program:`TextMate` (see https://macromates.com). Other editors include -:program:`MacVim` (https://macvim.org) and :program:`Aquamacs` -(https://aquamacs.org). +number of standard Unix command line editors, :program:`vim` and +:program:`emacs` among them. 
If you want a more Mac-like editor, +:program:`BBEdit` or :program:`TextWrangler` from Bare Bones Software (see +http://www.barebones.com/products/bbedit/index.html) are good choices, as is +:program:`TextMate` (see https://macromates.com/). Other editors include +:program:`Gvim` (https://macvim.org/macvim/) and :program:`Aquamacs` +(http://aquamacs.org/). To run your script from the Terminal window you must make sure that :file:`/usr/local/bin` is in your shell search path. To run your script from the Finder you have two options: -* Drag it to :program:`Python Launcher`. +* Drag it to :program:`PythonLauncher` -* Select :program:`Python Launcher` as the default application to open your - script (or any ``.py`` script) through the finder Info window and double-click it. - :program:`Python Launcher` has various preferences to control how your script is +* Select :program:`PythonLauncher` as the default application to open your + script (or any .py script) through the finder Info window and double-click it. + :program:`PythonLauncher` has various preferences to control how your script is launched. Option-dragging allows you to change these for one invocation, or use its Preferences menu to change things globally. @@ -108,11 +103,10 @@ Python on macOS honors all standard Unix environment variables such as :envvar:`PYTHONPATH`, but setting these variables for programs started from the Finder is non-standard as the Finder does not read your :file:`.profile` or :file:`.cshrc` at startup. You need to create a file -:file:`~/.MacOSX/environment.plist`. See Apple's -`Technical Q&A QA1067 `__ -for details. +:file:`~/.MacOSX/environment.plist`. See Apple's Technical Document QA1067 for +details. -For more information on installation Python packages, see section +For more information on installation Python packages in MacPython, see section :ref:`mac-package-manager`. @@ -121,9 +115,9 @@ For more information on installation Python packages, see section The IDE ======= -Python ships with the standard IDLE development environment. A good +MacPython ships with the standard IDLE development environment. A good introduction to using IDLE can be found at -https://www.hashcollision.org/hkn/python/idle_intro/index.html. +http://www.hashcollision.org/hkn/python/idle_intro/index.html. .. _mac-package-manager: @@ -136,10 +130,8 @@ This section has moved to the `Python Packaging User Guide`_. .. _Python Packaging User Guide: https://packaging.python.org/en/latest/tutorials/installing-packages/ -.. _gui-programming-on-the-mac: - -GUI Programming -=============== +GUI Programming on the Mac +========================== There are several options for building GUI applications on the Mac with Python. @@ -159,25 +151,20 @@ macOS. Packages and documentation are available from https://www.wxpython.org. macOS. More information can be found at https://riverbankcomputing.com/software/pyqt/intro. -*PySide* is another cross-platform Qt-based toolkit. More information at -https://www.qt.io/qt-for-python. - -.. _distributing-python-applications-on-the-mac: - -Distributing Python Applications -================================ +Distributing Python Applications on the Mac +=========================================== The standard tool for deploying standalone Python applications on the Mac is -:program:`py2app`. More information on installing and using :program:`py2app` -can be found at https://pypi.org/project/py2app/. +:program:`py2app`. 
More information on installing and using py2app can be found +at https://pypi.org/project/py2app/. Other Resources =============== -The Pythonmac-SIG mailing list is an excellent support resource for Python users -and developers on the Mac: +The MacPython mailing list is an excellent support resource for Python users and +developers on the Mac: https://www.python.org/community/sigs/current/pythonmac-sig/ diff --git a/Doc/whatsnew/3.13.rst b/Doc/whatsnew/3.13.rst index b96720df0a2f2d..1b803278ae0d5b 100644 --- a/Doc/whatsnew/3.13.rst +++ b/Doc/whatsnew/3.13.rst @@ -587,7 +587,6 @@ Tier 2 IR by Mark Shannon and Guido van Rossum. Tier 2 optimizer by Ken Jin.) - Deprecated ========== @@ -1517,21 +1516,6 @@ New Features * Add :c:func:`Py_HashPointer` function to hash a pointer. (Contributed by Victor Stinner in :gh:`111545`.) -* Add PyTime C API: - - * :c:type:`PyTime_t` type. - * :c:var:`PyTime_MIN` and :c:var:`PyTime_MAX` constants. - * :c:func:`PyTime_AsSecondsDouble` - :c:func:`PyTime_Monotonic`, :c:func:`PyTime_PerfCounter`, and - :c:func:`PyTime_Time` functions. - - (Contributed by Victor Stinner and Petr Viktorin in :gh:`110850`.) - -* Add :c:func:`PyLong_AsNativeBytes`, :c:func:`PyLong_FromNativeBytes` and - :c:func:`PyLong_FromUnsignedNativeBytes` functions to simplify converting - between native integer types and Python :class:`int` objects. - (Contributed by Steve Dower in :gh:`111140`.) - Porting to Python 3.13 ---------------------- @@ -1591,6 +1575,7 @@ Porting to Python 3.13 platforms, the ``HAVE_STDDEF_H`` macro is only defined on Windows. (Contributed by Victor Stinner in :gh:`108765`.) + Deprecated ---------- diff --git a/Include/Python.h b/Include/Python.h index 01fc45137a17bb..196751c3201e62 100644 --- a/Include/Python.h +++ b/Include/Python.h @@ -97,7 +97,6 @@ #include "weakrefobject.h" #include "structseq.h" #include "cpython/picklebufobject.h" -#include "cpython/pytime.h" #include "codecs.h" #include "pyerrors.h" #include "pythread.h" diff --git a/Include/cpython/longobject.h b/Include/cpython/longobject.h index 07251db6bcc203..fd1be29ed397d1 100644 --- a/Include/cpython/longobject.h +++ b/Include/cpython/longobject.h @@ -4,40 +4,6 @@ PyAPI_FUNC(PyObject*) PyLong_FromUnicodeObject(PyObject *u, int base); -/* PyLong_AsNativeBytes: Copy the integer value to a native variable. - buffer points to the first byte of the variable. - n_bytes is the number of bytes available in the buffer. Pass 0 to request - the required size for the value. - endianness is -1 for native endian, 0 for big endian or 1 for little. - Big endian mode will write the most significant byte into the address - directly referenced by buffer; little endian will write the least significant - byte into that address. - - If an exception is raised, returns a negative value. - Otherwise, returns the number of bytes that are required to store the value. - To check that the full value is represented, ensure that the return value is - equal or less than n_bytes. - All n_bytes are guaranteed to be written (unless an exception occurs), and - so ignoring a positive return value is the equivalent of a downcast in C. - In cases where the full value could not be represented, the returned value - may be larger than necessary - this function is not an accurate way to - calculate the bit length of an integer object. 
- */ -PyAPI_FUNC(Py_ssize_t) PyLong_AsNativeBytes(PyObject* v, void* buffer, - Py_ssize_t n_bytes, int endianness); - -/* PyLong_FromNativeBytes: Create an int value from a native integer - n_bytes is the number of bytes to read from the buffer. Passing 0 will - always produce the zero int. - PyLong_FromUnsignedNativeBytes always produces a non-negative int. - endianness is -1 for native endian, 0 for big endian or 1 for little. - - Returns the int object, or NULL with an exception set. */ -PyAPI_FUNC(PyObject*) PyLong_FromNativeBytes(const void* buffer, size_t n_bytes, - int endianness); -PyAPI_FUNC(PyObject*) PyLong_FromUnsignedNativeBytes(const void* buffer, - size_t n_bytes, int endianness); - PyAPI_FUNC(int) PyUnstable_Long_IsCompact(const PyLongObject* op); PyAPI_FUNC(Py_ssize_t) PyUnstable_Long_CompactValue(const PyLongObject* op); @@ -84,7 +50,7 @@ PyAPI_FUNC(PyObject *) _PyLong_FromByteArray( */ PyAPI_FUNC(int) _PyLong_AsByteArray(PyLongObject* v, unsigned char* bytes, size_t n, - int little_endian, int is_signed, int with_exceptions); + int little_endian, int is_signed); /* For use by the gcd function in mathmodule.c */ PyAPI_FUNC(PyObject *) _PyLong_GCD(PyObject *, PyObject *); diff --git a/Include/cpython/optimizer.h b/Include/cpython/optimizer.h index f710ca76b2ba24..3928eca583ba5b 100644 --- a/Include/cpython/optimizer.h +++ b/Include/cpython/optimizer.h @@ -71,8 +71,6 @@ typedef struct { PyAPI_FUNC(int) PyUnstable_Replace_Executor(PyCodeObject *code, _Py_CODEUNIT *instr, _PyExecutorObject *executor); -_PyOptimizerObject *_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject* optimizer); - PyAPI_FUNC(void) PyUnstable_SetOptimizer(_PyOptimizerObject* optimizer); PyAPI_FUNC(_PyOptimizerObject *) PyUnstable_GetOptimizer(void); @@ -82,6 +80,8 @@ PyAPI_FUNC(_PyExecutorObject *) PyUnstable_GetExecutor(PyCodeObject *code, int o int _PyOptimizer_Optimize(struct _PyInterpreterFrame *frame, _Py_CODEUNIT *start, PyObject **stack_pointer); +extern _PyOptimizerObject _PyOptimizer_Default; + void _Py_ExecutorInit(_PyExecutorObject *, _PyBloomFilter *); void _Py_ExecutorClear(_PyExecutorObject *); void _Py_BloomFilter_Init(_PyBloomFilter *); @@ -96,11 +96,7 @@ PyAPI_FUNC(PyObject *)PyUnstable_Optimizer_NewUOpOptimizer(void); #define OPTIMIZER_BITS_IN_COUNTER 4 /* Minimum of 16 additional executions before retry */ -#define MIN_TIER2_BACKOFF 4 -#define MAX_TIER2_BACKOFF (15 - OPTIMIZER_BITS_IN_COUNTER) -#define OPTIMIZER_BITS_MASK ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1) -/* A value <= UINT16_MAX but large enough that when shifted is > UINT16_MAX */ -#define OPTIMIZER_UNREACHABLE_THRESHOLD UINT16_MAX +#define MINIMUM_TIER2_BACKOFF 4 #define _Py_MAX_ALLOWED_BUILTINS_MODIFICATIONS 3 #define _Py_MAX_ALLOWED_GLOBALS_MODIFICATIONS 6 diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h index de7853914acb98..2354f0cce3396e 100644 --- a/Include/cpython/pystats.h +++ b/Include/cpython/pystats.h @@ -121,9 +121,6 @@ typedef struct _optimization_stats { uint64_t trace_length_hist[_Py_UOP_HIST_SIZE]; uint64_t trace_run_length_hist[_Py_UOP_HIST_SIZE]; uint64_t optimized_trace_length_hist[_Py_UOP_HIST_SIZE]; - uint64_t optimizer_attempts; - uint64_t optimizer_successes; - uint64_t optimizer_failure_reason_no_memory; } OptimizationStats; typedef struct _rare_event_stats { diff --git a/Include/cpython/pytime.h b/Include/cpython/pytime.h deleted file mode 100644 index d8244700d614ce..00000000000000 --- a/Include/cpython/pytime.h +++ /dev/null @@ -1,23 +0,0 @@ -// PyTime_t C API: see 
Doc/c-api/time.rst for the documentation. - -#ifndef Py_LIMITED_API -#ifndef Py_PYTIME_H -#define Py_PYTIME_H -#ifdef __cplusplus -extern "C" { -#endif - -typedef int64_t PyTime_t; -#define PyTime_MIN INT64_MIN -#define PyTime_MAX INT64_MAX - -PyAPI_FUNC(double) PyTime_AsSecondsDouble(PyTime_t t); -PyAPI_FUNC(int) PyTime_Monotonic(PyTime_t *result); -PyAPI_FUNC(int) PyTime_PerfCounter(PyTime_t *result); -PyAPI_FUNC(int) PyTime_Time(PyTime_t *result); - -#ifdef __cplusplus -} -#endif -#endif /* Py_PYTIME_H */ -#endif /* Py_LIMITED_API */ diff --git a/Include/internal/pycore_global_objects_fini_generated.h b/Include/internal/pycore_global_objects_fini_generated.h index 11755210d65432..932738c3049882 100644 --- a/Include/internal/pycore_global_objects_fini_generated.h +++ b/Include/internal/pycore_global_objects_fini_generated.h @@ -883,7 +883,6 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(defaultaction)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(delete)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(depth)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(desired_access)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(detect_types)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(deterministic)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(device)); @@ -974,7 +973,6 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(groups)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(h)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(handle)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(handle_seq)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(hash_name)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(header)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(headers)); @@ -992,12 +990,9 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(indexgroup)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inf)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(infer_variance)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inherit_handle)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inheritable)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_bytes)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_owner)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_state)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initial_value)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(initval)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(inner_size)); @@ -1053,7 +1048,6 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(locals)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(logoption)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(loop)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(manual_reset)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mapping)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(match)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(max_length)); @@ -1070,7 +1064,6 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(metadata)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(method)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(microsecond)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(milliseconds)); _PyStaticObject_CheckRefcnt((PyObject 
*)&_Py_ID(minute)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mod)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mode)); @@ -1080,7 +1073,6 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(month)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mro)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(msg)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mutex)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(mycmp)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(n_arg)); @@ -1184,7 +1176,6 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(sched_priority)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(scheduler)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(second)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(security_attributes)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(seek)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(seekable)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(selectors)); @@ -1272,7 +1263,6 @@ _PyStaticObjects_CheckRefcnt(PyInterpreterState *interp) { _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(values)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(version)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(volume)); - _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(wait_all)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(warnings)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(warnoptions)); _PyStaticObject_CheckRefcnt((PyObject *)&_Py_ID(wbits)); diff --git a/Include/internal/pycore_global_strings.h b/Include/internal/pycore_global_strings.h index 576ac703ca1508..da62b4f0a951ff 100644 --- a/Include/internal/pycore_global_strings.h +++ b/Include/internal/pycore_global_strings.h @@ -372,7 +372,6 @@ struct _Py_global_strings { STRUCT_FOR_ID(defaultaction) STRUCT_FOR_ID(delete) STRUCT_FOR_ID(depth) - STRUCT_FOR_ID(desired_access) STRUCT_FOR_ID(detect_types) STRUCT_FOR_ID(deterministic) STRUCT_FOR_ID(device) @@ -463,7 +462,6 @@ struct _Py_global_strings { STRUCT_FOR_ID(groups) STRUCT_FOR_ID(h) STRUCT_FOR_ID(handle) - STRUCT_FOR_ID(handle_seq) STRUCT_FOR_ID(hash_name) STRUCT_FOR_ID(header) STRUCT_FOR_ID(headers) @@ -481,12 +479,9 @@ struct _Py_global_strings { STRUCT_FOR_ID(indexgroup) STRUCT_FOR_ID(inf) STRUCT_FOR_ID(infer_variance) - STRUCT_FOR_ID(inherit_handle) STRUCT_FOR_ID(inheritable) STRUCT_FOR_ID(initial) STRUCT_FOR_ID(initial_bytes) - STRUCT_FOR_ID(initial_owner) - STRUCT_FOR_ID(initial_state) STRUCT_FOR_ID(initial_value) STRUCT_FOR_ID(initval) STRUCT_FOR_ID(inner_size) @@ -542,7 +537,6 @@ struct _Py_global_strings { STRUCT_FOR_ID(locals) STRUCT_FOR_ID(logoption) STRUCT_FOR_ID(loop) - STRUCT_FOR_ID(manual_reset) STRUCT_FOR_ID(mapping) STRUCT_FOR_ID(match) STRUCT_FOR_ID(max_length) @@ -559,7 +553,6 @@ struct _Py_global_strings { STRUCT_FOR_ID(metadata) STRUCT_FOR_ID(method) STRUCT_FOR_ID(microsecond) - STRUCT_FOR_ID(milliseconds) STRUCT_FOR_ID(minute) STRUCT_FOR_ID(mod) STRUCT_FOR_ID(mode) @@ -569,7 +562,6 @@ struct _Py_global_strings { STRUCT_FOR_ID(month) STRUCT_FOR_ID(mro) STRUCT_FOR_ID(msg) - STRUCT_FOR_ID(mutex) STRUCT_FOR_ID(mycmp) STRUCT_FOR_ID(n) STRUCT_FOR_ID(n_arg) @@ -673,7 +665,6 @@ struct _Py_global_strings { STRUCT_FOR_ID(sched_priority) STRUCT_FOR_ID(scheduler) STRUCT_FOR_ID(second) - STRUCT_FOR_ID(security_attributes) STRUCT_FOR_ID(seek) STRUCT_FOR_ID(seekable) STRUCT_FOR_ID(selectors) @@ -761,7 +752,6 @@ struct 
_Py_global_strings { STRUCT_FOR_ID(values) STRUCT_FOR_ID(version) STRUCT_FOR_ID(volume) - STRUCT_FOR_ID(wait_all) STRUCT_FOR_ID(warnings) STRUCT_FOR_ID(warnoptions) STRUCT_FOR_ID(wbits) diff --git a/Include/internal/pycore_interp.h b/Include/internal/pycore_interp.h index c244d8966f238b..31d88071e19d0c 100644 --- a/Include/internal/pycore_interp.h +++ b/Include/internal/pycore_interp.h @@ -112,7 +112,7 @@ struct _is { /* The thread currently executing in the __main__ module, if any. */ PyThreadState *main; /* Used in Modules/_threadmodule.c. */ - Py_ssize_t count; + long count; /* Support for runtime thread stack size tuning. A value of 0 means using the platform's default stack size or the size specified by the THREAD_STACK_SIZE macro. */ @@ -239,10 +239,8 @@ struct _is { struct callable_cache callable_cache; _PyOptimizerObject *optimizer; _PyExecutorObject *executor_list_head; - /* These values are shifted and offset to speed up check in JUMP_BACKWARD */ - uint32_t optimizer_resume_threshold; - uint32_t optimizer_backedge_threshold; - + uint16_t optimizer_resume_threshold; + uint16_t optimizer_backedge_threshold; uint32_t next_func_version; _rare_events rare_events; PyDict_WatchCallback builtins_dict_watcher; diff --git a/Include/internal/pycore_opcode_metadata.h b/Include/internal/pycore_opcode_metadata.h index 6b60a6fbffdc5e..75d7f44025328e 100644 --- a/Include/internal/pycore_opcode_metadata.h +++ b/Include/internal/pycore_opcode_metadata.h @@ -1094,7 +1094,7 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[268] = { [MATCH_KEYS] = { true, INSTR_FMT_IX, HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, [MATCH_MAPPING] = { true, INSTR_FMT_IX, 0 }, [MATCH_SEQUENCE] = { true, INSTR_FMT_IX, 0 }, - [NOP] = { true, INSTR_FMT_IX, HAS_PURE_FLAG }, + [NOP] = { true, INSTR_FMT_IX, 0 }, [POP_EXCEPT] = { true, INSTR_FMT_IX, HAS_ESCAPES_FLAG }, [POP_JUMP_IF_FALSE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, [POP_JUMP_IF_NONE] = { true, INSTR_FMT_IBC, HAS_ARG_FLAG | HAS_JUMP_FLAG }, @@ -1156,10 +1156,10 @@ const struct opcode_metadata _PyOpcode_opcode_metadata[268] = { [LOAD_SUPER_METHOD] = { true, -1, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, [LOAD_ZERO_SUPER_ATTR] = { true, -1, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, [LOAD_ZERO_SUPER_METHOD] = { true, -1, HAS_ARG_FLAG | HAS_NAME_FLAG | HAS_ERROR_FLAG | HAS_ESCAPES_FLAG }, - [POP_BLOCK] = { true, -1, HAS_PURE_FLAG }, - [SETUP_CLEANUP] = { true, -1, HAS_PURE_FLAG | HAS_ARG_FLAG }, - [SETUP_FINALLY] = { true, -1, HAS_PURE_FLAG | HAS_ARG_FLAG }, - [SETUP_WITH] = { true, -1, HAS_PURE_FLAG | HAS_ARG_FLAG }, + [POP_BLOCK] = { true, -1, 0 }, + [SETUP_CLEANUP] = { true, -1, HAS_ARG_FLAG }, + [SETUP_FINALLY] = { true, -1, HAS_ARG_FLAG }, + [SETUP_WITH] = { true, -1, HAS_ARG_FLAG }, [STORE_FAST_MAYBE_NULL] = { true, -1, HAS_ARG_FLAG | HAS_LOCAL_FLAG }, }; #endif diff --git a/Include/internal/pycore_optimizer.h b/Include/internal/pycore_optimizer.h index eee71c700d4904..e21412fc815540 100644 --- a/Include/internal/pycore_optimizer.h +++ b/Include/internal/pycore_optimizer.h @@ -8,13 +8,6 @@ extern "C" { # error "this header requires Py_BUILD_CORE define" #endif -#include "pycore_uop_ids.h" - -// This is the length of the trace we project initially. 
-#define UOP_MAX_TRACE_LENGTH 512 - -#define TRACE_STACK_SIZE 5 - int _Py_uop_analyze_and_optimize(_PyInterpreterFrame *frame, _PyUOpInstruction *trace, int trace_len, int curr_stackentries, _PyBloomFilter *dependencies); diff --git a/Include/internal/pycore_runtime_init_generated.h b/Include/internal/pycore_runtime_init_generated.h index e682c97e7c0248..68fbbcb4378e17 100644 --- a/Include/internal/pycore_runtime_init_generated.h +++ b/Include/internal/pycore_runtime_init_generated.h @@ -881,7 +881,6 @@ extern "C" { INIT_ID(defaultaction), \ INIT_ID(delete), \ INIT_ID(depth), \ - INIT_ID(desired_access), \ INIT_ID(detect_types), \ INIT_ID(deterministic), \ INIT_ID(device), \ @@ -972,7 +971,6 @@ extern "C" { INIT_ID(groups), \ INIT_ID(h), \ INIT_ID(handle), \ - INIT_ID(handle_seq), \ INIT_ID(hash_name), \ INIT_ID(header), \ INIT_ID(headers), \ @@ -990,12 +988,9 @@ extern "C" { INIT_ID(indexgroup), \ INIT_ID(inf), \ INIT_ID(infer_variance), \ - INIT_ID(inherit_handle), \ INIT_ID(inheritable), \ INIT_ID(initial), \ INIT_ID(initial_bytes), \ - INIT_ID(initial_owner), \ - INIT_ID(initial_state), \ INIT_ID(initial_value), \ INIT_ID(initval), \ INIT_ID(inner_size), \ @@ -1051,7 +1046,6 @@ extern "C" { INIT_ID(locals), \ INIT_ID(logoption), \ INIT_ID(loop), \ - INIT_ID(manual_reset), \ INIT_ID(mapping), \ INIT_ID(match), \ INIT_ID(max_length), \ @@ -1068,7 +1062,6 @@ extern "C" { INIT_ID(metadata), \ INIT_ID(method), \ INIT_ID(microsecond), \ - INIT_ID(milliseconds), \ INIT_ID(minute), \ INIT_ID(mod), \ INIT_ID(mode), \ @@ -1078,7 +1071,6 @@ extern "C" { INIT_ID(month), \ INIT_ID(mro), \ INIT_ID(msg), \ - INIT_ID(mutex), \ INIT_ID(mycmp), \ INIT_ID(n), \ INIT_ID(n_arg), \ @@ -1182,7 +1174,6 @@ extern "C" { INIT_ID(sched_priority), \ INIT_ID(scheduler), \ INIT_ID(second), \ - INIT_ID(security_attributes), \ INIT_ID(seek), \ INIT_ID(seekable), \ INIT_ID(selectors), \ @@ -1270,7 +1261,6 @@ extern "C" { INIT_ID(values), \ INIT_ID(version), \ INIT_ID(volume), \ - INIT_ID(wait_all), \ INIT_ID(warnings), \ INIT_ID(warnoptions), \ INIT_ID(wbits), \ diff --git a/Include/internal/pycore_time.h b/Include/internal/pycore_time.h index 1aad6ccea69ae3..dabbd7b41556cd 100644 --- a/Include/internal/pycore_time.h +++ b/Include/internal/pycore_time.h @@ -1,39 +1,34 @@ -// Internal PyTime_t C API: see Doc/c-api/time.rst for the documentation. +// The _PyTime_t API is written to use timestamp and timeout values stored in +// various formats and to read clocks. // -// The PyTime_t type is an integer to support directly common arithmetic -// operations such as t1 + t2. +// The _PyTime_t type is an integer to support directly common arithmetic +// operations like t1 + t2. // -// Time formats: +// The _PyTime_t API supports a resolution of 1 nanosecond. The _PyTime_t type +// is signed to support negative timestamps. The supported range is around +// [-292.3 years; +292.3 years]. Using the Unix epoch (January 1st, 1970), the +// supported date range is around [1677-09-21; 2262-04-11]. // -// * Seconds. -// * Seconds as a floating point number (C double). -// * Milliseconds (10^-3 seconds). -// * Microseconds (10^-6 seconds). -// * 100 nanoseconds (10^-7 seconds), used on Windows. -// * Nanoseconds (10^-9 seconds). -// * timeval structure, 1 microsecond (10^-6 seconds). -// * timespec structure, 1 nanosecond (10^-9 seconds). +// Formats: // -// Note that PyTime_t is now specified as int64_t, in nanoseconds. -// (If we need to change this, we'll need new public API with new names.) 
-// Previously, PyTime_t was configurable (in theory); some comments and code -// might still allude to that. +// * seconds +// * seconds as a floating pointer number (C double) +// * milliseconds (10^-3 seconds) +// * microseconds (10^-6 seconds) +// * 100 nanoseconds (10^-7 seconds) +// * nanoseconds (10^-9 seconds) +// * timeval structure, 1 microsecond resolution (10^-6 seconds) +// * timespec structure, 1 nanosecond resolution (10^-9 seconds) // // Integer overflows are detected and raise OverflowError. Conversion to a -// resolution larger than 1 nanosecond is rounded correctly with the requested -// rounding mode. Available rounding modes: +// resolution worse than 1 nanosecond is rounded correctly with the requested +// rounding mode. There are 4 rounding modes: floor (towards -inf), ceiling +// (towards +inf), half even and up (away from zero). // -// * Round towards minus infinity (-inf). For example, used to read a clock. -// * Round towards infinity (+inf). For example, used for timeout to wait "at -// least" N seconds. -// * Round to nearest with ties going to nearest even integer. For example, used -// to round from a Python float. -// * Round away from zero. For example, used for timeout. -// -// Some functions clamp the result in the range [PyTime_MIN; PyTime_MAX]. The -// caller doesn't have to handle errors and so doesn't need to hold the GIL to -// handle exceptions. For example, _PyTime_Add(t1, t2) computes t1+t2 and -// clamps the result on overflow. +// Some functions clamp the result in the range [_PyTime_MIN; _PyTime_MAX], so +// the caller doesn't have to handle errors and doesn't need to hold the GIL. +// For example, _PyTime_Add(t1, t2) computes t1+t2 and clamp the result on +// overflow. // // Clocks: // @@ -41,11 +36,10 @@ // * Monotonic clock // * Performance counter // -// Internally, operations like (t * k / q) with integers are implemented in a -// way to reduce the risk of integer overflow. Such operation is used to convert a -// clock value expressed in ticks with a frequency to PyTime_t, like -// QueryPerformanceCounter() with QueryPerformanceFrequency() on Windows. - +// Operations like (t * k / q) with integers are implemented in a way to reduce +// the risk of integer overflow. Such operation is used to convert a clock +// value expressed in ticks with a frequency to _PyTime_t, like +// QueryPerformanceCounter() with QueryPerformanceFrequency(). #ifndef Py_INTERNAL_TIME_H #define Py_INTERNAL_TIME_H @@ -62,7 +56,14 @@ extern "C" { struct timeval; #endif -typedef PyTime_t _PyTime_t; +// _PyTime_t: Python timestamp with subsecond precision. It can be used to +// store a duration, and so indirectly a date (related to another date, like +// UNIX epoch). +typedef int64_t _PyTime_t; +// _PyTime_MIN nanoseconds is around -292.3 years +#define _PyTime_MIN INT64_MIN +// _PyTime_MAX nanoseconds is around +292.3 years +#define _PyTime_MAX INT64_MAX #define _SIZEOF_PYTIME_T 8 typedef enum { @@ -146,7 +147,7 @@ PyAPI_FUNC(_PyTime_t) _PyTime_FromSecondsDouble(double seconds, _PyTime_round_t PyAPI_FUNC(_PyTime_t) _PyTime_FromNanoseconds(_PyTime_t ns); // Create a timestamp from a number of microseconds. -// Clamp to [PyTime_MIN; PyTime_MAX] on overflow. +// Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. extern _PyTime_t _PyTime_FromMicrosecondsClamp(_PyTime_t us); // Create a timestamp from nanoseconds (Python int). 
@@ -168,6 +169,10 @@ PyAPI_FUNC(int) _PyTime_FromMillisecondsObject(_PyTime_t *t, PyObject *obj, _PyTime_round_t round); +// Convert a timestamp to a number of seconds as a C double. +// Export for '_socket' shared extension. +PyAPI_FUNC(double) _PyTime_AsSecondsDouble(_PyTime_t t); + // Convert timestamp to a number of milliseconds (10^-3 seconds). // Export for '_ssl' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_AsMilliseconds(_PyTime_t t, @@ -178,6 +183,9 @@ PyAPI_FUNC(_PyTime_t) _PyTime_AsMilliseconds(_PyTime_t t, PyAPI_FUNC(_PyTime_t) _PyTime_AsMicroseconds(_PyTime_t t, _PyTime_round_t round); +// Convert timestamp to a number of nanoseconds (10^-9 seconds). +extern _PyTime_t _PyTime_AsNanoseconds(_PyTime_t t); + #ifdef MS_WINDOWS // Convert timestamp to a number of 100 nanoseconds (10^-7 seconds). extern _PyTime_t _PyTime_As100Nanoseconds(_PyTime_t t, @@ -242,7 +250,7 @@ PyAPI_FUNC(void) _PyTime_AsTimespec_clamp(_PyTime_t t, struct timespec *ts); #endif -// Compute t1 + t2. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. +// Compute t1 + t2. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. extern _PyTime_t _PyTime_Add(_PyTime_t t1, _PyTime_t t2); // Structure used by time.get_clock_info() @@ -259,8 +267,7 @@ typedef struct { // On integer overflow, silently ignore the overflow and clamp the clock to // [_PyTime_MIN; _PyTime_MAX]. // -// Use _PyTime_GetSystemClockWithInfo or the public PyTime_Time() to check -// for failure. +// Use _PyTime_GetSystemClockWithInfo() to check for failure. // Export for '_random' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_GetSystemClock(void); @@ -280,8 +287,7 @@ extern int _PyTime_GetSystemClockWithInfo( // On integer overflow, silently ignore the overflow and clamp the clock to // [_PyTime_MIN; _PyTime_MAX]. // -// Use _PyTime_GetMonotonicClockWithInfo or the public PyTime_Monotonic() -// to check for failure. +// Use _PyTime_GetMonotonicClockWithInfo() to check for failure. // Export for '_random' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_GetMonotonicClock(void); @@ -316,12 +322,10 @@ PyAPI_FUNC(int) _PyTime_gmtime(time_t t, struct tm *tm); // On integer overflow, silently ignore the overflow and clamp the clock to // [_PyTime_MIN; _PyTime_MAX]. // -// Use _PyTime_GetPerfCounterWithInfo() or the public PyTime_PerfCounter -// to check for failure. +// Use _PyTime_GetPerfCounterWithInfo() to check for failure. // Export for '_lsprof' shared extension. PyAPI_FUNC(_PyTime_t) _PyTime_GetPerfCounter(void); - // Get the performance counter: clock with the highest available resolution to // measure a short duration. // @@ -332,13 +336,6 @@ extern int _PyTime_GetPerfCounterWithInfo( _PyTime_t *t, _Py_clock_info_t *info); -// Alias for backward compatibility -#define _PyTime_MIN PyTime_MIN -#define _PyTime_MAX PyTime_MAX -#define _PyTime_AsSecondsDouble PyTime_AsSecondsDouble - - -// --- _PyDeadline ----------------------------------------------------------- // Create a deadline. // Pseudo code: _PyTime_GetMonotonicClock() + timeout. 
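
The pycore_time.h comments restored above spell out the design: timestamps are plain int64 nanosecond counts, conversions take an explicit rounding mode, and arithmetic clamps instead of raising. A minimal sketch of how the documented "deadline = _PyTime_GetMonotonicClock() + timeout" pseudo code composes from these helpers is shown below. The helper name compute_deadline and the _PyTime_ROUND_CEILING constant are illustrative assumptions not taken from this diff, and the code presumes a Py_BUILD_CORE translation unit where this internal header is visible; _PyTime_FromSecondsDouble, _PyTime_GetMonotonicClock and _PyTime_Add are used with the signatures shown in the hunks above.

    #include "Python.h"
    #include "pycore_time.h"   /* internal header; only available with Py_BUILD_CORE */

    /* Sketch: turn a timeout in seconds into a monotonic-clock deadline,
     * mirroring the "_PyTime_GetMonotonicClock() + timeout" pseudo code. */
    static _PyTime_t
    compute_deadline(double timeout_seconds)
    {
        /* Round towards +infinity so the caller waits "at least" the
         * requested time (_PyTime_ROUND_CEILING is assumed here; the
         * rounding constants themselves are not part of this hunk). */
        _PyTime_t timeout = _PyTime_FromSecondsDouble(timeout_seconds,
                                                      _PyTime_ROUND_CEILING);
        /* _PyTime_Add() clamps to [_PyTime_MIN; _PyTime_MAX] on overflow,
         * so no error check (and no GIL) is needed. */
        return _PyTime_Add(_PyTime_GetMonotonicClock(), timeout);
    }

Because both the clock read and the addition clamp on overflow rather than set an exception, the result can be compared against later _PyTime_GetMonotonicClock() reads in a wait loop without any error handling.
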
diff --git a/Include/internal/pycore_unicodeobject_generated.h b/Include/internal/pycore_unicodeobject_generated.h index 739af0e73c23ff..c8458b4e36ccc9 100644 --- a/Include/internal/pycore_unicodeobject_generated.h +++ b/Include/internal/pycore_unicodeobject_generated.h @@ -957,9 +957,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(depth); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(desired_access); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(detect_types); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1230,9 +1227,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(handle); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(handle_seq); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(hash_name); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1284,9 +1278,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(infer_variance); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(inherit_handle); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(inheritable); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1296,12 +1287,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(initial_bytes); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(initial_owner); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(initial_state); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(initial_value); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1467,9 +1452,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(loop); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(manual_reset); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(mapping); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1518,9 +1500,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(microsecond); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(milliseconds); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(minute); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1548,9 +1527,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(msg); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(mutex); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(mycmp); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -1860,9 
+1836,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(second); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(security_attributes); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(seek); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); @@ -2124,9 +2097,6 @@ _PyUnicode_InitStaticStrings(PyInterpreterState *interp) { string = &_Py_ID(volume); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); - string = &_Py_ID(wait_all); - assert(_PyUnicode_CheckConsistency(string, 1)); - _PyUnicode_InternInPlace(interp, &string); string = &_Py_ID(warnings); assert(_PyUnicode_CheckConsistency(string, 1)); _PyUnicode_InternInPlace(interp, &string); diff --git a/Include/internal/pycore_uop_ids.h b/Include/internal/pycore_uop_ids.h index 9bb537d355055d..b2476e1c6e5c4b 100644 --- a/Include/internal/pycore_uop_ids.h +++ b/Include/internal/pycore_uop_ids.h @@ -237,8 +237,7 @@ extern "C" { #define _CHECK_GLOBALS 384 #define _CHECK_BUILTINS 385 #define _INTERNAL_INCREMENT_OPT_COUNTER 386 -#define _CHECK_VALIDITY_AND_SET_IP 387 -#define MAX_UOP_ID 387 +#define MAX_UOP_ID 386 #ifdef __cplusplus } diff --git a/Include/internal/pycore_uop_metadata.h b/Include/internal/pycore_uop_metadata.h index 163a0320aa2298..2b5b37e6b8d6a4 100644 --- a/Include/internal/pycore_uop_metadata.h +++ b/Include/internal/pycore_uop_metadata.h @@ -16,7 +16,7 @@ extern const char * const _PyOpcode_uop_name[MAX_UOP_ID+1]; #ifdef NEED_OPCODE_METADATA const uint16_t _PyUop_Flags[MAX_UOP_ID+1] = { - [_NOP] = HAS_PURE_FLAG, + [_NOP] = 0, [_RESUME_CHECK] = HAS_DEOPT_FLAG, [_LOAD_FAST_CHECK] = HAS_ARG_FLAG | HAS_LOCAL_FLAG | HAS_ERROR_FLAG, [_LOAD_FAST] = HAS_ARG_FLAG | HAS_LOCAL_FLAG | HAS_PURE_FLAG, @@ -198,18 +198,17 @@ const uint16_t _PyUop_Flags[MAX_UOP_ID+1] = { [_GUARD_IS_NONE_POP] = HAS_DEOPT_FLAG, [_GUARD_IS_NOT_NONE_POP] = HAS_DEOPT_FLAG, [_JUMP_TO_TOP] = HAS_EVAL_BREAK_FLAG, - [_SET_IP] = 0, + [_SET_IP] = HAS_ARG_FLAG | HAS_ESCAPES_FLAG, [_SAVE_RETURN_OFFSET] = HAS_ARG_FLAG, [_EXIT_TRACE] = HAS_DEOPT_FLAG, [_CHECK_VALIDITY] = HAS_DEOPT_FLAG, - [_LOAD_CONST_INLINE] = HAS_PURE_FLAG, - [_LOAD_CONST_INLINE_BORROW] = HAS_PURE_FLAG, - [_LOAD_CONST_INLINE_WITH_NULL] = HAS_PURE_FLAG, - [_LOAD_CONST_INLINE_BORROW_WITH_NULL] = HAS_PURE_FLAG, + [_LOAD_CONST_INLINE] = 0, + [_LOAD_CONST_INLINE_BORROW] = 0, + [_LOAD_CONST_INLINE_WITH_NULL] = 0, + [_LOAD_CONST_INLINE_BORROW_WITH_NULL] = 0, [_CHECK_GLOBALS] = HAS_DEOPT_FLAG, [_CHECK_BUILTINS] = HAS_DEOPT_FLAG, [_INTERNAL_INCREMENT_OPT_COUNTER] = 0, - [_CHECK_VALIDITY_AND_SET_IP] = HAS_DEOPT_FLAG, }; const char *const _PyOpcode_uop_name[MAX_UOP_ID+1] = { @@ -265,7 +264,6 @@ const char *const _PyOpcode_uop_name[MAX_UOP_ID+1] = { [_CHECK_PEP_523] = "_CHECK_PEP_523", [_CHECK_STACK_SPACE] = "_CHECK_STACK_SPACE", [_CHECK_VALIDITY] = "_CHECK_VALIDITY", - [_CHECK_VALIDITY_AND_SET_IP] = "_CHECK_VALIDITY_AND_SET_IP", [_COMPARE_OP] = "_COMPARE_OP", [_COMPARE_OP_FLOAT] = "_COMPARE_OP_FLOAT", [_COMPARE_OP_INT] = "_COMPARE_OP_INT", diff --git a/Lib/logging/config.py b/Lib/logging/config.py index ea37dd7544564a..de06090942d965 100644 --- a/Lib/logging/config.py +++ b/Lib/logging/config.py @@ -667,9 +667,10 @@ def configure_formatter(self, config): except TypeError as te: if "'format'" not in str(te): raise - # logging.Formatter and its subclasses expect the `fmt` - # parameter 
instead of `format`. Retry passing configuration - # with `fmt`. + #Name of parameter changed from fmt to format. + #Retry with old name. + #This is so that code can be used with older Python versions + #(e.g. by Django) config['fmt'] = config.pop('format') config['()'] = factory result = self.configure_custom(config) diff --git a/Lib/multiprocessing/connection.py b/Lib/multiprocessing/connection.py index 58d697fdecacc0..c6a66a1bc963c3 100644 --- a/Lib/multiprocessing/connection.py +++ b/Lib/multiprocessing/connection.py @@ -1011,20 +1011,8 @@ def _exhaustive_wait(handles, timeout): # returning the first signalled might create starvation issues.) L = list(handles) ready = [] - # Windows limits WaitForMultipleObjects at 64 handles, and we use a - # few for synchronisation, so we switch to batched waits at 60. - if len(L) > 60: - try: - res = _winapi.BatchedWaitForMultipleObjects(L, False, timeout) - except TimeoutError: - return [] - ready.extend(L[i] for i in res) - if res: - L = [h for i, h in enumerate(L) if i > res[0] & i not in res] - timeout = 0 while L: - short_L = L[:60] if len(L) > 60 else L - res = _winapi.WaitForMultipleObjects(short_L, False, timeout) + res = _winapi.WaitForMultipleObjects(L, False, timeout) if res == WAIT_TIMEOUT: break elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): diff --git a/Lib/pydoc.py b/Lib/pydoc.py index 6d145abda9d4ab..17f7346e5cc619 100755 --- a/Lib/pydoc.py +++ b/Lib/pydoc.py @@ -552,7 +552,7 @@ def getdocloc(self, object, basedir=sysconfig.get_path('stdlib')): '_thread', 'zipimport') or (file.startswith(basedir) and not file.startswith(os.path.join(basedir, 'site-packages')))) and - object.__name__ not in ('xml.etree', 'test.test_pydoc.pydoc_mod')): + object.__name__ not in ('xml.etree', 'test.pydoc_mod')): if docloc.startswith(("http://", "https://")): docloc = "{}/{}.html".format(docloc.rstrip("/"), object.__name__.lower()) else: diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py index 94ce85cac754ae..c0d3ca50f17d69 100644 --- a/Lib/test/_test_multiprocessing.py +++ b/Lib/test/_test_multiprocessing.py @@ -6113,24 +6113,6 @@ def test_spawn_sys_executable_none_allows_import(self): self.assertEqual(rc, 0) self.assertFalse(err, msg=err.decode('utf-8')) - def test_large_pool(self): - # - # gh-89240: Check that large pools are always okay - # - testfn = os_helper.TESTFN - self.addCleanup(os_helper.unlink, testfn) - with open(testfn, 'w', encoding='utf-8') as f: - f.write(textwrap.dedent('''\ - import multiprocessing - def f(x): return x*x - if __name__ == '__main__': - with multiprocessing.Pool(200) as p: - print(sum(p.map(f, range(1000)))) - ''')) - rc, out, err = script_helper.assert_python_ok(testfn) - self.assertEqual("332833500", out.decode('utf-8').strip()) - self.assertFalse(err, msg=err.decode('utf-8')) - # # Mixins diff --git a/Lib/test/libregrtest/findtests.py b/Lib/test/libregrtest/findtests.py index 4ac95e23a56b8f..ee890b5b1db4cd 100644 --- a/Lib/test/libregrtest/findtests.py +++ b/Lib/test/libregrtest/findtests.py @@ -23,7 +23,6 @@ "test_future_stmt", "test_gdb", "test_inspect", - "test_pydoc", "test_multiprocessing_fork", "test_multiprocessing_forkserver", "test_multiprocessing_spawn", diff --git a/Lib/test/test_pydoc/pydoc_mod.py b/Lib/test/pydoc_mod.py similarity index 100% rename from Lib/test/test_pydoc/pydoc_mod.py rename to Lib/test/pydoc_mod.py diff --git a/Lib/test/test_pydoc/pydocfodder.py b/Lib/test/pydocfodder.py similarity index 100% rename from Lib/test/test_pydoc/pydocfodder.py rename to 
Lib/test/pydocfodder.py diff --git a/Lib/test/test_capi/test_long.py b/Lib/test/test_capi/test_long.py index fc82cbfa66ea7a..8e3ef25d1ff86f 100644 --- a/Lib/test/test_capi/test_long.py +++ b/Lib/test/test_capi/test_long.py @@ -1,6 +1,5 @@ import unittest import sys -import test.support as support from test.support import import_helper @@ -424,150 +423,6 @@ def test_long_asvoidptr(self): self.assertRaises(OverflowError, asvoidptr, -2**1000) # CRASHES asvoidptr(NULL) - def test_long_asnativebytes(self): - import math - from _testcapi import ( - pylong_asnativebytes as asnativebytes, - SIZE_MAX, - ) - - # Abbreviate sizeof(Py_ssize_t) to SZ because we use it a lot - SZ = int(math.ceil(math.log(SIZE_MAX + 1) / math.log(2)) / 8) - MAX_SSIZE = 2 ** (SZ * 8 - 1) - 1 - MAX_USIZE = 2 ** (SZ * 8) - 1 - if support.verbose: - print(f"SIZEOF_SIZE={SZ}\n{MAX_SSIZE=:016X}\n{MAX_USIZE=:016X}") - - # These tests check that the requested buffer size is correct - for v, expect in [ - (0, SZ), - (512, SZ), - (-512, SZ), - (MAX_SSIZE, SZ), - (MAX_USIZE, SZ + 1), - (-MAX_SSIZE, SZ), - (-MAX_USIZE, SZ + 1), - (2**255-1, 32), - (-(2**255-1), 32), - (2**256-1, 33), - (-(2**256-1), 33), - ]: - with self.subTest(f"sizeof-{v:X}"): - buffer = bytearray(1) - self.assertEqual(expect, asnativebytes(v, buffer, 0, -1), - "PyLong_AsNativeBytes(v, NULL, 0, -1)") - # Also check via the __index__ path - self.assertEqual(expect, asnativebytes(Index(v), buffer, 0, -1), - "PyLong_AsNativeBytes(Index(v), NULL, 0, -1)") - - # We request as many bytes as `expect_be` contains, and always check - # the result (both big and little endian). We check the return value - # independently, since the buffer should always be filled correctly even - # if we need more bytes - for v, expect_be, expect_n in [ - (0, b'\x00', 1), - (0, b'\x00' * 2, 2), - (0, b'\x00' * 8, min(8, SZ)), - (1, b'\x01', 1), - (1, b'\x00' * 10 + b'\x01', min(11, SZ)), - (42, b'\x2a', 1), - (42, b'\x00' * 10 + b'\x2a', min(11, SZ)), - (-1, b'\xff', 1), - (-1, b'\xff' * 10, min(11, SZ)), - (-42, b'\xd6', 1), - (-42, b'\xff' * 10 + b'\xd6', min(11, SZ)), - # Extracts 255 into a single byte, but requests sizeof(Py_ssize_t) - (255, b'\xff', SZ), - (255, b'\x00\xff', 2), - (256, b'\x01\x00', 2), - # Extracts successfully (unsigned), but requests 9 bytes - (2**63, b'\x80' + b'\x00' * 7, 9), - # "Extracts", but requests 9 bytes - (-2**63, b'\x80' + b'\x00' * 7, 9), - (2**63, b'\x00\x80' + b'\x00' * 7, 9), - (-2**63, b'\xff\x80' + b'\x00' * 7, 9), - - (2**255-1, b'\x7f' + b'\xff' * 31, 32), - (-(2**255-1), b'\x80' + b'\x00' * 30 + b'\x01', 32), - # Request extra bytes, but result says we only needed 32 - (-(2**255-1), b'\xff\x80' + b'\x00' * 30 + b'\x01', 32), - (-(2**255-1), b'\xff\xff\x80' + b'\x00' * 30 + b'\x01', 32), - - # Extracting 256 bits of integer will request 33 bytes, but still - # copy as many bits as possible into the buffer. 
So we *can* copy - # into a 32-byte buffer, though negative number may be unrecoverable - (2**256-1, b'\xff' * 32, 33), - (2**256-1, b'\x00' + b'\xff' * 32, 33), - (-(2**256-1), b'\x00' * 31 + b'\x01', 33), - (-(2**256-1), b'\xff' + b'\x00' * 31 + b'\x01', 33), - (-(2**256-1), b'\xff\xff' + b'\x00' * 31 + b'\x01', 33), - - # The classic "Windows HRESULT as negative number" case - # HRESULT hr; - # PyLong_CopyBits(<-2147467259>, &hr, sizeof(HRESULT)) - # assert(hr == E_FAIL) - (-2147467259, b'\x80\x00\x40\x05', 4), - ]: - with self.subTest(f"{v:X}-{len(expect_be)}bytes"): - n = len(expect_be) - buffer = bytearray(n) - expect_le = expect_be[::-1] - - self.assertEqual(expect_n, asnativebytes(v, buffer, n, 0), - f"PyLong_AsNativeBytes(v, buffer, {n}, )") - self.assertEqual(expect_be, buffer[:n], "") - self.assertEqual(expect_n, asnativebytes(v, buffer, n, 1), - f"PyLong_AsNativeBytes(v, buffer, {n}, )") - self.assertEqual(expect_le, buffer[:n], "") - - # Check a few error conditions. These are validated in code, but are - # unspecified in docs, so if we make changes to the implementation, it's - # fine to just update these tests rather than preserve the behaviour. - with self.assertRaises(SystemError): - asnativebytes(1, buffer, 0, 2) - with self.assertRaises(TypeError): - asnativebytes('not a number', buffer, 0, -1) - - def test_long_fromnativebytes(self): - import math - from _testcapi import ( - pylong_fromnativebytes as fromnativebytes, - SIZE_MAX, - ) - - # Abbreviate sizeof(Py_ssize_t) to SZ because we use it a lot - SZ = int(math.ceil(math.log(SIZE_MAX + 1) / math.log(2)) / 8) - MAX_SSIZE = 2 ** (SZ * 8 - 1) - 1 - MAX_USIZE = 2 ** (SZ * 8) - 1 - - for v_be, expect_s, expect_u in [ - (b'\x00', 0, 0), - (b'\x01', 1, 1), - (b'\xff', -1, 255), - (b'\x00\xff', 255, 255), - (b'\xff\xff', -1, 65535), - ]: - with self.subTest(f"{expect_s}-{expect_u:X}-{len(v_be)}bytes"): - n = len(v_be) - v_le = v_be[::-1] - - self.assertEqual(expect_s, fromnativebytes(v_be, n, 0, 1), - f"PyLong_FromNativeBytes(buffer, {n}, )") - self.assertEqual(expect_s, fromnativebytes(v_le, n, 1, 1), - f"PyLong_FromNativeBytes(buffer, {n}, )") - self.assertEqual(expect_u, fromnativebytes(v_be, n, 0, 0), - f"PyLong_FromUnsignedNativeBytes(buffer, {n}, )") - self.assertEqual(expect_u, fromnativebytes(v_le, n, 1, 0), - f"PyLong_FromUnsignedNativeBytes(buffer, {n}, )") - - # Check native endian when the result would be the same either - # way and we can test it. - if v_be == v_le: - self.assertEqual(expect_s, fromnativebytes(v_be, n, -1, 1), - f"PyLong_FromNativeBytes(buffer, {n}, )") - self.assertEqual(expect_u, fromnativebytes(v_be, n, -1, 0), - f"PyLong_FromUnsignedNativeBytes(buffer, {n}, )") - if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py index b64aed10d2d653..e6b1b554c9af10 100644 --- a/Lib/test/test_capi/test_opt.py +++ b/Lib/test/test_capi/test_opt.py @@ -3,7 +3,6 @@ import sys import textwrap import unittest -import gc import _testinternalcapi @@ -557,214 +556,6 @@ def testfunc(n): # too much already. 
self.assertEqual(count, 1) -class TestUopsOptimization(unittest.TestCase): - - def test_int_type_propagation(self): - def testfunc(loops): - num = 0 - while num < loops: - x = num + num - a = x + 1 - num += 1 - return a - - opt = _testinternalcapi.get_uop_optimizer() - res = None - with temporary_optimizer(opt): - res = testfunc(32) - - ex = get_first_executor(testfunc) - self.assertIsNotNone(ex) - self.assertEqual(res, 63) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] - guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] - self.assertGreaterEqual(len(binop_count), 3) - self.assertLessEqual(len(guard_both_int_count), 1) - - def test_int_type_propagation_through_frame(self): - def double(x): - return x + x - def testfunc(loops): - num = 0 - while num < loops: - x = num + num - a = double(x) - num += 1 - return a - - opt = _testinternalcapi.get_uop_optimizer() - res = None - with temporary_optimizer(opt): - res = testfunc(32) - - ex = get_first_executor(testfunc) - self.assertIsNotNone(ex) - self.assertEqual(res, 124) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] - guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] - self.assertGreaterEqual(len(binop_count), 3) - self.assertLessEqual(len(guard_both_int_count), 1) - - def test_int_type_propagation_from_frame(self): - def double(x): - return x + x - def testfunc(loops): - num = 0 - while num < loops: - a = double(num) - x = a + a - num += 1 - return x - - opt = _testinternalcapi.get_uop_optimizer() - res = None - with temporary_optimizer(opt): - res = testfunc(32) - - ex = get_first_executor(testfunc) - self.assertIsNotNone(ex) - self.assertEqual(res, 124) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] - guard_both_int_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] - self.assertGreaterEqual(len(binop_count), 3) - self.assertLessEqual(len(guard_both_int_count), 1) - - def test_int_impure_region(self): - def testfunc(loops): - num = 0 - while num < loops: - x = num + num - y = 1 - x // 2 - a = x + y - num += 1 - return a - - opt = _testinternalcapi.get_uop_optimizer() - res = None - with temporary_optimizer(opt): - res = testfunc(64) - - ex = get_first_executor(testfunc) - self.assertIsNotNone(ex) - binop_count = [opname for opname, _, _ in ex if opname == "_BINARY_OP_ADD_INT"] - self.assertGreaterEqual(len(binop_count), 3) - - def test_call_py_exact_args(self): - def testfunc(n): - def dummy(x): - return x+1 - for i in range(n): - dummy(i) - - opt = _testinternalcapi.get_uop_optimizer() - with temporary_optimizer(opt): - testfunc(20) - - ex = get_first_executor(testfunc) - self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - self.assertIn("_PUSH_FRAME", uops) - self.assertIn("_BINARY_OP_ADD_INT", uops) - self.assertNotIn("_CHECK_PEP_523", uops) - - def test_int_type_propagate_through_range(self): - def testfunc(n): - - for i in range(n): - x = i + i - return x - - opt = _testinternalcapi.get_uop_optimizer() - with temporary_optimizer(opt): - res = testfunc(20) - - ex = get_first_executor(testfunc) - self.assertEqual(res, 19 * 2) - self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - self.assertNotIn("_GUARD_BOTH_INT", uops) - - def test_int_value_numbering(self): - def testfunc(n): - - y = 1 - for i in range(n): - x = y - z = x - a = z - b = a - res = x + z + a + b - return res - - opt = 
_testinternalcapi.get_uop_optimizer() - with temporary_optimizer(opt): - res = testfunc(20) - - ex = get_first_executor(testfunc) - self.assertEqual(res, 4) - self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - self.assertIn("_GUARD_BOTH_INT", uops) - guard_count = [opname for opname, _, _ in ex if opname == "_GUARD_BOTH_INT"] - self.assertEqual(len(guard_count), 1) - - def test_comprehension(self): - def testfunc(n): - for _ in range(n): - return [i for i in range(n)] - - opt = _testinternalcapi.get_uop_optimizer() - with temporary_optimizer(opt): - testfunc(20) - - ex = get_first_executor(testfunc) - self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - self.assertNotIn("_BINARY_OP_ADD_INT", uops) - - def test_call_py_exact_args_disappearing(self): - def dummy(x): - return x+1 - - def testfunc(n): - for i in range(n): - dummy(i) - - opt = _testinternalcapi.get_uop_optimizer() - # Trigger specialization - testfunc(8) - with temporary_optimizer(opt): - del dummy - gc.collect() - - def dummy(x): - return x + 2 - testfunc(10) - - ex = get_first_executor(testfunc) - # Honestly as long as it doesn't crash it's fine. - # Whether we get an executor or not is non-deterministic, - # because it's decided by when the function is freed. - # This test is a little implementation specific. - - def test_promote_globals_to_constants(self): - def testfunc(n): - for i in range(n): - x = range(i) - return x - - opt = _testinternalcapi.get_uop_optimizer() - with temporary_optimizer(opt): - testfunc(20) - - ex = get_first_executor(testfunc) - self.assertIsNotNone(ex) - uops = {opname for opname, _, _ in ex} - self.assertNotIn("_LOAD_GLOBAL_BUILTIN", uops) - self.assertIn("_LOAD_CONST_INLINE_BORROW_WITH_NULL", uops) - - if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_capi/test_time.py b/Lib/test/test_capi/test_time.py deleted file mode 100644 index 10b7fbf2c372a3..00000000000000 --- a/Lib/test/test_capi/test_time.py +++ /dev/null @@ -1,71 +0,0 @@ -import time -import unittest -from test.support import import_helper -_testcapi = import_helper.import_module('_testcapi') - - -PyTime_MIN = _testcapi.PyTime_MIN -PyTime_MAX = _testcapi.PyTime_MAX -SEC_TO_NS = 10 ** 9 -DAY_TO_SEC = (24 * 60 * 60) -# Worst clock resolution: maximum delta between two clock reads. 
-CLOCK_RES = 0.050 - - -class CAPITest(unittest.TestCase): - def test_min_max(self): - # PyTime_t is just int64_t - self.assertEqual(PyTime_MIN, -2**63) - self.assertEqual(PyTime_MAX, 2**63 - 1) - - def check_clock(self, c_func, py_func): - t1 = c_func() - t2 = py_func() - self.assertAlmostEqual(t1, t2, delta=CLOCK_RES) - - def test_assecondsdouble(self): - # Test PyTime_AsSecondsDouble() - def ns_to_sec(ns): - if abs(ns) % SEC_TO_NS == 0: - return float(ns // SEC_TO_NS) - else: - return float(ns) / SEC_TO_NS - - seconds = ( - 0, - 1, - DAY_TO_SEC, - 365 * DAY_TO_SEC, - ) - values = { - PyTime_MIN, - PyTime_MIN + 1, - PyTime_MAX - 1, - PyTime_MAX, - } - for second in seconds: - ns = second * SEC_TO_NS - values.add(ns) - # test nanosecond before/after to test rounding - values.add(ns - 1) - values.add(ns + 1) - for ns in list(values): - if (-ns) > PyTime_MAX: - continue - values.add(-ns) - for ns in sorted(values): - with self.subTest(ns=ns): - self.assertEqual(_testcapi.PyTime_AsSecondsDouble(ns), - ns_to_sec(ns)) - - def test_monotonic(self): - # Test PyTime_Monotonic() - self.check_clock(_testcapi.PyTime_Monotonic, time.monotonic) - - def test_perf_counter(self): - # Test PyTime_PerfCounter() - self.check_clock(_testcapi.PyTime_PerfCounter, time.perf_counter) - - def test_time(self): - # Test PyTime_time() - self.check_clock(_testcapi.PyTime_Time, time.time) diff --git a/Lib/test/test_dataclasses/__init__.py b/Lib/test/test_dataclasses/__init__.py index ede74b0dd15ccf..272d427875ae40 100644 --- a/Lib/test/test_dataclasses/__init__.py +++ b/Lib/test/test_dataclasses/__init__.py @@ -22,8 +22,6 @@ import typing # Needed for the string "typing.ClassVar[int]" to work as an annotation. import dataclasses # Needed for the string "dataclasses.InitVar[int]" to work as an annotation. -from test import support - # Just any custom exception we can catch. class CustomError(Exception): pass @@ -2218,7 +2216,6 @@ def assertDocStrEqual(self, a, b): # whitespace stripped. 
self.assertEqual(a.replace(' ', ''), b.replace(' ', '')) - @support.requires_docstrings def test_existing_docstring_not_overridden(self): @dataclass class C: diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py index 61060f3dc29fd4..5d7dae8829574b 100644 --- a/Lib/test/test_enum.py +++ b/Lib/test/test_enum.py @@ -4905,15 +4905,15 @@ class Color(enum.Enum) | value | | ---------------------------------------------------------------------- - | Static methods inherited from enum.EnumType: + | Methods inherited from enum.EnumType: | - | __contains__(value) + | __contains__(value) from enum.EnumType | - | __getitem__(name) + | __getitem__(name) from enum.EnumType | - | __iter__() + | __iter__() from enum.EnumType | - | __len__() + | __len__() from enum.EnumType | | ---------------------------------------------------------------------- | Readonly properties inherited from enum.EnumType: diff --git a/Lib/test/test_generated_cases.py b/Lib/test/test_generated_cases.py index a7ad6c7320b4ee..ca1228ee7008a9 100644 --- a/Lib/test/test_generated_cases.py +++ b/Lib/test/test_generated_cases.py @@ -33,7 +33,6 @@ def skip_if_different_mount_drives(): import parser from stack import Stack import tier1_generator - import tier2_abstract_generator def handle_stderr(): @@ -794,157 +793,5 @@ def test_annotated_op(self): self.run_cases_test(input, output) -class TestGeneratedAbstractCases(unittest.TestCase): - def setUp(self) -> None: - super().setUp() - self.maxDiff = None - - self.temp_dir = tempfile.gettempdir() - self.temp_input_filename = os.path.join(self.temp_dir, "input.txt") - self.temp_input2_filename = os.path.join(self.temp_dir, "input2.txt") - self.temp_output_filename = os.path.join(self.temp_dir, "output.txt") - - def tearDown(self) -> None: - for filename in [ - self.temp_input_filename, - self.temp_input2_filename, - self.temp_output_filename, - ]: - try: - os.remove(filename) - except: - pass - super().tearDown() - - def run_cases_test(self, input: str, input2: str, expected: str): - with open(self.temp_input_filename, "w+") as temp_input: - temp_input.write(parser.BEGIN_MARKER) - temp_input.write(input) - temp_input.write(parser.END_MARKER) - temp_input.flush() - - with open(self.temp_input2_filename, "w+") as temp_input: - temp_input.write(parser.BEGIN_MARKER) - temp_input.write(input2) - temp_input.write(parser.END_MARKER) - temp_input.flush() - - with handle_stderr(): - tier2_abstract_generator.generate_tier2_abstract_from_files( - [self.temp_input_filename, self.temp_input2_filename], - self.temp_output_filename - ) - - with open(self.temp_output_filename) as temp_output: - lines = temp_output.readlines() - while lines and lines[0].startswith(("// ", "#", " #", "\n")): - lines.pop(0) - while lines and lines[-1].startswith(("#", "\n")): - lines.pop(-1) - actual = "".join(lines) - self.assertEqual(actual.strip(), expected.strip()) - - def test_overridden_abstract(self): - input = """ - pure op(OP, (--)) { - spam(); - } - """ - input2 = """ - pure op(OP, (--)) { - eggs(); - } - """ - output = """ - case OP: { - eggs(); - break; - } - """ - self.run_cases_test(input, input2, output) - - def test_overridden_abstract_args(self): - input = """ - pure op(OP, (arg1 -- out)) { - spam(); - } - op(OP2, (arg1 -- out)) { - eggs(); - } - """ - input2 = """ - op(OP, (arg1 -- out)) { - eggs(); - } - """ - output = """ - case OP: { - _Py_UOpsSymType *arg1; - _Py_UOpsSymType *out; - arg1 = stack_pointer[-1]; - eggs(); - stack_pointer[-1] = out; - break; - } - - case OP2: { - _Py_UOpsSymType *out; - out 
= sym_new_unknown(ctx); - if (out == NULL) goto out_of_space; - stack_pointer[-1] = out; - break; - } - """ - self.run_cases_test(input, input2, output) - - def test_no_overridden_case(self): - input = """ - pure op(OP, (arg1 -- out)) { - spam(); - } - - pure op(OP2, (arg1 -- out)) { - } - - """ - input2 = """ - pure op(OP2, (arg1 -- out)) { - } - """ - output = """ - case OP: { - _Py_UOpsSymType *out; - out = sym_new_unknown(ctx); - if (out == NULL) goto out_of_space; - stack_pointer[-1] = out; - break; - } - - case OP2: { - _Py_UOpsSymType *arg1; - _Py_UOpsSymType *out; - arg1 = stack_pointer[-1]; - stack_pointer[-1] = out; - break; - } - """ - self.run_cases_test(input, input2, output) - - def test_missing_override_failure(self): - input = """ - pure op(OP, (arg1 -- out)) { - spam(); - } - """ - input2 = """ - pure op(OTHER, (arg1 -- out)) { - } - """ - output = """ - """ - with self.assertRaisesRegex(AssertionError, "All abstract uops"): - self.run_cases_test(input, input2, output) - - if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_pydoc/test_pydoc.py b/Lib/test/test_pydoc.py similarity index 98% rename from Lib/test/test_pydoc/test_pydoc.py rename to Lib/test/test_pydoc.py index 0dd24e6d347364..f3c26624c624f5 100644 --- a/Lib/test/test_pydoc/test_pydoc.py +++ b/Lib/test/test_pydoc.py @@ -34,8 +34,8 @@ captured_stderr, is_emscripten, is_wasi, requires_docstrings, MISSING_C_DOCSTRINGS) from test.support.os_helper import (TESTFN, rmtree, unlink) -from test.test_pydoc import pydoc_mod -from test.test_pydoc import pydocfodder +from test import pydoc_mod +from test import pydocfodder class nonascii: @@ -52,7 +52,7 @@ class nonascii: expected_text_pattern = """ NAME - test.test_pydoc.pydoc_mod - This is a test module for test_pydoc + test.pydoc_mod - This is a test module for test_pydoc %s CLASSES builtins.object @@ -125,7 +125,7 @@ class C(builtins.object) DATA __xyz__ = 'X, Y and Z' - c_alias = test.test_pydoc.pydoc_mod.C[int] + c_alias = test.pydoc_mod.C[int] list_alias1 = typing.List[int] list_alias2 = list[int] type_union1 = typing.Union[int, str] @@ -148,7 +148,7 @@ class C(builtins.object) for s in expected_data_docstrings) html2text_of_expected = """ -test.test_pydoc.pydoc_mod (version 1.2.3.4) +test.pydoc_mod (version 1.2.3.4) This is a test module for test_pydoc Modules @@ -213,7 +213,7 @@ class C(builtins.object) Data __xyz__ = 'X, Y and Z' - c_alias = test.test_pydoc.pydoc_mod.C[int] + c_alias = test.pydoc_mod.C[int] list_alias1 = typing.List[int] list_alias2 = list[int] type_union1 = typing.Union[int, str] @@ -342,7 +342,7 @@ def get_pydoc_link(module): "Returns a documentation web link of a module" abspath = os.path.abspath dirname = os.path.dirname - basedir = dirname(dirname(dirname(abspath(__file__)))) + basedir = dirname(dirname(abspath(__file__))) doc = pydoc.TextDoc() loc = doc.getdocloc(module, basedir=basedir) return loc @@ -489,7 +489,7 @@ def test_not_here(self): @requires_docstrings def test_not_ascii(self): - result = run_pydoc('test.test_pydoc.test_pydoc.nonascii', PYTHONIOENCODING='ascii') + result = run_pydoc('test.test_pydoc.nonascii', PYTHONIOENCODING='ascii') encoded = nonascii.__doc__.encode('ascii', 'backslashreplace') self.assertIn(encoded, result) @@ -669,9 +669,9 @@ def test_help_output_redirect(self): buf = StringIO() helper = pydoc.Helper(output=buf) unused, doc_loc = get_pydoc_text(pydoc_mod) - module = "test.test_pydoc.pydoc_mod" + module = "test.pydoc_mod" help_header = """ - Help on module test.test_pydoc.pydoc_mod in 
test.test_pydoc: + Help on module test.pydoc_mod in test: """.lstrip() help_header = textwrap.dedent(help_header) @@ -1142,6 +1142,7 @@ class TestDescriptions(unittest.TestCase): def test_module(self): # Check that pydocfodder module can be described + from test import pydocfodder doc = pydoc.render_doc(pydocfodder) self.assertIn("pydocfodder", doc) @@ -1424,10 +1425,10 @@ def smeth(*args, **kwargs): self.assertEqual(self._get_summary_line(C.meth), "meth" + unbound) self.assertEqual(self._get_summary_line(C().meth), - "meth" + bound + " method of test.test_pydoc.test_pydoc.C instance") + "meth" + bound + " method of test.test_pydoc.C instance") C.cmeth.__func__.__text_signature__ = text_signature self.assertEqual(self._get_summary_line(C.cmeth), - "cmeth" + bound + " class method of test.test_pydoc.test_pydoc.C") + "cmeth" + bound + " class method of test.test_pydoc.C") C.smeth.__text_signature__ = text_signature self.assertEqual(self._get_summary_line(C.smeth), "smeth" + unbound) @@ -1464,7 +1465,7 @@ def cm(cls, x): 'cm(...)\n' ' A class method\n') self.assertEqual(self._get_summary_lines(X.cm), """\ -cm(x) class method of test.test_pydoc.test_pydoc.X +cm(x) class method of test.test_pydoc.X A class method """) self.assertIn(""" @@ -1646,19 +1647,19 @@ def test_text_doc_routines_in_class(self, cls=pydocfodder.B): lines = self.getsection(result, f' | Methods {where}:', ' | ' + '-'*70) self.assertIn(' | A_method_alias = A_method(self)', lines) self.assertIn(' | B_method_alias = B_method(self)', lines) - self.assertIn(' | A_staticmethod(x, y) from test.test_pydoc.pydocfodder.A', lines) + self.assertIn(' | A_staticmethod(x, y) from test.pydocfodder.A', lines) self.assertIn(' | A_staticmethod_alias = A_staticmethod(x, y)', lines) - self.assertIn(' | global_func(x, y) from test.test_pydoc.pydocfodder', lines) + self.assertIn(' | global_func(x, y) from test.pydocfodder', lines) self.assertIn(' | global_func_alias = global_func(x, y)', lines) - self.assertIn(' | global_func2_alias = global_func2(x, y) from test.test_pydoc.pydocfodder', lines) + self.assertIn(' | global_func2_alias = global_func2(x, y) from test.pydocfodder', lines) self.assertIn(' | __repr__(self, /) from builtins.object', lines) self.assertIn(' | object_repr = __repr__(self, /)', lines) lines = self.getsection(result, f' | Static methods {where}:', ' | ' + '-'*70) - self.assertIn(' | A_classmethod_ref = A_classmethod(x) class method of test.test_pydoc.pydocfodder.A', lines) - note = '' if cls is pydocfodder.B else ' class method of test.test_pydoc.pydocfodder.B' + self.assertIn(' | A_classmethod_ref = A_classmethod(x) class method of test.pydocfodder.A', lines) + note = '' if cls is pydocfodder.B else ' class method of test.pydocfodder.B' self.assertIn(' | B_classmethod_ref = B_classmethod(x)' + note, lines) - self.assertIn(' | A_method_ref = A_method() method of test.test_pydoc.pydocfodder.A instance', lines) + self.assertIn(' | A_method_ref = A_method() method of test.pydocfodder.A instance', lines) self.assertIn(' | get(key, default=None, /) method of builtins.dict instance', lines) self.assertIn(' | dict_get = get(key, default=None, /) method of builtins.dict instance', lines) @@ -1674,19 +1675,19 @@ def test_html_doc_routines_in_class(self, cls=pydocfodder.B): lines = self.getsection(result, f'Methods {where}:', '-'*70) self.assertIn('A_method_alias = A_method(self)', lines) self.assertIn('B_method_alias = B_method(self)', lines) - self.assertIn('A_staticmethod(x, y) from test.test_pydoc.pydocfodder.A', lines) + 
self.assertIn('A_staticmethod(x, y) from test.pydocfodder.A', lines) self.assertIn('A_staticmethod_alias = A_staticmethod(x, y)', lines) - self.assertIn('global_func(x, y) from test.test_pydoc.pydocfodder', lines) + self.assertIn('global_func(x, y) from test.pydocfodder', lines) self.assertIn('global_func_alias = global_func(x, y)', lines) - self.assertIn('global_func2_alias = global_func2(x, y) from test.test_pydoc.pydocfodder', lines) + self.assertIn('global_func2_alias = global_func2(x, y) from test.pydocfodder', lines) self.assertIn('__repr__(self, /) from builtins.object', lines) self.assertIn('object_repr = __repr__(self, /)', lines) lines = self.getsection(result, f'Static methods {where}:', '-'*70) - self.assertIn('A_classmethod_ref = A_classmethod(x) class method of test.test_pydoc.pydocfodder.A', lines) - note = '' if cls is pydocfodder.B else ' class method of test.test_pydoc.pydocfodder.B' + self.assertIn('A_classmethod_ref = A_classmethod(x) class method of test.pydocfodder.A', lines) + note = '' if cls is pydocfodder.B else ' class method of test.pydocfodder.B' self.assertIn('B_classmethod_ref = B_classmethod(x)' + note, lines) - self.assertIn('A_method_ref = A_method() method of test.test_pydoc.pydocfodder.A instance', lines) + self.assertIn('A_method_ref = A_method() method of test.pydocfodder.A instance', lines) lines = self.getsection(result, f'Class methods {where}:', '-'*70) self.assertIn('B_classmethod(x)', lines) diff --git a/Lib/test/test_pydoc/__init__.py b/Lib/test/test_pydoc/__init__.py deleted file mode 100644 index f2a39a3fe29c7f..00000000000000 --- a/Lib/test/test_pydoc/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -import os -from test import support - - -def load_tests(*args): - return support.load_package_tests(os.path.dirname(__file__), *args) diff --git a/Lib/test/test_queue.py b/Lib/test/test_queue.py index 92d670ca6f8f5b..d308a212999429 100644 --- a/Lib/test/test_queue.py +++ b/Lib/test/test_queue.py @@ -403,11 +403,11 @@ def _shutdown_all_methods_in_many_threads(self, immediate): for thread in ps[1:]: thread.join() - @unittest.skip("test times out (gh-115258)") + @unittest.skipIf(sys.platform == "win32", "test times out (gh-115258)") def test_shutdown_all_methods_in_many_threads(self): return self._shutdown_all_methods_in_many_threads(False) - @unittest.skip("test times out (gh-115258)") + @unittest.skipIf(sys.platform == "win32", "test times out (gh-115258)") def test_shutdown_immediate_all_methods_in_many_threads(self): return self._shutdown_all_methods_in_many_threads(True) diff --git a/Lib/test/test_time.py b/Lib/test/test_time.py index a0aeea515afbd6..3b5640abdb6b89 100644 --- a/Lib/test/test_time.py +++ b/Lib/test/test_time.py @@ -43,8 +43,8 @@ class _PyTime(enum.IntEnum): ROUND_UP = 3 # _PyTime_t is int64_t -PyTime_MIN = -2 ** 63 -PyTime_MAX = 2 ** 63 - 1 +_PyTime_MIN = -2 ** 63 +_PyTime_MAX = 2 ** 63 - 1 # Rounding modes supported by PyTime ROUNDING_MODES = ( @@ -934,7 +934,7 @@ def test_FromSecondsObject(self): _PyTime_FromSecondsObject(float('nan'), time_rnd) def test_AsSecondsDouble(self): - from _testcapi import PyTime_AsSecondsDouble + from _testinternalcapi import _PyTime_AsSecondsDouble def float_converter(ns): if abs(ns) % SEC_TO_NS == 0: @@ -942,10 +942,15 @@ def float_converter(ns): else: return float(ns) / SEC_TO_NS - self.check_int_rounding(lambda ns, rnd: PyTime_AsSecondsDouble(ns), + self.check_int_rounding(lambda ns, rnd: _PyTime_AsSecondsDouble(ns), float_converter, NS_TO_SEC) + # test nan + for time_rnd, _ in ROUNDING_MODES: + with 
self.assertRaises(TypeError): + _PyTime_AsSecondsDouble(float('nan')) + def create_decimal_converter(self, denominator): denom = decimal.Decimal(denominator) @@ -1004,7 +1009,7 @@ def test_AsTimeval_clamp(self): tv_sec_max = self.time_t_max tv_sec_min = self.time_t_min - for t in (PyTime_MIN, PyTime_MAX): + for t in (_PyTime_MIN, _PyTime_MAX): ts = _PyTime_AsTimeval_clamp(t, _PyTime.ROUND_CEILING) with decimal.localcontext() as context: context.rounding = decimal.ROUND_CEILING @@ -1023,7 +1028,7 @@ def test_AsTimeval_clamp(self): def test_AsTimespec_clamp(self): from _testinternalcapi import _PyTime_AsTimespec_clamp - for t in (PyTime_MIN, PyTime_MAX): + for t in (_PyTime_MIN, _PyTime_MAX): ts = _PyTime_AsTimespec_clamp(t) tv_sec, tv_nsec = divmod(t, NS_TO_SEC) if self.time_t_max < tv_sec: diff --git a/Lib/test/test_winapi.py b/Lib/test/test_winapi.py deleted file mode 100644 index 014aeea7239e2b..00000000000000 --- a/Lib/test/test_winapi.py +++ /dev/null @@ -1,94 +0,0 @@ -# Test the Windows-only _winapi module - -import random -import threading -import time -import unittest -from test.support import import_helper - -_winapi = import_helper.import_module('_winapi', required_on=['win']) - -MAXIMUM_WAIT_OBJECTS = 64 -MAXIMUM_BATCHED_WAIT_OBJECTS = (MAXIMUM_WAIT_OBJECTS - 1) ** 2 - -class WinAPIBatchedWaitForMultipleObjectsTests(unittest.TestCase): - def _events_waitall_test(self, n): - evts = [_winapi.CreateEventW(0, False, False, None) for _ in range(n)] - - with self.assertRaises(TimeoutError): - _winapi.BatchedWaitForMultipleObjects(evts, True, 100) - - # Ensure no errors raised when all are triggered - for e in evts: - _winapi.SetEvent(e) - try: - _winapi.BatchedWaitForMultipleObjects(evts, True, 100) - except TimeoutError: - self.fail("expected wait to complete immediately") - - # Choose 8 events to set, distributed throughout the list, to make sure - # we don't always have them in the first chunk - chosen = [i * (len(evts) // 8) for i in range(8)] - - # Replace events with invalid handles to make sure we fail - for i in chosen: - old_evt = evts[i] - evts[i] = -1 - with self.assertRaises(OSError): - _winapi.BatchedWaitForMultipleObjects(evts, True, 100) - evts[i] = old_evt - - - def _events_waitany_test(self, n): - evts = [_winapi.CreateEventW(0, False, False, None) for _ in range(n)] - - with self.assertRaises(TimeoutError): - _winapi.BatchedWaitForMultipleObjects(evts, False, 100) - - # Choose 8 events to set, distributed throughout the list, to make sure - # we don't always have them in the first chunk - chosen = [i * (len(evts) // 8) for i in range(8)] - - # Trigger one by one. They are auto-reset events, so will only trigger once - for i in chosen: - with self.subTest(f"trigger event {i} of {len(evts)}"): - _winapi.SetEvent(evts[i]) - triggered = _winapi.BatchedWaitForMultipleObjects(evts, False, 10000) - self.assertSetEqual(set(triggered), {i}) - - # Trigger all at once. 
This may require multiple calls - for i in chosen: - _winapi.SetEvent(evts[i]) - triggered = set() - while len(triggered) < len(chosen): - triggered.update(_winapi.BatchedWaitForMultipleObjects(evts, False, 10000)) - self.assertSetEqual(triggered, set(chosen)) - - # Replace events with invalid handles to make sure we fail - for i in chosen: - with self.subTest(f"corrupt event {i} of {len(evts)}"): - old_evt = evts[i] - evts[i] = -1 - with self.assertRaises(OSError): - _winapi.BatchedWaitForMultipleObjects(evts, False, 100) - evts[i] = old_evt - - - def test_few_events_waitall(self): - self._events_waitall_test(16) - - def test_many_events_waitall(self): - self._events_waitall_test(256) - - def test_max_events_waitall(self): - self._events_waitall_test(MAXIMUM_BATCHED_WAIT_OBJECTS) - - - def test_few_events_waitany(self): - self._events_waitany_test(16) - - def test_many_events_waitany(self): - self._events_waitany_test(256) - - def test_max_events_waitany(self): - self._events_waitany_test(MAXIMUM_BATCHED_WAIT_OBJECTS) diff --git a/Makefile.pre.in b/Makefile.pre.in index d3b18acad61ce5..4dabe328ce0362 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -184,8 +184,6 @@ PYTHONFRAMEWORK= @PYTHONFRAMEWORK@ PYTHONFRAMEWORKDIR= @PYTHONFRAMEWORKDIR@ PYTHONFRAMEWORKPREFIX= @PYTHONFRAMEWORKPREFIX@ PYTHONFRAMEWORKINSTALLDIR= @PYTHONFRAMEWORKINSTALLDIR@ -PYTHONFRAMEWORKINSTALLNAMEPREFIX= @PYTHONFRAMEWORKINSTALLNAMEPREFIX@ -RESSRCDIR= @RESSRCDIR@ # Deployment target selected during configure, to be checked # by distutils. The export statement is needed to ensure that the # deployment target is active during build. @@ -868,7 +866,7 @@ libpython3.so: libpython$(LDVERSION).so $(BLDSHARED) $(NO_AS_NEEDED) -o $@ -Wl,-h$@ $^ libpython$(LDVERSION).dylib: $(LIBRARY_OBJS) - $(CC) -dynamiclib $(PY_CORE_LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(PYTHONFRAMEWORKINSTALLNAMEPREFIX)/lib/libpython$(LDVERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(DTRACE_OBJS) $(SHLIBS) $(LIBC) $(LIBM); \ + $(CC) -dynamiclib -Wl,-single_module $(PY_CORE_LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(prefix)/lib/libpython$(LDVERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(DTRACE_OBJS) $(SHLIBS) $(LIBC) $(LIBM); \ libpython$(VERSION).sl: $(LIBRARY_OBJS) @@ -893,13 +891,14 @@ $(BUILDPYTHON)-gdb.py: $(SRC_GDB_HOOKS) # This rule is here for OPENSTEP/Rhapsody/MacOSX. It builds a temporary # minimal framework (not including the Lib directory and such) in the current # directory. 
+RESSRCDIR=Mac/Resources/framework $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK): \ $(LIBRARY) \ $(RESSRCDIR)/Info.plist $(INSTALL) -d -m $(DIRMODE) $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION) $(CC) -o $(LDLIBRARY) $(PY_CORE_LDFLAGS) -dynamiclib \ - -all_load $(LIBRARY) \ - -install_name $(DESTDIR)$(PYTHONFRAMEWORKINSTALLNAMEPREFIX)/$(PYTHONFRAMEWORK) \ + -all_load $(LIBRARY) -Wl,-single_module \ + -install_name $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK) \ -compatibility_version $(VERSION) \ -current_version $(VERSION) \ -framework CoreFoundation $(LIBS); @@ -1863,10 +1862,6 @@ regen-cases: -o $(srcdir)/Python/generated_cases.c.h.new $(srcdir)/Python/bytecodes.c $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/tier2_generator.py \ -o $(srcdir)/Python/executor_cases.c.h.new $(srcdir)/Python/bytecodes.c - $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/tier2_abstract_generator.py \ - -o $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h.new \ - $(srcdir)/Python/tier2_redundancy_eliminator_bytecodes.c \ - $(srcdir)/Python/bytecodes.c $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/opcode_metadata_generator.py \ -o $(srcdir)/Include/internal/pycore_opcode_metadata.h.new $(srcdir)/Python/bytecodes.c $(PYTHON_FOR_REGEN) $(srcdir)/Tools/cases_generator/uop_metadata_generator.py -o \ @@ -1878,7 +1873,6 @@ regen-cases: $(UPDATE_FILE) $(srcdir)/Include/internal/pycore_opcode_metadata.h $(srcdir)/Include/internal/pycore_opcode_metadata.h.new $(UPDATE_FILE) $(srcdir)/Include/internal/pycore_uop_metadata.h $(srcdir)/Include/internal/pycore_uop_metadata.h.new $(UPDATE_FILE) $(srcdir)/Python/executor_cases.c.h $(srcdir)/Python/executor_cases.c.h.new - $(UPDATE_FILE) $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h.new $(UPDATE_FILE) $(srcdir)/Lib/_opcode_metadata.py $(srcdir)/Lib/_opcode_metadata.py.new Python/compile.o: $(srcdir)/Include/internal/pycore_opcode_metadata.h @@ -1900,8 +1894,7 @@ Python/optimizer.o: \ Python/optimizer_analysis.o: \ $(srcdir)/Include/internal/pycore_opcode_metadata.h \ - $(srcdir)/Include/internal/pycore_optimizer.h \ - $(srcdir)/Python/tier2_redundancy_eliminator_cases.c.h + $(srcdir)/Include/internal/pycore_optimizer.h Python/frozen.o: $(FROZEN_FILES_OUT) @@ -2007,7 +2000,7 @@ multissltest: all # which can lead to two parallel `./python setup.py build` processes that # step on each others toes. .PHONY: install -install: @FRAMEWORKINSTALLFIRST@ @INSTALLTARGETS@ @FRAMEWORKINSTALLLAST@ +install: @FRAMEWORKINSTALLFIRST@ commoninstall bininstall maninstall @FRAMEWORKINSTALLLAST@ if test "x$(ENSUREPIP)" != "xno" ; then \ case $(ENSUREPIP) in \ upgrade) ensurepip="--upgrade" ;; \ @@ -2313,7 +2306,6 @@ TESTSUBDIRS= idlelib/idle_test \ test/test_module \ test/test_pathlib \ test/test_peg_generator \ - test/test_pydoc \ test/test_sqlite3 \ test/test_tkinter \ test/test_tomllib \ diff --git a/Misc/NEWS.d/3.12.0a1.rst b/Misc/NEWS.d/3.12.0a1.rst index 4739e0fb89a4a8..f192bf086ed259 100644 --- a/Misc/NEWS.d/3.12.0a1.rst +++ b/Misc/NEWS.d/3.12.0a1.rst @@ -2722,7 +2722,7 @@ on future on an error - e.g. TimeoutError or KeyboardInterrupt. Fix a :mod:`sqlite3` regression where ``*args`` and ``**kwds`` were incorrectly relayed from :py:func:`~sqlite3.connect` to the :class:`~sqlite3.Connection` factory. The regression was introduced in -3.11a1 with PR 24421 (:gh:`85128`). Patch by Erlend E. Aasland. +3.11a1 with PR 24421 (:gh:`85128`). Patch by Erlend E. Aasland.` .. 
@@ -2988,7 +2988,7 @@ Kumar Aditya. .. section: Library Fix crash in :class:`struct.Struct` when it was not completely initialized -by initializing it in :meth:`~object.__new__`. Patch by Kumar Aditya. +by initializing it in :meth:`~object.__new__``. Patch by Kumar Aditya. .. diff --git a/Misc/NEWS.d/3.12.0b1.rst b/Misc/NEWS.d/3.12.0b1.rst index d9804be764c9a9..21f2c748f40548 100644 --- a/Misc/NEWS.d/3.12.0b1.rst +++ b/Misc/NEWS.d/3.12.0b1.rst @@ -563,10 +563,10 @@ Complex function calls are now faster and consume no C stack space. .. nonce: fvgsCl .. section: Core and Builtins -``len()`` for 0-dimensional :class:`memoryview` objects (such as +``len()`` for 0-dimensional :class:`memoryview`` objects (such as ``memoryview(ctypes.c_uint8(42))``) now raises a :exc:`TypeError`. Previously this returned ``1``, which was not consistent with ``mem_0d[0]`` -raising an :exc:`IndexError`. +raising an :exc:`IndexError``. .. diff --git a/Misc/NEWS.d/3.13.0a1.rst b/Misc/NEWS.d/3.13.0a1.rst index 16715bee5a8e49..d385b6a4504f97 100644 --- a/Misc/NEWS.d/3.13.0a1.rst +++ b/Misc/NEWS.d/3.13.0a1.rst @@ -4380,7 +4380,7 @@ Patch by Victor Stinner. .. nonce: I6MQhb .. section: Library -:pep:`594`: Remove the :mod:`!cgi` and :mod:`!cgitb` modules, deprecated in +:pep:`594`: Remove the :mod:`!cgi`` and :mod:`!cgitb` modules, deprecated in Python 3.11. Patch by Victor Stinner. .. diff --git a/Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst b/Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst deleted file mode 100644 index 5e4acfba8a6949..00000000000000 --- a/Misc/NEWS.d/next/Build/2024-02-07-08-23-48.gh-issue-114099.XcEXEZ.rst +++ /dev/null @@ -1,2 +0,0 @@ -configure and Makefile were refactored to accomodate framework builds on -Apple platforms other than macOS. diff --git a/Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst b/Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst deleted file mode 100644 index 998d4426dd53f9..00000000000000 --- a/Misc/NEWS.d/next/C API/2023-11-16-02-07-48.gh-issue-110850.DQGNfF.rst +++ /dev/null @@ -1,9 +0,0 @@ -Add PyTime C API: - -* :c:type:`PyTime_t` type. -* :c:var:`PyTime_MIN` and :c:var:`PyTime_MAX` constants. -* :c:func:`PyTime_AsSecondsDouble`, - :c:func:`PyTime_Monotonic`, :c:func:`PyTime_PerfCounter`, and - :c:func:`PyTime_Time` functions. - -Patch by Victor Stinner. diff --git a/Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst b/Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst deleted file mode 100644 index a8aa191b5eb3ba..00000000000000 --- a/Misc/NEWS.d/next/C API/2024-02-05-17-11-15.gh-issue-111140.WMEjid.rst +++ /dev/null @@ -1,2 +0,0 @@ -Adds :c:func:`PyLong_AsNativeBytes`, :c:func:`PyLong_FromNativeBytes` and -:c:func:`PyLong_FromUnsignedNativeBytes` functions. diff --git a/Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst b/Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst deleted file mode 100644 index beb82dbcd3cccd..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2024-01-16-14-41-54.gh-issue-114058.Cb2b8h.rst +++ /dev/null @@ -1 +0,0 @@ -Implement the foundations of the Tier 2 redundancy eliminator. 
diff --git a/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst b/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst index 3e6eef183ad524..73d3d001f07f3f 100644 --- a/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst +++ b/Misc/NEWS.d/next/Library/2024-02-09-07-20-16.gh-issue-115165.yfJLXA.rst @@ -1,4 +1,4 @@ Most exceptions are now ignored when attempting to set the ``__orig_class__`` attribute on objects returned when calling :mod:`typing` generic aliases (including generic aliases created using :data:`typing.Annotated`). -Previously only :exc:`AttributeError` was ignored. Patch by Dave Shawley. +Previously only :exc:`AttributeError`` was ignored. Patch by Dave Shawley. diff --git a/Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst b/Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst deleted file mode 100644 index 8ffe328b16598a..00000000000000 --- a/Misc/NEWS.d/next/Windows/2023-08-11-18-21-38.gh-issue-89240.dtSOLG.rst +++ /dev/null @@ -1 +0,0 @@ -Allows :mod:`multiprocessing` to create pools of greater than 62 processes. diff --git a/Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst b/Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst deleted file mode 100644 index a679391857dcb3..00000000000000 --- a/Misc/NEWS.d/next/Windows/2024-02-08-21-37-22.gh-issue-115049.X1ObpJ.rst +++ /dev/null @@ -1 +0,0 @@ -Fixes ``py.exe`` launcher failing when run as users without user profiles. diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index e98775a4808765..8a65a9cffb1b9d 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -162,7 +162,7 @@ @MODULE__XXTESTFUZZ_TRUE@_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c @MODULE__TESTBUFFER_TRUE@_testbuffer _testbuffer.c @MODULE__TESTINTERNALCAPI_TRUE@_testinternalcapi _testinternalcapi.c _testinternalcapi/test_lock.c _testinternalcapi/pytime.c _testinternalcapi/set.c _testinternalcapi/test_critical_sections.c -@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/bytearray.c _testcapi/bytes.c _testcapi/unicode.c _testcapi/dict.c _testcapi/set.c _testcapi/list.c _testcapi/tuple.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/complex.c _testcapi/numbers.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/file.c _testcapi/codec.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c _testcapi/sys.c _testcapi/hash.c _testcapi/time.c +@MODULE__TESTCAPI_TRUE@_testcapi _testcapimodule.c _testcapi/vectorcall.c _testcapi/vectorcall_limited.c _testcapi/heaptype.c _testcapi/abstract.c _testcapi/bytearray.c _testcapi/bytes.c _testcapi/unicode.c _testcapi/dict.c _testcapi/set.c _testcapi/list.c _testcapi/tuple.c _testcapi/getargs.c _testcapi/datetime.c _testcapi/docstring.c _testcapi/mem.c _testcapi/watchers.c _testcapi/long.c _testcapi/float.c _testcapi/complex.c _testcapi/numbers.c _testcapi/structmember.c _testcapi/exceptions.c _testcapi/code.c _testcapi/buffer.c _testcapi/pyatomic.c _testcapi/pyos.c _testcapi/file.c _testcapi/codec.c _testcapi/immortal.c _testcapi/heaptype_relative.c _testcapi/gc.c _testcapi/sys.c _testcapi/hash.c @MODULE__TESTCLINIC_TRUE@_testclinic _testclinic.c 
@MODULE__TESTCLINIC_LIMITED_TRUE@_testclinic_limited _testclinic_limited.c diff --git a/Modules/_io/textio.c b/Modules/_io/textio.c index a3239ec0f52960..d794af8de2b8f0 100644 --- a/Modules/_io/textio.c +++ b/Modules/_io/textio.c @@ -2393,7 +2393,7 @@ textiowrapper_parse_cookie(cookie_type *cookie, PyObject *cookieObj) return -1; if (_PyLong_AsByteArray(cookieLong, buffer, sizeof(buffer), - PY_LITTLE_ENDIAN, 0, 1) < 0) { + PY_LITTLE_ENDIAN, 0) < 0) { Py_DECREF(cookieLong); return -1; } diff --git a/Modules/_pickle.c b/Modules/_pickle.c index 0d83261168185d..f210c0ca205991 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -2162,8 +2162,7 @@ save_long(PicklerObject *self, PyObject *obj) pdata = (unsigned char *)PyBytes_AS_STRING(repr); i = _PyLong_AsByteArray((PyLongObject *)obj, pdata, nbytes, - 1 /* little endian */ , 1 /* signed */ , - 1 /* with exceptions */); + 1 /* little endian */ , 1 /* signed */ ); if (i < 0) goto error; /* If the int is negative, this may be a byte more than diff --git a/Modules/_randommodule.c b/Modules/_randommodule.c index 4463157d62248d..4403e1d132c057 100644 --- a/Modules/_randommodule.c +++ b/Modules/_randommodule.c @@ -262,7 +262,7 @@ random_seed_urandom(RandomObject *self) static void random_seed_time_pid(RandomObject *self) { - PyTime_t now; + _PyTime_t now; uint32_t key[5]; now = _PyTime_GetSystemClock(); @@ -342,8 +342,7 @@ random_seed(RandomObject *self, PyObject *arg) res = _PyLong_AsByteArray((PyLongObject *)n, (unsigned char *)key, keyused * 4, PY_LITTLE_ENDIAN, - 0, /* unsigned */ - 1); /* with exceptions */ + 0); /* unsigned */ if (res == -1) { goto Done; } diff --git a/Modules/_sqlite/util.c b/Modules/_sqlite/util.c index 9e8613ef67916e..833a666301d8ff 100644 --- a/Modules/_sqlite/util.c +++ b/Modules/_sqlite/util.c @@ -162,7 +162,7 @@ _pysqlite_long_as_int64(PyObject * py_val) sqlite_int64 int64val; if (_PyLong_AsByteArray((PyLongObject *)py_val, (unsigned char *)&int64val, sizeof(int64val), - IS_LITTLE_ENDIAN, 1 /* signed */, 0) >= 0) { + IS_LITTLE_ENDIAN, 1 /* signed */) >= 0) { return int64val; } } diff --git a/Modules/_struct.c b/Modules/_struct.c index fa2cd37e003e0a..bd16fa89f18945 100644 --- a/Modules/_struct.c +++ b/Modules/_struct.c @@ -1000,10 +1000,9 @@ bp_longlong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f) (unsigned char *)p, 8, 0, /* little_endian */ - 1, /* signed */ - 0 /* !with_exceptions */); + 1 /* signed */); Py_DECREF(v); - if (res < 0) { + if (res == -1 && PyErr_Occurred()) { PyErr_Format(state->StructError, "'%c' format requires %lld <= number <= %lld", f->format, @@ -1025,10 +1024,9 @@ bp_ulonglong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f (unsigned char *)p, 8, 0, /* little_endian */ - 0, /* signed */ - 0 /* !with_exceptions */); + 0 /* signed */); Py_DECREF(v); - if (res < 0) { + if (res == -1 && PyErr_Occurred()) { PyErr_Format(state->StructError, "'%c' format requires 0 <= number <= %llu", f->format, @@ -1262,10 +1260,9 @@ lp_longlong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f) (unsigned char *)p, 8, 1, /* little_endian */ - 1, /* signed */ - 0 /* !with_exceptions */); + 1 /* signed */); Py_DECREF(v); - if (res < 0) { + if (res == -1 && PyErr_Occurred()) { PyErr_Format(state->StructError, "'%c' format requires %lld <= number <= %lld", f->format, @@ -1287,10 +1284,9 @@ lp_ulonglong(_structmodulestate *state, char *p, PyObject *v, const formatdef *f (unsigned char *)p, 8, 1, /* little_endian */ - 0, /* signed */ - 0 /* !with_exceptions 
*/); + 0 /* signed */); Py_DECREF(v); - if (res < 0) { + if (res == -1 && PyErr_Occurred()) { PyErr_Format(state->StructError, "'%c' format requires 0 <= number <= %llu", f->format, diff --git a/Modules/_testcapi/long.c b/Modules/_testcapi/long.c index dc21cf9f475228..32ad8d32ab8523 100644 --- a/Modules/_testcapi/long.c +++ b/Modules/_testcapi/long.c @@ -776,51 +776,6 @@ pylong_asvoidptr(PyObject *module, PyObject *arg) return Py_NewRef((PyObject *)value); } -static PyObject * -pylong_asnativebytes(PyObject *module, PyObject *args) -{ - PyObject *v; - Py_buffer buffer; - Py_ssize_t n, endianness; - if (!PyArg_ParseTuple(args, "Ow*nn", &v, &buffer, &n, &endianness)) { - return NULL; - } - if (buffer.readonly) { - PyErr_SetString(PyExc_TypeError, "buffer must be writable"); - PyBuffer_Release(&buffer); - return NULL; - } - if (buffer.len < n) { - PyErr_SetString(PyExc_ValueError, "buffer must be at least 'n' bytes"); - PyBuffer_Release(&buffer); - return NULL; - } - Py_ssize_t res = PyLong_AsNativeBytes(v, buffer.buf, n, (int)endianness); - PyBuffer_Release(&buffer); - return res >= 0 ? PyLong_FromSsize_t(res) : NULL; -} - -static PyObject * -pylong_fromnativebytes(PyObject *module, PyObject *args) -{ - Py_buffer buffer; - Py_ssize_t n, endianness, signed_; - if (!PyArg_ParseTuple(args, "y*nnn", &buffer, &n, &endianness, &signed_)) { - return NULL; - } - if (buffer.len < n) { - PyErr_SetString(PyExc_ValueError, "buffer must be at least 'n' bytes"); - PyBuffer_Release(&buffer); - return NULL; - } - PyObject *res = signed_ - ? PyLong_FromNativeBytes(buffer.buf, n, (int)endianness) - : PyLong_FromUnsignedNativeBytes(buffer.buf, n, (int)endianness); - PyBuffer_Release(&buffer); - return res; -} - - static PyMethodDef test_methods[] = { _TESTCAPI_TEST_LONG_AND_OVERFLOW_METHODDEF _TESTCAPI_TEST_LONG_API_METHODDEF @@ -849,8 +804,6 @@ static PyMethodDef test_methods[] = { {"pylong_as_size_t", pylong_as_size_t, METH_O}, {"pylong_asdouble", pylong_asdouble, METH_O}, {"pylong_asvoidptr", pylong_asvoidptr, METH_O}, - {"pylong_asnativebytes", pylong_asnativebytes, METH_VARARGS}, - {"pylong_fromnativebytes", pylong_fromnativebytes, METH_VARARGS}, {NULL}, }; @@ -860,5 +813,6 @@ _PyTestCapi_Init_Long(PyObject *mod) if (PyModule_AddFunctions(mod, test_methods) < 0) { return -1; } + return 0; } diff --git a/Modules/_testcapi/parts.h b/Modules/_testcapi/parts.h index e8cfb2423500d4..29817edd69b134 100644 --- a/Modules/_testcapi/parts.h +++ b/Modules/_testcapi/parts.h @@ -59,7 +59,6 @@ int _PyTestCapi_Init_Immortal(PyObject *module); int _PyTestCapi_Init_GC(PyObject *module); int _PyTestCapi_Init_Sys(PyObject *module); int _PyTestCapi_Init_Hash(PyObject *module); -int _PyTestCapi_Init_Time(PyObject *module); int _PyTestCapi_Init_VectorcallLimited(PyObject *module); int _PyTestCapi_Init_HeaptypeRelative(PyObject *module); diff --git a/Modules/_testcapi/time.c b/Modules/_testcapi/time.c deleted file mode 100644 index 57eb9135d30029..00000000000000 --- a/Modules/_testcapi/time.c +++ /dev/null @@ -1,104 +0,0 @@ -#include "parts.h" - - -static int -pytime_from_nanoseconds(PyTime_t *tp, PyObject *obj) -{ - if (!PyLong_Check(obj)) { - PyErr_Format(PyExc_TypeError, "expect int, got %s", - Py_TYPE(obj)->tp_name); - return -1; - } - - long long nsec = PyLong_AsLongLong(obj); - if (nsec == -1 && PyErr_Occurred()) { - return -1; - } - - Py_BUILD_ASSERT(sizeof(long long) == sizeof(PyTime_t)); - *tp = (PyTime_t)nsec; - return 0; -} - - -static PyObject * -test_pytime_assecondsdouble(PyObject *Py_UNUSED(self), PyObject *args) 
-{ - PyObject *obj; - if (!PyArg_ParseTuple(args, "O", &obj)) { - return NULL; - } - PyTime_t ts; - if (pytime_from_nanoseconds(&ts, obj) < 0) { - return NULL; - } - double d = PyTime_AsSecondsDouble(ts); - return PyFloat_FromDouble(d); -} - - -static PyObject* -pytime_as_float(PyTime_t t) -{ - return PyFloat_FromDouble(PyTime_AsSecondsDouble(t)); -} - - - -static PyObject* -test_pytime_monotonic(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args)) -{ - PyTime_t t; - if (PyTime_Monotonic(&t) < 0) { - return NULL; - } - return pytime_as_float(t); -} - - -static PyObject* -test_pytime_perf_counter(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args)) -{ - PyTime_t t; - if (PyTime_PerfCounter(&t) < 0) { - return NULL; - } - return pytime_as_float(t); -} - - -static PyObject* -test_pytime_time(PyObject *Py_UNUSED(self), PyObject *Py_UNUSED(args)) -{ - PyTime_t t; - if (PyTime_Time(&t) < 0) { - printf("ERR! %d\n", (int)t); - return NULL; - } - return pytime_as_float(t); -} - - -static PyMethodDef test_methods[] = { - {"PyTime_AsSecondsDouble", test_pytime_assecondsdouble, METH_VARARGS}, - {"PyTime_Monotonic", test_pytime_monotonic, METH_NOARGS}, - {"PyTime_PerfCounter", test_pytime_perf_counter, METH_NOARGS}, - {"PyTime_Time", test_pytime_time, METH_NOARGS}, - {NULL}, -}; - -int -_PyTestCapi_Init_Time(PyObject *m) -{ - if (PyModule_AddFunctions(m, test_methods) < 0) { - return -1; - } - Py_BUILD_ASSERT(sizeof(long long) == sizeof(PyTime_t)); - if (PyModule_AddObject(m, "PyTime_MIN", PyLong_FromLongLong(PyTime_MIN)) < 0) { - return 1; - } - if (PyModule_AddObject(m, "PyTime_MAX", PyLong_FromLongLong(PyTime_MAX)) < 0) { - return 1; - } - return 0; -} diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index b03f871b089c8a..e67de3eeb6e17e 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -4107,9 +4107,6 @@ PyInit__testcapi(void) if (_PyTestCapi_Init_Hash(m) < 0) { return NULL; } - if (_PyTestCapi_Init_Time(m) < 0) { - return NULL; - } PyState_AddModule(m, &_testcapimodule); return m; diff --git a/Modules/_testinternalcapi/pytime.c b/Modules/_testinternalcapi/pytime.c index f0f758ea032df8..2b5f9eb0ef2851 100644 --- a/Modules/_testinternalcapi/pytime.c +++ b/Modules/_testinternalcapi/pytime.c @@ -52,6 +52,21 @@ test_pytime_fromsecondsobject(PyObject *self, PyObject *args) return _PyTime_AsNanosecondsObject(ts); } +static PyObject * +test_pytime_assecondsdouble(PyObject *self, PyObject *args) +{ + PyObject *obj; + if (!PyArg_ParseTuple(args, "O", &obj)) { + return NULL; + } + _PyTime_t ts; + if (_PyTime_FromNanosecondsObject(&ts, obj) < 0) { + return NULL; + } + double d = _PyTime_AsSecondsDouble(ts); + return PyFloat_FromDouble(d); +} + static PyObject * test_PyTime_AsTimeval(PyObject *self, PyObject *args) { @@ -239,6 +254,7 @@ test_pytime_object_to_timespec(PyObject *self, PyObject *args) static PyMethodDef TestMethods[] = { {"_PyTime_AsMicroseconds", test_PyTime_AsMicroseconds, METH_VARARGS}, {"_PyTime_AsMilliseconds", test_PyTime_AsMilliseconds, METH_VARARGS}, + {"_PyTime_AsSecondsDouble", test_pytime_assecondsdouble, METH_VARARGS}, #ifdef HAVE_CLOCK_GETTIME {"_PyTime_AsTimespec", test_PyTime_AsTimespec, METH_VARARGS}, {"_PyTime_AsTimespec_clamp", test_PyTime_AsTimespec_clamp, METH_VARARGS}, diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c index d7840eaf45e8d6..df02b023012fbd 100644 --- a/Modules/_threadmodule.c +++ b/Modules/_threadmodule.c @@ -1244,7 +1244,7 @@ thread_run(void *boot_raw) _PyThreadState_Bind(tstate); 
PyEval_AcquireThread(tstate); - _Py_atomic_add_ssize(&tstate->interp->threads.count, 1); + tstate->interp->threads.count++; PyObject *res = PyObject_Call(boot->func, boot->args, boot->kwargs); if (res == NULL) { @@ -1262,7 +1262,7 @@ thread_run(void *boot_raw) thread_bootstate_free(boot, 1); - _Py_atomic_add_ssize(&tstate->interp->threads.count, -1); + tstate->interp->threads.count--; PyThreadState_Clear(tstate); _PyThreadState_DeleteCurrent(tstate); @@ -1539,7 +1539,7 @@ static PyObject * thread__count(PyObject *self, PyObject *Py_UNUSED(ignored)) { PyInterpreterState *interp = _PyInterpreterState_GET(); - return PyLong_FromSsize_t(_Py_atomic_load_ssize(&interp->threads.count)); + return PyLong_FromLong(interp->threads.count); } PyDoc_STRVAR(_count_doc, diff --git a/Modules/_tkinter.c b/Modules/_tkinter.c index e3789867dc085f..f6181168a85ae1 100644 --- a/Modules/_tkinter.c +++ b/Modules/_tkinter.c @@ -926,8 +926,7 @@ AsObj(PyObject *value) (unsigned char *)(void *)&wideValue, sizeof(wideValue), PY_LITTLE_ENDIAN, - /* signed */ 1, - /* with_exceptions */ 1) == 0) { + /* signed */ 1) == 0) { return Tcl_NewWideIntObj(wideValue); } PyErr_Clear(); diff --git a/Modules/_winapi.c b/Modules/_winapi.c index 83a4ccd4802ae0..5e5eb123c4ccff 100644 --- a/Modules/_winapi.c +++ b/Modules/_winapi.c @@ -438,39 +438,6 @@ _winapi_ConnectNamedPipe_impl(PyObject *module, HANDLE handle, Py_RETURN_NONE; } -/*[clinic input] -_winapi.CreateEventW -> HANDLE - - security_attributes: LPSECURITY_ATTRIBUTES - manual_reset: BOOL - initial_state: BOOL - name: LPCWSTR(accept={str, NoneType}) -[clinic start generated code]*/ - -static HANDLE -_winapi_CreateEventW_impl(PyObject *module, - LPSECURITY_ATTRIBUTES security_attributes, - BOOL manual_reset, BOOL initial_state, - LPCWSTR name) -/*[clinic end generated code: output=2d4c7d5852ecb298 input=4187cee28ac763f8]*/ -{ - HANDLE handle; - - if (PySys_Audit("_winapi.CreateEventW", "bbu", manual_reset, initial_state, name) < 0) { - return INVALID_HANDLE_VALUE; - } - - Py_BEGIN_ALLOW_THREADS - handle = CreateEventW(security_attributes, manual_reset, initial_state, name); - Py_END_ALLOW_THREADS - - if (handle == INVALID_HANDLE_VALUE) { - PyErr_SetFromWindowsErr(0); - } - - return handle; -} - /*[clinic input] _winapi.CreateFile -> HANDLE @@ -707,37 +674,6 @@ _winapi_CreateJunction_impl(PyObject *module, LPCWSTR src_path, Py_RETURN_NONE; } -/*[clinic input] -_winapi.CreateMutexW -> HANDLE - - security_attributes: LPSECURITY_ATTRIBUTES - initial_owner: BOOL - name: LPCWSTR(accept={str, NoneType}) -[clinic start generated code]*/ - -static HANDLE -_winapi_CreateMutexW_impl(PyObject *module, - LPSECURITY_ATTRIBUTES security_attributes, - BOOL initial_owner, LPCWSTR name) -/*[clinic end generated code: output=31b9ee8fc37e49a5 input=7d54b921e723254a]*/ -{ - HANDLE handle; - - if (PySys_Audit("_winapi.CreateMutexW", "bu", initial_owner, name) < 0) { - return INVALID_HANDLE_VALUE; - } - - Py_BEGIN_ALLOW_THREADS - handle = CreateMutexW(security_attributes, initial_owner, name); - Py_END_ALLOW_THREADS - - if (handle == INVALID_HANDLE_VALUE) { - PyErr_SetFromWindowsErr(0); - } - - return handle; -} - /*[clinic input] _winapi.CreateNamedPipe -> HANDLE @@ -1654,67 +1590,6 @@ _winapi_UnmapViewOfFile_impl(PyObject *module, LPCVOID address) Py_RETURN_NONE; } -/*[clinic input] -_winapi.OpenEventW -> HANDLE - - desired_access: DWORD - inherit_handle: BOOL - name: LPCWSTR -[clinic start generated code]*/ - -static HANDLE -_winapi_OpenEventW_impl(PyObject *module, DWORD desired_access, - BOOL 
inherit_handle, LPCWSTR name) -/*[clinic end generated code: output=c4a45e95545a4bd2 input=dec26598748d35aa]*/ -{ - HANDLE handle; - - if (PySys_Audit("_winapi.OpenEventW", "Iu", desired_access, name) < 0) { - return INVALID_HANDLE_VALUE; - } - - Py_BEGIN_ALLOW_THREADS - handle = OpenEventW(desired_access, inherit_handle, name); - Py_END_ALLOW_THREADS - - if (handle == INVALID_HANDLE_VALUE) { - PyErr_SetFromWindowsErr(0); - } - - return handle; -} - - -/*[clinic input] -_winapi.OpenMutexW -> HANDLE - - desired_access: DWORD - inherit_handle: BOOL - name: LPCWSTR -[clinic start generated code]*/ - -static HANDLE -_winapi_OpenMutexW_impl(PyObject *module, DWORD desired_access, - BOOL inherit_handle, LPCWSTR name) -/*[clinic end generated code: output=dda39d7844397bf0 input=f3a7b466c5307712]*/ -{ - HANDLE handle; - - if (PySys_Audit("_winapi.OpenMutexW", "Iu", desired_access, name) < 0) { - return INVALID_HANDLE_VALUE; - } - - Py_BEGIN_ALLOW_THREADS - handle = OpenMutexW(desired_access, inherit_handle, name); - Py_END_ALLOW_THREADS - - if (handle == INVALID_HANDLE_VALUE) { - PyErr_SetFromWindowsErr(0); - } - - return handle; -} - /*[clinic input] _winapi.OpenFileMapping -> HANDLE @@ -1945,75 +1820,6 @@ _winapi_ReadFile_impl(PyObject *module, HANDLE handle, DWORD size, return Py_BuildValue("NI", buf, err); } -/*[clinic input] -_winapi.ReleaseMutex - - mutex: HANDLE -[clinic start generated code]*/ - -static PyObject * -_winapi_ReleaseMutex_impl(PyObject *module, HANDLE mutex) -/*[clinic end generated code: output=5b9001a72dd8af37 input=49e9d20de3559d84]*/ -{ - int err = 0; - - Py_BEGIN_ALLOW_THREADS - if (!ReleaseMutex(mutex)) { - err = GetLastError(); - } - Py_END_ALLOW_THREADS - if (err) { - return PyErr_SetFromWindowsErr(err); - } - Py_RETURN_NONE; -} - -/*[clinic input] -_winapi.ResetEvent - - event: HANDLE -[clinic start generated code]*/ - -static PyObject * -_winapi_ResetEvent_impl(PyObject *module, HANDLE event) -/*[clinic end generated code: output=81c8501d57c0530d input=e2d42d990322e87a]*/ -{ - int err = 0; - - Py_BEGIN_ALLOW_THREADS - if (!ResetEvent(event)) { - err = GetLastError(); - } - Py_END_ALLOW_THREADS - if (err) { - return PyErr_SetFromWindowsErr(err); - } - Py_RETURN_NONE; -} - -/*[clinic input] -_winapi.SetEvent - - event: HANDLE -[clinic start generated code]*/ - -static PyObject * -_winapi_SetEvent_impl(PyObject *module, HANDLE event) -/*[clinic end generated code: output=c18ba09eb9aa774d input=e660e830a37c09f8]*/ -{ - int err = 0; - - Py_BEGIN_ALLOW_THREADS - if (!SetEvent(event)) { - err = GetLastError(); - } - Py_END_ALLOW_THREADS - if (err) { - return PyErr_SetFromWindowsErr(err); - } - Py_RETURN_NONE; -} - /*[clinic input] _winapi.SetNamedPipeHandleState @@ -2136,310 +1942,6 @@ _winapi_WaitNamedPipe_impl(PyObject *module, LPCTSTR name, DWORD timeout) Py_RETURN_NONE; } - -typedef struct { - HANDLE handles[MAXIMUM_WAIT_OBJECTS]; - HANDLE cancel_event; - DWORD handle_base; - DWORD handle_count; - HANDLE thread; - volatile DWORD result; -} BatchedWaitData; - -static DWORD WINAPI -_batched_WaitForMultipleObjects_thread(LPVOID param) -{ - BatchedWaitData *data = (BatchedWaitData *)param; - data->result = WaitForMultipleObjects( - data->handle_count, - data->handles, - FALSE, - INFINITE - ); - if (data->result == WAIT_FAILED) { - DWORD err = GetLastError(); - SetEvent(data->cancel_event); - return err; - } else if (data->result >= WAIT_ABANDONED_0 && data->result < WAIT_ABANDONED_0 + MAXIMUM_WAIT_OBJECTS) { - data->result = WAIT_FAILED; - SetEvent(data->cancel_event); - 
return ERROR_ABANDONED_WAIT_0; - } - return 0; -} - -/*[clinic input] -_winapi.BatchedWaitForMultipleObjects - - handle_seq: object - wait_all: BOOL - milliseconds: DWORD(c_default='INFINITE') = _winapi.INFINITE - -Supports a larger number of handles than WaitForMultipleObjects - -Note that the handles may be waited on other threads, which could cause -issues for objects like mutexes that become associated with the thread -that was waiting for them. Objects may also be left signalled, even if -the wait fails. - -It is recommended to use WaitForMultipleObjects whenever possible, and -only switch to BatchedWaitForMultipleObjects for scenarios where you -control all the handles involved, such as your own thread pool or -files, and all wait objects are left unmodified by a wait (for example, -manual reset events, threads, and files/pipes). - -Overlapped handles returned from this module use manual reset events. -[clinic start generated code]*/ - -static PyObject * -_winapi_BatchedWaitForMultipleObjects_impl(PyObject *module, - PyObject *handle_seq, - BOOL wait_all, DWORD milliseconds) -/*[clinic end generated code: output=d21c1a4ad0a252fd input=7e196f29005dc77b]*/ -{ - Py_ssize_t thread_count = 0, handle_count = 0, i, j; - Py_ssize_t nhandles; - BatchedWaitData *thread_data[MAXIMUM_WAIT_OBJECTS]; - HANDLE handles[MAXIMUM_WAIT_OBJECTS]; - HANDLE sigint_event = NULL; - HANDLE cancel_event = NULL; - DWORD result; - - const Py_ssize_t _MAXIMUM_TOTAL_OBJECTS = (MAXIMUM_WAIT_OBJECTS - 1) * (MAXIMUM_WAIT_OBJECTS - 1); - - if (!PySequence_Check(handle_seq)) { - PyErr_Format(PyExc_TypeError, - "sequence type expected, got '%s'", - Py_TYPE(handle_seq)->tp_name); - return NULL; - } - nhandles = PySequence_Length(handle_seq); - if (nhandles == -1) { - return NULL; - } - if (nhandles == 0) { - return wait_all ? Py_NewRef(Py_None) : PyList_New(0); - } - - /* If this is the main thread then make the wait interruptible - by Ctrl-C. When waiting for *all* handles, it is only checked - in between batches. */ - if (_PyOS_IsMainThread()) { - sigint_event = _PyOS_SigintEvent(); - assert(sigint_event != NULL); - } - - if (nhandles < 0 || nhandles > _MAXIMUM_TOTAL_OBJECTS) { - PyErr_Format(PyExc_ValueError, - "need at most %zd handles, got a sequence of length %zd", - _MAXIMUM_TOTAL_OBJECTS, nhandles); - return NULL; - } - - if (!wait_all) { - cancel_event = CreateEventW(NULL, TRUE, FALSE, NULL); - if (!cancel_event) { - PyErr_SetExcFromWindowsErr(PyExc_OSError, 0); - return NULL; - } - } - - i = 0; - while (i < nhandles) { - BatchedWaitData *data = (BatchedWaitData*)PyMem_Malloc(sizeof(BatchedWaitData)); - if (!data) { - goto error; - } - thread_data[thread_count++] = data; - data->thread = NULL; - data->cancel_event = cancel_event; - data->handle_base = Py_SAFE_DOWNCAST(i, Py_ssize_t, DWORD); - data->handle_count = Py_SAFE_DOWNCAST(nhandles - i, Py_ssize_t, DWORD); - if (data->handle_count > MAXIMUM_WAIT_OBJECTS - 1) { - data->handle_count = MAXIMUM_WAIT_OBJECTS - 1; - } - for (j = 0; j < data->handle_count; ++i, ++j) { - PyObject *v = PySequence_GetItem(handle_seq, i); - if (!v || !PyArg_Parse(v, F_HANDLE, &data->handles[j])) { - Py_XDECREF(v); - goto error; - } - Py_DECREF(v); - } - if (!wait_all) { - data->handles[data->handle_count++] = cancel_event; - } - } - - DWORD err = 0; - - /* We need to use different strategies when waiting for ALL handles - as opposed to ANY handle. This is because there is no way to - (safely) interrupt a thread that is waiting for all handles in a - group. 
So for ALL handles, we loop over each set and wait. For - ANY handle, we use threads and wait on them. */ - if (wait_all) { - Py_BEGIN_ALLOW_THREADS - long long deadline = 0; - if (milliseconds != INFINITE) { - deadline = (long long)GetTickCount64() + milliseconds; - } - - for (i = 0; !err && i < thread_count; ++i) { - DWORD timeout = milliseconds; - if (deadline) { - long long time_to_deadline = deadline - GetTickCount64(); - if (time_to_deadline <= 0) { - err = WAIT_TIMEOUT; - break; - } else if (time_to_deadline < UINT_MAX) { - timeout = (DWORD)time_to_deadline; - } - } - result = WaitForMultipleObjects(thread_data[i]->handle_count, - thread_data[i]->handles, TRUE, timeout); - // ABANDONED is not possible here because we own all the handles - if (result == WAIT_FAILED) { - err = GetLastError(); - } else if (result == WAIT_TIMEOUT) { - err = WAIT_TIMEOUT; - } - - if (!err && sigint_event) { - result = WaitForSingleObject(sigint_event, 0); - if (result == WAIT_OBJECT_0) { - err = ERROR_CONTROL_C_EXIT; - } else if (result == WAIT_FAILED) { - err = GetLastError(); - } - } - } - - CloseHandle(cancel_event); - - Py_END_ALLOW_THREADS - } else { - Py_BEGIN_ALLOW_THREADS - - for (i = 0; i < thread_count; ++i) { - BatchedWaitData *data = thread_data[i]; - data->thread = CreateThread( - NULL, - 1, // smallest possible initial stack - _batched_WaitForMultipleObjects_thread, - (LPVOID)data, - CREATE_SUSPENDED, - NULL - ); - if (!data->thread) { - err = GetLastError(); - break; - } - handles[handle_count++] = data->thread; - } - Py_END_ALLOW_THREADS - - if (err) { - PyErr_SetExcFromWindowsErr(PyExc_OSError, err); - goto error; - } - if (handle_count > MAXIMUM_WAIT_OBJECTS - 1) { - // basically an assert, but stronger - PyErr_SetString(PyExc_SystemError, "allocated too many wait objects"); - goto error; - } - - Py_BEGIN_ALLOW_THREADS - - // Once we start resuming threads, can no longer "goto error" - for (i = 0; i < thread_count; ++i) { - ResumeThread(thread_data[i]->thread); - } - if (sigint_event) { - handles[handle_count++] = sigint_event; - } - result = WaitForMultipleObjects((DWORD)handle_count, handles, wait_all, milliseconds); - // ABANDONED is not possible here because we own all the handles - if (result == WAIT_FAILED) { - err = GetLastError(); - } else if (result == WAIT_TIMEOUT) { - err = WAIT_TIMEOUT; - } else if (sigint_event && result == WAIT_OBJECT_0 + handle_count) { - err = ERROR_CONTROL_C_EXIT; - } - - SetEvent(cancel_event); - - // Wait for all threads to finish before we start freeing their memory - if (sigint_event) { - handle_count -= 1; - } - WaitForMultipleObjects((DWORD)handle_count, handles, TRUE, INFINITE); - - for (i = 0; i < thread_count; ++i) { - if (!err && thread_data[i]->result == WAIT_FAILED) { - if (!GetExitCodeThread(thread_data[i]->thread, &err)) { - err = GetLastError(); - } - } - CloseHandle(thread_data[i]->thread); - } - - CloseHandle(cancel_event); - - Py_END_ALLOW_THREADS - - } - - PyObject *triggered_indices; - if (sigint_event != NULL && err == ERROR_CONTROL_C_EXIT) { - errno = EINTR; - PyErr_SetFromErrno(PyExc_OSError); - triggered_indices = NULL; - } else if (err) { - PyErr_SetExcFromWindowsErr(PyExc_OSError, err); - triggered_indices = NULL; - } else if (wait_all) { - triggered_indices = Py_NewRef(Py_None); - } else { - triggered_indices = PyList_New(0); - if (triggered_indices) { - for (i = 0; i < thread_count; ++i) { - Py_ssize_t triggered = (Py_ssize_t)thread_data[i]->result - WAIT_OBJECT_0; - if (triggered >= 0 && triggered < 
thread_data[i]->handle_count - 1) { - PyObject *v = PyLong_FromSsize_t(thread_data[i]->handle_base + triggered); - if (!v || PyList_Append(triggered_indices, v) < 0) { - Py_XDECREF(v); - Py_CLEAR(triggered_indices); - break; - } - Py_DECREF(v); - } - } - } - } - - for (i = 0; i < thread_count; ++i) { - PyMem_Free((void *)thread_data[i]); - } - - return triggered_indices; - -error: - // We should only enter here before any threads start running. - // Once we start resuming threads, different cleanup is required - CloseHandle(cancel_event); - while (--thread_count >= 0) { - HANDLE t = thread_data[thread_count]->thread; - if (t) { - TerminateThread(t, WAIT_ABANDONED_0); - CloseHandle(t); - } - PyMem_Free((void *)thread_data[thread_count]); - } - return NULL; -} - /*[clinic input] _winapi.WaitForMultipleObjects @@ -2833,10 +2335,8 @@ _winapi_CopyFile2_impl(PyObject *module, LPCWSTR existing_file_name, static PyMethodDef winapi_functions[] = { _WINAPI_CLOSEHANDLE_METHODDEF _WINAPI_CONNECTNAMEDPIPE_METHODDEF - _WINAPI_CREATEEVENTW_METHODDEF _WINAPI_CREATEFILE_METHODDEF _WINAPI_CREATEFILEMAPPING_METHODDEF - _WINAPI_CREATEMUTEXW_METHODDEF _WINAPI_CREATENAMEDPIPE_METHODDEF _WINAPI_CREATEPIPE_METHODDEF _WINAPI_CREATEPROCESS_METHODDEF @@ -2850,23 +2350,17 @@ static PyMethodDef winapi_functions[] = { _WINAPI_GETSTDHANDLE_METHODDEF _WINAPI_GETVERSION_METHODDEF _WINAPI_MAPVIEWOFFILE_METHODDEF - _WINAPI_OPENEVENTW_METHODDEF _WINAPI_OPENFILEMAPPING_METHODDEF - _WINAPI_OPENMUTEXW_METHODDEF _WINAPI_OPENPROCESS_METHODDEF _WINAPI_PEEKNAMEDPIPE_METHODDEF _WINAPI_LCMAPSTRINGEX_METHODDEF _WINAPI_READFILE_METHODDEF - _WINAPI_RELEASEMUTEX_METHODDEF - _WINAPI_RESETEVENT_METHODDEF - _WINAPI_SETEVENT_METHODDEF _WINAPI_SETNAMEDPIPEHANDLESTATE_METHODDEF _WINAPI_TERMINATEPROCESS_METHODDEF _WINAPI_UNMAPVIEWOFFILE_METHODDEF _WINAPI_VIRTUALQUERYSIZE_METHODDEF _WINAPI_WAITNAMEDPIPE_METHODDEF _WINAPI_WAITFORMULTIPLEOBJECTS_METHODDEF - _WINAPI_BATCHEDWAITFORMULTIPLEOBJECTS_METHODDEF _WINAPI_WAITFORSINGLEOBJECT_METHODDEF _WINAPI_WRITEFILE_METHODDEF _WINAPI_GETACP_METHODDEF diff --git a/Modules/cjkcodecs/multibytecodec.c b/Modules/cjkcodecs/multibytecodec.c index 2125da437963d2..5d3c16a98423ba 100644 --- a/Modules/cjkcodecs/multibytecodec.c +++ b/Modules/cjkcodecs/multibytecodec.c @@ -973,8 +973,7 @@ _multibytecodec_MultibyteIncrementalEncoder_setstate_impl(MultibyteIncrementalEn if (_PyLong_AsByteArray(statelong, statebytes, sizeof(statebytes), 1 /* little-endian */ , - 0 /* unsigned */ , - 1 /* with_exceptions */) < 0) { + 0 /* unsigned */ ) < 0) { goto errorexit; } @@ -1256,8 +1255,7 @@ _multibytecodec_MultibyteIncrementalDecoder_setstate_impl(MultibyteIncrementalDe if (_PyLong_AsByteArray(statelong, statebytes, sizeof(statebytes), 1 /* little-endian */ , - 0 /* unsigned */ , - 1 /* with_exceptions */) < 0) { + 0 /* unsigned */ ) < 0) { return NULL; } diff --git a/Modules/clinic/_winapi.c.h b/Modules/clinic/_winapi.c.h index 468457e624c691..d1052f38919dde 100644 --- a/Modules/clinic/_winapi.c.h +++ b/Modules/clinic/_winapi.c.h @@ -151,76 +151,6 @@ _winapi_ConnectNamedPipe(PyObject *module, PyObject *const *args, Py_ssize_t nar return return_value; } -PyDoc_STRVAR(_winapi_CreateEventW__doc__, -"CreateEventW($module, /, security_attributes, manual_reset,\n" -" initial_state, name)\n" -"--\n" -"\n"); - -#define _WINAPI_CREATEEVENTW_METHODDEF \ - {"CreateEventW", _PyCFunction_CAST(_winapi_CreateEventW), METH_FASTCALL|METH_KEYWORDS, _winapi_CreateEventW__doc__}, - -static HANDLE -_winapi_CreateEventW_impl(PyObject *module, - 
LPSECURITY_ATTRIBUTES security_attributes, - BOOL manual_reset, BOOL initial_state, - LPCWSTR name); - -static PyObject * -_winapi_CreateEventW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 4 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { &_Py_ID(security_attributes), &_Py_ID(manual_reset), &_Py_ID(initial_state), &_Py_ID(name), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"security_attributes", "manual_reset", "initial_state", "name", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "" F_POINTER "iiO&:CreateEventW", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - LPSECURITY_ATTRIBUTES security_attributes; - BOOL manual_reset; - BOOL initial_state; - LPCWSTR name = NULL; - HANDLE _return_value; - - if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &security_attributes, &manual_reset, &initial_state, _PyUnicode_WideCharString_Opt_Converter, &name)) { - goto exit; - } - _return_value = _winapi_CreateEventW_impl(module, security_attributes, manual_reset, initial_state, name); - if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { - goto exit; - } - if (_return_value == NULL) { - Py_RETURN_NONE; - } - return_value = HANDLE_TO_PYNUM(_return_value); - -exit: - /* Cleanup for name */ - PyMem_Free((void *)name); - - return return_value; -} - PyDoc_STRVAR(_winapi_CreateFile__doc__, "CreateFile($module, file_name, desired_access, share_mode,\n" " security_attributes, creation_disposition,\n" @@ -367,73 +297,6 @@ _winapi_CreateJunction(PyObject *module, PyObject *const *args, Py_ssize_t nargs return return_value; } -PyDoc_STRVAR(_winapi_CreateMutexW__doc__, -"CreateMutexW($module, /, security_attributes, initial_owner, name)\n" -"--\n" -"\n"); - -#define _WINAPI_CREATEMUTEXW_METHODDEF \ - {"CreateMutexW", _PyCFunction_CAST(_winapi_CreateMutexW), METH_FASTCALL|METH_KEYWORDS, _winapi_CreateMutexW__doc__}, - -static HANDLE -_winapi_CreateMutexW_impl(PyObject *module, - LPSECURITY_ATTRIBUTES security_attributes, - BOOL initial_owner, LPCWSTR name); - -static PyObject * -_winapi_CreateMutexW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 3 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { &_Py_ID(security_attributes), &_Py_ID(initial_owner), &_Py_ID(name), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"security_attributes", "initial_owner", "name", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "" F_POINTER "iO&:CreateMutexW", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - LPSECURITY_ATTRIBUTES security_attributes; - BOOL initial_owner; - LPCWSTR name = NULL; - HANDLE _return_value; - - if 
(!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &security_attributes, &initial_owner, _PyUnicode_WideCharString_Opt_Converter, &name)) { - goto exit; - } - _return_value = _winapi_CreateMutexW_impl(module, security_attributes, initial_owner, name); - if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { - goto exit; - } - if (_return_value == NULL) { - Py_RETURN_NONE; - } - return_value = HANDLE_TO_PYNUM(_return_value); - -exit: - /* Cleanup for name */ - PyMem_Free((void *)name); - - return return_value; -} - PyDoc_STRVAR(_winapi_CreateNamedPipe__doc__, "CreateNamedPipe($module, name, open_mode, pipe_mode, max_instances,\n" " out_buffer_size, in_buffer_size, default_timeout,\n" @@ -908,138 +771,6 @@ _winapi_UnmapViewOfFile(PyObject *module, PyObject *arg) return return_value; } -PyDoc_STRVAR(_winapi_OpenEventW__doc__, -"OpenEventW($module, /, desired_access, inherit_handle, name)\n" -"--\n" -"\n"); - -#define _WINAPI_OPENEVENTW_METHODDEF \ - {"OpenEventW", _PyCFunction_CAST(_winapi_OpenEventW), METH_FASTCALL|METH_KEYWORDS, _winapi_OpenEventW__doc__}, - -static HANDLE -_winapi_OpenEventW_impl(PyObject *module, DWORD desired_access, - BOOL inherit_handle, LPCWSTR name); - -static PyObject * -_winapi_OpenEventW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 3 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { &_Py_ID(desired_access), &_Py_ID(inherit_handle), &_Py_ID(name), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"desired_access", "inherit_handle", "name", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "kiO&:OpenEventW", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - DWORD desired_access; - BOOL inherit_handle; - LPCWSTR name = NULL; - HANDLE _return_value; - - if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &desired_access, &inherit_handle, _PyUnicode_WideCharString_Converter, &name)) { - goto exit; - } - _return_value = _winapi_OpenEventW_impl(module, desired_access, inherit_handle, name); - if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { - goto exit; - } - if (_return_value == NULL) { - Py_RETURN_NONE; - } - return_value = HANDLE_TO_PYNUM(_return_value); - -exit: - /* Cleanup for name */ - PyMem_Free((void *)name); - - return return_value; -} - -PyDoc_STRVAR(_winapi_OpenMutexW__doc__, -"OpenMutexW($module, /, desired_access, inherit_handle, name)\n" -"--\n" -"\n"); - -#define _WINAPI_OPENMUTEXW_METHODDEF \ - {"OpenMutexW", _PyCFunction_CAST(_winapi_OpenMutexW), METH_FASTCALL|METH_KEYWORDS, _winapi_OpenMutexW__doc__}, - -static HANDLE -_winapi_OpenMutexW_impl(PyObject *module, DWORD desired_access, - BOOL inherit_handle, LPCWSTR name); - -static PyObject * -_winapi_OpenMutexW(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 3 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = 
PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { &_Py_ID(desired_access), &_Py_ID(inherit_handle), &_Py_ID(name), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"desired_access", "inherit_handle", "name", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "kiO&:OpenMutexW", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - DWORD desired_access; - BOOL inherit_handle; - LPCWSTR name = NULL; - HANDLE _return_value; - - if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &desired_access, &inherit_handle, _PyUnicode_WideCharString_Converter, &name)) { - goto exit; - } - _return_value = _winapi_OpenMutexW_impl(module, desired_access, inherit_handle, name); - if ((_return_value == INVALID_HANDLE_VALUE) && PyErr_Occurred()) { - goto exit; - } - if (_return_value == NULL) { - Py_RETURN_NONE; - } - return_value = HANDLE_TO_PYNUM(_return_value); - -exit: - /* Cleanup for name */ - PyMem_Free((void *)name); - - return return_value; -} - PyDoc_STRVAR(_winapi_OpenFileMapping__doc__, "OpenFileMapping($module, desired_access, inherit_handle, name, /)\n" "--\n" @@ -1260,162 +991,6 @@ _winapi_ReadFile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyOb return return_value; } -PyDoc_STRVAR(_winapi_ReleaseMutex__doc__, -"ReleaseMutex($module, /, mutex)\n" -"--\n" -"\n"); - -#define _WINAPI_RELEASEMUTEX_METHODDEF \ - {"ReleaseMutex", _PyCFunction_CAST(_winapi_ReleaseMutex), METH_FASTCALL|METH_KEYWORDS, _winapi_ReleaseMutex__doc__}, - -static PyObject * -_winapi_ReleaseMutex_impl(PyObject *module, HANDLE mutex); - -static PyObject * -_winapi_ReleaseMutex(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 1 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { &_Py_ID(mutex), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"mutex", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "" F_HANDLE ":ReleaseMutex", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - HANDLE mutex; - - if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &mutex)) { - goto exit; - } - return_value = _winapi_ReleaseMutex_impl(module, mutex); - -exit: - return return_value; -} - -PyDoc_STRVAR(_winapi_ResetEvent__doc__, -"ResetEvent($module, /, event)\n" -"--\n" -"\n"); - -#define _WINAPI_RESETEVENT_METHODDEF \ - {"ResetEvent", _PyCFunction_CAST(_winapi_ResetEvent), METH_FASTCALL|METH_KEYWORDS, _winapi_ResetEvent__doc__}, - -static PyObject * -_winapi_ResetEvent_impl(PyObject *module, HANDLE event); - -static PyObject * -_winapi_ResetEvent(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 1 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { 
&_Py_ID(event), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"event", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "" F_HANDLE ":ResetEvent", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - HANDLE event; - - if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &event)) { - goto exit; - } - return_value = _winapi_ResetEvent_impl(module, event); - -exit: - return return_value; -} - -PyDoc_STRVAR(_winapi_SetEvent__doc__, -"SetEvent($module, /, event)\n" -"--\n" -"\n"); - -#define _WINAPI_SETEVENT_METHODDEF \ - {"SetEvent", _PyCFunction_CAST(_winapi_SetEvent), METH_FASTCALL|METH_KEYWORDS, _winapi_SetEvent__doc__}, - -static PyObject * -_winapi_SetEvent_impl(PyObject *module, HANDLE event); - -static PyObject * -_winapi_SetEvent(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 1 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { &_Py_ID(event), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"event", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "" F_HANDLE ":SetEvent", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - HANDLE event; - - if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &event)) { - goto exit; - } - return_value = _winapi_SetEvent_impl(module, event); - -exit: - return return_value; -} - PyDoc_STRVAR(_winapi_SetNamedPipeHandleState__doc__, "SetNamedPipeHandleState($module, named_pipe, mode,\n" " max_collection_count, collect_data_timeout, /)\n" @@ -1539,77 +1114,6 @@ _winapi_WaitNamedPipe(PyObject *module, PyObject *const *args, Py_ssize_t nargs) return return_value; } -PyDoc_STRVAR(_winapi_BatchedWaitForMultipleObjects__doc__, -"BatchedWaitForMultipleObjects($module, /, handle_seq, wait_all,\n" -" milliseconds=_winapi.INFINITE)\n" -"--\n" -"\n" -"Supports a larger number of handles than WaitForMultipleObjects\n" -"\n" -"Note that the handles may be waited on other threads, which could cause\n" -"issues for objects like mutexes that become associated with the thread\n" -"that was waiting for them. 
Objects may also be left signalled, even if\n" -"the wait fails.\n" -"\n" -"It is recommended to use WaitForMultipleObjects whenever possible, and\n" -"only switch to BatchedWaitForMultipleObjects for scenarios where you\n" -"control all the handles involved, such as your own thread pool or\n" -"files, and all wait objects are left unmodified by a wait (for example,\n" -"manual reset events, threads, and files/pipes).\n" -"\n" -"Overlapped handles returned from this module use manual reset events."); - -#define _WINAPI_BATCHEDWAITFORMULTIPLEOBJECTS_METHODDEF \ - {"BatchedWaitForMultipleObjects", _PyCFunction_CAST(_winapi_BatchedWaitForMultipleObjects), METH_FASTCALL|METH_KEYWORDS, _winapi_BatchedWaitForMultipleObjects__doc__}, - -static PyObject * -_winapi_BatchedWaitForMultipleObjects_impl(PyObject *module, - PyObject *handle_seq, - BOOL wait_all, DWORD milliseconds); - -static PyObject * -_winapi_BatchedWaitForMultipleObjects(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) -{ - PyObject *return_value = NULL; - #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) - - #define NUM_KEYWORDS 3 - static struct { - PyGC_Head _this_is_not_used; - PyObject_VAR_HEAD - PyObject *ob_item[NUM_KEYWORDS]; - } _kwtuple = { - .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) - .ob_item = { &_Py_ID(handle_seq), &_Py_ID(wait_all), &_Py_ID(milliseconds), }, - }; - #undef NUM_KEYWORDS - #define KWTUPLE (&_kwtuple.ob_base.ob_base) - - #else // !Py_BUILD_CORE - # define KWTUPLE NULL - #endif // !Py_BUILD_CORE - - static const char * const _keywords[] = {"handle_seq", "wait_all", "milliseconds", NULL}; - static _PyArg_Parser _parser = { - .keywords = _keywords, - .format = "Oi|k:BatchedWaitForMultipleObjects", - .kwtuple = KWTUPLE, - }; - #undef KWTUPLE - PyObject *handle_seq; - BOOL wait_all; - DWORD milliseconds = INFINITE; - - if (!_PyArg_ParseStackAndKeywords(args, nargs, kwnames, &_parser, - &handle_seq, &wait_all, &milliseconds)) { - goto exit; - } - return_value = _winapi_BatchedWaitForMultipleObjects_impl(module, handle_seq, wait_all, milliseconds); - -exit: - return return_value; -} - PyDoc_STRVAR(_winapi_WaitForMultipleObjects__doc__, "WaitForMultipleObjects($module, handle_seq, wait_flag,\n" " milliseconds=_winapi.INFINITE, /)\n" @@ -1978,4 +1482,4 @@ _winapi_CopyFile2(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyO return return_value; } -/*[clinic end generated code: output=1f5bbcfa8d1847c5 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=2350d4f2275d3a6f input=a9049054013a1b77]*/ diff --git a/Objects/exceptions.c b/Objects/exceptions.c index 3df3a9b3b1a253..cff55d05163b6b 100644 --- a/Objects/exceptions.c +++ b/Objects/exceptions.c @@ -3539,6 +3539,7 @@ SimpleExtendsException(PyExc_Warning, ResourceWarning, #undef EOPNOTSUPP #undef EPROTONOSUPPORT #undef EPROTOTYPE +#undef ETIMEDOUT #undef EWOULDBLOCK #if defined(WSAEALREADY) && !defined(EALREADY) @@ -3559,6 +3560,9 @@ SimpleExtendsException(PyExc_Warning, ResourceWarning, #if defined(WSAESHUTDOWN) && !defined(ESHUTDOWN) #define ESHUTDOWN WSAESHUTDOWN #endif +#if defined(WSAETIMEDOUT) && !defined(ETIMEDOUT) +#define ETIMEDOUT WSAETIMEDOUT +#endif #if defined(WSAEWOULDBLOCK) && !defined(EWOULDBLOCK) #define EWOULDBLOCK WSAEWOULDBLOCK #endif @@ -3743,9 +3747,6 @@ _PyExc_InitState(PyInterpreterState *interp) #endif ADD_ERRNO(ProcessLookupError, ESRCH); ADD_ERRNO(TimeoutError, ETIMEDOUT); -#ifdef WSAETIMEDOUT - ADD_ERRNO(TimeoutError, WSAETIMEDOUT); -#endif return 
_PyStatus_OK(); diff --git a/Objects/longobject.c b/Objects/longobject.c index 932111f58425f2..e655ba19e8f1c1 100644 --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -928,8 +928,7 @@ _PyLong_FromByteArray(const unsigned char* bytes, size_t n, int _PyLong_AsByteArray(PyLongObject* v, unsigned char* bytes, size_t n, - int little_endian, int is_signed, - int with_exceptions) + int little_endian, int is_signed) { Py_ssize_t i; /* index into v->long_value.ob_digit */ Py_ssize_t ndigits; /* number of digits */ @@ -946,10 +945,8 @@ _PyLong_AsByteArray(PyLongObject* v, ndigits = _PyLong_DigitCount(v); if (_PyLong_IsNegative(v)) { if (!is_signed) { - if (with_exceptions) { - PyErr_SetString(PyExc_OverflowError, - "can't convert negative int to unsigned"); - } + PyErr_SetString(PyExc_OverflowError, + "can't convert negative int to unsigned"); return -1; } do_twos_comp = 1; @@ -970,12 +967,7 @@ _PyLong_AsByteArray(PyLongObject* v, /* Copy over all the Python digits. It's crucial that every Python digit except for the MSD contribute exactly PyLong_SHIFT bits to the total, so first assert that the int is - normalized. - NOTE: PyLong_AsNativeBytes() assumes that this function will fill in 'n' - bytes even if it eventually fails to convert the whole number. Make sure - you account for that if you are changing this algorithm to return without - doing that. - */ + normalized. */ assert(ndigits == 0 || v->long_value.ob_digit[ndigits - 1] != 0); j = 0; accum = 0; @@ -1060,203 +1052,11 @@ _PyLong_AsByteArray(PyLongObject* v, return 0; Overflow: - if (with_exceptions) { - PyErr_SetString(PyExc_OverflowError, "int too big to convert"); - } + PyErr_SetString(PyExc_OverflowError, "int too big to convert"); return -1; } -// Refactored out for readability, not reuse -static inline int -_fits_in_n_bits(Py_ssize_t v, Py_ssize_t n) -{ - if (n >= (Py_ssize_t)sizeof(Py_ssize_t) * 8) { - return 1; - } - // If all bits above n are the same, we fit. - // (Use n-1 if we require the sign bit to be consistent.) - Py_ssize_t v_extended = v >> ((int)n - 1); - return v_extended == 0 || v_extended == -1; -} - -static inline int -_resolve_endianness(int *endianness) -{ - if (*endianness < 0) { - *endianness = PY_LITTLE_ENDIAN; - } - if (*endianness != 0 && *endianness != 1) { - PyErr_SetString(PyExc_SystemError, "invalid 'endianness' value"); - return -1; - } - return 0; -} - -Py_ssize_t -PyLong_AsNativeBytes(PyObject* vv, void* buffer, Py_ssize_t n, int endianness) -{ - PyLongObject *v; - union { - Py_ssize_t v; - unsigned char b[sizeof(Py_ssize_t)]; - } cv; - int do_decref = 0; - Py_ssize_t res = 0; - - if (vv == NULL || n < 0) { - PyErr_BadInternalCall(); - return -1; - } - - int little_endian = endianness; - if (_resolve_endianness(&little_endian) < 0) { - return -1; - } - - if (PyLong_Check(vv)) { - v = (PyLongObject *)vv; - } - else { - v = (PyLongObject *)_PyNumber_Index(vv); - if (v == NULL) { - return -1; - } - do_decref = 1; - } - - if (_PyLong_IsCompact(v)) { - res = 0; - cv.v = _PyLong_CompactValue(v); - /* Most paths result in res = sizeof(compact value). Only the case - * where 0 < n < sizeof(compact value) do we need to check and adjust - * our return value. */ - res = sizeof(cv.b); - if (n <= 0) { - // nothing to do! 
- } - else if (n <= sizeof(cv.b)) { -#if PY_LITTLE_ENDIAN - if (little_endian) { - memcpy(buffer, cv.b, n); - } - else { - for (Py_ssize_t i = 0; i < n; ++i) { - ((unsigned char*)buffer)[n - i - 1] = cv.b[i]; - } - } -#else - if (little_endian) { - for (Py_ssize_t i = 0; i < n; ++i) { - ((unsigned char*)buffer)[i] = cv.b[sizeof(cv.b) - i - 1]; - } - } - else { - memcpy(buffer, &cv.b[sizeof(cv.b) - n], n); - } -#endif - - /* If we fit, return the requested number of bytes */ - if (_fits_in_n_bits(cv.v, n * 8)) { - res = n; - } - } - else { - unsigned char fill = cv.v < 0 ? 0xFF : 0x00; -#if PY_LITTLE_ENDIAN - if (little_endian) { - memcpy(buffer, cv.b, sizeof(cv.b)); - memset((char *)buffer + sizeof(cv.b), fill, n - sizeof(cv.b)); - } - else { - unsigned char *b = (unsigned char *)buffer; - for (Py_ssize_t i = 0; i < n - (int)sizeof(cv.b); ++i) { - *b++ = fill; - } - for (Py_ssize_t i = sizeof(cv.b); i > 0; --i) { - *b++ = cv.b[i - 1]; - } - } -#else - if (little_endian) { - unsigned char *b = (unsigned char *)buffer; - for (Py_ssize_t i = sizeof(cv.b); i > 0; --i) { - *b++ = cv.b[i - 1]; - } - for (Py_ssize_t i = 0; i < n - sizeof(cv.b); ++i) { - *b++ = fill; - } - } - else { - memset(buffer, fill, n - sizeof(cv.b)); - memcpy((char *)buffer + n - sizeof(cv.b), cv.b, sizeof(cv.b)); - } -#endif - } - } - else { - if (n > 0) { - _PyLong_AsByteArray(v, buffer, (size_t)n, little_endian, 1, 0); - } - - // More efficient calculation for number of bytes required? - size_t nb = _PyLong_NumBits((PyObject *)v); - /* Normally this would be((nb - 1) / 8) + 1 to avoid rounding up - * multiples of 8 to the next byte, but we add an implied bit for - * the sign and it cancels out. */ - size_t n_needed = (nb / 8) + 1; - res = (Py_ssize_t)n_needed; - if ((size_t)res != n_needed) { - PyErr_SetString(PyExc_OverflowError, - "value too large to convert"); - res = -1; - } - } - - if (do_decref) { - Py_DECREF(v); - } - - return res; -} - - -PyObject * -PyLong_FromNativeBytes(const void* buffer, size_t n, int endianness) -{ - if (!buffer) { - PyErr_BadInternalCall(); - return NULL; - } - - int little_endian = endianness; - if (_resolve_endianness(&little_endian) < 0) { - return NULL; - } - - return _PyLong_FromByteArray((const unsigned char *)buffer, n, - little_endian, 1); -} - - -PyObject * -PyLong_FromUnsignedNativeBytes(const void* buffer, size_t n, int endianness) -{ - if (!buffer) { - PyErr_BadInternalCall(); - return NULL; - } - - int little_endian = endianness; - if (_resolve_endianness(&little_endian) < 0) { - return NULL; - } - - return _PyLong_FromByteArray((const unsigned char *)buffer, n, - little_endian, 0); -} - - /* Create a new int object from a C pointer */ PyObject * @@ -1431,7 +1231,7 @@ PyLong_AsLongLong(PyObject *vv) } else { res = _PyLong_AsByteArray((PyLongObject *)v, (unsigned char *)&bytes, - SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 1, 1); + SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 1); } if (do_decref) { Py_DECREF(v); @@ -1470,7 +1270,7 @@ PyLong_AsUnsignedLongLong(PyObject *vv) } else { res = _PyLong_AsByteArray((PyLongObject *)vv, (unsigned char *)&bytes, - SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 0, 1); + SIZEOF_LONG_LONG, PY_LITTLE_ENDIAN, 0); } /* Plan 9 can't handle long long in ? 
: expressions */ @@ -6268,7 +6068,7 @@ int_to_bytes_impl(PyObject *self, Py_ssize_t length, PyObject *byteorder, if (_PyLong_AsByteArray((PyLongObject *)self, (unsigned char *)PyBytes_AS_STRING(bytes), - length, little_endian, is_signed, 1) < 0) { + length, little_endian, is_signed) < 0) { Py_DECREF(bytes); return NULL; } diff --git a/PC/errmap.h b/PC/errmap.h index a064ecb80b1ed9..a7489ab75c6561 100644 --- a/PC/errmap.h +++ b/PC/errmap.h @@ -129,9 +129,6 @@ winerror_to_errno(int winerror) case ERROR_NO_UNICODE_TRANSLATION: // 1113 return EILSEQ; - case WAIT_TIMEOUT: // 258 - return ETIMEDOUT; - case ERROR_INVALID_FUNCTION: // 1 case ERROR_INVALID_ACCESS: // 12 case ERROR_INVALID_DATA: // 13 diff --git a/PC/launcher2.c b/PC/launcher2.c index 90b0fdebd3bdfb..e426eccd700044 100644 --- a/PC/launcher2.c +++ b/PC/launcher2.c @@ -1594,7 +1594,6 @@ _registryReadLegacyEnvironment(const SearchInfo *search, HKEY root, EnvironmentI int count = swprintf_s(realTag, tagLength + 4, L"%s-32", env->tag); if (count == -1) { - debug(L"# Failed to generate 32bit tag\n"); free(realTag); return RC_INTERNAL_ERROR; } @@ -1750,18 +1749,10 @@ appxSearch(const SearchInfo *search, EnvironmentInfo **result, const wchar_t *pa exeName = search->windowed ? L"pythonw.exe" : L"python.exe"; } - // Failure to get LocalAppData may just mean we're running as a user who - // doesn't have a profile directory. - // In this case, return "not found", but don't fail. - // Chances are they can't launch Store installs anyway. - if (FAILED(SHGetFolderPathW(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, buffer))) { - return RC_NO_PYTHON; - } - - if (!join(buffer, MAXLEN, L"Microsoft\\WindowsApps") || + if (FAILED(SHGetFolderPathW(NULL, CSIDL_LOCAL_APPDATA, NULL, 0, buffer)) || + !join(buffer, MAXLEN, L"Microsoft\\WindowsApps") || !join(buffer, MAXLEN, packageFamilyName) || !join(buffer, MAXLEN, exeName)) { - debug(L"# Failed to construct App Execution Alias path\n"); return RC_INTERNAL_ERROR; } @@ -1991,7 +1982,6 @@ collectEnvironments(const SearchInfo *search, EnvironmentInfo **result) EnvironmentInfo *env = NULL; if (!result) { - debug(L"# collectEnvironments() was passed a NULL result\n"); return RC_INTERNAL_ERROR; } *result = NULL; @@ -2286,7 +2276,6 @@ int selectEnvironment(const SearchInfo *search, EnvironmentInfo *root, EnvironmentInfo **best) { if (!best) { - debug(L"# selectEnvironment() was passed a NULL best\n"); return RC_INTERNAL_ERROR; } if (!root) { diff --git a/PCbuild/_testcapi.vcxproj b/PCbuild/_testcapi.vcxproj index 66df0a61b5b5a6..6911aacab29b97 100644 --- a/PCbuild/_testcapi.vcxproj +++ b/PCbuild/_testcapi.vcxproj @@ -125,7 +125,6 @@ - diff --git a/PCbuild/_testcapi.vcxproj.filters b/PCbuild/_testcapi.vcxproj.filters index 651eb1d6ba0b7f..6059959bb9a040 100644 --- a/PCbuild/_testcapi.vcxproj.filters +++ b/PCbuild/_testcapi.vcxproj.filters @@ -105,9 +105,6 @@ Source Files - - Source Files - Source Files diff --git a/Python/bytecodes.c b/Python/bytecodes.c index 96b97ca4be6d93..197dff4b9888ce 100644 --- a/Python/bytecodes.c +++ b/Python/bytecodes.c @@ -133,7 +133,7 @@ dummy_func( switch (opcode) { // BEGIN BYTECODES // - pure inst(NOP, (--)) { + inst(NOP, (--)) { } family(RESUME, 0) = { @@ -411,12 +411,12 @@ dummy_func( // BINARY_OP_INPLACE_ADD_UNICODE, // See comments at that opcode. 
}; - op(_GUARD_BOTH_INT, (left, right -- left, right)) { + op(_GUARD_BOTH_INT, (left, right -- left: &PYLONG_TYPE, right: &PYLONG_TYPE)) { DEOPT_IF(!PyLong_CheckExact(left)); DEOPT_IF(!PyLong_CheckExact(right)); } - pure op(_BINARY_OP_MULTIPLY_INT, (left, right -- res)) { + pure op(_BINARY_OP_MULTIPLY_INT, (left, right -- res: &PYLONG_TYPE)) { STAT_INC(BINARY_OP, hit); res = _PyLong_Multiply((PyLongObject *)left, (PyLongObject *)right); _Py_DECREF_SPECIALIZED(right, (destructor)PyObject_Free); @@ -424,7 +424,7 @@ dummy_func( ERROR_IF(res == NULL, error); } - pure op(_BINARY_OP_ADD_INT, (left, right -- res)) { + pure op(_BINARY_OP_ADD_INT, (left, right -- res: &PYLONG_TYPE)) { STAT_INC(BINARY_OP, hit); res = _PyLong_Add((PyLongObject *)left, (PyLongObject *)right); _Py_DECREF_SPECIALIZED(right, (destructor)PyObject_Free); @@ -432,7 +432,7 @@ dummy_func( ERROR_IF(res == NULL, error); } - pure op(_BINARY_OP_SUBTRACT_INT, (left, right -- res)) { + pure op(_BINARY_OP_SUBTRACT_INT, (left, right -- res: &PYLONG_TYPE)) { STAT_INC(BINARY_OP, hit); res = _PyLong_Subtract((PyLongObject *)left, (PyLongObject *)right); _Py_DECREF_SPECIALIZED(right, (destructor)PyObject_Free); @@ -447,12 +447,12 @@ dummy_func( macro(BINARY_OP_SUBTRACT_INT) = _GUARD_BOTH_INT + unused/1 + _BINARY_OP_SUBTRACT_INT; - op(_GUARD_BOTH_FLOAT, (left, right -- left, right)) { + op(_GUARD_BOTH_FLOAT, (left, right -- left: &PYFLOAT_TYPE, right: &PYFLOAT_TYPE)) { DEOPT_IF(!PyFloat_CheckExact(left)); DEOPT_IF(!PyFloat_CheckExact(right)); } - pure op(_BINARY_OP_MULTIPLY_FLOAT, (left, right -- res)) { + pure op(_BINARY_OP_MULTIPLY_FLOAT, (left, right -- res: &PYFLOAT_TYPE)) { STAT_INC(BINARY_OP, hit); double dres = ((PyFloatObject *)left)->ob_fval * @@ -460,7 +460,7 @@ dummy_func( DECREF_INPUTS_AND_REUSE_FLOAT(left, right, dres, res); } - pure op(_BINARY_OP_ADD_FLOAT, (left, right -- res)) { + pure op(_BINARY_OP_ADD_FLOAT, (left, right -- res: &PYFLOAT_TYPE)) { STAT_INC(BINARY_OP, hit); double dres = ((PyFloatObject *)left)->ob_fval + @@ -468,7 +468,7 @@ dummy_func( DECREF_INPUTS_AND_REUSE_FLOAT(left, right, dres, res); } - pure op(_BINARY_OP_SUBTRACT_FLOAT, (left, right -- res)) { + pure op(_BINARY_OP_SUBTRACT_FLOAT, (left, right -- res: &PYFLOAT_TYPE)) { STAT_INC(BINARY_OP, hit); double dres = ((PyFloatObject *)left)->ob_fval - @@ -483,12 +483,12 @@ dummy_func( macro(BINARY_OP_SUBTRACT_FLOAT) = _GUARD_BOTH_FLOAT + unused/1 + _BINARY_OP_SUBTRACT_FLOAT; - op(_GUARD_BOTH_UNICODE, (left, right -- left, right)) { + op(_GUARD_BOTH_UNICODE, (left, right -- left: &PYUNICODE_TYPE, right: &PYUNICODE_TYPE)) { DEOPT_IF(!PyUnicode_CheckExact(left)); DEOPT_IF(!PyUnicode_CheckExact(right)); } - pure op(_BINARY_OP_ADD_UNICODE, (left, right -- res)) { + pure op(_BINARY_OP_ADD_UNICODE, (left, right -- res: &PYUNICODE_TYPE)) { STAT_INC(BINARY_OP, hit); res = PyUnicode_Concat(left, right); _Py_DECREF_SPECIALIZED(left, _PyUnicode_ExactDealloc); @@ -1877,7 +1877,7 @@ dummy_func( something was returned by a descriptor protocol). Set the second element of the stack to NULL, to signal CALL that it's not a method call. - meth | NULL | arg1 | ... | argN + NULL | meth | arg1 | ... 
| argN */ DECREF_INPUTS(); ERROR_IF(attr == NULL, error); @@ -1901,7 +1901,7 @@ dummy_func( LOAD_ATTR, }; - op(_GUARD_TYPE_VERSION, (type_version/2, owner -- owner)) { + op(_GUARD_TYPE_VERSION, (type_version/2, owner -- owner: &(GUARD_TYPE_VERSION_TYPE + type_version))) { PyTypeObject *tp = Py_TYPE(owner); assert(type_version != 0); DEOPT_IF(tp->tp_version_tag != type_version); @@ -2082,7 +2082,7 @@ dummy_func( DISPATCH_INLINED(new_frame); } - op(_GUARD_DORV_VALUES, (owner -- owner)) { + op(_GUARD_DORV_VALUES, (owner -- owner: &GUARD_DORV_VALUES_TYPE)) { assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); DEOPT_IF(!_PyDictOrValues_IsValues(dorv)); @@ -2318,16 +2318,13 @@ dummy_func( assert(oparg <= INSTR_OFFSET()); JUMPBY(-oparg); #if ENABLE_SPECIALIZATION - uint16_t counter = this_instr[1].cache; - this_instr[1].cache = counter + (1 << OPTIMIZER_BITS_IN_COUNTER); + this_instr[1].cache += (1 << OPTIMIZER_BITS_IN_COUNTER); /* We are using unsigned values, but we really want signed values, so - * do the 2s complement adjustment manually */ - uint32_t offset_counter = counter ^ (1 << 15); - uint32_t threshold = tstate->interp->optimizer_backedge_threshold; - assert((threshold & OPTIMIZER_BITS_MASK) == 0); - // Use '>=' not '>' so that the optimizer/backoff bits do not effect the result. + * do the 2s complement comparison manually */ + uint16_t ucounter = this_instr[1].cache + (1 << 15); + uint16_t threshold = tstate->interp->optimizer_backedge_threshold + (1 << 15); // Double-check that the opcode isn't instrumented or something: - if (offset_counter >= threshold && this_instr->op.code == JUMP_BACKWARD) { + if (ucounter > threshold && this_instr->op.code == JUMP_BACKWARD) { OPT_STAT_INC(attempts); _Py_CODEUNIT *start = this_instr; /* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */ @@ -2341,18 +2338,18 @@ dummy_func( // Rewind and enter the executor: assert(start->op.code == ENTER_EXECUTOR); next_instr = start; - this_instr[1].cache &= OPTIMIZER_BITS_MASK; + this_instr[1].cache &= ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); } else { - int backoff = this_instr[1].cache & OPTIMIZER_BITS_MASK; - backoff++; - if (backoff < MIN_TIER2_BACKOFF) { - backoff = MIN_TIER2_BACKOFF; + int backoff = this_instr[1].cache & ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); + if (backoff < MINIMUM_TIER2_BACKOFF) { + backoff = MINIMUM_TIER2_BACKOFF; } - else if (backoff > MAX_TIER2_BACKOFF) { - backoff = MAX_TIER2_BACKOFF; + else if (backoff < 15 - OPTIMIZER_BITS_IN_COUNTER) { + backoff++; } - this_instr[1].cache = ((UINT16_MAX << OPTIMIZER_BITS_IN_COUNTER) << backoff) | backoff; + assert(backoff <= 15 - OPTIMIZER_BITS_IN_COUNTER); + this_instr[1].cache = ((1 << 16) - ((1 << OPTIMIZER_BITS_IN_COUNTER) << backoff)) | backoff; } } #endif /* ENABLE_SPECIALIZATION */ @@ -2714,7 +2711,7 @@ dummy_func( DEOPT_IF(r->len <= 0); } - op(_ITER_NEXT_RANGE, (iter -- iter, next)) { + op(_ITER_NEXT_RANGE, (iter -- iter, next: &PYLONG_TYPE)) { _PyRangeIterObject *r = (_PyRangeIterObject *)iter; assert(Py_TYPE(r) == &PyRangeIter_Type); assert(r->len > 0); @@ -2872,13 +2869,13 @@ dummy_func( exc_info->exc_value = Py_NewRef(new_exc); } - op(_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, (owner -- owner)) { + op(_GUARD_DORV_VALUES_INST_ATTR_FROM_DICT, (owner -- owner: &GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_TYPE)) { assert(Py_TYPE(owner)->tp_flags & Py_TPFLAGS_MANAGED_DICT); PyDictOrValues *dorv = _PyObject_DictOrValuesPointer(owner); 
DEOPT_IF(!_PyDictOrValues_IsValues(*dorv) && !_PyObject_MakeInstanceAttributesFromDict(owner, dorv)); } - op(_GUARD_KEYS_VERSION, (keys_version/2, owner -- owner)) { + op(_GUARD_KEYS_VERSION, (keys_version/2, owner -- owner: &(GUARD_KEYS_VERSION_TYPE + keys_version))) { PyTypeObject *owner_cls = Py_TYPE(owner); PyHeapTypeObject *owner_heap_type = (PyHeapTypeObject *)owner_cls; DEOPT_IF(owner_heap_type->ht_cached_keys->dk_version != keys_version); @@ -3093,7 +3090,7 @@ dummy_func( macro(CALL) = _SPECIALIZE_CALL + unused/2 + _CALL; - op(_CHECK_CALL_BOUND_METHOD_EXACT_ARGS, (callable, null, unused[oparg] -- callable, null, unused[oparg])) { + op(_CHECK_CALL_BOUND_METHOD_EXACT_ARGS, (callable, null, unused[oparg] -- callable: &PYMETHOD_TYPE, null: &NULL_TYPE, unused[oparg])) { DEOPT_IF(null != NULL); DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type); } @@ -3111,7 +3108,7 @@ dummy_func( DEOPT_IF(tstate->interp->eval_frame); } - op(_CHECK_FUNCTION_EXACT_ARGS, (func_version/2, callable, self_or_null, unused[oparg] -- callable, self_or_null, unused[oparg])) { + op(_CHECK_FUNCTION_EXACT_ARGS, (func_version/2, callable, self_or_null, unused[oparg] -- callable: &(PYFUNCTION_TYPE_VERSION_TYPE + func_version), self_or_null, unused[oparg])) { DEOPT_IF(!PyFunction_Check(callable)); PyFunctionObject *func = (PyFunctionObject *)callable; DEOPT_IF(func->func_version != func_version); @@ -4037,9 +4034,10 @@ dummy_func( CHECK_EVAL_BREAKER(); } - op(_SET_IP, (instr_ptr/4 --)) { + op(_SET_IP, (--)) { TIER_TWO_ONLY - frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; + // TODO: Put the code pointer in `operand` to avoid indirection via `frame` + frame->instr_ptr = _PyCode_CODE(_PyFrame_GetCode(frame)) + oparg; } op(_SAVE_RETURN_OFFSET, (--)) { @@ -4061,23 +4059,23 @@ dummy_func( DEOPT_IF(!current_executor->vm_data.valid); } - pure op(_LOAD_CONST_INLINE, (ptr/4 -- value)) { + op(_LOAD_CONST_INLINE, (ptr/4 -- value)) { TIER_TWO_ONLY value = Py_NewRef(ptr); } - pure op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) { + op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) { TIER_TWO_ONLY value = ptr; } - pure op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) { + op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) { TIER_TWO_ONLY value = Py_NewRef(ptr); null = NULL; } - pure op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) { + op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) { TIER_TWO_ONLY value = ptr; null = NULL; @@ -4099,11 +4097,6 @@ dummy_func( exe->count++; } - op(_CHECK_VALIDITY_AND_SET_IP, (instr_ptr/4 --)) { - TIER_TWO_ONLY - DEOPT_IF(!current_executor->vm_data.valid); - frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; - } // END BYTECODES // diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h index 58d238320276f4..2d914b82dbf88f 100644 --- a/Python/executor_cases.c.h +++ b/Python/executor_cases.c.h @@ -1598,7 +1598,7 @@ something was returned by a descriptor protocol). Set the second element of the stack to NULL, to signal CALL that it's not a method call. - meth | NULL | arg1 | ... | argN + NULL | meth | arg1 | ... 
| argN */ Py_DECREF(owner); if (attr == NULL) goto pop_1_error_tier_two; @@ -3360,9 +3360,10 @@ } case _SET_IP: { - PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND(); + oparg = CURRENT_OPARG(); TIER_TWO_ONLY - frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; + // TODO: Put the code pointer in `operand` to avoid indirection via `frame` + frame->instr_ptr = _PyCode_CODE(_PyFrame_GetCode(frame)) + oparg; break; } @@ -3458,12 +3459,4 @@ break; } - case _CHECK_VALIDITY_AND_SET_IP: { - PyObject *instr_ptr = (PyObject *)CURRENT_OPERAND(); - TIER_TWO_ONLY - if (!current_executor->vm_data.valid) goto deoptimize; - frame->instr_ptr = (_Py_CODEUNIT *)instr_ptr; - break; - } - #undef TIER_TWO diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h index a49223e4db5318..e5244147d499af 100644 --- a/Python/generated_cases.c.h +++ b/Python/generated_cases.c.h @@ -3263,16 +3263,13 @@ assert(oparg <= INSTR_OFFSET()); JUMPBY(-oparg); #if ENABLE_SPECIALIZATION - uint16_t counter = this_instr[1].cache; - this_instr[1].cache = counter + (1 << OPTIMIZER_BITS_IN_COUNTER); + this_instr[1].cache += (1 << OPTIMIZER_BITS_IN_COUNTER); /* We are using unsigned values, but we really want signed values, so - * do the 2s complement adjustment manually */ - uint32_t offset_counter = counter ^ (1 << 15); - uint32_t threshold = tstate->interp->optimizer_backedge_threshold; - assert((threshold & OPTIMIZER_BITS_MASK) == 0); - // Use '>=' not '>' so that the optimizer/backoff bits do not effect the result. + * do the 2s complement comparison manually */ + uint16_t ucounter = this_instr[1].cache + (1 << 15); + uint16_t threshold = tstate->interp->optimizer_backedge_threshold + (1 << 15); // Double-check that the opcode isn't instrumented or something: - if (offset_counter >= threshold && this_instr->op.code == JUMP_BACKWARD) { + if (ucounter > threshold && this_instr->op.code == JUMP_BACKWARD) { OPT_STAT_INC(attempts); _Py_CODEUNIT *start = this_instr; /* Back up over EXTENDED_ARGs so optimizer sees the whole instruction */ @@ -3286,18 +3283,18 @@ // Rewind and enter the executor: assert(start->op.code == ENTER_EXECUTOR); next_instr = start; - this_instr[1].cache &= OPTIMIZER_BITS_MASK; + this_instr[1].cache &= ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); } else { - int backoff = this_instr[1].cache & OPTIMIZER_BITS_MASK; - backoff++; - if (backoff < MIN_TIER2_BACKOFF) { - backoff = MIN_TIER2_BACKOFF; + int backoff = this_instr[1].cache & ((1 << OPTIMIZER_BITS_IN_COUNTER) - 1); + if (backoff < MINIMUM_TIER2_BACKOFF) { + backoff = MINIMUM_TIER2_BACKOFF; } - else if (backoff > MAX_TIER2_BACKOFF) { - backoff = MAX_TIER2_BACKOFF; + else if (backoff < 15 - OPTIMIZER_BITS_IN_COUNTER) { + backoff++; } - this_instr[1].cache = ((UINT16_MAX << OPTIMIZER_BITS_IN_COUNTER) << backoff) | backoff; + assert(backoff <= 15 - OPTIMIZER_BITS_IN_COUNTER); + this_instr[1].cache = ((1 << 16) - ((1 << OPTIMIZER_BITS_IN_COUNTER) << backoff)) | backoff; } } #endif /* ENABLE_SPECIALIZATION */ @@ -3423,7 +3420,7 @@ something was returned by a descriptor protocol). Set the second element of the stack to NULL, to signal CALL that it's not a method call. - meth | NULL | arg1 | ... | argN + NULL | meth | arg1 | ... 
| argN */ Py_DECREF(owner); if (attr == NULL) goto pop_1_error; diff --git a/Python/optimizer.c b/Python/optimizer.c index efa19680c9b1f3..ad9ac382d300ef 100644 --- a/Python/optimizer.c +++ b/Python/optimizer.c @@ -17,6 +17,8 @@ #include "pycore_uop_metadata.h" // Uop tables #undef NEED_OPCODE_METADATA +#define UOP_MAX_TRACE_LENGTH 512 + #define MAX_EXECUTORS_SIZE 256 @@ -109,9 +111,6 @@ never_optimize( _PyExecutorObject **exec, int Py_UNUSED(stack_entries)) { - /* Although it should be benign for this to be called, - * it shouldn't happen, so fail in debug builds. */ - assert(0 && "never optimize should never be called"); return 0; } @@ -123,19 +122,13 @@ PyTypeObject _PyDefaultOptimizer_Type = { .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_DISALLOW_INSTANTIATION, }; -static _PyOptimizerObject _PyOptimizer_Default = { +_PyOptimizerObject _PyOptimizer_Default = { PyObject_HEAD_INIT(&_PyDefaultOptimizer_Type) .optimize = never_optimize, - .resume_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD, - .backedge_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD, + .resume_threshold = INT16_MAX, + .backedge_threshold = INT16_MAX, }; -static uint32_t -shift_and_offset_threshold(uint16_t threshold) -{ - return (threshold << OPTIMIZER_BITS_IN_COUNTER) + (1 << 15); -} - _PyOptimizerObject * PyUnstable_GetOptimizer(void) { @@ -143,33 +136,24 @@ PyUnstable_GetOptimizer(void) if (interp->optimizer == &_PyOptimizer_Default) { return NULL; } - assert(interp->optimizer_backedge_threshold == - shift_and_offset_threshold(interp->optimizer->backedge_threshold)); - assert(interp->optimizer_resume_threshold == - shift_and_offset_threshold(interp->optimizer->resume_threshold)); + assert(interp->optimizer_backedge_threshold == interp->optimizer->backedge_threshold); + assert(interp->optimizer_resume_threshold == interp->optimizer->resume_threshold); Py_INCREF(interp->optimizer); return interp->optimizer; } -_PyOptimizerObject * -_Py_SetOptimizer(PyInterpreterState *interp, _PyOptimizerObject *optimizer) +void +PyUnstable_SetOptimizer(_PyOptimizerObject *optimizer) { + PyInterpreterState *interp = _PyInterpreterState_GET(); if (optimizer == NULL) { optimizer = &_PyOptimizer_Default; } _PyOptimizerObject *old = interp->optimizer; Py_INCREF(optimizer); interp->optimizer = optimizer; - interp->optimizer_backedge_threshold = shift_and_offset_threshold(optimizer->backedge_threshold); - interp->optimizer_resume_threshold = shift_and_offset_threshold(optimizer->resume_threshold); - return old; -} - -void -PyUnstable_SetOptimizer(_PyOptimizerObject *optimizer) -{ - PyInterpreterState *interp = _PyInterpreterState_GET(); - _PyOptimizerObject *old = _Py_SetOptimizer(interp, optimizer); + interp->optimizer_backedge_threshold = optimizer->backedge_threshold; + interp->optimizer_resume_threshold = optimizer->resume_threshold; Py_DECREF(old); } @@ -324,6 +308,8 @@ BRANCH_TO_GUARD[4][2] = { [POP_JUMP_IF_NOT_NONE - POP_JUMP_IF_FALSE][1] = _GUARD_IS_NOT_NONE_POP, }; +#define TRACE_STACK_SIZE 5 + #define CONFIDENCE_RANGE 1000 #define CONFIDENCE_CUTOFF 333 @@ -337,11 +323,10 @@ BRANCH_TO_GUARD[4][2] = { #define ADD_TO_TRACE(OPCODE, OPARG, OPERAND, TARGET) \ DPRINTF(2, \ - " ADD_TO_TRACE(%s, %d, %" PRIu64 ", %d)\n", \ + " ADD_TO_TRACE(%s, %d, %" PRIu64 ")\n", \ _PyUOpName(OPCODE), \ (OPARG), \ - (uint64_t)(OPERAND), \ - TARGET); \ + (uint64_t)(OPERAND)); \ assert(trace_length < max_length); \ trace[trace_length].opcode = (OPCODE); \ trace[trace_length].oparg = (OPARG); \ @@ -432,8 +417,9 @@ translate_bytecode_to_trace( top: // Jump here after 
_PUSH_FRAME or likely branches for (;;) { target = INSTR_IP(instr, code); - RESERVE_RAW(2, "epilogue"); // Always need space for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE - ADD_TO_TRACE(_CHECK_VALIDITY_AND_SET_IP, 0, (uintptr_t)instr, target); + RESERVE_RAW(3, "epilogue"); // Always need space for _SET_IP, _CHECK_VALIDITY and _EXIT_TRACE + ADD_TO_TRACE(_SET_IP, target, 0, target); + ADD_TO_TRACE(_CHECK_VALIDITY, 0, 0, target); uint32_t opcode = instr->op.code; uint32_t oparg = instr->op.arg; @@ -839,13 +825,11 @@ uop_optimize( char *uop_optimize = Py_GETENV("PYTHONUOPSOPTIMIZE"); if (uop_optimize == NULL || *uop_optimize > '0') { err = _Py_uop_analyze_and_optimize(frame, buffer, - UOP_MAX_TRACE_LENGTH, - curr_stackentries, &dependencies); + UOP_MAX_TRACE_LENGTH, curr_stackentries, &dependencies); if (err <= 0) { return err; } } - assert(err == 1); _PyExecutorObject *executor = make_executor_from_uops(buffer, &dependencies); if (executor == NULL) { return -1; @@ -877,10 +861,10 @@ PyUnstable_Optimizer_NewUOpOptimizer(void) return NULL; } opt->optimize = uop_optimize; - opt->resume_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD; - // Need a few iterations to settle specializations, - // and to ammortize the cost of optimization. - opt->backedge_threshold = 16; + opt->resume_threshold = INT16_MAX; + // Need at least 3 iterations to settle specializations. + // A few lower bits of the counter are reserved for other flags. + opt->backedge_threshold = 16 << OPTIMIZER_BITS_IN_COUNTER; return (PyObject *)opt; } @@ -967,7 +951,7 @@ PyUnstable_Optimizer_NewCounter(void) return NULL; } opt->base.optimize = counter_optimize; - opt->base.resume_threshold = OPTIMIZER_UNREACHABLE_THRESHOLD; + opt->base.resume_threshold = INT16_MAX; opt->base.backedge_threshold = 0; opt->count = 0; return (PyObject *)opt; diff --git a/Python/optimizer_analysis.c b/Python/optimizer_analysis.c index 49974520de924d..b14e6950b4a06b 100644 --- a/Python/optimizer_analysis.c +++ b/Python/optimizer_analysis.c @@ -1,14 +1,3 @@ -/* - * This file contains the support code for CPython's uops redundancy eliminator. - * It also performs some simple optimizations. - * It performs a traditional data-flow analysis[1] over the trace of uops. - * Using the information gained, it chooses to emit, or skip certain instructions - * if possible. - * - * [1] For information on data-flow analysis, please see - * https://clang.llvm.org/docs/DataFlowAnalysisIntro.html - * - * */ #include "Python.h" #include "opcode.h" #include "pycore_dict.h" @@ -20,355 +9,10 @@ #include "pycore_dict.h" #include "pycore_long.h" #include "cpython/optimizer.h" -#include "pycore_optimizer.h" -#include "pycore_object.h" -#include "pycore_dict.h" -#include "pycore_function.h" -#include "pycore_uop_metadata.h" -#include "pycore_uop_ids.h" -#include "pycore_range.h" - -#include #include #include #include - -// Holds locals, stack, locals, stack ... 
co_consts (in that order) -#define MAX_ABSTRACT_INTERP_SIZE 4096 - -#define OVERALLOCATE_FACTOR 5 - -#define TY_ARENA_SIZE (UOP_MAX_TRACE_LENGTH * OVERALLOCATE_FACTOR) - -// Need extras for root frame and for overflow frame (see TRACE_STACK_PUSH()) -#define MAX_ABSTRACT_FRAME_DEPTH (TRACE_STACK_SIZE + 2) - -#ifdef Py_DEBUG - static const char *const DEBUG_ENV = "PYTHON_OPT_DEBUG"; - static inline int get_lltrace(void) { - char *uop_debug = Py_GETENV(DEBUG_ENV); - int lltrace = 0; - if (uop_debug != NULL && *uop_debug >= '0') { - lltrace = *uop_debug - '0'; // TODO: Parse an int and all that - } - return lltrace; - } - #define DPRINTF(level, ...) \ - if (get_lltrace() >= (level)) { printf(__VA_ARGS__); } -#else - #define DPRINTF(level, ...) -#endif - - -// Flags for below. -#define KNOWN 1 << 0 -#define TRUE_CONST 1 << 1 -#define IS_NULL 1 << 2 -#define NOT_NULL 1 << 3 - -typedef struct { - int flags; - PyTypeObject *typ; - // constant propagated value (might be NULL) - PyObject *const_val; -} _Py_UOpsSymType; - - -typedef struct _Py_UOpsAbstractFrame { - // Max stacklen - int stack_len; - int locals_len; - - _Py_UOpsSymType **stack_pointer; - _Py_UOpsSymType **stack; - _Py_UOpsSymType **locals; -} _Py_UOpsAbstractFrame; - - -typedef struct ty_arena { - int ty_curr_number; - int ty_max_number; - _Py_UOpsSymType arena[TY_ARENA_SIZE]; -} ty_arena; - -// Tier 2 types meta interpreter -typedef struct _Py_UOpsAbstractInterpContext { - PyObject_HEAD - // The current "executing" frame. - _Py_UOpsAbstractFrame *frame; - _Py_UOpsAbstractFrame frames[MAX_ABSTRACT_FRAME_DEPTH]; - int curr_frame_depth; - - // Arena for the symbolic types. - ty_arena t_arena; - - _Py_UOpsSymType **n_consumed; - _Py_UOpsSymType **limit; - _Py_UOpsSymType *locals_and_stack[MAX_ABSTRACT_INTERP_SIZE]; -} _Py_UOpsAbstractInterpContext; - -static inline _Py_UOpsSymType* sym_new_unknown(_Py_UOpsAbstractInterpContext *ctx); - -// 0 on success, -1 on error. 
-static _Py_UOpsAbstractFrame * -ctx_frame_new( - _Py_UOpsAbstractInterpContext *ctx, - PyCodeObject *co, - _Py_UOpsSymType **localsplus_start, - int n_locals_already_filled, - int curr_stackentries -) -{ - assert(ctx->curr_frame_depth < MAX_ABSTRACT_FRAME_DEPTH); - _Py_UOpsAbstractFrame *frame = &ctx->frames[ctx->curr_frame_depth]; - - frame->stack_len = co->co_stacksize; - frame->locals_len = co->co_nlocalsplus; - - frame->locals = localsplus_start; - frame->stack = frame->locals + co->co_nlocalsplus; - frame->stack_pointer = frame->stack + curr_stackentries; - ctx->n_consumed = localsplus_start + (co->co_nlocalsplus + co->co_stacksize); - if (ctx->n_consumed >= ctx->limit) { - return NULL; - } - - - // Initialize with the initial state of all local variables - for (int i = n_locals_already_filled; i < co->co_nlocalsplus; i++) { - _Py_UOpsSymType *local = sym_new_unknown(ctx); - if (local == NULL) { - return NULL; - } - frame->locals[i] = local; - } - - - // Initialize the stack as well - for (int i = 0; i < curr_stackentries; i++) { - _Py_UOpsSymType *stackvar = sym_new_unknown(ctx); - if (stackvar == NULL) { - return NULL; - } - frame->stack[i] = stackvar; - } - - return frame; -} - -static void -abstractcontext_fini(_Py_UOpsAbstractInterpContext *ctx) -{ - if (ctx == NULL) { - return; - } - ctx->curr_frame_depth = 0; - int tys = ctx->t_arena.ty_curr_number; - for (int i = 0; i < tys; i++) { - Py_CLEAR(ctx->t_arena.arena[i].const_val); - } -} - -static int -abstractcontext_init( - _Py_UOpsAbstractInterpContext *ctx, - PyCodeObject *co, - int curr_stacklen, - int ir_entries -) -{ - ctx->limit = ctx->locals_and_stack + MAX_ABSTRACT_INTERP_SIZE; - ctx->n_consumed = ctx->locals_and_stack; -#ifdef Py_DEBUG // Aids debugging a little. There should never be NULL in the abstract interpreter. - for (int i = 0 ; i < MAX_ABSTRACT_INTERP_SIZE; i++) { - ctx->locals_and_stack[i] = NULL; - } -#endif - - // Setup the arena for sym expressions. - ctx->t_arena.ty_curr_number = 0; - ctx->t_arena.ty_max_number = TY_ARENA_SIZE; - - // Frame setup - ctx->curr_frame_depth = 0; - _Py_UOpsAbstractFrame *frame = ctx_frame_new(ctx, co, ctx->n_consumed, 0, curr_stacklen); - if (frame == NULL) { - return -1; - } - ctx->curr_frame_depth++; - ctx->frame = frame; - return 0; -} - - -static int -ctx_frame_pop( - _Py_UOpsAbstractInterpContext *ctx -) -{ - _Py_UOpsAbstractFrame *frame = ctx->frame; - - ctx->n_consumed = frame->locals; - ctx->curr_frame_depth--; - assert(ctx->curr_frame_depth >= 1); - ctx->frame = &ctx->frames[ctx->curr_frame_depth - 1]; - - return 0; -} - - -// Takes a borrowed reference to const_val, turns that into a strong reference. 
-static _Py_UOpsSymType* -sym_new(_Py_UOpsAbstractInterpContext *ctx, - PyObject *const_val) -{ - _Py_UOpsSymType *self = &ctx->t_arena.arena[ctx->t_arena.ty_curr_number]; - if (ctx->t_arena.ty_curr_number >= ctx->t_arena.ty_max_number) { - OPT_STAT_INC(optimizer_failure_reason_no_memory); - DPRINTF(1, "out of space for symbolic expression type\n"); - return NULL; - } - ctx->t_arena.ty_curr_number++; - self->const_val = NULL; - self->typ = NULL; - self->flags = 0; - - if (const_val != NULL) { - self->const_val = Py_NewRef(const_val); - } - - return self; -} - -static inline void -sym_set_flag(_Py_UOpsSymType *sym, int flag) -{ - sym->flags |= flag; -} - -static inline void -sym_clear_flag(_Py_UOpsSymType *sym, int flag) -{ - sym->flags &= (~flag); -} - -static inline bool -sym_has_flag(_Py_UOpsSymType *sym, int flag) -{ - return (sym->flags & flag) != 0; -} - -static inline bool -sym_is_known(_Py_UOpsSymType *sym) -{ - return sym_has_flag(sym, KNOWN); -} - -static inline bool -sym_is_not_null(_Py_UOpsSymType *sym) -{ - return (sym->flags & (IS_NULL | NOT_NULL)) == NOT_NULL; -} - -static inline bool -sym_is_null(_Py_UOpsSymType *sym) -{ - return (sym->flags & (IS_NULL | NOT_NULL)) == IS_NULL; -} - -static inline void -sym_set_type(_Py_UOpsSymType *sym, PyTypeObject *tp) -{ - assert(PyType_Check(tp)); - sym->typ = tp; - sym_set_flag(sym, KNOWN); - sym_set_flag(sym, NOT_NULL); -} - -static inline void -sym_set_null(_Py_UOpsSymType *sym) -{ - sym_set_flag(sym, IS_NULL); - sym_set_flag(sym, KNOWN); -} - - -static inline _Py_UOpsSymType* -sym_new_unknown(_Py_UOpsAbstractInterpContext *ctx) -{ - return sym_new(ctx,NULL); -} - -static inline _Py_UOpsSymType* -sym_new_known_notnull(_Py_UOpsAbstractInterpContext *ctx) -{ - _Py_UOpsSymType *res = sym_new_unknown(ctx); - if (res == NULL) { - return NULL; - } - sym_set_flag(res, NOT_NULL); - return res; -} - -static inline _Py_UOpsSymType* -sym_new_known_type(_Py_UOpsAbstractInterpContext *ctx, - PyTypeObject *typ) -{ - _Py_UOpsSymType *res = sym_new(ctx,NULL); - if (res == NULL) { - return NULL; - } - sym_set_type(res, typ); - return res; -} - -// Takes a borrowed reference to const_val. 
-static inline _Py_UOpsSymType* -sym_new_const(_Py_UOpsAbstractInterpContext *ctx, PyObject *const_val) -{ - assert(const_val != NULL); - _Py_UOpsSymType *temp = sym_new( - ctx, - const_val - ); - if (temp == NULL) { - return NULL; - } - sym_set_type(temp, Py_TYPE(const_val)); - sym_set_flag(temp, TRUE_CONST); - sym_set_flag(temp, KNOWN); - sym_set_flag(temp, NOT_NULL); - return temp; -} - -static _Py_UOpsSymType* -sym_new_null(_Py_UOpsAbstractInterpContext *ctx) -{ - _Py_UOpsSymType *null_sym = sym_new_unknown(ctx); - if (null_sym == NULL) { - return NULL; - } - sym_set_null(null_sym); - return null_sym; -} - - -static inline bool -sym_matches_type(_Py_UOpsSymType *sym, PyTypeObject *typ) -{ - assert(typ == NULL || PyType_Check(typ)); - if (!sym_has_flag(sym, KNOWN)) { - return false; - } - return sym->typ == typ; -} - - -static inline bool -op_is_end(uint32_t opcode) -{ - return opcode == _EXIT_TRACE || opcode == _JUMP_TO_TOP; -} +#include "pycore_optimizer.h" static int get_mutations(PyObject* dict) { @@ -555,163 +199,12 @@ remove_globals(_PyInterpreterFrame *frame, _PyUOpInstruction *buffer, builtins = func->func_builtins; break; } - default: - if (op_is_end(opcode)) { - return 1; - } - break; - } - } - return 0; -} - - - -#define STACK_LEVEL() ((int)(stack_pointer - ctx->frame->stack)) - -#define GETLOCAL(idx) ((ctx->frame->locals[idx])) - -#define REPLACE_OP(INST, OP, ARG, OPERAND) \ - INST->opcode = OP; \ - INST->oparg = ARG; \ - INST->operand = OPERAND; - -#define _LOAD_ATTR_NOT_NULL \ - do { \ - attr = sym_new_known_notnull(ctx); \ - if (attr == NULL) { \ - goto error; \ - } \ - null = sym_new_null(ctx); \ - if (null == NULL) { \ - goto error; \ - } \ - } while (0); - - -/* 1 for success, 0 for not ready, cannot error at the moment. */ -static int -uop_redundancy_eliminator( - PyCodeObject *co, - _PyUOpInstruction *trace, - int trace_len, - int curr_stacklen -) -{ - - _Py_UOpsAbstractInterpContext context; - _Py_UOpsAbstractInterpContext *ctx = &context; - - if (abstractcontext_init( - ctx, - co, curr_stacklen, - trace_len) < 0) { - goto out_of_space; - } - - for (_PyUOpInstruction *this_instr = trace; - this_instr < trace + trace_len && !op_is_end(this_instr->opcode); - this_instr++) { - - int oparg = this_instr->oparg; - uint32_t opcode = this_instr->opcode; - - _Py_UOpsSymType **stack_pointer = ctx->frame->stack_pointer; - - DPRINTF(3, "Abstract interpreting %s:%d ", - _PyOpcode_uop_name[opcode], - oparg); - switch (opcode) { -#include "tier2_redundancy_eliminator_cases.c.h" - - default: - DPRINTF(1, "Unknown opcode in abstract interpreter\n"); - Py_UNREACHABLE(); - } - assert(ctx->frame != NULL); - DPRINTF(3, " stack_level %d\n", STACK_LEVEL()); - ctx->frame->stack_pointer = stack_pointer; - assert(STACK_LEVEL() >= 0); - } - - abstractcontext_fini(ctx); - return 1; - -out_of_space: - DPRINTF(1, "Out of space in abstract interpreter\n"); - abstractcontext_fini(ctx); - return 0; - -error: - DPRINTF(1, "Encountered error in abstract interpreter\n"); - abstractcontext_fini(ctx); - return 0; -} - - -static void -remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size) -{ - /* Remove _SET_IP and _CHECK_VALIDITY where possible. - * _SET_IP is needed if the following instruction escapes or - * could error. _CHECK_VALIDITY is needed if the previous - * instruction could have escaped. 
*/ - int last_set_ip = -1; - bool may_have_escaped = false; - for (int pc = 0; pc < buffer_size; pc++) { - int opcode = buffer[pc].opcode; - switch (opcode) { - case _SET_IP: - buffer[pc].opcode = NOP; - last_set_ip = pc; - break; - case _CHECK_VALIDITY: - if (may_have_escaped) { - may_have_escaped = false; - } - else { - buffer[pc].opcode = NOP; - } - break; - case _CHECK_VALIDITY_AND_SET_IP: - if (may_have_escaped) { - may_have_escaped = false; - buffer[pc].opcode = _CHECK_VALIDITY; - } - else { - buffer[pc].opcode = NOP; - } - last_set_ip = pc; - break; case _JUMP_TO_TOP: case _EXIT_TRACE: - return; - default: - { - bool needs_ip = false; - if (_PyUop_Flags[opcode] & HAS_ESCAPES_FLAG) { - needs_ip = true; - may_have_escaped = true; - } - if (_PyUop_Flags[opcode] & HAS_ERROR_FLAG) { - needs_ip = true; - } - if (opcode == _PUSH_FRAME) { - needs_ip = true; - } - if (needs_ip && last_set_ip >= 0) { - if (buffer[last_set_ip].opcode == _CHECK_VALIDITY) { - buffer[last_set_ip].opcode = _CHECK_VALIDITY_AND_SET_IP; - } - else { - assert(buffer[last_set_ip].opcode == _NOP); - buffer[last_set_ip].opcode = _SET_IP; - } - last_set_ip = -1; - } - } + return 1; } } + return 0; } static void @@ -757,9 +250,44 @@ peephole_opt(_PyInterpreterFrame *frame, _PyUOpInstruction *buffer, int buffer_s } } -// 0 - failure, no error raised, just fall back to Tier 1 -// -1 - failure, and raise error -// 1 - optimizer success +static void +remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size) +{ + int last_set_ip = -1; + bool maybe_invalid = false; + for (int pc = 0; pc < buffer_size; pc++) { + int opcode = buffer[pc].opcode; + if (opcode == _SET_IP) { + buffer[pc].opcode = NOP; + last_set_ip = pc; + } + else if (opcode == _CHECK_VALIDITY) { + if (maybe_invalid) { + maybe_invalid = false; + } + else { + buffer[pc].opcode = NOP; + } + } + else if (opcode == _JUMP_TO_TOP || opcode == _EXIT_TRACE) { + break; + } + else { + if (_PyUop_Flags[opcode] & HAS_ESCAPES_FLAG) { + maybe_invalid = true; + if (last_set_ip >= 0) { + buffer[last_set_ip].opcode = _SET_IP; + } + } + if ((_PyUop_Flags[opcode] & HAS_ERROR_FLAG) || opcode == _PUSH_FRAME) { + if (last_set_ip >= 0) { + buffer[last_set_ip].opcode = _SET_IP; + } + } + } + } +} + int _Py_uop_analyze_and_optimize( _PyInterpreterFrame *frame, @@ -769,33 +297,11 @@ _Py_uop_analyze_and_optimize( _PyBloomFilter *dependencies ) { - OPT_STAT_INC(optimizer_attempts); - int err = remove_globals(frame, buffer, buffer_size, dependencies); - if (err == 0) { - goto not_ready; - } - if (err < 0) { - goto error; + if (err <= 0) { + return err; } - peephole_opt(frame, buffer, buffer_size); - - err = uop_redundancy_eliminator( - (PyCodeObject *)frame->f_executable, buffer, - buffer_size, curr_stacklen); - - if (err == 0) { - goto not_ready; - } - assert(err == 1); - remove_unneeded_uops(buffer, buffer_size); - - OPT_STAT_INC(optimizer_successes); return 1; -not_ready: - return 0; -error: - return -1; } diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c index 7e4c07bb657d19..230018068d751c 100644 --- a/Python/pylifecycle.c +++ b/Python/pylifecycle.c @@ -1627,8 +1627,8 @@ finalize_modules(PyThreadState *tstate) // Invalidate all executors and turn off tier 2 optimizer _Py_Executors_InvalidateAll(interp); - _PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL); - Py_XDECREF(old); + Py_XDECREF(interp->optimizer); + interp->optimizer = &_PyOptimizer_Default; // Stop watching __builtin__ modifications PyDict_Unwatch(0, interp->builtins); diff --git a/Python/pystate.c 
b/Python/pystate.c index 996f465825215f..937c43033b068d 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -625,7 +625,9 @@ init_interpreter(PyInterpreterState *interp, } interp->sys_profile_initialized = false; interp->sys_trace_initialized = false; - (void)_Py_SetOptimizer(interp, NULL); + interp->optimizer = &_PyOptimizer_Default; + interp->optimizer_backedge_threshold = _PyOptimizer_Default.backedge_threshold; + interp->optimizer_resume_threshold = _PyOptimizer_Default.backedge_threshold; interp->next_func_version = 1; interp->executor_list_head = NULL; if (interp != &runtime->_main_interpreter) { @@ -778,8 +780,10 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate) tstate->_status.cleared = 0; } - _PyOptimizerObject *old = _Py_SetOptimizer(interp, NULL); - Py_DECREF(old); + Py_CLEAR(interp->optimizer); + interp->optimizer = &_PyOptimizer_Default; + interp->optimizer_backedge_threshold = _PyOptimizer_Default.backedge_threshold; + interp->optimizer_resume_threshold = _PyOptimizer_Default.backedge_threshold; /* It is possible that any of the objects below have a finalizer that runs Python code or otherwise relies on a thread state diff --git a/Python/pytime.c b/Python/pytime.c index fb0ed85c541e68..77cb95f8feb179 100644 --- a/Python/pytime.c +++ b/Python/pytime.c @@ -50,7 +50,7 @@ # error "time_t is not a two's complement integer type" #endif -#if PyTime_MIN + PyTime_MAX != -1 +#if _PyTime_MIN + _PyTime_MAX != -1 # error "_PyTime_t is not a two's complement integer type" #endif @@ -124,16 +124,16 @@ pytime_as_nanoseconds(_PyTime_t t) } -// Compute t1 + t2. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. +// Compute t1 + t2. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. static inline int pytime_add(_PyTime_t *t1, _PyTime_t t2) { - if (t2 > 0 && *t1 > PyTime_MAX - t2) { - *t1 = PyTime_MAX; + if (t2 > 0 && *t1 > _PyTime_MAX - t2) { + *t1 = _PyTime_MAX; return -1; } - else if (t2 < 0 && *t1 < PyTime_MIN - t2) { - *t1 = PyTime_MIN; + else if (t2 < 0 && *t1 < _PyTime_MIN - t2) { + *t1 = _PyTime_MIN; return -1; } else { @@ -156,7 +156,7 @@ pytime_mul_check_overflow(_PyTime_t a, _PyTime_t b) { if (b != 0) { assert(b > 0); - return ((a < PyTime_MIN / b) || (PyTime_MAX / b < a)); + return ((a < _PyTime_MIN / b) || (_PyTime_MAX / b < a)); } else { return 0; @@ -164,13 +164,13 @@ pytime_mul_check_overflow(_PyTime_t a, _PyTime_t b) } -// Compute t * k. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. +// Compute t * k. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. static inline int pytime_mul(_PyTime_t *t, _PyTime_t k) { assert(k >= 0); if (pytime_mul_check_overflow(*t, k)) { - *t = (*t >= 0) ? PyTime_MAX : PyTime_MIN; + *t = (*t >= 0) ? _PyTime_MAX : _PyTime_MIN; return -1; } else { @@ -180,7 +180,7 @@ pytime_mul(_PyTime_t *t, _PyTime_t k) } -// Compute t * k. Clamp to [PyTime_MIN; PyTime_MAX] on overflow. +// Compute t * k. Clamp to [_PyTime_MIN; _PyTime_MAX] on overflow. static inline _PyTime_t _PyTime_Mul(_PyTime_t t, _PyTime_t k) { @@ -459,12 +459,12 @@ _PyTime_FromSeconds(int seconds) /* ensure that integer overflow cannot happen, int type should have 32 bits, whereas _PyTime_t type has at least 64 bits (SEC_TO_NS takes 30 bits). 
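       Concretely: |seconds| <= INT_MAX < 2**31 and SEC_TO_NS = 10**9 < 2**30,
       so the product stays below 2**61, well within a signed 64-bit
       _PyTime_t.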
*/ - static_assert(INT_MAX <= PyTime_MAX / SEC_TO_NS, "_PyTime_t overflow"); - static_assert(INT_MIN >= PyTime_MIN / SEC_TO_NS, "_PyTime_t underflow"); + static_assert(INT_MAX <= _PyTime_MAX / SEC_TO_NS, "_PyTime_t overflow"); + static_assert(INT_MIN >= _PyTime_MIN / SEC_TO_NS, "_PyTime_t underflow"); _PyTime_t t = (_PyTime_t)seconds; - assert((t >= 0 && t <= PyTime_MAX / SEC_TO_NS) - || (t < 0 && t >= PyTime_MIN / SEC_TO_NS)); + assert((t >= 0 && t <= _PyTime_MAX / SEC_TO_NS) + || (t < 0 && t >= _PyTime_MIN / SEC_TO_NS)); t *= SEC_TO_NS; return pytime_from_nanoseconds(t); } @@ -587,7 +587,7 @@ pytime_from_double(_PyTime_t *tp, double value, _PyTime_round_t round, d = pytime_round(d, round); /* See comments in pytime_double_to_denominator */ - if (!((double)PyTime_MIN <= d && d < -(double)PyTime_MIN)) { + if (!((double)_PyTime_MIN <= d && d < -(double)_PyTime_MIN)) { pytime_time_t_overflow(); return -1; } @@ -649,12 +649,12 @@ _PyTime_FromMillisecondsObject(_PyTime_t *tp, PyObject *obj, _PyTime_round_t rou double -PyTime_AsSecondsDouble(PyTime_t t) +_PyTime_AsSecondsDouble(_PyTime_t t) { /* volatile avoids optimization changing how numbers are rounded */ volatile double d; - PyTime_t ns = pytime_as_nanoseconds(t); + _PyTime_t ns = pytime_as_nanoseconds(t); if (ns % SEC_TO_NS == 0) { /* Divide using integers to avoid rounding issues on the integer part. 1e-9 cannot be stored exactly in IEEE 64-bit. */ @@ -695,7 +695,7 @@ pytime_divide_round_up(const _PyTime_t t, const _PyTime_t k) assert(k > 1); if (t >= 0) { // Don't use (t + k - 1) / k to avoid integer overflow - // if t is equal to PyTime_MAX + // if t is equal to _PyTime_MAX _PyTime_t q = t / k; if (t % k) { q += 1; @@ -704,7 +704,7 @@ pytime_divide_round_up(const _PyTime_t t, const _PyTime_t k) } else { // Don't use (t - (k - 1)) / k to avoid integer overflow - // if t is equals to PyTime_MIN. + // if t is equals to _PyTime_MIN. _PyTime_t q = t / k; if (t % k) { q -= 1; @@ -759,7 +759,7 @@ pytime_divide(const _PyTime_t t, const _PyTime_t k, // Compute (t / k, t % k) in (pq, pr). // Make sure that 0 <= pr < k. // Return 0 on success. -// Return -1 on underflow and store (PyTime_MIN, 0) in (pq, pr). +// Return -1 on underflow and store (_PyTime_MIN, 0) in (pq, pr). static int pytime_divmod(const _PyTime_t t, const _PyTime_t k, _PyTime_t *pq, _PyTime_t *pr) @@ -768,8 +768,8 @@ pytime_divmod(const _PyTime_t t, const _PyTime_t k, _PyTime_t q = t / k; _PyTime_t r = t % k; if (r < 0) { - if (q == PyTime_MIN) { - *pq = PyTime_MIN; + if (q == _PyTime_MIN) { + *pq = _PyTime_MIN; *pr = 0; return -1; } @@ -784,6 +784,13 @@ pytime_divmod(const _PyTime_t t, const _PyTime_t k, } +_PyTime_t +_PyTime_AsNanoseconds(_PyTime_t t) +{ + return pytime_as_nanoseconds(t); +} + + #ifdef MS_WINDOWS _PyTime_t _PyTime_As100Nanoseconds(_PyTime_t t, _PyTime_round_t round) @@ -919,7 +926,6 @@ _PyTime_AsTimespec(_PyTime_t t, struct timespec *ts) #endif -// N.B. If raise_exc=0, this may be called without the GIL. static int py_get_system_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) { @@ -1044,18 +1050,6 @@ _PyTime_GetSystemClock(void) } -int -PyTime_Time(PyTime_t *result) -{ - if (py_get_system_clock(result, NULL, 1) < 0) { - // If clock_gettime(CLOCK_REALTIME) or gettimeofday() fails: - // silently ignore the failure and return 0. - *result = 0; - return -1; - } - return 1; -} - int _PyTime_GetSystemClockWithInfo(_PyTime_t *t, _Py_clock_info_t *info) { @@ -1098,7 +1092,6 @@ py_mach_timebase_info(_PyTimeFraction *base, int raise) #endif -// N.B. 
If raise_exc=0, this may be called without the GIL. static int py_get_monotonic_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) { @@ -1109,13 +1102,13 @@ py_get_monotonic_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) static_assert(sizeof(ticks) <= sizeof(_PyTime_t), "ULONGLONG is larger than _PyTime_t"); _PyTime_t t; - if (ticks <= (ULONGLONG)PyTime_MAX) { + if (ticks <= (ULONGLONG)_PyTime_MAX) { t = (_PyTime_t)ticks; } else { // GetTickCount64() maximum is larger than _PyTime_t maximum: // ULONGLONG is unsigned, whereas _PyTime_t is signed. - t = PyTime_MAX; + t = _PyTime_MAX; } int res = pytime_mul(&t, MS_TO_NS); @@ -1158,7 +1151,7 @@ py_get_monotonic_clock(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) uint64_t uticks = mach_absolute_time(); // unsigned => signed - assert(uticks <= (uint64_t)PyTime_MAX); + assert(uticks <= (uint64_t)_PyTime_MAX); _PyTime_t ticks = (_PyTime_t)uticks; _PyTime_t ns = _PyTimeFraction_Mul(ticks, &base); @@ -1236,17 +1229,6 @@ _PyTime_GetMonotonicClock(void) } -int -PyTime_Monotonic(PyTime_t *result) -{ - if (py_get_monotonic_clock(result, NULL, 1) < 0) { - *result = 0; - return -1; - } - return 0; -} - - int _PyTime_GetMonotonicClockWithInfo(_PyTime_t *tp, _Py_clock_info_t *info) { @@ -1286,7 +1268,6 @@ py_win_perf_counter_frequency(_PyTimeFraction *base, int raise) } -// N.B. If raise_exc=0, this may be called without the GIL. static int py_get_win_perf_counter(_PyTime_t *tp, _Py_clock_info_t *info, int raise_exc) { @@ -1354,25 +1335,6 @@ _PyTime_GetPerfCounter(void) } -int -PyTime_PerfCounter(PyTime_t *result) -{ - int res; -#ifdef MS_WINDOWS - res = py_get_win_perf_counter(result, NULL, 1); -#else - res = py_get_monotonic_clock(result, NULL, 1); -#endif - if (res < 0) { - // If py_win_perf_counter_frequency() or py_get_monotonic_clock() - // fails: silently ignore the failure and return 0. - *result = 0; - return -1; - } - return 0; -} - - int _PyTime_localtime(time_t t, struct tm *tm) { diff --git a/Python/specialize.c b/Python/specialize.c index 7642589508f184..0c7279a7871f94 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -240,11 +240,6 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) print_histogram(out, "Trace run length", stats->trace_run_length_hist); print_histogram(out, "Optimized trace length", stats->optimized_trace_length_hist); - fprintf(out, "Optimization optimizer attempts: %" PRIu64 "\n", stats->optimizer_attempts); - fprintf(out, "Optimization optimizer successes: %" PRIu64 "\n", stats->optimizer_successes); - fprintf(out, "Optimization optimizer failure no memory: %" PRIu64 "\n", - stats->optimizer_failure_reason_no_memory); - const char* const* names; for (int i = 0; i < 512; i++) { if (i < 256) { diff --git a/Python/tier2_redundancy_eliminator_bytecodes.c b/Python/tier2_redundancy_eliminator_bytecodes.c deleted file mode 100644 index 3272b187f20d0e..00000000000000 --- a/Python/tier2_redundancy_eliminator_bytecodes.c +++ /dev/null @@ -1,272 +0,0 @@ -#include "Python.h" -#include "pycore_uops.h" -#include "pycore_uop_ids.h" - -#define op(name, ...) 
/* NAME is ignored */ - -typedef struct _Py_UOpsSymType _Py_UOpsSymType; -typedef struct _Py_UOpsAbstractInterpContext _Py_UOpsAbstractInterpContext; -typedef struct _Py_UOpsAbstractFrame _Py_UOpsAbstractFrame; - -static int -dummy_func(void) { - - PyCodeObject *code; - int oparg; - _Py_UOpsSymType *flag; - _Py_UOpsSymType *left; - _Py_UOpsSymType *right; - _Py_UOpsSymType *value; - _Py_UOpsSymType *res; - _Py_UOpsSymType *iter; - _Py_UOpsSymType *top; - _Py_UOpsSymType *bottom; - _Py_UOpsAbstractFrame *frame; - _Py_UOpsAbstractInterpContext *ctx; - _PyUOpInstruction *this_instr; - _PyBloomFilter *dependencies; - int modified; - -// BEGIN BYTECODES // - - op(_LOAD_FAST_CHECK, (-- value)) { - value = GETLOCAL(oparg); - // We guarantee this will error - just bail and don't optimize it. - if (sym_is_null(value)) { - goto out_of_space; - } - } - - op(_LOAD_FAST, (-- value)) { - value = GETLOCAL(oparg); - } - - op(_LOAD_FAST_AND_CLEAR, (-- value)) { - value = GETLOCAL(oparg); - _Py_UOpsSymType *temp = sym_new_null(ctx); - if (temp == NULL) { - goto out_of_space; - } - GETLOCAL(oparg) = temp; - } - - op(_STORE_FAST, (value --)) { - GETLOCAL(oparg) = value; - } - - op(_PUSH_NULL, (-- res)) { - res = sym_new_null(ctx); - if (res == NULL) { - goto out_of_space; - }; - } - - op(_GUARD_BOTH_INT, (left, right -- left, right)) { - if (sym_matches_type(left, &PyLong_Type) && - sym_matches_type(right, &PyLong_Type)) { - REPLACE_OP(this_instr, _NOP, 0, 0); - } - sym_set_type(left, &PyLong_Type); - sym_set_type(right, &PyLong_Type); - } - - op(_GUARD_BOTH_FLOAT, (left, right -- left, right)) { - if (sym_matches_type(left, &PyFloat_Type) && - sym_matches_type(right, &PyFloat_Type)) { - REPLACE_OP(this_instr, _NOP, 0 ,0); - } - sym_set_type(left, &PyFloat_Type); - sym_set_type(right, &PyFloat_Type); - } - - - op(_BINARY_OP_ADD_INT, (left, right -- res)) { - // TODO constant propagation - (void)left; - (void)right; - res = sym_new_known_type(ctx, &PyLong_Type); - if (res == NULL) { - goto out_of_space; - } - } - - op(_LOAD_CONST, (-- value)) { - // There should be no LOAD_CONST. It should be all - // replaced by peephole_opt. 
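        // (The expectation is that peephole_opt rewrites every _LOAD_CONST
        // into one of the _LOAD_CONST_INLINE* forms handled further below,
        // so reaching this case would mean that pass did not run.)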
- Py_UNREACHABLE(); - } - - op(_LOAD_CONST_INLINE, (ptr/4 -- value)) { - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - } - - op(_LOAD_CONST_INLINE_BORROW, (ptr/4 -- value)) { - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - } - - op(_LOAD_CONST_INLINE_WITH_NULL, (ptr/4 -- value, null)) { - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - null = sym_new_null(ctx); - if (null == NULL) { - goto out_of_space; - } - } - - op(_LOAD_CONST_INLINE_BORROW_WITH_NULL, (ptr/4 -- value, null)) { - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - null = sym_new_null(ctx); - if (null == NULL) { - goto out_of_space; - } - } - - - op(_COPY, (bottom, unused[oparg-1] -- bottom, unused[oparg-1], top)) { - assert(oparg > 0); - top = bottom; - } - - op(_SWAP, (bottom, unused[oparg-2], top -- - top, unused[oparg-2], bottom)) { - } - - op(_LOAD_ATTR_INSTANCE_VALUE, (index/1, owner -- attr, null if (oparg & 1))) { - _LOAD_ATTR_NOT_NULL - (void)index; - (void)owner; - } - - op(_LOAD_ATTR_MODULE, (index/1, owner -- attr, null if (oparg & 1))) { - _LOAD_ATTR_NOT_NULL - (void)index; - (void)owner; - } - - op(_LOAD_ATTR_WITH_HINT, (hint/1, owner -- attr, null if (oparg & 1))) { - _LOAD_ATTR_NOT_NULL - (void)hint; - (void)owner; - } - - op(_LOAD_ATTR_SLOT, (index/1, owner -- attr, null if (oparg & 1))) { - _LOAD_ATTR_NOT_NULL - (void)index; - (void)owner; - } - - op(_LOAD_ATTR_CLASS, (descr/4, owner -- attr, null if (oparg & 1))) { - _LOAD_ATTR_NOT_NULL - (void)descr; - (void)owner; - } - - op(_CHECK_FUNCTION_EXACT_ARGS, (func_version/2, callable, self_or_null, unused[oparg] -- callable, self_or_null, unused[oparg])) { - sym_set_type(callable, &PyFunction_Type); - (void)self_or_null; - (void)func_version; - } - - op(_CHECK_CALL_BOUND_METHOD_EXACT_ARGS, (callable, null, unused[oparg] -- callable, null, unused[oparg])) { - sym_set_null(null); - sym_set_type(callable, &PyMethod_Type); - } - - op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame: _Py_UOpsAbstractFrame *)) { - int argcount = oparg; - - (void)callable; - - PyFunctionObject *func = (PyFunctionObject *)(this_instr + 2)->operand; - if (func == NULL) { - goto error; - } - PyCodeObject *co = (PyCodeObject *)func->func_code; - - assert(self_or_null != NULL); - assert(args != NULL); - if (sym_is_not_null(self_or_null)) { - // Bound method fiddling, same as _INIT_CALL_PY_EXACT_ARGS in VM - args--; - argcount++; - } - - _Py_UOpsSymType **localsplus_start = ctx->n_consumed; - int n_locals_already_filled = 0; - // Can determine statically, so we interleave the new locals - // and make the current stack the new locals. - // This also sets up for true call inlining. 
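        // That is: when self_or_null is statically known, the argument slots
        // already sitting on the caller's abstract stack are reused in place
        // as the callee frame's localsplus rather than being copied.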
- if (sym_is_known(self_or_null)) { - localsplus_start = args; - n_locals_already_filled = argcount; - } - new_frame = ctx_frame_new(ctx, co, localsplus_start, n_locals_already_filled, 0); - if (new_frame == NULL){ - goto out_of_space; - } - } - - op(_POP_FRAME, (retval -- res)) { - SYNC_SP(); - ctx->frame->stack_pointer = stack_pointer; - ctx_frame_pop(ctx); - stack_pointer = ctx->frame->stack_pointer; - res = retval; - } - - op(_PUSH_FRAME, (new_frame: _Py_UOpsAbstractFrame * -- unused if (0))) { - SYNC_SP(); - ctx->frame->stack_pointer = stack_pointer; - ctx->frame = new_frame; - ctx->curr_frame_depth++; - stack_pointer = new_frame->stack_pointer; - } - - op(_UNPACK_SEQUENCE, (seq -- values[oparg])) { - /* This has to be done manually */ - (void)seq; - for (int i = 0; i < oparg; i++) { - values[i] = sym_new_unknown(ctx); - if (values[i] == NULL) { - goto out_of_space; - } - } - } - - op(_UNPACK_EX, (seq -- values[oparg & 0xFF], unused, unused[oparg >> 8])) { - /* This has to be done manually */ - (void)seq; - int totalargs = (oparg & 0xFF) + (oparg >> 8) + 1; - for (int i = 0; i < totalargs; i++) { - values[i] = sym_new_unknown(ctx); - if (values[i] == NULL) { - goto out_of_space; - } - } - } - - op(_ITER_NEXT_RANGE, (iter -- iter, next)) { - next = sym_new_known_type(ctx, &PyLong_Type); - if (next == NULL) { - goto out_of_space; - } - (void)iter; - } - - - - -// END BYTECODES // - -} \ No newline at end of file diff --git a/Python/tier2_redundancy_eliminator_cases.c.h b/Python/tier2_redundancy_eliminator_cases.c.h deleted file mode 100644 index c2b7bbaf1c4481..00000000000000 --- a/Python/tier2_redundancy_eliminator_cases.c.h +++ /dev/null @@ -1,1680 +0,0 @@ -// This file is generated by Tools/cases_generator/tier2_abstract_generator.py -// from: -// Python/tier2_redundancy_eliminator_bytecodes.c -// Do not edit! - - case _NOP: { - break; - } - - case _RESUME_CHECK: { - break; - } - - /* _INSTRUMENTED_RESUME is not a viable micro-op for tier 2 */ - - case _LOAD_FAST_CHECK: { - _Py_UOpsSymType *value; - value = GETLOCAL(oparg); - // We guarantee this will error - just bail and don't optimize it. - if (sym_is_null(value)) { - goto out_of_space; - } - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _LOAD_FAST: { - _Py_UOpsSymType *value; - value = GETLOCAL(oparg); - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _LOAD_FAST_AND_CLEAR: { - _Py_UOpsSymType *value; - value = GETLOCAL(oparg); - _Py_UOpsSymType *temp = sym_new_null(ctx); - if (temp == NULL) { - goto out_of_space; - } - GETLOCAL(oparg) = temp; - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _LOAD_CONST: { - _Py_UOpsSymType *value; - // There should be no LOAD_CONST. It should be all - // replaced by peephole_opt. 
- Py_UNREACHABLE(); - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _STORE_FAST: { - _Py_UOpsSymType *value; - value = stack_pointer[-1]; - GETLOCAL(oparg) = value; - stack_pointer += -1; - break; - } - - case _POP_TOP: { - stack_pointer += -1; - break; - } - - case _PUSH_NULL: { - _Py_UOpsSymType *res; - res = sym_new_null(ctx); - if (res == NULL) { - goto out_of_space; - }; - stack_pointer[0] = res; - stack_pointer += 1; - break; - } - - case _END_SEND: { - _Py_UOpsSymType *value; - value = sym_new_unknown(ctx); - if (value == NULL) goto out_of_space; - stack_pointer[-2] = value; - stack_pointer += -1; - break; - } - - case _UNARY_NEGATIVE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _UNARY_NOT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _TO_BOOL: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _TO_BOOL_BOOL: { - break; - } - - case _TO_BOOL_INT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _TO_BOOL_LIST: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _TO_BOOL_NONE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _TO_BOOL_STR: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _TO_BOOL_ALWAYS_TRUE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _UNARY_INVERT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _GUARD_BOTH_INT: { - _Py_UOpsSymType *right; - _Py_UOpsSymType *left; - right = stack_pointer[-1]; - left = stack_pointer[-2]; - if (sym_matches_type(left, &PyLong_Type) && - sym_matches_type(right, &PyLong_Type)) { - REPLACE_OP(this_instr, _NOP, 0, 0); - } - sym_set_type(left, &PyLong_Type); - sym_set_type(right, &PyLong_Type); - break; - } - - case _BINARY_OP_MULTIPLY_INT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_OP_ADD_INT: { - _Py_UOpsSymType *right; - _Py_UOpsSymType *left; - _Py_UOpsSymType *res; - right = stack_pointer[-1]; - left = stack_pointer[-2]; - // TODO constant propagation - (void)left; - (void)right; - res = sym_new_known_type(ctx, &PyLong_Type); - if (res == NULL) { - goto out_of_space; - } - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_OP_SUBTRACT_INT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _GUARD_BOTH_FLOAT: { - _Py_UOpsSymType *right; - _Py_UOpsSymType *left; - right = stack_pointer[-1]; - left = stack_pointer[-2]; - if (sym_matches_type(left, &PyFloat_Type) && - sym_matches_type(right, &PyFloat_Type)) { - REPLACE_OP(this_instr, _NOP, 0 ,0); - } - sym_set_type(left, &PyFloat_Type); - sym_set_type(right, 
&PyFloat_Type); - break; - } - - case _BINARY_OP_MULTIPLY_FLOAT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_OP_ADD_FLOAT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_OP_SUBTRACT_FLOAT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _GUARD_BOTH_UNICODE: { - break; - } - - case _BINARY_OP_ADD_UNICODE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_SUBSCR: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_SLICE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-3] = res; - stack_pointer += -2; - break; - } - - case _STORE_SLICE: { - stack_pointer += -4; - break; - } - - case _BINARY_SUBSCR_LIST_INT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_SUBSCR_STR_INT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_SUBSCR_TUPLE_INT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _BINARY_SUBSCR_DICT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - /* _BINARY_SUBSCR_GETITEM is not a viable micro-op for tier 2 */ - - case _LIST_APPEND: { - stack_pointer += -1; - break; - } - - case _SET_ADD: { - stack_pointer += -1; - break; - } - - case _STORE_SUBSCR: { - stack_pointer += -3; - break; - } - - case _STORE_SUBSCR_LIST_INT: { - stack_pointer += -3; - break; - } - - case _STORE_SUBSCR_DICT: { - stack_pointer += -3; - break; - } - - case _DELETE_SUBSCR: { - stack_pointer += -2; - break; - } - - case _CALL_INTRINSIC_1: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _CALL_INTRINSIC_2: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _POP_FRAME: { - _Py_UOpsSymType *retval; - _Py_UOpsSymType *res; - retval = stack_pointer[-1]; - stack_pointer += -1; - ctx->frame->stack_pointer = stack_pointer; - ctx_frame_pop(ctx); - stack_pointer = ctx->frame->stack_pointer; - res = retval; - stack_pointer[0] = res; - stack_pointer += 1; - break; - } - - /* _INSTRUMENTED_RETURN_VALUE is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_RETURN_CONST is not a viable micro-op for tier 2 */ - - case _GET_AITER: { - _Py_UOpsSymType *iter; - iter = sym_new_unknown(ctx); - if (iter == NULL) goto out_of_space; - stack_pointer[-1] = iter; - break; - } - - case _GET_ANEXT: { - _Py_UOpsSymType *awaitable; - awaitable = sym_new_unknown(ctx); - if (awaitable == NULL) 
goto out_of_space; - stack_pointer[0] = awaitable; - stack_pointer += 1; - break; - } - - case _GET_AWAITABLE: { - _Py_UOpsSymType *iter; - iter = sym_new_unknown(ctx); - if (iter == NULL) goto out_of_space; - stack_pointer[-1] = iter; - break; - } - - /* _SEND is not a viable micro-op for tier 2 */ - - /* _SEND_GEN is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_YIELD_VALUE is not a viable micro-op for tier 2 */ - - case _POP_EXCEPT: { - stack_pointer += -1; - break; - } - - case _LOAD_ASSERTION_ERROR: { - _Py_UOpsSymType *value; - value = sym_new_unknown(ctx); - if (value == NULL) goto out_of_space; - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _LOAD_BUILD_CLASS: { - _Py_UOpsSymType *bc; - bc = sym_new_unknown(ctx); - if (bc == NULL) goto out_of_space; - stack_pointer[0] = bc; - stack_pointer += 1; - break; - } - - case _STORE_NAME: { - stack_pointer += -1; - break; - } - - case _DELETE_NAME: { - break; - } - - case _UNPACK_SEQUENCE: { - _Py_UOpsSymType *seq; - _Py_UOpsSymType **values; - seq = stack_pointer[-1]; - values = &stack_pointer[-1]; - /* This has to be done manually */ - (void)seq; - for (int i = 0; i < oparg; i++) { - values[i] = sym_new_unknown(ctx); - if (values[i] == NULL) { - goto out_of_space; - } - } - stack_pointer += -1 + oparg; - break; - } - - case _UNPACK_SEQUENCE_TWO_TUPLE: { - _Py_UOpsSymType **values; - values = &stack_pointer[-1]; - for (int _i = oparg; --_i >= 0;) { - values[_i] = sym_new_unknown(ctx); - if (values[_i] == NULL) goto out_of_space; - } - stack_pointer += -1 + oparg; - break; - } - - case _UNPACK_SEQUENCE_TUPLE: { - _Py_UOpsSymType **values; - values = &stack_pointer[-1]; - for (int _i = oparg; --_i >= 0;) { - values[_i] = sym_new_unknown(ctx); - if (values[_i] == NULL) goto out_of_space; - } - stack_pointer += -1 + oparg; - break; - } - - case _UNPACK_SEQUENCE_LIST: { - _Py_UOpsSymType **values; - values = &stack_pointer[-1]; - for (int _i = oparg; --_i >= 0;) { - values[_i] = sym_new_unknown(ctx); - if (values[_i] == NULL) goto out_of_space; - } - stack_pointer += -1 + oparg; - break; - } - - case _UNPACK_EX: { - _Py_UOpsSymType *seq; - _Py_UOpsSymType **values; - seq = stack_pointer[-1]; - values = &stack_pointer[-1]; - /* This has to be done manually */ - (void)seq; - int totalargs = (oparg & 0xFF) + (oparg >> 8) + 1; - for (int i = 0; i < totalargs; i++) { - values[i] = sym_new_unknown(ctx); - if (values[i] == NULL) { - goto out_of_space; - } - } - stack_pointer += (oparg >> 8) + (oparg & 0xFF); - break; - } - - case _STORE_ATTR: { - stack_pointer += -2; - break; - } - - case _DELETE_ATTR: { - stack_pointer += -1; - break; - } - - case _STORE_GLOBAL: { - stack_pointer += -1; - break; - } - - case _DELETE_GLOBAL: { - break; - } - - case _LOAD_LOCALS: { - _Py_UOpsSymType *locals; - locals = sym_new_unknown(ctx); - if (locals == NULL) goto out_of_space; - stack_pointer[0] = locals; - stack_pointer += 1; - break; - } - - case _LOAD_FROM_DICT_OR_GLOBALS: { - _Py_UOpsSymType *v; - v = sym_new_unknown(ctx); - if (v == NULL) goto out_of_space; - stack_pointer[-1] = v; - break; - } - - case _LOAD_NAME: { - _Py_UOpsSymType *v; - v = sym_new_unknown(ctx); - if (v == NULL) goto out_of_space; - stack_pointer[0] = v; - stack_pointer += 1; - break; - } - - case _LOAD_GLOBAL: { - _Py_UOpsSymType *res; - _Py_UOpsSymType *null = NULL; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - null = sym_new_null(ctx); - if (null == NULL) goto out_of_space; - stack_pointer[0] = res; - if (oparg & 1) 
stack_pointer[1] = null; - stack_pointer += 1 + (oparg & 1); - break; - } - - case _GUARD_GLOBALS_VERSION: { - break; - } - - case _GUARD_BUILTINS_VERSION: { - break; - } - - case _LOAD_GLOBAL_MODULE: { - _Py_UOpsSymType *res; - _Py_UOpsSymType *null = NULL; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - null = sym_new_null(ctx); - if (null == NULL) goto out_of_space; - stack_pointer[0] = res; - if (oparg & 1) stack_pointer[1] = null; - stack_pointer += 1 + (oparg & 1); - break; - } - - case _LOAD_GLOBAL_BUILTINS: { - _Py_UOpsSymType *res; - _Py_UOpsSymType *null = NULL; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - null = sym_new_null(ctx); - if (null == NULL) goto out_of_space; - stack_pointer[0] = res; - if (oparg & 1) stack_pointer[1] = null; - stack_pointer += 1 + (oparg & 1); - break; - } - - case _DELETE_FAST: { - break; - } - - case _MAKE_CELL: { - break; - } - - case _DELETE_DEREF: { - break; - } - - case _LOAD_FROM_DICT_OR_DEREF: { - _Py_UOpsSymType *value; - value = sym_new_unknown(ctx); - if (value == NULL) goto out_of_space; - stack_pointer[-1] = value; - break; - } - - case _LOAD_DEREF: { - _Py_UOpsSymType *value; - value = sym_new_unknown(ctx); - if (value == NULL) goto out_of_space; - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _STORE_DEREF: { - stack_pointer += -1; - break; - } - - case _COPY_FREE_VARS: { - break; - } - - case _BUILD_STRING: { - _Py_UOpsSymType *str; - str = sym_new_unknown(ctx); - if (str == NULL) goto out_of_space; - stack_pointer[-oparg] = str; - stack_pointer += 1 - oparg; - break; - } - - case _BUILD_TUPLE: { - _Py_UOpsSymType *tup; - tup = sym_new_unknown(ctx); - if (tup == NULL) goto out_of_space; - stack_pointer[-oparg] = tup; - stack_pointer += 1 - oparg; - break; - } - - case _BUILD_LIST: { - _Py_UOpsSymType *list; - list = sym_new_unknown(ctx); - if (list == NULL) goto out_of_space; - stack_pointer[-oparg] = list; - stack_pointer += 1 - oparg; - break; - } - - case _LIST_EXTEND: { - stack_pointer += -1; - break; - } - - case _SET_UPDATE: { - stack_pointer += -1; - break; - } - - case _BUILD_SET: { - _Py_UOpsSymType *set; - set = sym_new_unknown(ctx); - if (set == NULL) goto out_of_space; - stack_pointer[-oparg] = set; - stack_pointer += 1 - oparg; - break; - } - - case _BUILD_MAP: { - _Py_UOpsSymType *map; - map = sym_new_unknown(ctx); - if (map == NULL) goto out_of_space; - stack_pointer[-oparg*2] = map; - stack_pointer += 1 - oparg*2; - break; - } - - case _SETUP_ANNOTATIONS: { - break; - } - - case _BUILD_CONST_KEY_MAP: { - _Py_UOpsSymType *map; - map = sym_new_unknown(ctx); - if (map == NULL) goto out_of_space; - stack_pointer[-1 - oparg] = map; - stack_pointer += -oparg; - break; - } - - case _DICT_UPDATE: { - stack_pointer += -1; - break; - } - - case _DICT_MERGE: { - stack_pointer += -1; - break; - } - - case _MAP_ADD: { - stack_pointer += -2; - break; - } - - /* _INSTRUMENTED_LOAD_SUPER_ATTR is not a viable micro-op for tier 2 */ - - case _LOAD_SUPER_ATTR_ATTR: { - _Py_UOpsSymType *attr; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - stack_pointer[-3] = attr; - stack_pointer += -2 + ((0) ? 
1 : 0); - break; - } - - case _LOAD_SUPER_ATTR_METHOD: { - _Py_UOpsSymType *attr; - _Py_UOpsSymType *self_or_null; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - self_or_null = sym_new_unknown(ctx); - if (self_or_null == NULL) goto out_of_space; - stack_pointer[-3] = attr; - stack_pointer[-2] = self_or_null; - stack_pointer += -1; - break; - } - - case _LOAD_ATTR: { - _Py_UOpsSymType *attr; - _Py_UOpsSymType *self_or_null = NULL; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - self_or_null = sym_new_unknown(ctx); - if (self_or_null == NULL) goto out_of_space; - stack_pointer[-1] = attr; - if (oparg & 1) stack_pointer[0] = self_or_null; - stack_pointer += (oparg & 1); - break; - } - - case _GUARD_TYPE_VERSION: { - break; - } - - case _CHECK_MANAGED_OBJECT_HAS_VALUES: { - break; - } - - case _LOAD_ATTR_INSTANCE_VALUE: { - _Py_UOpsSymType *owner; - _Py_UOpsSymType *attr; - _Py_UOpsSymType *null = NULL; - owner = stack_pointer[-1]; - uint16_t index = (uint16_t)this_instr->operand; - _LOAD_ATTR_NOT_NULL - (void)index; - (void)owner; - stack_pointer[-1] = attr; - if (oparg & 1) stack_pointer[0] = null; - stack_pointer += (oparg & 1); - break; - } - - case _CHECK_ATTR_MODULE: { - break; - } - - case _LOAD_ATTR_MODULE: { - _Py_UOpsSymType *owner; - _Py_UOpsSymType *attr; - _Py_UOpsSymType *null = NULL; - owner = stack_pointer[-1]; - uint16_t index = (uint16_t)this_instr->operand; - _LOAD_ATTR_NOT_NULL - (void)index; - (void)owner; - stack_pointer[-1] = attr; - if (oparg & 1) stack_pointer[0] = null; - stack_pointer += (oparg & 1); - break; - } - - case _CHECK_ATTR_WITH_HINT: { - break; - } - - case _LOAD_ATTR_WITH_HINT: { - _Py_UOpsSymType *owner; - _Py_UOpsSymType *attr; - _Py_UOpsSymType *null = NULL; - owner = stack_pointer[-1]; - uint16_t hint = (uint16_t)this_instr->operand; - _LOAD_ATTR_NOT_NULL - (void)hint; - (void)owner; - stack_pointer[-1] = attr; - if (oparg & 1) stack_pointer[0] = null; - stack_pointer += (oparg & 1); - break; - } - - case _LOAD_ATTR_SLOT: { - _Py_UOpsSymType *owner; - _Py_UOpsSymType *attr; - _Py_UOpsSymType *null = NULL; - owner = stack_pointer[-1]; - uint16_t index = (uint16_t)this_instr->operand; - _LOAD_ATTR_NOT_NULL - (void)index; - (void)owner; - stack_pointer[-1] = attr; - if (oparg & 1) stack_pointer[0] = null; - stack_pointer += (oparg & 1); - break; - } - - case _CHECK_ATTR_CLASS: { - break; - } - - case _LOAD_ATTR_CLASS: { - _Py_UOpsSymType *owner; - _Py_UOpsSymType *attr; - _Py_UOpsSymType *null = NULL; - owner = stack_pointer[-1]; - PyObject *descr = (PyObject *)this_instr->operand; - _LOAD_ATTR_NOT_NULL - (void)descr; - (void)owner; - stack_pointer[-1] = attr; - if (oparg & 1) stack_pointer[0] = null; - stack_pointer += (oparg & 1); - break; - } - - /* _LOAD_ATTR_PROPERTY is not a viable micro-op for tier 2 */ - - /* _LOAD_ATTR_GETATTRIBUTE_OVERRIDDEN is not a viable micro-op for tier 2 */ - - case _GUARD_DORV_VALUES: { - break; - } - - case _STORE_ATTR_INSTANCE_VALUE: { - stack_pointer += -2; - break; - } - - /* _STORE_ATTR_WITH_HINT is not a viable micro-op for tier 2 */ - - case _STORE_ATTR_SLOT: { - stack_pointer += -2; - break; - } - - case _COMPARE_OP: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _COMPARE_OP_FLOAT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case 
_COMPARE_OP_INT: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _COMPARE_OP_STR: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _IS_OP: { - _Py_UOpsSymType *b; - b = sym_new_unknown(ctx); - if (b == NULL) goto out_of_space; - stack_pointer[-2] = b; - stack_pointer += -1; - break; - } - - case _CONTAINS_OP: { - _Py_UOpsSymType *b; - b = sym_new_unknown(ctx); - if (b == NULL) goto out_of_space; - stack_pointer[-2] = b; - stack_pointer += -1; - break; - } - - case _CHECK_EG_MATCH: { - _Py_UOpsSymType *rest; - _Py_UOpsSymType *match; - rest = sym_new_unknown(ctx); - if (rest == NULL) goto out_of_space; - match = sym_new_unknown(ctx); - if (match == NULL) goto out_of_space; - stack_pointer[-2] = rest; - stack_pointer[-1] = match; - break; - } - - case _CHECK_EXC_MATCH: { - _Py_UOpsSymType *b; - b = sym_new_unknown(ctx); - if (b == NULL) goto out_of_space; - stack_pointer[-1] = b; - break; - } - - /* _JUMP_BACKWARD is not a viable micro-op for tier 2 */ - - /* _POP_JUMP_IF_FALSE is not a viable micro-op for tier 2 */ - - /* _POP_JUMP_IF_TRUE is not a viable micro-op for tier 2 */ - - case _IS_NONE: { - _Py_UOpsSymType *b; - b = sym_new_unknown(ctx); - if (b == NULL) goto out_of_space; - stack_pointer[-1] = b; - break; - } - - case _GET_LEN: { - _Py_UOpsSymType *len_o; - len_o = sym_new_unknown(ctx); - if (len_o == NULL) goto out_of_space; - stack_pointer[0] = len_o; - stack_pointer += 1; - break; - } - - case _MATCH_CLASS: { - _Py_UOpsSymType *attrs; - attrs = sym_new_unknown(ctx); - if (attrs == NULL) goto out_of_space; - stack_pointer[-3] = attrs; - stack_pointer += -2; - break; - } - - case _MATCH_MAPPING: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[0] = res; - stack_pointer += 1; - break; - } - - case _MATCH_SEQUENCE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[0] = res; - stack_pointer += 1; - break; - } - - case _MATCH_KEYS: { - _Py_UOpsSymType *values_or_none; - values_or_none = sym_new_unknown(ctx); - if (values_or_none == NULL) goto out_of_space; - stack_pointer[0] = values_or_none; - stack_pointer += 1; - break; - } - - case _GET_ITER: { - _Py_UOpsSymType *iter; - iter = sym_new_unknown(ctx); - if (iter == NULL) goto out_of_space; - stack_pointer[-1] = iter; - break; - } - - case _GET_YIELD_FROM_ITER: { - _Py_UOpsSymType *iter; - iter = sym_new_unknown(ctx); - if (iter == NULL) goto out_of_space; - stack_pointer[-1] = iter; - break; - } - - /* _FOR_ITER is not a viable micro-op for tier 2 */ - - case _FOR_ITER_TIER_TWO: { - _Py_UOpsSymType *next; - next = sym_new_unknown(ctx); - if (next == NULL) goto out_of_space; - stack_pointer[0] = next; - stack_pointer += 1; - break; - } - - /* _INSTRUMENTED_FOR_ITER is not a viable micro-op for tier 2 */ - - case _ITER_CHECK_LIST: { - break; - } - - /* _ITER_JUMP_LIST is not a viable micro-op for tier 2 */ - - case _GUARD_NOT_EXHAUSTED_LIST: { - break; - } - - case _ITER_NEXT_LIST: { - _Py_UOpsSymType *next; - next = sym_new_unknown(ctx); - if (next == NULL) goto out_of_space; - stack_pointer[0] = next; - stack_pointer += 1; - break; - } - - case _ITER_CHECK_TUPLE: { - break; - } - - /* _ITER_JUMP_TUPLE is not a viable micro-op for tier 2 */ - - case _GUARD_NOT_EXHAUSTED_TUPLE: { - 
break; - } - - case _ITER_NEXT_TUPLE: { - _Py_UOpsSymType *next; - next = sym_new_unknown(ctx); - if (next == NULL) goto out_of_space; - stack_pointer[0] = next; - stack_pointer += 1; - break; - } - - case _ITER_CHECK_RANGE: { - break; - } - - /* _ITER_JUMP_RANGE is not a viable micro-op for tier 2 */ - - case _GUARD_NOT_EXHAUSTED_RANGE: { - break; - } - - case _ITER_NEXT_RANGE: { - _Py_UOpsSymType *iter; - _Py_UOpsSymType *next; - iter = stack_pointer[-1]; - next = sym_new_known_type(ctx, &PyLong_Type); - if (next == NULL) { - goto out_of_space; - } - (void)iter; - stack_pointer[0] = next; - stack_pointer += 1; - break; - } - - /* _FOR_ITER_GEN is not a viable micro-op for tier 2 */ - - case _BEFORE_ASYNC_WITH: { - _Py_UOpsSymType *exit; - _Py_UOpsSymType *res; - exit = sym_new_unknown(ctx); - if (exit == NULL) goto out_of_space; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = exit; - stack_pointer[0] = res; - stack_pointer += 1; - break; - } - - case _BEFORE_WITH: { - _Py_UOpsSymType *exit; - _Py_UOpsSymType *res; - exit = sym_new_unknown(ctx); - if (exit == NULL) goto out_of_space; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = exit; - stack_pointer[0] = res; - stack_pointer += 1; - break; - } - - case _WITH_EXCEPT_START: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[0] = res; - stack_pointer += 1; - break; - } - - case _PUSH_EXC_INFO: { - _Py_UOpsSymType *prev_exc; - _Py_UOpsSymType *new_exc; - prev_exc = sym_new_unknown(ctx); - if (prev_exc == NULL) goto out_of_space; - new_exc = sym_new_unknown(ctx); - if (new_exc == NULL) goto out_of_space; - stack_pointer[-1] = prev_exc; - stack_pointer[0] = new_exc; - stack_pointer += 1; - break; - } - - case _GUARD_DORV_VALUES_INST_ATTR_FROM_DICT: { - break; - } - - case _GUARD_KEYS_VERSION: { - break; - } - - case _LOAD_ATTR_METHOD_WITH_VALUES: { - _Py_UOpsSymType *attr; - _Py_UOpsSymType *self = NULL; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - self = sym_new_unknown(ctx); - if (self == NULL) goto out_of_space; - stack_pointer[-1] = attr; - if (1) stack_pointer[0] = self; - stack_pointer += ((1) ? 1 : 0); - break; - } - - case _LOAD_ATTR_METHOD_NO_DICT: { - _Py_UOpsSymType *attr; - _Py_UOpsSymType *self = NULL; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - self = sym_new_unknown(ctx); - if (self == NULL) goto out_of_space; - stack_pointer[-1] = attr; - if (1) stack_pointer[0] = self; - stack_pointer += ((1) ? 1 : 0); - break; - } - - case _LOAD_ATTR_NONDESCRIPTOR_WITH_VALUES: { - _Py_UOpsSymType *attr; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - stack_pointer[-1] = attr; - stack_pointer += ((0) ? 1 : 0); - break; - } - - case _LOAD_ATTR_NONDESCRIPTOR_NO_DICT: { - _Py_UOpsSymType *attr; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - stack_pointer[-1] = attr; - stack_pointer += ((0) ? 1 : 0); - break; - } - - case _CHECK_ATTR_METHOD_LAZY_DICT: { - break; - } - - case _LOAD_ATTR_METHOD_LAZY_DICT: { - _Py_UOpsSymType *attr; - _Py_UOpsSymType *self = NULL; - attr = sym_new_unknown(ctx); - if (attr == NULL) goto out_of_space; - self = sym_new_unknown(ctx); - if (self == NULL) goto out_of_space; - stack_pointer[-1] = attr; - if (1) stack_pointer[0] = self; - stack_pointer += ((1) ? 
1 : 0); - break; - } - - /* _INSTRUMENTED_CALL is not a viable micro-op for tier 2 */ - - /* _CALL is not a viable micro-op for tier 2 */ - - case _CHECK_CALL_BOUND_METHOD_EXACT_ARGS: { - _Py_UOpsSymType *null; - _Py_UOpsSymType *callable; - null = stack_pointer[-1 - oparg]; - callable = stack_pointer[-2 - oparg]; - sym_set_null(null); - sym_set_type(callable, &PyMethod_Type); - break; - } - - case _INIT_CALL_BOUND_METHOD_EXACT_ARGS: { - _Py_UOpsSymType *func; - _Py_UOpsSymType *self; - func = sym_new_unknown(ctx); - if (func == NULL) goto out_of_space; - self = sym_new_unknown(ctx); - if (self == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = func; - stack_pointer[-1 - oparg] = self; - break; - } - - case _CHECK_PEP_523: { - break; - } - - case _CHECK_FUNCTION_EXACT_ARGS: { - _Py_UOpsSymType *self_or_null; - _Py_UOpsSymType *callable; - self_or_null = stack_pointer[-1 - oparg]; - callable = stack_pointer[-2 - oparg]; - uint32_t func_version = (uint32_t)this_instr->operand; - sym_set_type(callable, &PyFunction_Type); - (void)self_or_null; - (void)func_version; - break; - } - - case _CHECK_STACK_SPACE: { - break; - } - - case _INIT_CALL_PY_EXACT_ARGS: { - _Py_UOpsSymType **args; - _Py_UOpsSymType *self_or_null; - _Py_UOpsSymType *callable; - _Py_UOpsAbstractFrame *new_frame; - args = &stack_pointer[-oparg]; - self_or_null = stack_pointer[-1 - oparg]; - callable = stack_pointer[-2 - oparg]; - int argcount = oparg; - (void)callable; - PyFunctionObject *func = (PyFunctionObject *)(this_instr + 2)->operand; - if (func == NULL) { - goto error; - } - PyCodeObject *co = (PyCodeObject *)func->func_code; - assert(self_or_null != NULL); - assert(args != NULL); - if (sym_is_not_null(self_or_null)) { - // Bound method fiddling, same as _INIT_CALL_PY_EXACT_ARGS in VM - args--; - argcount++; - } - _Py_UOpsSymType **localsplus_start = ctx->n_consumed; - int n_locals_already_filled = 0; - // Can determine statically, so we interleave the new locals - // and make the current stack the new locals. - // This also sets up for true call inlining. - if (sym_is_known(self_or_null)) { - localsplus_start = args; - n_locals_already_filled = argcount; - } - new_frame = ctx_frame_new(ctx, co, localsplus_start, n_locals_already_filled, 0); - if (new_frame == NULL){ - goto out_of_space; - } - stack_pointer[-2 - oparg] = (_Py_UOpsSymType *)new_frame; - stack_pointer += -1 - oparg; - break; - } - - case _PUSH_FRAME: { - _Py_UOpsAbstractFrame *new_frame; - new_frame = (_Py_UOpsAbstractFrame *)stack_pointer[-1]; - stack_pointer += -1; - ctx->frame->stack_pointer = stack_pointer; - ctx->frame = new_frame; - ctx->curr_frame_depth++; - stack_pointer = new_frame->stack_pointer; - stack_pointer += ((0) ? 
1 : 0); - break; - } - - /* _CALL_PY_WITH_DEFAULTS is not a viable micro-op for tier 2 */ - - case _CALL_TYPE_1: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_STR_1: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_TUPLE_1: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - /* _CALL_ALLOC_AND_ENTER_INIT is not a viable micro-op for tier 2 */ - - case _EXIT_INIT_CHECK: { - stack_pointer += -1; - break; - } - - case _CALL_BUILTIN_CLASS: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_BUILTIN_O: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_BUILTIN_FAST: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_BUILTIN_FAST_WITH_KEYWORDS: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_LEN: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_ISINSTANCE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_METHOD_DESCRIPTOR_O: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_METHOD_DESCRIPTOR_NOARGS: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - case _CALL_METHOD_DESCRIPTOR_FAST: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2 - oparg] = res; - stack_pointer += -1 - oparg; - break; - } - - /* _INSTRUMENTED_CALL_KW is not a viable micro-op for tier 2 */ - - /* _CALL_KW is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_CALL_FUNCTION_EX is not a viable micro-op for tier 2 */ - - /* _CALL_FUNCTION_EX is not a viable micro-op for tier 2 */ - - case _MAKE_FUNCTION: { - _Py_UOpsSymType *func; - func = sym_new_unknown(ctx); - if (func == NULL) goto out_of_space; - stack_pointer[-1] = func; - break; - } - - case _SET_FUNCTION_ATTRIBUTE: { - _Py_UOpsSymType *func; - func = sym_new_unknown(ctx); - if (func == NULL) goto out_of_space; - stack_pointer[-2] = func; - stack_pointer += -1; - break; - } - - case _BUILD_SLICE: { - _Py_UOpsSymType 
*slice; - slice = sym_new_unknown(ctx); - if (slice == NULL) goto out_of_space; - stack_pointer[-2 - ((oparg == 3) ? 1 : 0)] = slice; - stack_pointer += -1 - ((oparg == 3) ? 1 : 0); - break; - } - - case _CONVERT_VALUE: { - _Py_UOpsSymType *result; - result = sym_new_unknown(ctx); - if (result == NULL) goto out_of_space; - stack_pointer[-1] = result; - break; - } - - case _FORMAT_SIMPLE: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-1] = res; - break; - } - - case _FORMAT_WITH_SPEC: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _COPY: { - _Py_UOpsSymType *bottom; - _Py_UOpsSymType *top; - bottom = stack_pointer[-1 - (oparg-1)]; - assert(oparg > 0); - top = bottom; - stack_pointer[0] = top; - stack_pointer += 1; - break; - } - - case _BINARY_OP: { - _Py_UOpsSymType *res; - res = sym_new_unknown(ctx); - if (res == NULL) goto out_of_space; - stack_pointer[-2] = res; - stack_pointer += -1; - break; - } - - case _SWAP: { - _Py_UOpsSymType *top; - _Py_UOpsSymType *bottom; - top = stack_pointer[-1]; - bottom = stack_pointer[-2 - (oparg-2)]; - stack_pointer[-2 - (oparg-2)] = top; - stack_pointer[-1] = bottom; - break; - } - - /* _INSTRUMENTED_INSTRUCTION is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_JUMP_FORWARD is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_JUMP_BACKWARD is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_POP_JUMP_IF_TRUE is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_POP_JUMP_IF_FALSE is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_POP_JUMP_IF_NONE is not a viable micro-op for tier 2 */ - - /* _INSTRUMENTED_POP_JUMP_IF_NOT_NONE is not a viable micro-op for tier 2 */ - - case _GUARD_IS_TRUE_POP: { - stack_pointer += -1; - break; - } - - case _GUARD_IS_FALSE_POP: { - stack_pointer += -1; - break; - } - - case _GUARD_IS_NONE_POP: { - stack_pointer += -1; - break; - } - - case _GUARD_IS_NOT_NONE_POP: { - stack_pointer += -1; - break; - } - - case _JUMP_TO_TOP: { - break; - } - - case _SET_IP: { - break; - } - - case _SAVE_RETURN_OFFSET: { - break; - } - - case _EXIT_TRACE: { - break; - } - - case _CHECK_VALIDITY: { - break; - } - - case _LOAD_CONST_INLINE: { - _Py_UOpsSymType *value; - PyObject *ptr = (PyObject *)this_instr->operand; - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _LOAD_CONST_INLINE_BORROW: { - _Py_UOpsSymType *value; - PyObject *ptr = (PyObject *)this_instr->operand; - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - stack_pointer[0] = value; - stack_pointer += 1; - break; - } - - case _LOAD_CONST_INLINE_WITH_NULL: { - _Py_UOpsSymType *value; - _Py_UOpsSymType *null; - PyObject *ptr = (PyObject *)this_instr->operand; - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - null = sym_new_null(ctx); - if (null == NULL) { - goto out_of_space; - } - stack_pointer[0] = value; - stack_pointer[1] = null; - stack_pointer += 2; - break; - } - - case _LOAD_CONST_INLINE_BORROW_WITH_NULL: { - _Py_UOpsSymType *value; - _Py_UOpsSymType *null; - PyObject *ptr = (PyObject *)this_instr->operand; - value = sym_new_const(ctx, ptr); - if (value == NULL) { - goto out_of_space; - } - null = sym_new_null(ctx); - if (null == NULL) { - goto out_of_space; - } - stack_pointer[0] = 
value; - stack_pointer[1] = null; - stack_pointer += 2; - break; - } - - case _CHECK_GLOBALS: { - break; - } - - case _CHECK_BUILTINS: { - break; - } - - case _INTERNAL_INCREMENT_OPT_COUNTER: { - stack_pointer += -1; - break; - } - - case _CHECK_VALIDITY_AND_SET_IP: { - break; - } - diff --git a/Tools/c-analyzer/cpython/_parser.py b/Tools/c-analyzer/cpython/_parser.py index be89a26058e8e8..444063d2148934 100644 --- a/Tools/c-analyzer/cpython/_parser.py +++ b/Tools/c-analyzer/cpython/_parser.py @@ -83,11 +83,9 @@ def clean_lines(text): Python/frozen_modules/*.h Python/generated_cases.c.h Python/executor_cases.c.h -Python/tier2_redundancy_eliminator_cases.c.h # not actually source Python/bytecodes.c -Python/tier2_redundancy_eliminator_bytecodes.c # mimalloc Objects/mimalloc/*.c diff --git a/Tools/c-analyzer/cpython/ignored.tsv b/Tools/c-analyzer/cpython/ignored.tsv index 14bcd85b9eae59..c75aff8c1723c1 100644 --- a/Tools/c-analyzer/cpython/ignored.tsv +++ b/Tools/c-analyzer/cpython/ignored.tsv @@ -734,6 +734,6 @@ Modules/expat/xmlrole.c - error - ## other Modules/_io/_iomodule.c - _PyIO_Module - Modules/_sqlite/module.c - _sqlite3module - -Python/optimizer_analysis.c - _Py_UOpsAbstractFrame_Type - +Python/optimizer_analysis.c - _Py_PartitionRootNode_Type - Python/optimizer_analysis.c - _Py_UOpsAbstractInterpContext_Type - Modules/clinic/md5module.c.h _md5_md5 _keywords - diff --git a/Tools/cases_generator/README.md b/Tools/cases_generator/README.md index d35a868b42ea9e..7fec8a882336cd 100644 --- a/Tools/cases_generator/README.md +++ b/Tools/cases_generator/README.md @@ -13,9 +13,6 @@ What's currently here: - `parser.py` helper for interactions with `parsing.py` - `tierN_generator.py`: a couple of driver scripts to read `Python/bytecodes.c` and write `Python/generated_cases.c.h` (and several other files) -- `tier2_abstract_generator.py`: reads `Python/bytecodes.c` and - `Python/tier2_redundancy_eliminator_bytecodes.c` and writes - `Python/tier2_redundancy_eliminator_cases.c.h` - `stack.py`: code to handle generalized stack effects - `cwriter.py`: code which understands tokens and how to format C code; main class: `CWriter` diff --git a/Tools/cases_generator/analyzer.py b/Tools/cases_generator/analyzer.py index 3497b7fcdf35d3..b80fa66e2a159a 100644 --- a/Tools/cases_generator/analyzer.py +++ b/Tools/cases_generator/analyzer.py @@ -24,6 +24,7 @@ class Properties: pure: bool passthrough: bool + guard: bool def dump(self, indent: str) -> None: print(indent, end="") @@ -50,6 +51,7 @@ def from_list(properties: list["Properties"]) -> "Properties": has_free=any(p.has_free for p in properties), pure=all(p.pure for p in properties), passthrough=all(p.passthrough for p in properties), + guard=all(p.guard for p in properties), ) @@ -71,6 +73,7 @@ def from_list(properties: list["Properties"]) -> "Properties": has_free=False, pure=False, passthrough=False, + guard=False, ) @@ -270,7 +273,7 @@ def override_error( def convert_stack_item(item: parser.StackEffect) -> StackItem: return StackItem( - item.name, item.type, item.cond, (item.size or "1") + item.name, item.type, item.cond, (item.size or "1"), type_prop=item.type_prop ) @@ -470,6 +473,7 @@ def compute_properties(op: parser.InstDef) -> Properties: has_free=has_free, pure="pure" in op.annotations, passthrough=passthrough, + guard=passthrough and deopts, ) diff --git a/Tools/cases_generator/interpreter_definition.md b/Tools/cases_generator/interpreter_definition.md index 9b5733562f77b4..e87aff43762b11 100644 --- 
a/Tools/cases_generator/interpreter_definition.md +++ b/Tools/cases_generator/interpreter_definition.md @@ -109,7 +109,10 @@ and a piece of C code describing its semantics:: NAME [":" type] [ "if" "(" C-expression ")" ] type: - NAME ["*"] + NAME ["*"] | type_prop + + type_prop: + "&" "(" NAME ["+" NAME] ")" stream: NAME "/" size @@ -139,7 +142,26 @@ The following definitions may occur: The optional `type` in an `object` is the C type. It defaults to `PyObject *`. The objects before the "--" are the objects on top of the stack at the start of the instruction. Those after the "--" are the objects on top of the stack at the -end of the instruction. +end of the instruction. When prefixed by a `&`, the `type` production rule follows the +`type_prop` production rule. This indicates the type of the value is of that specific type +after the operation. In this case, the type may also contain 64-bit refinement information +that is fetched from a previously defined operand in the instruction header, such as +a type version tag. This follows the format `type + refinement`. The list of possible types +and their refinements are below. They obey the following predicates: + + +* `PYLONG_TYPE`: `Py_TYPE(val) == &PyLong_Type` +* `PYFLOAT_TYPE`: `Py_TYPE(val) == &PyFloat_Type` +* `PYUNICODE_TYPE`: `Py_TYPE(val) == &PYUNICODE_TYPE` +* `NULL_TYPE`: `val == NULL` +* `GUARD_TYPE_VERSION_TYPE`: `type->tp_version_tag == auxillary` +* `GUARD_DORV_VALUES_TYPE`: `_PyDictOrValues_IsValues(obj)` +* `GUARD_DORV_VALUES_INST_ATTR_FROM_DICT_TYPE`: + `_PyDictOrValues_IsValues(obj) || _PyObject_MakeInstanceAttributesFromDict(obj, dorv)` +* `GUARD_KEYS_VERSION_TYPE`: `owner_heap_type->ht_cached_keys->dk_version == auxillary` +* `PYMETHOD_TYPE`: `Py_TYPE(val) == &PyMethod_Type` +* `PYFUNCTION_TYPE_VERSION_TYPE`: + `PyFunction_Check(callable) && func->func_version == auxillary && code->co_argcount == oparg + (self_or_null != NULL)` An `inst` without `stack_effect` is a transitional form to allow the original C code diff --git a/Tools/cases_generator/parsing.py b/Tools/cases_generator/parsing.py index a8961f28babea1..307919cb37ce1e 100644 --- a/Tools/cases_generator/parsing.py +++ b/Tools/cases_generator/parsing.py @@ -75,6 +75,11 @@ class StackEffect(Node): size: str = "" # Optional `[size]` # Note: size cannot be combined with type or cond + # Optional `(type, refinement)` + type_prop: None | tuple[str, None | str] = field( + default_factory=lambda: None, init=True, compare=False, hash=False + ) + def __repr__(self) -> str: items = [self.name, self.type, self.cond, self.size] while items and items[-1] == "": @@ -255,14 +260,25 @@ def cache_effect(self) -> CacheEffect | None: @contextual def stack_effect(self) -> StackEffect | None: - # IDENTIFIER [':' IDENTIFIER [TIMES]] ['if' '(' expression ')'] + # IDENTIFIER [':' [IDENTIFIER [TIMES]] ['&' '(' IDENTIFIER ['+' IDENTIFIER] ')']] ['if' '(' expression ')'] # | IDENTIFIER '[' expression ']' if tkn := self.expect(lx.IDENTIFIER): type_text = "" + type_prop = None if self.expect(lx.COLON): - type_text = self.require(lx.IDENTIFIER).text.strip() - if self.expect(lx.TIMES): - type_text += " *" + if i := self.expect(lx.IDENTIFIER): + type_text = i.text.strip() + if self.expect(lx.TIMES): + type_text += " *" + if self.expect(lx.AND): + consumed_bracket = self.expect(lx.LPAREN) is not None + type_prop_text = self.require(lx.IDENTIFIER).text.strip() + refinement = None + if self.expect(lx.PLUS): + refinement = self.require(lx.IDENTIFIER).text.strip() + type_prop = (type_prop_text, 
refinement) + if consumed_bracket: + self.require(lx.RPAREN) cond_text = "" if self.expect(lx.IF): self.require(lx.LPAREN) @@ -279,7 +295,7 @@ def stack_effect(self) -> StackEffect | None: self.require(lx.RBRACKET) type_text = "PyObject **" size_text = size.text.strip() - return StackEffect(tkn.text, type_text, cond_text, size_text) + return StackEffect(tkn.text, type_text, cond_text, size_text, type_prop) return None @contextual diff --git a/Tools/cases_generator/stack.py b/Tools/cases_generator/stack.py index 97a301142d59c7..f62ece43c1be7f 100644 --- a/Tools/cases_generator/stack.py +++ b/Tools/cases_generator/stack.py @@ -168,11 +168,11 @@ def push(self, var: StackItem) -> str: self.top_offset.push(var) return "" - def flush(self, out: CWriter, cast_type: str = "PyObject *") -> None: + def flush(self, out: CWriter) -> None: out.start_line() for var in self.variables: if not var.peek: - cast = f"({cast_type})" if var.type else "" + cast = "(PyObject *)" if var.type else "" if var.name not in UNUSED and not var.is_array(): if var.condition: out.emit(f"if ({var.condition}) ") diff --git a/Tools/cases_generator/tier2_abstract_generator.py b/Tools/cases_generator/tier2_abstract_generator.py deleted file mode 100644 index cc29b1660d26ed..00000000000000 --- a/Tools/cases_generator/tier2_abstract_generator.py +++ /dev/null @@ -1,235 +0,0 @@ -"""Generate the cases for the tier 2 redundancy eliminator/abstract interpreter. -Reads the instruction definitions from bytecodes.c. and tier2_redundancy_eliminator.bytecodes.c -Writes the cases to tier2_redundancy_eliminator_cases.c.h, which is #included in Python/optimizer_analysis.c. -""" - -import argparse -import os.path -import sys - -from analyzer import ( - Analysis, - Instruction, - Uop, - Part, - analyze_files, - Skip, - StackItem, - analysis_error, -) -from generators_common import ( - DEFAULT_INPUT, - ROOT, - write_header, - emit_tokens, - emit_to, - replace_sync_sp, -) -from cwriter import CWriter -from typing import TextIO, Iterator -from lexer import Token -from stack import StackOffset, Stack, SizeMismatch, UNUSED - -DEFAULT_OUTPUT = ROOT / "Python/tier2_redundancy_eliminator_cases.c.h" -DEFAULT_ABSTRACT_INPUT = ROOT / "Python/tier2_redundancy_eliminator_bytecodes.c" - - -def validate_uop(override: Uop, uop: Uop) -> None: - # To do - pass - - -def type_name(var: StackItem) -> str: - if var.is_array(): - return f"_Py_UOpsSymType **" - if var.type: - return var.type - return f"_Py_UOpsSymType *" - - -def declare_variables(uop: Uop, out: CWriter, skip_inputs: bool) -> None: - variables = {"unused"} - if not skip_inputs: - for var in reversed(uop.stack.inputs): - if var.name not in variables: - variables.add(var.name) - if var.condition: - out.emit(f"{type_name(var)}{var.name} = NULL;\n") - else: - out.emit(f"{type_name(var)}{var.name};\n") - for var in uop.stack.outputs: - if var.peek: - continue - if var.name not in variables: - variables.add(var.name) - if var.condition: - out.emit(f"{type_name(var)}{var.name} = NULL;\n") - else: - out.emit(f"{type_name(var)}{var.name};\n") - - -def decref_inputs( - out: CWriter, - tkn: Token, - tkn_iter: Iterator[Token], - uop: Uop, - stack: Stack, - inst: Instruction | None, -) -> None: - next(tkn_iter) - next(tkn_iter) - next(tkn_iter) - out.emit_at("", tkn) - - -def emit_default(out: CWriter, uop: Uop) -> None: - for i, var in enumerate(uop.stack.outputs): - if var.name != "unused" and not var.peek: - if var.is_array(): - out.emit(f"for (int _i = {var.size}; --_i >= 0;) {{\n") - out.emit(f"{var.name}[_i] 
= sym_new_unknown(ctx);\n") - out.emit(f"if ({var.name}[_i] == NULL) goto out_of_space;\n") - out.emit("}\n") - elif var.name == "null": - out.emit(f"{var.name} = sym_new_null(ctx);\n") - out.emit(f"if ({var.name} == NULL) goto out_of_space;\n") - else: - out.emit(f"{var.name} = sym_new_unknown(ctx);\n") - out.emit(f"if ({var.name} == NULL) goto out_of_space;\n") - - -def write_uop( - override: Uop | None, - uop: Uop, - out: CWriter, - stack: Stack, - debug: bool, - skip_inputs: bool, -) -> None: - try: - prototype = override if override else uop - is_override = override is not None - out.start_line() - for var in reversed(prototype.stack.inputs): - res = stack.pop(var) - if not skip_inputs: - out.emit(res) - if not prototype.properties.stores_sp: - for i, var in enumerate(prototype.stack.outputs): - res = stack.push(var) - if not var.peek or is_override: - out.emit(res) - if debug: - args = [] - for var in prototype.stack.inputs: - if not var.peek or is_override: - args.append(var.name) - out.emit(f'DEBUG_PRINTF({", ".join(args)});\n') - if override: - for cache in uop.caches: - if cache.name != "unused": - if cache.size == 4: - type = cast = "PyObject *" - else: - type = f"uint{cache.size*16}_t " - cast = f"uint{cache.size*16}_t" - out.emit(f"{type}{cache.name} = ({cast})this_instr->operand;\n") - if override: - replacement_funcs = { - "DECREF_INPUTS": decref_inputs, - "SYNC_SP": replace_sync_sp, - } - emit_tokens(out, override, stack, None, replacement_funcs) - else: - emit_default(out, uop) - - if prototype.properties.stores_sp: - for i, var in enumerate(prototype.stack.outputs): - if not var.peek or is_override: - out.emit(stack.push(var)) - out.start_line() - stack.flush(out, cast_type="_Py_UOpsSymType *") - except SizeMismatch as ex: - raise analysis_error(ex.args[0], uop.body[0]) - - -SKIPS = ("_EXTENDED_ARG",) - - -def generate_abstract_interpreter( - filenames: list[str], - abstract: Analysis, - base: Analysis, - outfile: TextIO, - debug: bool, -) -> None: - write_header(__file__, filenames, outfile) - out = CWriter(outfile, 2, False) - out.emit("\n") - base_uop_names = set([uop.name for uop in base.uops.values()]) - for abstract_uop_name in abstract.uops: - assert abstract_uop_name in base_uop_names,\ - f"All abstract uops should override base uops, but {abstract_uop_name} is not." - - for uop in base.uops.values(): - override: Uop | None = None - if uop.name in abstract.uops: - override = abstract.uops[uop.name] - validate_uop(override, uop) - if uop.properties.tier_one_only: - continue - if uop.is_super(): - continue - if not uop.is_viable(): - out.emit(f"/* {uop.name} is not a viable micro-op for tier 2 */\n\n") - continue - out.emit(f"case {uop.name}: {{\n") - if override: - declare_variables(override, out, skip_inputs=False) - else: - declare_variables(uop, out, skip_inputs=True) - stack = Stack() - write_uop(override, uop, out, stack, debug, skip_inputs=(override is None)) - out.start_line() - out.emit("break;\n") - out.emit("}") - out.emit("\n\n") - - -def generate_tier2_abstract_from_files( - filenames: list[str], outfilename: str, debug: bool=False -) -> None: - assert len(filenames) == 2, "Need a base file and an abstract cases file." 
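For concreteness, a stack effect written against the `type_prop` grammar documented in the interpreter_definition.md hunk above could look like the following. This is only an illustrative sketch: the uop bodies are paraphrased and the annotations are written by hand to show the `&(TYPE)` and `&(TYPE + refinement)` forms, not copied from the repository.

    op(_GUARD_BOTH_INT, (left, right -- left : &(PYLONG_TYPE), right : &(PYLONG_TYPE))) {
        // Both outputs are declared to be exact ints once the guard passes.
        DEOPT_IF(!PyLong_CheckExact(left));
        DEOPT_IF(!PyLong_CheckExact(right));
    }

    op(_GUARD_TYPE_VERSION, (type_version/2, owner -- owner : &(GUARD_TYPE_VERSION_TYPE + type_version))) {
        // The refinement names the cached operand (the type version tag)
        // declared earlier in the instruction header.
        DEOPT_IF(Py_TYPE(owner)->tp_version_tag != type_version);
    }

Both uops pass their operands through unchanged and can deoptimize, which is exactly the combination the new `guard` property in analyzer.py (`guard = passthrough and deopts`) singles out.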
- base = analyze_files([filenames[0]]) - abstract = analyze_files([filenames[1]]) - with open(outfilename, "w") as outfile: - generate_abstract_interpreter(filenames, abstract, base, outfile, debug) - - -arg_parser = argparse.ArgumentParser( - description="Generate the code for the tier 2 interpreter.", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, -) - -arg_parser.add_argument( - "-o", "--output", type=str, help="Generated code", default=DEFAULT_OUTPUT -) - - -arg_parser.add_argument("input", nargs=1, help="Abstract interpreter definition file") - -arg_parser.add_argument( - "base", nargs=argparse.REMAINDER, help="The base instruction definition file(s)" -) - -arg_parser.add_argument("-d", "--debug", help="Insert debug calls", action="store_true") - -if __name__ == "__main__": - args = arg_parser.parse_args() - if len(args.base) == 0: - args.input.append(DEFAULT_INPUT) - args.input.append(DEFAULT_ABSTRACT_INPUT) - abstract = analyze_files(args.input) - base = analyze_files(args.base) - with open(args.output, "w") as outfile: - generate_abstract_interpreter(args.input, abstract, base, outfile, args.debug) diff --git a/config.sub b/config.sub index 2c6a07ab3c34ea..d74fb6deac942a 100755 --- a/config.sub +++ b/config.sub @@ -1,14 +1,14 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2024 Free Software Foundation, Inc. +# Copyright 1992-2021 Free Software Foundation, Inc. # shellcheck disable=SC2006,SC2268 # see below for rationale -timestamp='2024-01-01' +timestamp='2021-08-14' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or +# the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but @@ -76,13 +76,13 @@ Report bugs and patches to ." version="\ GNU config.sub ($timestamp) -Copyright 1992-2024 Free Software Foundation, Inc. +Copyright 1992-2021 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" -Try '$me --help' for more information." +Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do @@ -130,7 +130,7 @@ IFS=$saved_IFS # Separate into logical components for further validation case $1 in *-*-*-*-*) - echo "Invalid configuration '$1': more than four components" >&2 + echo Invalid configuration \`"$1"\': more than four components >&2 exit 1 ;; *-*-*-*) @@ -145,8 +145,7 @@ case $1 in nto-qnx* | linux-* | uclinux-uclibc* \ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ - | storm-chaos* | os2-emx* | rtmk-nova* | managarm-* \ - | windows-* ) + | storm-chaos* | os2-emx* | rtmk-nova*) basic_machine=$field1 basic_os=$maybe_os ;; @@ -944,7 +943,7 @@ $basic_machine EOF IFS=$saved_IFS ;; - # We use 'pc' rather than 'unknown' + # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. 
i*86 | x86_64) @@ -1021,11 +1020,6 @@ case $cpu-$vendor in ;; # Here we normalize CPU types with a missing or matching vendor - armh-unknown | armh-alt) - cpu=armv7l - vendor=alt - basic_os=${basic_os:-linux-gnueabihf} - ;; dpx20-unknown | dpx20-bull) cpu=rs6000 vendor=bull @@ -1076,7 +1070,7 @@ case $cpu-$vendor in pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) cpu=i586 ;; - pentiumpro-* | p6-* | 6x86-* | athlon-* | athlon_*-*) + pentiumpro-* | p6-* | 6x86-* | athlon-* | athalon_*-*) cpu=i686 ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) @@ -1127,7 +1121,7 @@ case $cpu-$vendor in xscale-* | xscalee[bl]-*) cpu=`echo "$cpu" | sed 's/^xscale/arm/'` ;; - arm64-* | aarch64le-*) + arm64-*) cpu=aarch64 ;; @@ -1181,7 +1175,7 @@ case $cpu-$vendor in case $cpu in 1750a | 580 \ | a29k \ - | aarch64 | aarch64_be | aarch64c | arm64ec \ + | aarch64 | aarch64_be \ | abacus \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \ @@ -1200,29 +1194,50 @@ case $cpu-$vendor in | d10v | d30v | dlx | dsp16xx \ | e2k | elxsi | epiphany \ | f30[01] | f700 | fido | fr30 | frv | ft32 | fx80 \ - | javascript \ | h8300 | h8500 \ | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i*86 | i860 | i960 | ia16 | ia64 \ | ip2k | iq2000 \ | k1om \ - | kvx \ | le32 | le64 \ | lm32 \ - | loongarch32 | loongarch64 \ + | loongarch32 | loongarch64 | loongarchx32 \ | m32c | m32r | m32rle \ | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \ | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \ | m88110 | m88k | maxq | mb | mcore | mep | metag \ | microblaze | microblazeel \ - | mips* \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64eb | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r3 | mipsisa32r3el \ + | mipsisa32r5 | mipsisa32r5el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r3 | mipsisa64r3el \ + | mipsisa64r5 | mipsisa64r5el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ | mmix \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ - | nanomips* \ | nds32 | nds32le | nds32be \ | nfp \ | nios | nios2 | nios2eb | nios2el \ @@ -1254,7 +1269,6 @@ case $cpu-$vendor in | ubicom32 \ | v70 | v850 | v850e | v850e1 | v850es | v850e2 | v850e2v3 \ | vax \ - | vc4 \ | visium \ | w65 \ | wasm32 | wasm64 \ @@ -1266,7 +1280,7 @@ case $cpu-$vendor in ;; *) - echo "Invalid configuration '$1': machine '$cpu-$vendor' not recognized" 1>&2 + echo Invalid configuration \`"$1"\': machine \`"$cpu-$vendor"\' not recognized 1>&2 exit 1 ;; esac @@ -1287,12 +1301,11 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if test x"$basic_os" != x +if test x$basic_os != x then -# First recognize some ad-hoc cases, or perhaps split kernel-os, or else just +# First recognize some ad-hoc caes, or perhaps split kernel-os, or else just # set os. 
-obj= case $basic_os in gnu/linux*) kernel=linux @@ -1323,10 +1336,6 @@ EOF kernel=linux os=`echo "$basic_os" | sed -e 's|linux|gnu|'` ;; - managarm*) - kernel=managarm - os=`echo "$basic_os" | sed -e 's|managarm|mlibc|'` - ;; *) kernel= os=$basic_os @@ -1492,16 +1501,10 @@ case $os in os=eabi ;; *) - os= - obj=elf + os=elf ;; esac ;; - aout* | coff* | elf* | pe*) - # These are machine code file formats, not OSes - obj=$os - os= - ;; *) # No normalization, but not necessarily accepted, that comes below. ;; @@ -1520,15 +1523,12 @@ else # system, and we'll never get to this point. kernel= -obj= case $cpu-$vendor in score-*) - os= - obj=elf + os=elf ;; spu-*) - os= - obj=elf + os=elf ;; *-acorn) os=riscix1.2 @@ -1538,35 +1538,28 @@ case $cpu-$vendor in os=gnu ;; arm*-semi) - os= - obj=aout + os=aout ;; c4x-* | tic4x-*) - os= - obj=coff + os=coff ;; c8051-*) - os= - obj=elf + os=elf ;; clipper-intergraph) os=clix ;; hexagon-*) - os= - obj=elf + os=elf ;; tic54x-*) - os= - obj=coff + os=coff ;; tic55x-*) - os= - obj=coff + os=coff ;; tic6x-*) - os= - obj=coff + os=coff ;; # This must come before the *-dec entry. pdp10-*) @@ -1588,24 +1581,19 @@ case $cpu-$vendor in os=sunos3 ;; m68*-cisco) - os= - obj=aout + os=aout ;; mep-*) - os= - obj=elf + os=elf ;; mips*-cisco) - os= - obj=elf + os=elf ;; - mips*-*|nanomips*-*) - os= - obj=elf + mips*-*) + os=elf ;; or32-*) - os= - obj=coff + os=coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=sysv3 @@ -1614,8 +1602,7 @@ case $cpu-$vendor in os=sunos4.1.1 ;; pru-*) - os= - obj=elf + os=elf ;; *-be) os=beos @@ -1696,12 +1683,10 @@ case $cpu-$vendor in os=uxpv ;; *-rom68k) - os= - obj=coff + os=coff ;; *-*bug) - os= - obj=coff + os=coff ;; *-apple) os=macos @@ -1719,11 +1704,10 @@ esac fi -# Now, validate our (potentially fixed-up) individual pieces (OS, OBJ). - +# Now, validate our (potentially fixed-up) OS. case $os in # Sometimes we do "kernel-libc", so those need to count as OSes. - llvm* | musl* | newlib* | relibc* | uclibc*) + musl* | newlib* | relibc* | uclibc*) ;; # Likewise for "kernel-abi" eabi* | gnueabi*) @@ -1731,9 +1715,6 @@ case $os in # VxWorks passes extra cpu info in the 4th filed. simlinux | simwindows | spe) ;; - # See `case $cpu-$os` validation below - ghcjs) - ;; # Now accept the basic system types. # The portable systems comes first. # Each alternative MUST end in a * to match a version number. 
@@ -1742,7 +1723,7 @@ case $os in | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \ | sym* | plan9* | psp* | sim* | xray* | os68k* | v88r* \ | hiux* | abug | nacl* | netware* | windows* \ - | os9* | macos* | osx* | ios* | tvos* | watchos* \ + | os9* | macos* | osx* | ios* \ | mpw* | magic* | mmixware* | mon960* | lnews* \ | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \ | aos* | aros* | cloudabi* | sortix* | twizzler* \ @@ -1751,11 +1732,11 @@ case $os in | mirbsd* | netbsd* | dicos* | openedition* | ose* \ | bitrig* | openbsd* | secbsd* | solidbsd* | libertybsd* | os108* \ | ekkobsd* | freebsd* | riscix* | lynxos* | os400* \ - | bosx* | nextstep* | cxux* | oabi* \ - | ptx* | ecoff* | winnt* | domain* | vsta* \ + | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \ + | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \ | udi* | lites* | ieee* | go32* | aux* | hcos* \ | chorusrdb* | cegcc* | glidix* | serenity* \ - | cygwin* | msys* | moss* | proelf* | rtems* \ + | cygwin* | msys* | pe* | moss* | proelf* | rtems* \ | midipix* | mingw32* | mingw64* | mint* \ | uxpv* | beos* | mpeix* | udk* | moxiebox* \ | interix* | uwin* | mks* | rhapsody* | darwin* \ @@ -1767,117 +1748,49 @@ case $os in | skyos* | haiku* | rdos* | toppers* | drops* | es* \ | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \ | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \ - | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx* | zephyr* \ - | fiwix* | mlibc* | cos* | mbr* | ironclad* ) + | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx* | zephyr*) ;; # This one is extra strict with allowed versions sco3.2v2 | sco3.2v[4-9]* | sco5v6*) # Don't forget version if it is 3.2v4 or newer. ;; - # This refers to builds using the UEFI calling convention - # (which depends on the architecture) and PE file format. - # Note that this is both a different calling convention and - # different file format than that of GNU-EFI - # (x86_64-w64-mingw32). - uefi) - ;; none) ;; - kernel* | msvc* ) - # Restricted further below - ;; - '') - if test x"$obj" = x - then - echo "Invalid configuration '$1': Blank OS only allowed with explicit machine code file format" 1>&2 - fi - ;; *) - echo "Invalid configuration '$1': OS '$os' not recognized" 1>&2 - exit 1 - ;; -esac - -case $obj in - aout* | coff* | elf* | pe*) - ;; - '') - # empty is fine - ;; - *) - echo "Invalid configuration '$1': Machine code format '$obj' not recognized" 1>&2 - exit 1 - ;; -esac - -# Here we handle the constraint that a (synthetic) cpu and os are -# valid only in combination with each other and nowhere else. -case $cpu-$os in - # The "javascript-unknown-ghcjs" triple is used by GHC; we - # accept it here in order to tolerate that, but reject any - # variations. - javascript-ghcjs) - ;; - javascript-* | *-ghcjs) - echo "Invalid configuration '$1': cpu '$cpu' is not valid with os '$os$obj'" 1>&2 + echo Invalid configuration \`"$1"\': OS \`"$os"\' not recognized 1>&2 exit 1 ;; esac # As a final step for OS-related things, validate the OS-kernel combination # (given a valid OS), if there is a kernel. 
-case $kernel-$os-$obj in - linux-gnu*- | linux-android*- | linux-dietlibc*- | linux-llvm*- \ - | linux-mlibc*- | linux-musl*- | linux-newlib*- \ - | linux-relibc*- | linux-uclibc*- ) - ;; - uclinux-uclibc*- ) - ;; - managarm-mlibc*- | managarm-kernel*- ) +case $kernel-$os in + linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* \ + | linux-musl* | linux-relibc* | linux-uclibc* ) ;; - windows*-msvc*-) + uclinux-uclibc* ) ;; - -dietlibc*- | -llvm*- | -mlibc*- | -musl*- | -newlib*- | -relibc*- \ - | -uclibc*- ) + -dietlibc* | -newlib* | -musl* | -relibc* | -uclibc* ) # These are just libc implementations, not actual OSes, and thus # require a kernel. - echo "Invalid configuration '$1': libc '$os' needs explicit kernel." 1>&2 - exit 1 - ;; - -kernel*- ) - echo "Invalid configuration '$1': '$os' needs explicit kernel." 1>&2 - exit 1 - ;; - *-kernel*- ) - echo "Invalid configuration '$1': '$kernel' does not support '$os'." 1>&2 + echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 exit 1 ;; - *-msvc*- ) - echo "Invalid configuration '$1': '$os' needs 'windows'." 1>&2 - exit 1 - ;; - kfreebsd*-gnu*- | kopensolaris*-gnu*-) + kfreebsd*-gnu* | kopensolaris*-gnu*) ;; - vxworks-simlinux- | vxworks-simwindows- | vxworks-spe-) + vxworks-simlinux | vxworks-simwindows | vxworks-spe) ;; - nto-qnx*-) - ;; - os2-emx-) + nto-qnx*) ;; - *-eabi*- | *-gnueabi*-) + os2-emx) ;; - none--*) - # None (no kernel, i.e. freestanding / bare metal), - # can be paired with an machine code file format + *-eabi* | *-gnueabi*) ;; - -*-) + -*) # Blank kernel with real OS is always fine. ;; - --*) - # Blank kernel and OS with real machine code file format is always fine. - ;; - *-*-*) - echo "Invalid configuration '$1': Kernel '$kernel' not known to work with OS '$os'." 1>&2 + *-*) + echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 exit 1 ;; esac @@ -1960,7 +1873,7 @@ case $vendor in ;; esac -echo "$cpu-$vendor${kernel:+-$kernel}${os:+-$os}${obj:+-$obj}" +echo "$cpu-$vendor-${kernel:+$kernel-}$os" exit # Local variables: diff --git a/configure b/configure index ba2d49df7c65fe..705a778cafced3 100755 --- a/configure +++ b/configure @@ -972,7 +972,7 @@ HAS_XCRUN EXPORT_MACOSX_DEPLOYMENT_TARGET CONFIGURE_MACOSX_DEPLOYMENT_TARGET _PYTHON_HOST_PLATFORM -INSTALLTARGETS +MACHDEP FRAMEWORKINSTALLAPPSPREFIX FRAMEWORKUNIXTOOLSPREFIX FRAMEWORKPYTHONW @@ -980,8 +980,6 @@ FRAMEWORKALTINSTALLLAST FRAMEWORKALTINSTALLFIRST FRAMEWORKINSTALLLAST FRAMEWORKINSTALLFIRST -RESSRCDIR -PYTHONFRAMEWORKINSTALLNAMEPREFIX PYTHONFRAMEWORKINSTALLDIR PYTHONFRAMEWORKPREFIX PYTHONFRAMEWORKDIR @@ -991,7 +989,6 @@ LIPO_INTEL64_FLAGS LIPO_32BIT_FLAGS ARCH_RUN_32BIT UNIVERSALSDK -MACHDEP PKG_CONFIG_LIBDIR PKG_CONFIG_PATH PKG_CONFIG @@ -4007,77 +4004,6 @@ if test "$with_pkg_config" = yes -a -z "$PKG_CONFIG"; then as_fn_error $? "pkg-config is required" "$LINENO" 5] fi -# Set name for machine-dependent library files - -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking MACHDEP" >&5 -printf %s "checking MACHDEP... " >&6; } -if test -z "$MACHDEP" -then - # avoid using uname for cross builds - if test "$cross_compiling" = yes; then - # ac_sys_system and ac_sys_release are used for setting - # a lot of different things including 'define_xopen_source' - # in the case statement below. 
- case "$host" in - *-*-linux-android*) - ac_sys_system=Linux-android - ;; - *-*-linux*) - ac_sys_system=Linux - ;; - *-*-cygwin*) - ac_sys_system=Cygwin - ;; - *-*-vxworks*) - ac_sys_system=VxWorks - ;; - *-*-emscripten) - ac_sys_system=Emscripten - ;; - *-*-wasi) - ac_sys_system=WASI - ;; - *) - # for now, limit cross builds to known configurations - MACHDEP="unknown" - as_fn_error $? "cross build not supported for $host" "$LINENO" 5 - esac - ac_sys_release= - else - ac_sys_system=`uname -s` - if test "$ac_sys_system" = "AIX" \ - -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then - ac_sys_release=`uname -v` - else - ac_sys_release=`uname -r` - fi - fi - ac_md_system=`echo $ac_sys_system | - tr -d '/ ' | tr '[A-Z]' '[a-z]'` - ac_md_release=`echo $ac_sys_release | - tr -d '/ ' | sed 's/^[A-Z]\.//' | sed 's/\..*//'` - MACHDEP="$ac_md_system$ac_md_release" - - case $MACHDEP in - aix*) MACHDEP="aix";; - linux*) MACHDEP="linux";; - cygwin*) MACHDEP="cygwin";; - darwin*) MACHDEP="darwin";; - '') MACHDEP="unknown";; - esac - - if test "$ac_sys_system" = "SunOS"; then - # For Solaris, there isn't an OS version specific macro defined - # in most compilers, so we define one here. - SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\(0-9\)$!.0\1!g' | tr -d '.'` - -printf "%s\n" "#define Py_SUNOS_VERSION $SUNOS_VERSION" >>confdefs.h - - fi -fi -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: \"$MACHDEP\"" >&5 -printf "%s\n" "\"$MACHDEP\"" >&6; } - { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for --enable-universalsdk" >&5 printf %s "checking for --enable-universalsdk... " >&6; } # Check whether --enable-universalsdk was given. @@ -4201,15 +4127,11 @@ then : PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= - PYTHONFRAMEWORKINSTALLNAMEPREFIX= - RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= - INSTALLTARGETS="commoninstall bininstall maninstall" - if test "x${prefix}" = "xNONE"; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -4222,76 +4144,65 @@ then : PYTHONFRAMEWORKINSTALLDIR=$PYTHONFRAMEWORKPREFIX/$PYTHONFRAMEWORKDIR FRAMEWORKINSTALLFIRST="frameworkinstallstructure" FRAMEWORKALTINSTALLFIRST="frameworkinstallstructure " + FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" + FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" + FRAMEWORKPYTHONW="frameworkpythonw" + FRAMEWORKINSTALLAPPSPREFIX="/Applications" - case $ac_sys_system in #( - Darwin) : - FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" - FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" - FRAMEWORKPYTHONW="frameworkpythonw" - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - INSTALLTARGETS="commoninstall bininstall maninstall" + if test "x${prefix}" = "xNONE" ; then + FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" - if test "x${prefix}" = "xNONE" ; then - FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" + else + FRAMEWORKUNIXTOOLSPREFIX="${prefix}" + fi - else - FRAMEWORKUNIXTOOLSPREFIX="${prefix}" - fi + case "${enableval}" in + /System*) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + if test "${prefix}" = "NONE" ; then + # See below + FRAMEWORKUNIXTOOLSPREFIX="/usr" + fi + ;; - case "${enableval}" in - /System*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - if test "${prefix}" = "NONE" ; then - # See below 
- FRAMEWORKUNIXTOOLSPREFIX="/usr" - fi - ;; - - /Library*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; - - */Library/Frameworks) - MDIR="`dirname "${enableval}"`" - MDIR="`dirname "${MDIR}"`" - FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" - - if test "${prefix}" = "NONE"; then - # User hasn't specified the - # --prefix option, but wants to install - # the framework in a non-default location, - # ensure that the compatibility links get - # installed relative to that prefix as well - # instead of in /usr/local. - FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" - fi - ;; + /Library*) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; - *) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; - esac + */Library/Frameworks) + MDIR="`dirname "${enableval}"`" + MDIR="`dirname "${MDIR}"`" + FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" + + if test "${prefix}" = "NONE"; then + # User hasn't specified the + # --prefix option, but wants to install + # the framework in a non-default location, + # ensure that the compatibility links get + # installed relative to that prefix as well + # instead of in /usr/local. + FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" + fi + ;; - prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION - PYTHONFRAMEWORKINSTALLNAMEPREFIX=${prefix} - RESSRCDIR=Mac/Resources/framework + *) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; + esac - # Add files for Mac specific code to the list of output - # files: - ac_config_files="$ac_config_files Mac/Makefile" + prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION - ac_config_files="$ac_config_files Mac/PythonLauncher/Makefile" + # Add files for Mac specific code to the list of output + # files: + ac_config_files="$ac_config_files Mac/Makefile" - ac_config_files="$ac_config_files Mac/Resources/framework/Info.plist" + ac_config_files="$ac_config_files Mac/PythonLauncher/Makefile" - ac_config_files="$ac_config_files Mac/Resources/app/Info.plist" + ac_config_files="$ac_config_files Mac/Resources/framework/Info.plist" - ;; - *) - as_fn_error $? "Unknown platform for framework build" "$LINENO" 5 - ;; - esac - esac + ac_config_files="$ac_config_files Mac/Resources/app/Info.plist" + + esac else $as_nop @@ -4299,14 +4210,11 @@ else $as_nop PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= - PYTHONFRAMEWORKINSTALLNAMEPREFIX= - RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= - INSTALLTARGETS="commoninstall bininstall maninstall" if test "x${prefix}" = "xNONE" ; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -4331,11 +4239,79 @@ fi +printf "%s\n" "#define _PYTHONFRAMEWORK \"${PYTHONFRAMEWORK}\"" >>confdefs.h +# Set name for machine-dependent library files -printf "%s\n" "#define _PYTHONFRAMEWORK \"${PYTHONFRAMEWORK}\"" >>confdefs.h +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking MACHDEP" >&5 +printf %s "checking MACHDEP... " >&6; } +if test -z "$MACHDEP" +then + # avoid using uname for cross builds + if test "$cross_compiling" = yes; then + # ac_sys_system and ac_sys_release are used for setting + # a lot of different things including 'define_xopen_source' + # in the case statement below. 
+ case "$host" in + *-*-linux-android*) + ac_sys_system=Linux-android + ;; + *-*-linux*) + ac_sys_system=Linux + ;; + *-*-cygwin*) + ac_sys_system=Cygwin + ;; + *-*-vxworks*) + ac_sys_system=VxWorks + ;; + *-*-emscripten) + ac_sys_system=Emscripten + ;; + *-*-wasi) + ac_sys_system=WASI + ;; + *) + # for now, limit cross builds to known configurations + MACHDEP="unknown" + as_fn_error $? "cross build not supported for $host" "$LINENO" 5 + esac + ac_sys_release= + else + ac_sys_system=`uname -s` + if test "$ac_sys_system" = "AIX" \ + -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then + ac_sys_release=`uname -v` + else + ac_sys_release=`uname -r` + fi + fi + ac_md_system=`echo $ac_sys_system | + tr -d '/ ' | tr '[A-Z]' '[a-z]'` + ac_md_release=`echo $ac_sys_release | + tr -d '/ ' | sed 's/^[A-Z]\.//' | sed 's/\..*//'` + MACHDEP="$ac_md_system$ac_md_release" + case $MACHDEP in + aix*) MACHDEP="aix";; + linux*) MACHDEP="linux";; + cygwin*) MACHDEP="cygwin";; + darwin*) MACHDEP="darwin";; + '') MACHDEP="unknown";; + esac + + if test "$ac_sys_system" = "SunOS"; then + # For Solaris, there isn't an OS version specific macro defined + # in most compilers, so we define one here. + SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\(0-9\)$!.0\1!g' | tr -d '.'` + +printf "%s\n" "#define Py_SUNOS_VERSION $SUNOS_VERSION" >>confdefs.h + + fi +fi +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: \"$MACHDEP\"" >&5 +printf "%s\n" "\"$MACHDEP\"" >&6; } if test "$cross_compiling" = yes; then @@ -4343,27 +4319,27 @@ if test "$cross_compiling" = yes; then *-*-linux*) case "$host_cpu" in arm*) - _host_ident=arm + _host_cpu=arm ;; *) - _host_ident=$host_cpu + _host_cpu=$host_cpu esac ;; *-*-cygwin*) - _host_ident= + _host_cpu= ;; *-*-vxworks*) - _host_ident=$host_cpu + _host_cpu=$host_cpu ;; wasm32-*-* | wasm64-*-*) - _host_ident=$host_cpu + _host_cpu=$host_cpu ;; *) # for now, limit cross builds to known configurations MACHDEP="unknown" as_fn_error $? "cross build not supported for $host" "$LINENO" 5 esac - _PYTHON_HOST_PLATFORM="$MACHDEP${_host_ident:+-$_host_ident}" + _PYTHON_HOST_PLATFORM="$MACHDEP${_host_cpu:+-$_host_cpu}" fi # Some systems cannot stand _XOPEN_SOURCE being defined at all; they @@ -6793,6 +6769,8 @@ case $ac_sys_system in #( ;; esac +{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MULTIARCH" >&5 +printf "%s\n" "$MULTIARCH" >&6; } if test x$PLATFORM_TRIPLET != x && test x$MULTIARCH != x; then if test x$PLATFORM_TRIPLET != x$MULTIARCH; then @@ -6802,14 +6780,6 @@ elif test x$PLATFORM_TRIPLET != x && test x$MULTIARCH = x; then MULTIARCH=$PLATFORM_TRIPLET fi -{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MULTIARCH" >&5 -printf "%s\n" "$MULTIARCH" >&6; } - -case $ac_sys_system in #( - *) : - SOABI_PLATFORM=$PLATFORM_TRIPLET - ;; -esac if test x$MULTIARCH != x; then MULTIARCH_CPPFLAGS="-DMULTIARCH=\\\"$MULTIARCH\\\"" @@ -7301,7 +7271,7 @@ fi { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking LDLIBRARY" >&5 printf %s "checking LDLIBRARY... " >&6; } -# Apple framework builds need more magic. LDLIBRARY is the dynamic +# MacOSX framework builds need more magic. LDLIBRARY is the dynamic # library that we build, but we do not want to link against it (we # will find it with a -framework option). For this reason there is an # extra variable BLDLIBRARY against which Python and the extension @@ -7309,14 +7279,9 @@ printf %s "checking LDLIBRARY... " >&6; } # LDLIBRARY, but empty for MacOSX framework builds. 
if test "$enable_framework" then - case $ac_sys_system in - Darwin) - LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)';; - *) - as_fn_error $? "Unknown platform for framework build" "$LINENO" 5;; - esac - BLDLIBRARY='' + LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)' RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}} + BLDLIBRARY='' else BLDLIBRARY='$(LDLIBRARY)' fi @@ -7329,64 +7294,64 @@ printf "%s\n" "#define Py_ENABLE_SHARED 1" >>confdefs.h case $ac_sys_system in CYGWIN*) - LDLIBRARY='libpython$(LDVERSION).dll.a' - DLLLIBRARY='libpython$(LDVERSION).dll' - ;; + LDLIBRARY='libpython$(LDVERSION).dll.a' + DLLLIBRARY='libpython$(LDVERSION).dll' + ;; SunOS*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*|VxWorks*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; hp*|HP*) - case `uname -m` in - ia64) - LDLIBRARY='libpython$(LDVERSION).so' - ;; - *) - LDLIBRARY='libpython$(LDVERSION).sl' - ;; - esac - BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} - ;; + case `uname -m` in + ia64) + LDLIBRARY='libpython$(LDVERSION).so' + ;; + *) + LDLIBRARY='libpython$(LDVERSION).sl' + ;; + esac + BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} + ;; Darwin*) - LDLIBRARY='libpython$(LDVERSION).dylib' - BLDLIBRARY='-L. -lpython$(LDVERSION)' - RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).dylib' + BLDLIBRARY='-L. -lpython$(LDVERSION)' + RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} + ;; AIX*) - LDLIBRARY='libpython$(LDVERSION).so' - RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).so' + RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} + ;; esac else # shared is disabled PY_ENABLE_SHARED=0 case $ac_sys_system in CYGWIN*) - BLDLIBRARY='$(LIBRARY)' - LDLIBRARY='libpython$(LDVERSION).dll.a' - ;; + BLDLIBRARY='$(LIBRARY)' + LDLIBRARY='libpython$(LDVERSION).dll.a' + ;; esac fi if test "$cross_compiling" = yes; then - RUNSHARED= + RUNSHARED= fi @@ -23933,7 +23898,7 @@ printf %s "checking ABIFLAGS... " >&6; } printf "%s\n" "$ABIFLAGS" >&6; } { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking SOABI" >&5 printf %s "checking SOABI... 
" >&6; } -SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${SOABI_PLATFORM:+-$SOABI_PLATFORM} +SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $SOABI" >&5 printf "%s\n" "$SOABI" >&6; } @@ -23942,7 +23907,7 @@ printf "%s\n" "$SOABI" >&6; } if test "$Py_DEBUG" = 'true'; then # Similar to SOABI but remove "d" flag from ABIFLAGS - ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${SOABI_PLATFORM:+-$SOABI_PLATFORM} + ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} printf "%s\n" "#define ALT_SOABI \"${ALT_SOABI}\"" >>confdefs.h diff --git a/configure.ac b/configure.ac index b39af7422c4c7c..dee7ed552b370f 100644 --- a/configure.ac +++ b/configure.ac @@ -307,74 +307,6 @@ if test "$with_pkg_config" = yes -a -z "$PKG_CONFIG"; then AC_MSG_ERROR([pkg-config is required])] fi -# Set name for machine-dependent library files -AC_ARG_VAR([MACHDEP], [name for machine-dependent library files]) -AC_MSG_CHECKING([MACHDEP]) -if test -z "$MACHDEP" -then - # avoid using uname for cross builds - if test "$cross_compiling" = yes; then - # ac_sys_system and ac_sys_release are used for setting - # a lot of different things including 'define_xopen_source' - # in the case statement below. - case "$host" in - *-*-linux-android*) - ac_sys_system=Linux-android - ;; - *-*-linux*) - ac_sys_system=Linux - ;; - *-*-cygwin*) - ac_sys_system=Cygwin - ;; - *-*-vxworks*) - ac_sys_system=VxWorks - ;; - *-*-emscripten) - ac_sys_system=Emscripten - ;; - *-*-wasi) - ac_sys_system=WASI - ;; - *) - # for now, limit cross builds to known configurations - MACHDEP="unknown" - AC_MSG_ERROR([cross build not supported for $host]) - esac - ac_sys_release= - else - ac_sys_system=`uname -s` - if test "$ac_sys_system" = "AIX" \ - -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then - ac_sys_release=`uname -v` - else - ac_sys_release=`uname -r` - fi - fi - ac_md_system=`echo $ac_sys_system | - tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'` - ac_md_release=`echo $ac_sys_release | - tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'` - MACHDEP="$ac_md_system$ac_md_release" - - case $MACHDEP in - aix*) MACHDEP="aix";; - linux*) MACHDEP="linux";; - cygwin*) MACHDEP="cygwin";; - darwin*) MACHDEP="darwin";; - '') MACHDEP="unknown";; - esac - - if test "$ac_sys_system" = "SunOS"; then - # For Solaris, there isn't an OS version specific macro defined - # in most compilers, so we define one here. 
- SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\([0-9]\)$!.0\1!g' | tr -d '.'` - AC_DEFINE_UNQUOTED([Py_SUNOS_VERSION], [$SUNOS_VERSION], - [The version of SunOS/Solaris as reported by `uname -r' without the dot.]) - fi -fi -AC_MSG_RESULT(["$MACHDEP"]) - AC_MSG_CHECKING([for --enable-universalsdk]) AC_ARG_ENABLE([universalsdk], AS_HELP_STRING([--enable-universalsdk@<:@=SDKDIR@:>@], @@ -492,15 +424,11 @@ AC_ARG_ENABLE([framework], PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= - PYTHONFRAMEWORKINSTALLNAMEPREFIX= - RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= - INSTALLTARGETS="commoninstall bininstall maninstall" - if test "x${prefix}" = "xNONE"; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -513,85 +441,71 @@ AC_ARG_ENABLE([framework], PYTHONFRAMEWORKINSTALLDIR=$PYTHONFRAMEWORKPREFIX/$PYTHONFRAMEWORKDIR FRAMEWORKINSTALLFIRST="frameworkinstallstructure" FRAMEWORKALTINSTALLFIRST="frameworkinstallstructure " + FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" + FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" + FRAMEWORKPYTHONW="frameworkpythonw" + FRAMEWORKINSTALLAPPSPREFIX="/Applications" - case $ac_sys_system in #( - Darwin) : - FRAMEWORKINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools" - FRAMEWORKALTINSTALLLAST="frameworkinstallmaclib frameworkinstallapps frameworkaltinstallunixtools" - FRAMEWORKPYTHONW="frameworkpythonw" - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - INSTALLTARGETS="commoninstall bininstall maninstall" + if test "x${prefix}" = "xNONE" ; then + FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" - if test "x${prefix}" = "xNONE" ; then - FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" + else + FRAMEWORKUNIXTOOLSPREFIX="${prefix}" + fi - else - FRAMEWORKUNIXTOOLSPREFIX="${prefix}" - fi + case "${enableval}" in + /System*) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + if test "${prefix}" = "NONE" ; then + # See below + FRAMEWORKUNIXTOOLSPREFIX="/usr" + fi + ;; - case "${enableval}" in - /System*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - if test "${prefix}" = "NONE" ; then - # See below - FRAMEWORKUNIXTOOLSPREFIX="/usr" - fi - ;; - - /Library*) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; - - */Library/Frameworks) - MDIR="`dirname "${enableval}"`" - MDIR="`dirname "${MDIR}"`" - FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" - - if test "${prefix}" = "NONE"; then - # User hasn't specified the - # --prefix option, but wants to install - # the framework in a non-default location, - # ensure that the compatibility links get - # installed relative to that prefix as well - # instead of in /usr/local. - FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" - fi - ;; + /Library*) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; - *) - FRAMEWORKINSTALLAPPSPREFIX="/Applications" - ;; - esac + */Library/Frameworks) + MDIR="`dirname "${enableval}"`" + MDIR="`dirname "${MDIR}"`" + FRAMEWORKINSTALLAPPSPREFIX="${MDIR}/Applications" + + if test "${prefix}" = "NONE"; then + # User hasn't specified the + # --prefix option, but wants to install + # the framework in a non-default location, + # ensure that the compatibility links get + # installed relative to that prefix as well + # instead of in /usr/local. 
+ FRAMEWORKUNIXTOOLSPREFIX="${MDIR}" + fi + ;; - prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION - PYTHONFRAMEWORKINSTALLNAMEPREFIX=${prefix} - RESSRCDIR=Mac/Resources/framework - - # Add files for Mac specific code to the list of output - # files: - AC_CONFIG_FILES([Mac/Makefile]) - AC_CONFIG_FILES([Mac/PythonLauncher/Makefile]) - AC_CONFIG_FILES([Mac/Resources/framework/Info.plist]) - AC_CONFIG_FILES([Mac/Resources/app/Info.plist]) - ;; - *) - AC_MSG_ERROR([Unknown platform for framework build]) - ;; - esac + *) + FRAMEWORKINSTALLAPPSPREFIX="/Applications" + ;; esac + + prefix=$PYTHONFRAMEWORKINSTALLDIR/Versions/$VERSION + + # Add files for Mac specific code to the list of output + # files: + AC_CONFIG_FILES([Mac/Makefile]) + AC_CONFIG_FILES([Mac/PythonLauncher/Makefile]) + AC_CONFIG_FILES([Mac/Resources/framework/Info.plist]) + AC_CONFIG_FILES([Mac/Resources/app/Info.plist]) + esac ],[ PYTHONFRAMEWORK= PYTHONFRAMEWORKDIR=no-framework PYTHONFRAMEWORKPREFIX= PYTHONFRAMEWORKINSTALLDIR= - PYTHONFRAMEWORKINSTALLNAMEPREFIX= - RESSRCDIR= FRAMEWORKINSTALLFIRST= FRAMEWORKINSTALLLAST= FRAMEWORKALTINSTALLFIRST= FRAMEWORKALTINSTALLLAST= FRAMEWORKPYTHONW= - INSTALLTARGETS="commoninstall bininstall maninstall" if test "x${prefix}" = "xNONE" ; then FRAMEWORKUNIXTOOLSPREFIX="${ac_default_prefix}" else @@ -605,8 +519,6 @@ AC_SUBST([PYTHONFRAMEWORKIDENTIFIER]) AC_SUBST([PYTHONFRAMEWORKDIR]) AC_SUBST([PYTHONFRAMEWORKPREFIX]) AC_SUBST([PYTHONFRAMEWORKINSTALLDIR]) -AC_SUBST([PYTHONFRAMEWORKINSTALLNAMEPREFIX]) -AC_SUBST([RESSRCDIR]) AC_SUBST([FRAMEWORKINSTALLFIRST]) AC_SUBST([FRAMEWORKINSTALLLAST]) AC_SUBST([FRAMEWORKALTINSTALLFIRST]) @@ -614,38 +526,105 @@ AC_SUBST([FRAMEWORKALTINSTALLLAST]) AC_SUBST([FRAMEWORKPYTHONW]) AC_SUBST([FRAMEWORKUNIXTOOLSPREFIX]) AC_SUBST([FRAMEWORKINSTALLAPPSPREFIX]) -AC_SUBST([INSTALLTARGETS]) AC_DEFINE_UNQUOTED([_PYTHONFRAMEWORK], ["${PYTHONFRAMEWORK}"], [framework name]) +# Set name for machine-dependent library files +AC_ARG_VAR([MACHDEP], [name for machine-dependent library files]) +AC_MSG_CHECKING([MACHDEP]) +if test -z "$MACHDEP" +then + # avoid using uname for cross builds + if test "$cross_compiling" = yes; then + # ac_sys_system and ac_sys_release are used for setting + # a lot of different things including 'define_xopen_source' + # in the case statement below. + case "$host" in + *-*-linux-android*) + ac_sys_system=Linux-android + ;; + *-*-linux*) + ac_sys_system=Linux + ;; + *-*-cygwin*) + ac_sys_system=Cygwin + ;; + *-*-vxworks*) + ac_sys_system=VxWorks + ;; + *-*-emscripten) + ac_sys_system=Emscripten + ;; + *-*-wasi) + ac_sys_system=WASI + ;; + *) + # for now, limit cross builds to known configurations + MACHDEP="unknown" + AC_MSG_ERROR([cross build not supported for $host]) + esac + ac_sys_release= + else + ac_sys_system=`uname -s` + if test "$ac_sys_system" = "AIX" \ + -o "$ac_sys_system" = "UnixWare" -o "$ac_sys_system" = "OpenUNIX"; then + ac_sys_release=`uname -v` + else + ac_sys_release=`uname -r` + fi + fi + ac_md_system=`echo $ac_sys_system | + tr -d '[/ ]' | tr '[[A-Z]]' '[[a-z]]'` + ac_md_release=`echo $ac_sys_release | + tr -d '[/ ]' | sed 's/^[[A-Z]]\.//' | sed 's/\..*//'` + MACHDEP="$ac_md_system$ac_md_release" + + case $MACHDEP in + aix*) MACHDEP="aix";; + linux*) MACHDEP="linux";; + cygwin*) MACHDEP="cygwin";; + darwin*) MACHDEP="darwin";; + '') MACHDEP="unknown";; + esac + + if test "$ac_sys_system" = "SunOS"; then + # For Solaris, there isn't an OS version specific macro defined + # in most compilers, so we define one here. 
+ SUNOS_VERSION=`echo $ac_sys_release | sed -e 's!\.\([0-9]\)$!.0\1!g' | tr -d '.'` + AC_DEFINE_UNQUOTED([Py_SUNOS_VERSION], [$SUNOS_VERSION], + [The version of SunOS/Solaris as reported by `uname -r' without the dot.]) + fi +fi +AC_MSG_RESULT(["$MACHDEP"]) + AC_SUBST([_PYTHON_HOST_PLATFORM]) if test "$cross_compiling" = yes; then case "$host" in *-*-linux*) case "$host_cpu" in arm*) - _host_ident=arm + _host_cpu=arm ;; *) - _host_ident=$host_cpu + _host_cpu=$host_cpu esac ;; *-*-cygwin*) - _host_ident= + _host_cpu= ;; *-*-vxworks*) - _host_ident=$host_cpu + _host_cpu=$host_cpu ;; wasm32-*-* | wasm64-*-*) - _host_ident=$host_cpu + _host_cpu=$host_cpu ;; *) # for now, limit cross builds to known configurations MACHDEP="unknown" AC_MSG_ERROR([cross build not supported for $host]) esac - _PYTHON_HOST_PLATFORM="$MACHDEP${_host_ident:+-$_host_ident}" + _PYTHON_HOST_PLATFORM="$MACHDEP${_host_cpu:+-$_host_cpu}" fi # Some systems cannot stand _XOPEN_SOURCE being defined at all; they @@ -956,14 +935,6 @@ else fi rm -f conftest.out -dnl On some platforms, using a true "triplet" for MULTIARCH would be redundant. -dnl For example, `arm64-apple-darwin` is redundant, because there isn't a -dnl non-Apple Darwin. Including the CPU architecture can also be potentially -dnl redundant - on macOS, for example, it's possible to do a single compile -dnl pass that includes multiple architectures, so it would be misleading for -dnl MULTIARCH (and thus the sysconfigdata module name) to include a single CPU -dnl architecture. PLATFORM_TRIPLET will be a pair or single value for these -dnl platforms. AC_MSG_CHECKING([for multiarch]) AS_CASE([$ac_sys_system], [Darwin*], [MULTIARCH=""], @@ -971,6 +942,7 @@ AS_CASE([$ac_sys_system], [MULTIARCH=$($CC --print-multiarch 2>/dev/null)] ) AC_SUBST([MULTIARCH]) +AC_MSG_RESULT([$MULTIARCH]) if test x$PLATFORM_TRIPLET != x && test x$MULTIARCH != x; then if test x$PLATFORM_TRIPLET != x$MULTIARCH; then @@ -980,16 +952,6 @@ elif test x$PLATFORM_TRIPLET != x && test x$MULTIARCH = x; then MULTIARCH=$PLATFORM_TRIPLET fi AC_SUBST([PLATFORM_TRIPLET]) -AC_MSG_RESULT([$MULTIARCH]) - -dnl Even if we *do* include the CPU architecture in the MULTIARCH value, some -dnl platforms don't need the CPU architecture in the SOABI tag. These platforms -dnl will have multiple sysconfig modules (one for each CPU architecture), but -dnl use a single "fat" binary at runtime. SOABI_PLATFORM is the component of -dnl the PLATFORM_TRIPLET that will be used in binary module extensions. -AS_CASE([$ac_sys_system], - [SOABI_PLATFORM=$PLATFORM_TRIPLET] -) if test x$MULTIARCH != x; then MULTIARCH_CPPFLAGS="-DMULTIARCH=\\\"$MULTIARCH\\\"" @@ -1332,7 +1294,7 @@ fi AC_MSG_CHECKING([LDLIBRARY]) -# Apple framework builds need more magic. LDLIBRARY is the dynamic +# MacOSX framework builds need more magic. LDLIBRARY is the dynamic # library that we build, but we do not want to link against it (we # will find it with a -framework option). For this reason there is an # extra variable BLDLIBRARY against which Python and the extension @@ -1340,14 +1302,9 @@ AC_MSG_CHECKING([LDLIBRARY]) # LDLIBRARY, but empty for MacOSX framework builds. 
if test "$enable_framework" then - case $ac_sys_system in - Darwin) - LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)';; - *) - AC_MSG_ERROR([Unknown platform for framework build]);; - esac - BLDLIBRARY='' + LDLIBRARY='$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK)' RUNSHARED=DYLD_FRAMEWORK_PATH=`pwd`${DYLD_FRAMEWORK_PATH:+:${DYLD_FRAMEWORK_PATH}} + BLDLIBRARY='' else BLDLIBRARY='$(LDLIBRARY)' fi @@ -1359,64 +1316,64 @@ if test $enable_shared = "yes"; then [Defined if Python is built as a shared library.]) case $ac_sys_system in CYGWIN*) - LDLIBRARY='libpython$(LDVERSION).dll.a' - DLLLIBRARY='libpython$(LDVERSION).dll' - ;; + LDLIBRARY='libpython$(LDVERSION).dll.a' + DLLLIBRARY='libpython$(LDVERSION).dll' + ;; SunOS*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-Wl,-R,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; Linux*|GNU*|NetBSD*|FreeBSD*|DragonFly*|OpenBSD*|VxWorks*) - LDLIBRARY='libpython$(LDVERSION).so' - BLDLIBRARY='-L. -lpython$(LDVERSION)' - RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} - INSTSONAME="$LDLIBRARY".$SOVERSION - if test "$with_pydebug" != yes - then - PY3LIBRARY=libpython3.so - fi - ;; + LDLIBRARY='libpython$(LDVERSION).so' + BLDLIBRARY='-L. -lpython$(LDVERSION)' + RUNSHARED=LD_LIBRARY_PATH=`pwd`${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} + INSTSONAME="$LDLIBRARY".$SOVERSION + if test "$with_pydebug" != yes + then + PY3LIBRARY=libpython3.so + fi + ;; hp*|HP*) - case `uname -m` in - ia64) - LDLIBRARY='libpython$(LDVERSION).so' - ;; - *) - LDLIBRARY='libpython$(LDVERSION).sl' - ;; - esac - BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' - RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} - ;; + case `uname -m` in + ia64) + LDLIBRARY='libpython$(LDVERSION).so' + ;; + *) + LDLIBRARY='libpython$(LDVERSION).sl' + ;; + esac + BLDLIBRARY='-Wl,+b,$(LIBDIR) -L. -lpython$(LDVERSION)' + RUNSHARED=SHLIB_PATH=`pwd`${SHLIB_PATH:+:${SHLIB_PATH}} + ;; Darwin*) - LDLIBRARY='libpython$(LDVERSION).dylib' - BLDLIBRARY='-L. -lpython$(LDVERSION)' - RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).dylib' + BLDLIBRARY='-L. 
-lpython$(LDVERSION)' + RUNSHARED=DYLD_LIBRARY_PATH=`pwd`${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}} + ;; AIX*) - LDLIBRARY='libpython$(LDVERSION).so' - RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} - ;; + LDLIBRARY='libpython$(LDVERSION).so' + RUNSHARED=LIBPATH=`pwd`${LIBPATH:+:${LIBPATH}} + ;; esac else # shared is disabled PY_ENABLE_SHARED=0 case $ac_sys_system in CYGWIN*) - BLDLIBRARY='$(LIBRARY)' - LDLIBRARY='libpython$(LDVERSION).dll.a' - ;; + BLDLIBRARY='$(LIBRARY)' + LDLIBRARY='libpython$(LDVERSION).dll.a' + ;; esac fi if test "$cross_compiling" = yes; then - RUNSHARED= + RUNSHARED= fi AC_ARG_VAR([HOSTRUNNER], [Program to run CPython for the host platform]) @@ -5867,7 +5824,7 @@ AC_SUBST([SOABI]) AC_MSG_CHECKING([ABIFLAGS]) AC_MSG_RESULT([$ABIFLAGS]) AC_MSG_CHECKING([SOABI]) -SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${SOABI_PLATFORM:+-$SOABI_PLATFORM} +SOABI='cpython-'`echo $VERSION | tr -d .`${ABIFLAGS}${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} AC_MSG_RESULT([$SOABI]) # Release build, debug build (Py_DEBUG), and trace refs build (Py_TRACE_REFS) @@ -5875,7 +5832,7 @@ AC_MSG_RESULT([$SOABI]) if test "$Py_DEBUG" = 'true'; then # Similar to SOABI but remove "d" flag from ABIFLAGS AC_SUBST([ALT_SOABI]) - ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${SOABI_PLATFORM:+-$SOABI_PLATFORM} + ALT_SOABI='cpython-'`echo $VERSION | tr -d .``echo $ABIFLAGS | tr -d d`${PLATFORM_TRIPLET:+-$PLATFORM_TRIPLET} AC_DEFINE_UNQUOTED([ALT_SOABI], ["${ALT_SOABI}"], [Alternative SOABI used in debug build to load C extensions built in release mode]) fi From 0a141730bf8d27bfac809414e1fc878a45e93b9d Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Thu, 15 Feb 2024 16:30:59 -0600 Subject: [PATCH 29/31] Address review comments --- Include/cpython/pystats.h | 4 +++- Python/specialize.c | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Include/cpython/pystats.h b/Include/cpython/pystats.h index de7853914acb98..d6f5452a2c70fb 100644 --- a/Include/cpython/pystats.h +++ b/Include/cpython/pystats.h @@ -19,6 +19,8 @@ // Define _PY_INTERPRETER macro to increment interpreter_increfs and // interpreter_decrefs. Otherwise, increment increfs and decrefs. 
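The hunks below size the per-uop `pair_count` table as `MAX_UOP_ID + 1` instead of a hard-coded 512, and the specialize.c reporting loops now run from 1 to `MAX_UOP_ID`. The recording side is not shown in this part of the series, but a minimal sketch of an update consistent with that layout is given below; the helper name, the bounds checks, and the call site are assumptions for illustration only.

    /* Sketch only: not taken from the patch.  Assumes pycore_code.h
       (for _Py_stats) and pycore_uop_ids.h (for MAX_UOP_ID) are included,
       and that the caller tracks the previously executed uop. */
    static inline void
    record_uop_pair(int prev_uop, int next_uop)
    {
        if (_Py_stats != NULL
            && 0 < prev_uop && prev_uop <= MAX_UOP_ID
            && 0 < next_uop && next_uop <= MAX_UOP_ID)
        {
            /* Count the consecutive pair (prev_uop, next_uop). */
            _Py_stats->optimization_stats.opcode[prev_uop].pair_count[next_uop]++;
        }
    }

On the reporting side, the final patch in the series then lets summarize_stats.py title these tables independently of the stats prefix: `pair_count_section(prefix="uop", title="Non-JIT uop")` produces the summary "Pair counts for top 100 Non-JIT uop pairs".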
+#include "pycore_uop_ids.h" // MAX_UOP_ID + #ifndef Py_CPYTHON_PYSTATS_H # error "this header file must not be included directly" #endif @@ -99,7 +101,7 @@ typedef struct _gc_stats { typedef struct _uop_stats { uint64_t execution_count; uint64_t miss; - uint64_t pair_count[512]; + uint64_t pair_count[MAX_UOP_ID + 1]; } UOpStats; #define _Py_UOP_HIST_SIZE 32 diff --git a/Python/specialize.c b/Python/specialize.c index 7642589508f184..7cd50d9ab288d1 100644 --- a/Python/specialize.c +++ b/Python/specialize.c @@ -11,6 +11,7 @@ #include "pycore_object.h" #include "pycore_opcode_metadata.h" // _PyOpcode_Caches #include "pycore_uop_metadata.h" // _PyOpcode_uop_name +#include "pycore_uop_ids.h" // MAX_UOP_ID #include "pycore_opcode_utils.h" // RESUME_AT_FUNC_START #include "pycore_pylifecycle.h" // _PyOS_URandomNonblock() #include "pycore_runtime.h" // _Py_ID() @@ -271,8 +272,8 @@ print_optimization_stats(FILE *out, OptimizationStats *stats) } } - for (int i = 0; i < 512; i++){ - for (int j = 0; j < 512; j++) { + for (int i = 1; i <= MAX_UOP_ID; i++){ + for (int j = 1; j <= MAX_UOP_ID; j++) { if (stats->opcode[i].pair_count[j]) { fprintf(out, "uop[%s].pair_count[%s] : %" PRIu64 "\n", _PyOpcode_uop_name[i], _PyOpcode_uop_name[j], stats->opcode[i].pair_count[j]); From 516bab60005910918c32805395417f8d86a35434 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Sun, 7 Apr 2024 14:07:44 -0500 Subject: [PATCH 30/31] Undo spurious changes to prefixes --- Tools/scripts/summarize_stats.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index a8ed8881d4e6f6..e121206e250251 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -1186,7 +1186,7 @@ def iter_optimization_tables(base_stats: Stats, head_stats: Stats | None = None) ) ], ) - yield pair_count_section("Non-JIT Uop") + yield pair_count_section("uop") yield Section( "Unsupported opcodes", "", @@ -1242,7 +1242,7 @@ def calc_rows(stats: Stats) -> Rows: LAYOUT = [ execution_count_section(), - pair_count_section("Tier 1"), + pair_count_section("opcode"), pre_succ_pairs_section(), specialization_section(), specialization_effectiveness_section(), From 7349474e556662b8206dfea9c801ea31badaebf0 Mon Sep 17 00:00:00 2001 From: Jeff Glass Date: Sun, 7 Apr 2024 14:10:08 -0500 Subject: [PATCH 31/31] Add title keyword arg to pair count tables --- Tools/scripts/summarize_stats.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Tools/scripts/summarize_stats.py b/Tools/scripts/summarize_stats.py index e121206e250251..662e40882edc2a 100644 --- a/Tools/scripts/summarize_stats.py +++ b/Tools/scripts/summarize_stats.py @@ -525,7 +525,7 @@ def get_histogram(self, prefix: str) -> list[tuple[int, int]]: def get_rare_events(self) -> list[tuple[str, int]]: prefix = "Rare event " return [ - (key[len(prefix) + 1: -1].replace("_", " "), val) + (key[len(prefix) + 1 : -1].replace("_", " "), val) for key, val in self._data.items() if key.startswith(prefix) ] @@ -709,7 +709,7 @@ def execution_count_section() -> Section: ) -def pair_count_section(prefix: str) -> Section: +def pair_count_section(prefix: str, title=None) -> Section: def calc_pair_count_table(stats: Stats) -> Rows: opcode_stats = stats.get_opcode_stats(prefix) pair_counts = opcode_stats.get_pair_counts() @@ -733,7 +733,7 @@ def calc_pair_count_table(stats: Stats) -> Rows: return Section( "Pair counts", - f"Pair counts for top 100 {prefix} pairs", + f"Pair counts for top 100 
{title if title else prefix} pairs", [ Table( ("Pair", "Count:", "Self:", "Cumulative:"), @@ -1186,7 +1186,7 @@ def iter_optimization_tables(base_stats: Stats, head_stats: Stats | None = None) ) ], ) - yield pair_count_section("uop") + yield pair_count_section(prefix="uop", title="Non-JIT uop") yield Section( "Unsupported opcodes", "",