Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions Doc/c-api/float.rst
Original file line number Diff line number Diff line change
Expand Up @@ -201,8 +201,8 @@ NaNs (if such things exist on the platform) isn't handled correctly, and
attempting to unpack a bytes string containing an IEEE INF or NaN will raise an
exception.

Note that NaNs type may not be preserved on IEEE platforms (signaling NaN become
quiet NaN), for example on x86 systems in 32-bit mode.
Note that NaN type may not be preserved on IEEE platforms (signaling NaNs become
quiet NaNs), for example on x86 systems in 32-bit mode.

On non-IEEE platforms with more precision, or larger dynamic range, than IEEE
754 supports, not all values can be packed; on non-IEEE platforms with less
Expand All @@ -216,7 +216,7 @@ Pack functions

The pack routines write 2, 4 or 8 bytes, starting at *p*. *le* is an
:c:expr:`int` argument, non-zero if you want the bytes string in little-endian
format (exponent last, at ``p+1``, ``p+3``, or ``p+6`` ``p+7``), zero if you
format (exponent last, at ``p+1``, ``p+3``, or ``p+6`` and ``p+7``), zero if you
want big-endian format (exponent first, at *p*). The :c:macro:`PY_BIG_ENDIAN`
constant can be used to select the native endianness: it is equal to ``1`` on
big-endian processors, or ``0`` on little-endian processors.
Expand Down
4 changes: 2 additions & 2 deletions Doc/whatsnew/3.15.rst
Original file line number Diff line number Diff line change
Expand Up @@ -1286,11 +1286,11 @@ Upgraded JIT compiler

Results from the `pyperformance <https://github.com/python/pyperformance>`__
benchmark suite report
`4-5% <https://raw.githubusercontent.com/facebookexperimental/free-threading-benchmarking/refs/heads/main/results/bm-20260110-3.15.0a3%2B-aa8578d-JIT/bm-20260110-vultr-x86_64-python-aa8578dc54df2af9daa3-3.15.0a3%2B-aa8578d-vs-base.svg>`__
`5-6% <https://doesjitgobrrr.com/run/2026-03-11>`__
geometric mean performance improvement for the JIT over the standard CPython
interpreter built with all optimizations enabled on x86-64 Linux. On AArch64
macOS, the JIT has a
`7-8% <https://raw.githubusercontent.com/facebookexperimental/free-threading-benchmarking/refs/heads/main/results/bm-20260110-3.15.0a3%2B-aa8578d-JIT/bm-20260110-macm4pro-arm64-python-aa8578dc54df2af9daa3-3.15.0a3%2B-aa8578d-vs-base.svg>`__
`8-9% <https://doesjitgobrrr.com/run/2026-03-11>`__
speedup over the :ref:`tail calling interpreter <whatsnew314-tail-call-interpreter>`
with all optimizations enabled. The speedups for JIT
builds versus no JIT builds range from roughly 15% slowdown to over
Expand Down
8 changes: 7 additions & 1 deletion Lib/multiprocessing/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,13 @@ def freeze_support(self):
'''Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if self.get_start_method() == 'spawn' and getattr(sys, 'frozen', False):
# gh-140814: allow_none=True avoids locking in the default start
# method, which would cause a later set_start_method() to fail.
# None is safe to pass through: spawn.freeze_support()
# independently detects whether this process is a spawned
# child, so the start method check here is only an optimization.
if (getattr(sys, 'frozen', False)
and self.get_start_method(allow_none=True) in ('spawn', None)):
from .spawn import freeze_support
freeze_support()

Expand Down
14 changes: 14 additions & 0 deletions Lib/test/_test_multiprocessing.py
Original file line number Diff line number Diff line change
Expand Up @@ -6005,6 +6005,20 @@ def test_spawn_dont_set_context(self):
process.join()
self.assertIsNone(multiprocessing.get_start_method(allow_none=True))

@only_run_in_spawn_testsuite("freeze_support is not start method specific")
def test_freeze_support_dont_set_context(self):
    """freeze_support() must not pick a start method as a side effect.

    gh-140814: if it did, a later explicit set_start_method() would
    fail with "context has already been set".
    """
    multiprocessing.set_start_method(None, force=True)
    try:
        multiprocessing.freeze_support()
        start_method = multiprocessing.get_start_method(allow_none=True)
        self.assertIsNone(start_method)
        # An explicit choice afterwards must still succeed.
        multiprocessing.set_start_method('spawn')
    finally:
        # Restore the unset state for subsequent tests.
        multiprocessing.set_start_method(None, force=True)

def test_context_check_module_types(self):
try:
ctx = multiprocessing.get_context('forkserver')
Expand Down
12 changes: 12 additions & 0 deletions Lib/test/test_capi/test_misc.py
Original file line number Diff line number Diff line change
Expand Up @@ -916,6 +916,18 @@ def genf(): yield
gen = genf()
self.assertEqual(_testcapi.gen_get_code(gen), gen.gi_code)

def test_tp_bases_slot(self):
    """A Py_tp_bases slot in the spec must populate __bases__ and __base__."""
    heap_type = _testcapi.HeapCTypeWithBasesSlot
    self.assertEqual((int,), heap_type.__bases__)
    self.assertEqual(int, heap_type.__base__)

def test_tp_bases_slot_none(self):
    """A non-tuple (None) value in Py_tp_bases must raise SystemError."""
    with self.assertRaisesRegex(SystemError, "Py_tp_bases is not a tuple"):
        _testcapi.create_heapctype_with_none_bases_slot()


@requires_limited_api
class TestHeapTypeRelative(unittest.TestCase):
Expand Down
9 changes: 8 additions & 1 deletion Lib/test/test_free_threading/test_itertools.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import unittest
from itertools import batched, chain, combinations_with_replacement, cycle, permutations
from itertools import accumulate, batched, chain, combinations_with_replacement, cycle, permutations
from test.support import threading_helper


Expand All @@ -16,6 +16,13 @@ def work_iterator(it):

class ItertoolsThreading(unittest.TestCase):

@threading_helper.reap_threads
def test_accumulate(self):
    """Concurrently iterating one shared accumulate() object must be safe."""
    for _ in range(10):
        iterator = accumulate(tuple(range(40)))
        threading_helper.run_concurrently(
            work_iterator, nthreads=10, args=[iterator])

@threading_helper.reap_threads
def test_batched(self):
number_of_iterations = 10
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Fix text wrapping and formatting of ``-X`` option descriptions in the
:manpage:`python(1)` man page by using proper roff markup.
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Make concurrent iteration over :class:`itertools.accumulate` safe under free-threading.
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
:func:`multiprocessing.freeze_support` no longer sets the default start method
as a side effect, which previously caused a subsequent
:func:`multiprocessing.set_start_method` call to raise :exc:`RuntimeError`.
175 changes: 99 additions & 76 deletions Misc/python.man
Original file line number Diff line number Diff line change
Expand Up @@ -320,82 +320,105 @@ a regular expression on the warning message.
.TP
.BI "\-X " option
Set implementation-specific option. The following options are available:

\fB\-X cpu_count=\fIN\fR: override the return value of \fIos.cpu_count()\fR;
\fB\-X cpu_count=default\fR cancels overriding; also \fBPYTHON_CPU_COUNT\fI

\fB\-X dev\fR: enable CPython's "development mode", introducing additional
runtime checks which are too expensive to be enabled by default. It
will not be more verbose than the default if the code is correct: new
warnings are only emitted when an issue is detected. Effect of the
developer mode:
* Add default warning filter, as \fB\-W default\fR
* Install debug hooks on memory allocators: see the
PyMem_SetupDebugHooks() C function
* Enable the faulthandler module to dump the Python traceback on a
crash
* Enable asyncio debug mode
* Set the dev_mode attribute of sys.flags to True
* io.IOBase destructor logs close() exceptions

\fB\-X importtime\fR: show how long each import takes. It shows module name,
cumulative time (including nested imports) and self time (excluding
nested imports). Note that its output may be broken in multi-threaded
application. Typical usage is
\fBpython3 \-X importtime \-c 'import asyncio'\fR

\fB\-X importtime=2\fR enables additional output that indicates when an
imported module has already been loaded. In such cases, the string
\fBcached\fR will be printed in both time columns.

\fB\-X faulthandler\fR: enable faulthandler

\fB\-X frozen_modules=\fR[\fBon\fR|\fBoff\fR]: whether or not frozen modules
should be used.
The default is "on" (or "off" if you are running a local build).

\fB\-X gil=\fR[\fB0\fR|\fB1\fR]: enable (1) or disable (0) the GIL; also
\fBPYTHON_GIL\fR
Only available in builds configured with \fB\-\-disable\-gil\fR.

\fB\-X int_max_str_digits=\fInumber\fR: limit the size of int<->str conversions.
This helps avoid denial of service attacks when parsing untrusted data.
The default is sys.int_info.default_max_str_digits. 0 disables.

\fB\-X no_debug_ranges\fR: disable the inclusion of the tables mapping extra
location information (end line, start column offset and end column
offset) to every instruction in code objects. This is useful when
smaller code objects and pyc files are desired as well as suppressing
the extra visual location indicators when the interpreter displays
tracebacks.

\fB\-X perf\fR: support the Linux "perf" profiler; also \fBPYTHONPERFSUPPORT=1\fR

\fB\-X perf_jit\fR: support the Linux "perf" profiler with DWARF support;
also \fBPYTHON_PERF_JIT_SUPPORT=1\fR

\fB\-X presite=\fIMOD\fR: import this module before site; also \fBPYTHON_PRESITE\fR
This only works on debug builds.

\fB\-X pycache_prefix=\fIPATH\fR: enable writing .pyc files to a parallel
tree rooted at the given directory instead of to the code tree.

\fB\-X showrefcount\fR: output the total reference count and number of used
memory blocks when the program finishes or after each statement in the
interactive interpreter. This only works on debug builds

\fB\-X tracemalloc\fR: start tracing Python memory allocations using the
tracemalloc module. By default, only the most recent frame is stored in a
traceback of a trace. Use \-X tracemalloc=NFRAME to start tracing with a
traceback limit of NFRAME frames

\fB\-X utf8\fR: enable UTF-8 mode for operating system interfaces,
overriding the default locale-aware mode. \fB\-X utf8=0\fR explicitly
disables UTF-8 mode (even when it would otherwise activate
automatically). See \fBPYTHONUTF8\fR for more details

\fB\-X warn_default_encoding\fR: enable opt-in EncodingWarning for 'encoding=None'

.RS
.TP
\fB\-X cpu_count=\fIN\fR
Override the return value of \fIos.cpu_count()\fR.
\fB\-X cpu_count=default\fR cancels overriding.
See also \fBPYTHON_CPU_COUNT\fR.
.TP
\fB\-X dev\fR
Enable CPython's "development mode", introducing additional
runtime checks which are too expensive to be enabled by default. It
will not be more verbose than the default if the code is correct: new
warnings are only emitted when an issue is detected. Effect of the
developer mode:
.RS
.IP \(bu 2
Add default warning filter, as \fB\-W default\fR.
.IP \(bu 2
Install debug hooks on memory allocators: see the
PyMem_SetupDebugHooks() C function.
.IP \(bu 2
Enable the faulthandler module to dump the Python traceback on a crash.
.IP \(bu 2
Enable asyncio debug mode.
.IP \(bu 2
Set the dev_mode attribute of sys.flags to True.
.IP \(bu 2
io.IOBase destructor logs close() exceptions.
.RE
.TP
\fB\-X importtime\fR
Show how long each import takes. It shows module name,
cumulative time (including nested imports) and self time (excluding
nested imports). Note that its output may be broken in multi-threaded
application. Typical usage is
\fBpython3 \-X importtime \-c 'import asyncio'\fR.
.IP
\fB\-X importtime=2\fR enables additional output that indicates when an
imported module has already been loaded. In such cases, the string
\fBcached\fR will be printed in both time columns.
.TP
\fB\-X faulthandler\fR
Enable faulthandler.
.TP
\fB\-X frozen_modules=\fR[\fBon\fR|\fBoff\fR]
Whether or not frozen modules should be used.
The default is "on" (or "off" if you are running a local build).
.TP
\fB\-X gil=\fR[\fB0\fR|\fB1\fR]
Enable (1) or disable (0) the GIL. See also \fBPYTHON_GIL\fR.
Only available in builds configured with \fB\-\-disable\-gil\fR.
.TP
\fB\-X int_max_str_digits=\fInumber\fR
Limit the size of int<->str conversions.
This helps avoid denial of service attacks when parsing untrusted data.
The default is sys.int_info.default_max_str_digits. 0 disables.
.TP
\fB\-X no_debug_ranges\fR
Disable the inclusion of the tables mapping extra
location information (end line, start column offset and end column
offset) to every instruction in code objects. This is useful when
smaller code objects and pyc files are desired as well as suppressing
the extra visual location indicators when the interpreter displays
tracebacks.
.TP
\fB\-X perf\fR
Support the Linux "perf" profiler. See also \fBPYTHONPERFSUPPORT=1\fR.
.TP
\fB\-X perf_jit\fR
Support the Linux "perf" profiler with DWARF support.
See also \fBPYTHON_PERF_JIT_SUPPORT=1\fR.
.TP
\fB\-X presite=\fIMOD\fR
Import this module before site. See also \fBPYTHON_PRESITE\fR.
This only works on debug builds.
.TP
\fB\-X pycache_prefix=\fIPATH\fR
Enable writing .pyc files to a parallel
tree rooted at the given directory instead of to the code tree.
.TP
\fB\-X showrefcount\fR
Output the total reference count and number of used
memory blocks when the program finishes or after each statement in the
interactive interpreter. This only works on debug builds.
.TP
\fB\-X tracemalloc\fR
Start tracing Python memory allocations using the
tracemalloc module. By default, only the most recent frame is stored in a
traceback of a trace. Use \fB\-X tracemalloc=\fINFRAME\fR to start tracing with a
traceback limit of NFRAME frames.
.TP
\fB\-X utf8\fR
Enable UTF-8 mode for operating system interfaces,
overriding the default locale-aware mode. \fB\-X utf8=0\fR explicitly
disables UTF-8 mode (even when it would otherwise activate
automatically). See \fBPYTHONUTF8\fR for more details.
.TP
\fB\-X warn_default_encoding\fR
Enable opt-in EncodingWarning for 'encoding=None'.
.RE
.TP
.B \-x
Skip the first line of the source. This is intended for a DOS
Expand Down
45 changes: 45 additions & 0 deletions Modules/_testcapi/heaptype.c
Original file line number Diff line number Diff line change
Expand Up @@ -543,6 +543,25 @@ pytype_getmodulebytoken(PyObject *self, PyObject *args)
return PyType_GetModuleByToken((PyTypeObject *)type, token);
}

/* Spec for a heap type whose Py_tp_bases slot is deliberately invalid:
 * the slot value is replaced with Py_None at runtime by
 * create_heapctype_with_none_bases_slot() below, to exercise the
 * "Py_tp_bases is not a tuple" error path in PyType_FromSpec(). */
static PyType_Slot HeapCTypeWithBasesSlotNone_slots[] = {
    {Py_tp_bases, NULL}, /* replaced with Py_None at runtime */
    {0, 0},
};

static PyType_Spec HeapCTypeWithBasesSlotNone_spec = {
    .name = "_testcapi.HeapCTypeWithBasesSlotNone",
    .basicsize = sizeof(PyObject),
    .flags = Py_TPFLAGS_DEFAULT,
    .slots = HeapCTypeWithBasesSlotNone_slots
};

/* Attempt to create a heap type whose Py_tp_bases slot holds Py_None
 * (not a tuple); PyType_FromSpec() is expected to fail with SystemError. */
static PyObject *
create_heapctype_with_none_bases_slot(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    HeapCTypeWithBasesSlotNone_slots[0].pfunc = Py_None;
    return PyType_FromSpec(&HeapCTypeWithBasesSlotNone_spec);
}


static PyMethodDef TestMethods[] = {
{"pytype_fromspec_meta", pytype_fromspec_meta, METH_O},
Expand All @@ -562,6 +581,8 @@ static PyMethodDef TestMethods[] = {
{"pytype_getbasebytoken", pytype_getbasebytoken, METH_VARARGS},
{"pytype_getmodulebydef", pytype_getmodulebydef, METH_O},
{"pytype_getmodulebytoken", pytype_getmodulebytoken, METH_VARARGS},
{"create_heapctype_with_none_bases_slot",
create_heapctype_with_none_bases_slot, METH_NOARGS},
{NULL},
};

Expand Down Expand Up @@ -892,6 +913,18 @@ static PyType_Spec HeapCTypeMetaclassNullNew_spec = {
.slots = empty_type_slots
};

/* Spec for a heap type created with a valid Py_tp_bases slot.
 * The slot value (a tuple containing &PyLong_Type) is filled in by the
 * module init function before PyType_FromSpec() is called. */
static PyType_Slot HeapCTypeWithBasesSlot_slots[] = {
    {Py_tp_bases, NULL}, /* filled out in module init function */
    {0, 0},
};

static PyType_Spec HeapCTypeWithBasesSlot_spec = {
    .name = "_testcapi.HeapCTypeWithBasesSlot",
    /* basicsize matches the int base so instances can carry its layout */
    .basicsize = sizeof(PyLongObject),
    .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
    .slots = HeapCTypeWithBasesSlot_slots
};


typedef struct {
PyObject_HEAD
Expand Down Expand Up @@ -1432,6 +1465,18 @@ _PyTestCapi_Init_Heaptype(PyObject *m) {
&PyType_Type, m, &HeapCTypeMetaclassNullNew_spec, (PyObject *) &PyType_Type);
ADD("HeapCTypeMetaclassNullNew", HeapCTypeMetaclassNullNew);

PyObject *bases = PyTuple_Pack(1, &PyLong_Type);
if (bases == NULL) {
return -1;
}
HeapCTypeWithBasesSlot_slots[0].pfunc = bases;
PyObject *HeapCTypeWithBasesSlot = PyType_FromSpec(&HeapCTypeWithBasesSlot_spec);
Py_DECREF(bases);
if (HeapCTypeWithBasesSlot == NULL) {
return -1;
}
ADD("HeapCTypeWithBasesSlot", HeapCTypeWithBasesSlot);

ADD("Py_TP_USE_SPEC", PyLong_FromVoidPtr(Py_TP_USE_SPEC));

PyObject *HeapCCollection = PyType_FromMetaclass(
Expand Down
12 changes: 11 additions & 1 deletion Modules/itertoolsmodule.c
Original file line number Diff line number Diff line change
Expand Up @@ -3073,7 +3073,7 @@ accumulate_traverse(PyObject *op, visitproc visit, void *arg)
}

static PyObject *
accumulate_next(PyObject *op)
accumulate_next_lock_held(PyObject *op)
{
accumulateobject *lz = accumulateobject_CAST(op);
PyObject *val, *newtotal;
Expand Down Expand Up @@ -3105,6 +3105,16 @@ accumulate_next(PyObject *op)
return newtotal;
}

/* tp_iternext for accumulate objects: delegate to the lock-held variant
 * inside a per-object critical section so that concurrent iteration of a
 * shared accumulate object is safe on free-threaded builds. */
static PyObject *
accumulate_next(PyObject *op)
{
    PyObject *result;
    Py_BEGIN_CRITICAL_SECTION(op);
    result = accumulate_next_lock_held(op);
    Py_END_CRITICAL_SECTION()
    return result;
}

static PyType_Slot accumulate_slots[] = {
{Py_tp_dealloc, accumulate_dealloc},
{Py_tp_getattro, PyObject_GenericGetAttr},
Expand Down
Loading