
Commit 4d1c98d

Reapply "refactor(python): drop support for 3.9, document 3.14 support (NVIDIA#1069)" (NVIDIA#1109)
This reverts commit fcd7b99.
1 parent 0eb838f · commit 4d1c98d

27 files changed (+83, -108 lines)

.github/workflows/build-wheel.yml

Lines changed: 0 additions & 1 deletion
@@ -28,7 +28,6 @@ jobs:
       fail-fast: false
       matrix:
         python-version:
-          - "3.9"
           - "3.10"
           - "3.11"
           - "3.12"

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ flowchart TD
         B2["linux-aarch64<br/>(Self-hosted)"]
         B3["win-64<br/>(GitHub-hosted)"]
     end
-    BUILD_DETAILS["• Python versions: 3.9, 3.10, 3.11, 3.12, 3.13<br/>• CUDA version: 13.0.0 (build-time)<br/>• Components: cuda-core, cuda-bindings,<br/> cuda-pathfinder, cuda-python"]
+    BUILD_DETAILS["• Python versions: 3.10, 3.11, 3.12, 3.13, 3.14<br/>• CUDA version: 13.0.0 (build-time)<br/>• Components: cuda-core, cuda-bindings,<br/> cuda-pathfinder, cuda-python"]
     end

     %% Artifact Storage

ci/test-matrix.json

Lines changed: 0 additions & 14 deletions
@@ -4,8 +4,6 @@
   "_notes": "DRIVER: 'earliest' does not work with CUDA 12.9.1 and LOCAL_CTK: 0 does not work with CUDA 12.0.1",
   "linux": {
     "pull-request": [
-      { "ARCH": "amd64", "PY_VER": "3.9", "CUDA_VER": "12.9.1", "LOCAL_CTK": "0", "GPU": "l4", "DRIVER": "latest" },
-      { "ARCH": "amd64", "PY_VER": "3.9", "CUDA_VER": "13.0.2", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.10", "CUDA_VER": "12.9.1", "LOCAL_CTK": "1", "GPU": "v100", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.10", "CUDA_VER": "13.0.2", "LOCAL_CTK": "0", "GPU": "l4", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.11", "CUDA_VER": "12.9.1", "LOCAL_CTK": "0", "GPU": "rtxpro6000", "DRIVER": "latest" },
@@ -16,8 +14,6 @@
      { "ARCH": "amd64", "PY_VER": "3.13", "CUDA_VER": "13.0.2", "LOCAL_CTK": "1", "GPU": "rtxpro6000", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.14", "CUDA_VER": "13.0.2", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.14t", "CUDA_VER": "13.0.2", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
-      { "ARCH": "arm64", "PY_VER": "3.9", "CUDA_VER": "12.9.1", "LOCAL_CTK": "0", "GPU": "a100", "DRIVER": "latest" },
-      { "ARCH": "arm64", "PY_VER": "3.9", "CUDA_VER": "13.0.2", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" },
      { "ARCH": "arm64", "PY_VER": "3.10", "CUDA_VER": "12.9.1", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" },
      { "ARCH": "arm64", "PY_VER": "3.10", "CUDA_VER": "13.0.2", "LOCAL_CTK": "0", "GPU": "a100", "DRIVER": "latest" },
      { "ARCH": "arm64", "PY_VER": "3.11", "CUDA_VER": "12.9.1", "LOCAL_CTK": "0", "GPU": "a100", "DRIVER": "latest" },
@@ -30,11 +26,6 @@
      { "ARCH": "arm64", "PY_VER": "3.14t", "CUDA_VER": "13.0.2", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" }
    ],
    "nightly": [
-      { "ARCH": "amd64", "PY_VER": "3.9", "CUDA_VER": "11.8.0", "LOCAL_CTK": "0", "GPU": "l4", "DRIVER": "earliest" },
-      { "ARCH": "amd64", "PY_VER": "3.9", "CUDA_VER": "11.8.0", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
-      { "ARCH": "amd64", "PY_VER": "3.9", "CUDA_VER": "12.0.1", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
-      { "ARCH": "amd64", "PY_VER": "3.9", "CUDA_VER": "12.9.1", "LOCAL_CTK": "0", "GPU": "l4", "DRIVER": "latest" },
-      { "ARCH": "amd64", "PY_VER": "3.9", "CUDA_VER": "12.9.1", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.10", "CUDA_VER": "11.8.0", "LOCAL_CTK": "0", "GPU": "l4", "DRIVER": "earliest" },
      { "ARCH": "amd64", "PY_VER": "3.10", "CUDA_VER": "11.8.0", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.10", "CUDA_VER": "12.0.1", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
@@ -55,11 +46,6 @@
      { "ARCH": "amd64", "PY_VER": "3.13", "CUDA_VER": "12.0.1", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.13", "CUDA_VER": "12.9.1", "LOCAL_CTK": "0", "GPU": "l4", "DRIVER": "latest" },
      { "ARCH": "amd64", "PY_VER": "3.13", "CUDA_VER": "12.9.1", "LOCAL_CTK": "1", "GPU": "l4", "DRIVER": "latest" },
-      { "ARCH": "arm64", "PY_VER": "3.9", "CUDA_VER": "11.8.0", "LOCAL_CTK": "0", "GPU": "a100", "DRIVER": "earliest" },
-      { "ARCH": "arm64", "PY_VER": "3.9", "CUDA_VER": "11.8.0", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" },
-      { "ARCH": "arm64", "PY_VER": "3.9", "CUDA_VER": "12.0.1", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" },
-      { "ARCH": "arm64", "PY_VER": "3.9", "CUDA_VER": "12.9.1", "LOCAL_CTK": "0", "GPU": "a100", "DRIVER": "latest" },
-      { "ARCH": "arm64", "PY_VER": "3.9", "CUDA_VER": "12.9.1", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" },
      { "ARCH": "arm64", "PY_VER": "3.10", "CUDA_VER": "11.8.0", "LOCAL_CTK": "0", "GPU": "a100", "DRIVER": "earliest" },
      { "ARCH": "arm64", "PY_VER": "3.10", "CUDA_VER": "11.8.0", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" },
      { "ARCH": "arm64", "PY_VER": "3.10", "CUDA_VER": "12.0.1", "LOCAL_CTK": "1", "GPU": "a100", "DRIVER": "latest" },

cuda_bindings/docs/source/install.rst

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@ Runtime Requirements
 ``cuda.bindings`` supports the same platforms as CUDA. Runtime dependencies are:

 * Linux (x86-64, arm64) and Windows (x86-64)
-* Python 3.9 - 3.14
+* Python 3.10 - 3.14
 * Driver: Linux (580.65.06 or later) Windows (580.88 or later)
 * Optionally, NVRTC, nvJitLink, NVVM, and cuFile from CUDA Toolkit 13.x


cuda_bindings/docs/source/support.rst

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ The ``cuda.bindings`` module has the following support policy:
    depends on the underlying driver and the Toolkit versions, as described in the compatibility
    documentation.)
 4. The module supports all Python versions following the `CPython EOL schedule`_. As of writing
-   Python 3.9 - 3.13 are supported.
+   Python 3.10 - 3.14 are supported.
 5. The module exposes a Cython layer from which types and functions could be ``cimport``'d. While
    we strive to keep this layer stable, due to Cython limitations a new *minor* release of this
    module could require Cython layer users to rebuild their projects and update their pinning to

cuda_bindings/pyproject.toml

Lines changed: 2 additions & 1 deletion
@@ -9,16 +9,17 @@ name = "cuda-bindings"
 description = "Python bindings for CUDA"
 authors = [{name = "NVIDIA Corporation", email = "cuda-python-conduct@nvidia.com"},]
 license = "LicenseRef-NVIDIA-SOFTWARE-LICENSE"
+requires-python = ">=3.10"
 classifiers = [
     "Intended Audience :: Developers",
     "Topic :: Database",
     "Topic :: Scientific/Engineering",
     "Programming Language :: Python",
-    "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
     "Programming Language :: Python :: 3.13",
+    "Programming Language :: Python :: 3.14",
     "Environment :: GPU :: NVIDIA CUDA",
 ]
 dynamic = [
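With `requires-python = ">=3.10"` in the package metadata, installers refuse to resolve the wheel on older interpreters. Here is a minimal sketch of that version gate using the third-party `packaging` library as a stand-in for the check pip performs internally (assumption: `packaging` is installed; it is for illustration only).

```python
# Requires the third-party "packaging" package (pip install packaging).
from packaging.specifiers import SpecifierSet
from packaging.version import Version

requires_python = SpecifierSet(">=3.10")  # mirrors the new pyproject.toml field

for candidate in ("3.9", "3.10", "3.14"):
    verdict = "accepted" if Version(candidate) in requires_python else "rejected"
    print(f"Python {candidate}: {verdict}")
# Python 3.9: rejected
# Python 3.10: accepted
# Python 3.14: accepted
```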

cuda_core/cuda/core/experimental/__init__.py

Lines changed: 0 additions & 11 deletions
@@ -26,17 +26,6 @@
 finally:
     del cuda.bindings, importlib, subdir, cuda_major, cuda_minor

-import sys  # noqa: E402
-import warnings  # noqa: E402
-
-if sys.version_info < (3, 10):
-    warnings.warn(
-        "support for Python 3.9 and below is deprecated and subject to future removal",
-        category=FutureWarning,
-        stacklevel=1,
-    )
-del sys, warnings
-
 from cuda.core.experimental import utils  # noqa: E402
 from cuda.core.experimental._device import Device  # noqa: E402
 from cuda.core.experimental._event import Event, EventOptions  # noqa: E402

cuda_core/cuda/core/experimental/_device.pyx

Lines changed: 5 additions & 5 deletions
@@ -10,7 +10,7 @@ from cuda.bindings cimport cydriver
 from cuda.core.experimental._utils.cuda_utils cimport HANDLE_RETURN

 import threading
-from typing import Optional, Union
+from typing import Union

 from cuda.core.experimental._context import Context, ContextOptions
 from cuda.core.experimental._event import Event, EventOptions
@@ -951,7 +951,7 @@ class Device:
     """
     __slots__ = ("_id", "_mr", "_has_inited", "_properties")

-    def __new__(cls, device_id: Optional[int] = None):
+    def __new__(cls, device_id: int | None = None):
         global _is_cuInit
         if _is_cuInit is False:
             with _lock, nogil:
@@ -1223,7 +1223,7 @@ class Device:
         """
         raise NotImplementedError("WIP: https://github.com/NVIDIA/cuda-python/issues/189")

-    def create_stream(self, obj: Optional[IsStreamT] = None, options: Optional[StreamOptions] = None) -> Stream:
+    def create_stream(self, obj: IsStreamT | None = None, options: StreamOptions | None = None) -> Stream:
         """Create a Stream object.

         New stream objects can be created in two different ways:
@@ -1254,7 +1254,7 @@ class Device:
         self._check_context_initialized()
         return Stream._init(obj=obj, options=options, device_id=self._id)

-    def create_event(self, options: Optional[EventOptions] = None) -> Event:
+    def create_event(self, options: EventOptions | None = None) -> Event:
         """Create an Event object without recording it to a Stream.

         Note
@@ -1276,7 +1276,7 @@ class Device:
         ctx = self._get_current_context()
         return Event._init(self._id, ctx, options, True)

-    def allocate(self, size, stream: Optional[Stream] = None) -> Buffer:
+    def allocate(self, size, stream: Stream | None = None) -> Buffer:
         """Allocate device memory from a specified stream.

         Allocates device memory of `size` bytes on the specified `stream`
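The annotation changes above use PEP 604's `X | None` union syntax, which only evaluates at runtime on Python 3.10+, so dropping 3.9 lets `typing.Optional` go. A minimal, self-contained sketch of the same pattern follows; the names below are illustrative stand-ins, not the real `cuda.core` API.

```python
from dataclasses import dataclass


@dataclass
class StreamOptions:
    """Illustrative stand-in for cuda.core's real options class."""
    nonblocking: bool = True


def make_stream(options: StreamOptions | None = None) -> str:
    """Describe the stream that would be created (hypothetical helper)."""
    options = options if options is not None else StreamOptions()
    return f"stream(nonblocking={options.nonblocking})"


print(make_stream())                      # stream(nonblocking=True)
print(make_stream(StreamOptions(False)))  # stream(nonblocking=False)
```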

cuda_core/cuda/core/experimental/_launch_config.py

Lines changed: 3 additions & 3 deletions
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0

 from dataclasses import dataclass
-from typing import Optional, Union
+from typing import Union

 from cuda.core.experimental._device import Device
 from cuda.core.experimental._utils.cuda_utils import (
@@ -68,8 +68,8 @@ class LaunchConfig:
     grid: Union[tuple, int] = None
     cluster: Union[tuple, int] = None
     block: Union[tuple, int] = None
-    shmem_size: Optional[int] = None
-    cooperative_launch: Optional[bool] = False
+    shmem_size: int | None = None
+    cooperative_launch: bool | None = False

     def __post_init__(self):
         _lazy_init()

cuda_core/cuda/core/experimental/_linker.py

Lines changed: 1 addition & 1 deletion
@@ -343,7 +343,7 @@ def _exception_manager(self):
             # our constructor could raise, in which case there's no handle available
             error_log = self.get_error_log()
             # Starting Python 3.11 we could also use Exception.add_note() for the same purpose, but
-            # unfortunately we are still supporting Python 3.9/3.10...
+            # unfortunately we are still supporting Python 3.10...
             # Here we rely on both CUDAError and nvJitLinkError have the error string placed in .args[0].
             e.args = (e.args[0] + (f"\nLinker error log: {error_log}" if error_log else ""), *e.args[1:])
             raise e
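For context, the comment being updated refers to `Exception.add_note()`, which exists only on Python 3.11+. A hedged sketch of the two approaches is shown below, using a stand-in exception and error log rather than the real `CUDAError`/`nvJitLinkError` types.

```python
import sys
import traceback

error_log = "nvjitlink: undefined symbol 'foo'"   # illustrative stand-in

try:
    raise RuntimeError("link failed")             # stand-in for CUDAError / nvJitLinkError
except RuntimeError as e:
    if sys.version_info >= (3, 11):
        # Available on 3.11+: notes appear in the traceback, not in str(e).
        e.add_note(f"Linker error log: {error_log}")
    else:
        # Fallback while 3.10 is still supported: rewrite args[0],
        # relying on the error string living in .args[0].
        e.args = (e.args[0] + f"\nLinker error log: {error_log}", *e.args[1:])
    traceback.print_exception(e)
```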
