From f2e48b39eda18713439891da9fcbf0338e86e4d9 Mon Sep 17 00:00:00 2001
From: Luca Antiga
Date: Mon, 25 Mar 2024 12:33:30 -0400
Subject: [PATCH] Add custom CUDA kernels to README (#71)

---
 README.md      | 3 ++-
 pyproject.toml | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ce5399d57..a672abe48 100644
--- a/README.md
+++ b/README.md
@@ -154,7 +154,8 @@ Thunder doesn't generate code for accelerators directly. It acquires and transfo
 - [Apex](https://github.com/NVIDIA/apex)
 - [TransformerEngine](https://github.com/NVIDIA/TransformerEngine)
 - [PyTorch eager](https://github.com/pytorch/pytorch)
-- custom kernels, including those written with [OpenAI Triton](https://github.com/openai/triton)
+- Custom CUDA kernels through [PyCUDA](https://documen.tician.de/pycuda/tutorial.html#interoperability-with-other-libraries-using-the-cuda-array-interface), [Numba](https://numba.readthedocs.io/en/stable/cuda/kernels.html), [CuPy](https://docs.cupy.dev/en/stable/user_guide/kernel.html)
+- Custom kernels written in [OpenAI Triton](https://github.com/openai/triton)
 
 Modules and functions compiled with Thunder fully interoperate with vanilla PyTorch and support PyTorch's autograd. Also, Thunder works alongside torch.compile to leverage its state-of-the-art optimizations.
 
diff --git a/pyproject.toml b/pyproject.toml
index 520a66127..fc0374633 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -56,7 +56,8 @@ quiet-level = 3
 # https://github.com/codespell-project/codespell/issues/2839#issuecomment-1731601603
 # also adding links until they ignored by its: nature
 # https://github.com/codespell-project/codespell/issues/2243#issuecomment-1732019960
-#ignore-words-list = ""
+# documen is used in an url in README
+ignore-words-list = "documen"
 
 [tool.black]
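The interoperability the new README bullet points at rests on the CUDA array interface: a PyTorch GPU tensor can be handed to a PyCUDA, Numba, or CuPy kernel without a copy. Below is a minimal, illustrative sketch using Numba; it assumes `torch`, `numba`, and a CUDA device are available, the kernel name `scale_inplace` is made up for the example, and Thunder itself is not invoked here.

```python
import torch
from numba import cuda


@cuda.jit
def scale_inplace(x, factor):
    # One thread per element; guard against threads past the end of the array.
    i = cuda.grid(1)
    if i < x.size:
        x[i] *= factor


t = torch.arange(8, dtype=torch.float32, device="cuda")
x = cuda.as_cuda_array(t)  # zero-copy view via __cuda_array_interface__
threads_per_block = 128
blocks = (x.size + threads_per_block - 1) // threads_per_block
scale_inplace[blocks, threads_per_block](x, 2.0)
print(t)  # the original tensor is updated in place: 0., 2., 4., ..., 14.
```

The zero-copy handoff is the point: the tensor's memory stays on the device and only a view changes hands, which is what makes custom kernels like this usable alongside the other executors listed in the README.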