From 23553f48653ee83754da01ba4fb2539e61ce6048 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Mon, 29 Sep 2025 15:41:22 +0200
Subject: [PATCH 01/12] init question

---
 .../195_gradient-checkpointing/description.md |  3 ++
 .../195_gradient-checkpointing/example.json   |  5 ++
 questions/195_gradient-checkpointing/learn.md | 47 +++++++++++++++++++
 .../195_gradient-checkpointing/meta.json      | 12 +++++
 .../pytorch/solution.py                       |  2 +
 .../pytorch/starter_code.py                   |  2 +
 .../pytorch/tests.json                        |  6 +++
 .../195_gradient-checkpointing/solution.py    |  3 ++
 .../starter_code.py                           |  4 ++
 .../195_gradient-checkpointing/tests.json     |  6 +++
 .../tinygrad/solution.py                      |  2 +
 .../tinygrad/starter_code.py                  |  2 +
 .../tinygrad/tests.json                       |  6 +++
 13 files changed, 100 insertions(+)
 create mode 100644 questions/195_gradient-checkpointing/description.md
 create mode 100644 questions/195_gradient-checkpointing/example.json
 create mode 100644 questions/195_gradient-checkpointing/learn.md
 create mode 100644 questions/195_gradient-checkpointing/meta.json
 create mode 100644 questions/195_gradient-checkpointing/pytorch/solution.py
 create mode 100644 questions/195_gradient-checkpointing/pytorch/starter_code.py
 create mode 100644 questions/195_gradient-checkpointing/pytorch/tests.json
 create mode 100644 questions/195_gradient-checkpointing/solution.py
 create mode 100644 questions/195_gradient-checkpointing/starter_code.py
 create mode 100644 questions/195_gradient-checkpointing/tests.json
 create mode 100644 questions/195_gradient-checkpointing/tinygrad/solution.py
 create mode 100644 questions/195_gradient-checkpointing/tinygrad/starter_code.py
 create mode 100644 questions/195_gradient-checkpointing/tinygrad/tests.json

diff --git a/questions/195_gradient-checkpointing/description.md b/questions/195_gradient-checkpointing/description.md
new file mode 100644
index 00000000..684dbcd6
--- /dev/null
+++ b/questions/195_gradient-checkpointing/description.md
@@ -0,0 +1,3 @@
+## Problem
+
+Write a concise problem description here.
diff --git a/questions/195_gradient-checkpointing/example.json b/questions/195_gradient-checkpointing/example.json
new file mode 100644
index 00000000..4e7fdd99
--- /dev/null
+++ b/questions/195_gradient-checkpointing/example.json
@@ -0,0 +1,5 @@
+{
+  "input": "...",
+  "output": "...",
+  "reasoning": "Explain why the output follows from the input."
+}
diff --git a/questions/195_gradient-checkpointing/learn.md b/questions/195_gradient-checkpointing/learn.md
new file mode 100644
index 00000000..31c0cec5
--- /dev/null
+++ b/questions/195_gradient-checkpointing/learn.md
@@ -0,0 +1,47 @@
+## Solution Explanation
+
+Add intuition, math, and step-by-step reasoning here.
+
+### Writing Mathematical Expressions with LaTeX
+
+This editor supports LaTeX for rendering mathematical equations and expressions. Here's how you can use it:
+
+1. **Inline Math**:
+   - Wrap your expression with single `$` symbols.
+   - Example: `$E = mc^2$` → Renders as: ( $E = mc^2$ )
+
+2. **Block Math**:
+   - Wrap your expression with double `$$` symbols.
+   - Example:
+     ```
+     $$
+     \int_a^b f(x) \, dx
+     $$
+     ```
+     Renders as:
+     $$
+     \int_a^b f(x) \, dx
+     $$
+
+3. **Math Functions**:
+   - Use standard LaTeX functions like `\frac`, `\sqrt`, `\sum`, etc.
+   - Examples:
+     - `$\frac{a}{b}$` → ( $\frac{a}{b}$ )
+     - `$\sqrt{x}$` → ( $\sqrt{x}$ )
+
+4. **Greek Letters and Symbols**:
+   - Use commands like `\alpha`, `\beta`, etc., for Greek letters.
+   - Example: `$\alpha + \beta = \gamma$` → ( $\alpha + \beta = \gamma$ )
+
+5. **Subscripts and Superscripts**:
+   - Use `_{}` for subscripts and `^{}` for superscripts.
+   - Examples:
+     - `$x_i$` → ( $x_i$ )
+     - `$x^2$` → ( $x^2$ )
+
+6. **Combined Examples**:
+   - `$\sum_{i=1}^n i^2 = \frac{n(n+1)(2n+1)}{6}$`
+     Renders as:
+     $\sum_{i=1}^n i^2 = \frac{n(n+1)(2n+1)}{6}$
+
+Feel free to write your own mathematical expressions, and they will be rendered beautifully in the preview!
diff --git a/questions/195_gradient-checkpointing/meta.json b/questions/195_gradient-checkpointing/meta.json
new file mode 100644
index 00000000..9db2e26a
--- /dev/null
+++ b/questions/195_gradient-checkpointing/meta.json
@@ -0,0 +1,12 @@
+{
+  "id": "XXX",
+  "title": "TITLE GOES HERE",
+  "difficulty": "medium",
+  "category": "Machine Learning",
+  "video": "",
+  "likes": "0",
+  "dislikes": "0",
+  "contributor": [],
+  "tinygrad_difficulty": "",
+  "pytorch_difficulty": ""
+}
diff --git a/questions/195_gradient-checkpointing/pytorch/solution.py b/questions/195_gradient-checkpointing/pytorch/solution.py
new file mode 100644
index 00000000..9b74bcbd
--- /dev/null
+++ b/questions/195_gradient-checkpointing/pytorch/solution.py
@@ -0,0 +1,2 @@
+def your_function(...):
+    ...
diff --git a/questions/195_gradient-checkpointing/pytorch/starter_code.py b/questions/195_gradient-checkpointing/pytorch/starter_code.py
new file mode 100644
index 00000000..d3e5beb5
--- /dev/null
+++ b/questions/195_gradient-checkpointing/pytorch/starter_code.py
@@ -0,0 +1,2 @@
+def your_function(...):
+    pass
diff --git a/questions/195_gradient-checkpointing/pytorch/tests.json b/questions/195_gradient-checkpointing/pytorch/tests.json
new file mode 100644
index 00000000..e4e4b180
--- /dev/null
+++ b/questions/195_gradient-checkpointing/pytorch/tests.json
@@ -0,0 +1,6 @@
+[
+  {
+    "test": "print(your_function(...))",
+    "expected_output": "..."
+  }
+]
diff --git a/questions/195_gradient-checkpointing/solution.py b/questions/195_gradient-checkpointing/solution.py
new file mode 100644
index 00000000..b1ff1c5b
--- /dev/null
+++ b/questions/195_gradient-checkpointing/solution.py
@@ -0,0 +1,3 @@
+def your_function(...):
+    # reference implementation
+    ...
diff --git a/questions/195_gradient-checkpointing/starter_code.py b/questions/195_gradient-checkpointing/starter_code.py
new file mode 100644
index 00000000..564b3118
--- /dev/null
+++ b/questions/195_gradient-checkpointing/starter_code.py
@@ -0,0 +1,4 @@
+# Implement your function below.
+
+def your_function(...):
+    pass
diff --git a/questions/195_gradient-checkpointing/tests.json b/questions/195_gradient-checkpointing/tests.json
new file mode 100644
index 00000000..e4e4b180
--- /dev/null
+++ b/questions/195_gradient-checkpointing/tests.json
@@ -0,0 +1,6 @@
+[
+  {
+    "test": "print(your_function(...))",
+    "expected_output": "..."
+  }
+]
diff --git a/questions/195_gradient-checkpointing/tinygrad/solution.py b/questions/195_gradient-checkpointing/tinygrad/solution.py
new file mode 100644
index 00000000..9b74bcbd
--- /dev/null
+++ b/questions/195_gradient-checkpointing/tinygrad/solution.py
@@ -0,0 +1,2 @@
+def your_function(...):
+    ...
diff --git a/questions/195_gradient-checkpointing/tinygrad/starter_code.py b/questions/195_gradient-checkpointing/tinygrad/starter_code.py
new file mode 100644
index 00000000..d3e5beb5
--- /dev/null
+++ b/questions/195_gradient-checkpointing/tinygrad/starter_code.py
@@ -0,0 +1,2 @@
+def your_function(...):
+    pass
diff --git a/questions/195_gradient-checkpointing/tinygrad/tests.json b/questions/195_gradient-checkpointing/tinygrad/tests.json
new file mode 100644
index 00000000..e4e4b180
--- /dev/null
+++ b/questions/195_gradient-checkpointing/tinygrad/tests.json
@@ -0,0 +1,6 @@
+[
+  {
+    "test": "print(your_function(...))",
+    "expected_output": "..."
+  }
+]

From 7d0aeea00bf049bd2e76d09ec3dbe22aea5a5b77 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:16:12 +0200
Subject: [PATCH 02/12] add description

---
 questions/195_gradient-checkpointing/description.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/questions/195_gradient-checkpointing/description.md b/questions/195_gradient-checkpointing/description.md
index 684dbcd6..029bd162 100644
--- a/questions/195_gradient-checkpointing/description.md
+++ b/questions/195_gradient-checkpointing/description.md
@@ -1,3 +1,3 @@
 ## Problem
 
-Write a concise problem description here.
+Write a Python function `checkpoint_forward` that takes a list of numpy functions (each representing a layer or operation) and an input numpy array, and returns the final output by applying each function in sequence. To mirror the memory-saving discipline of gradient checkpointing, do not build up a list of intermediate activations: carry a single running value through the chain, so that earlier activations would have to be recomputed if they were ever needed again (no backward pass is required for this problem). Use only standard Python and numpy. The returned array should be of type float and have the same shape as the output of the last function.

From 073a4558bb15e3c3ff5d8197ee2ff29dc9c4b897 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:16:24 +0200
Subject: [PATCH 03/12] add learn about section

---
 questions/195_gradient-checkpointing/learn.md | 83 ++++++++-----------
 1 file changed, 36 insertions(+), 47 deletions(-)

diff --git a/questions/195_gradient-checkpointing/learn.md b/questions/195_gradient-checkpointing/learn.md
index 31c0cec5..0d17778f 100644
--- a/questions/195_gradient-checkpointing/learn.md
+++ b/questions/195_gradient-checkpointing/learn.md
@@ -1,47 +1,36 @@
-## Solution Explanation
-
-Add intuition, math, and step-by-step reasoning here.
-
-### Writing Mathematical Expressions with LaTeX
-
-This editor supports LaTeX for rendering mathematical equations and expressions. Here's how you can use it:
-
-1. **Inline Math**:
-   - Wrap your expression with single `$` symbols.
-   - Example: `$E = mc^2$` → Renders as: ( $E = mc^2$ )
-
-2. **Block Math**:
-   - Wrap your expression with double `$$` symbols.
-   - Example:
-     ```
-     $$
-     \int_a^b f(x) \, dx
-     $$
-     ```
-     Renders as:
-     $$
-     \int_a^b f(x) \, dx
-     $$
-
-3. **Math Functions**:
-   - Use standard LaTeX functions like `\frac`, `\sqrt`, `\sum`, etc.
-   - Examples:
-     - `$\frac{a}{b}$` → ( $\frac{a}{b}$ )
-     - `$\sqrt{x}$` → ( $\sqrt{x}$ )
-
-4. **Greek Letters and Symbols**:
-   - Use commands like `\alpha`, `\beta`, etc., for Greek letters.
-   - Example: `$\alpha + \beta = \gamma$` → ( $\alpha + \beta = \gamma$ )
-
-5. **Subscripts and Superscripts**:
-   - Use `_{}` for subscripts and `^{}` for superscripts.
-   - Examples:
-     - `$x_i$` → ( $x_i$ )
-     - `$x^2$` → ( $x^2$ )
-
-6. **Combined Examples**:
-   - `$\sum_{i=1}^n i^2 = \frac{n(n+1)(2n+1)}{6}$`
-     Renders as:
-     $\sum_{i=1}^n i^2 = \frac{n(n+1)(2n+1)}{6}$
-
-Feel free to write your own mathematical expressions, and they will be rendered beautifully in the preview!
+# **Gradient Checkpointing**
+
+## **1. Definition**
+Gradient checkpointing is a technique used in deep learning to reduce memory usage during training by selectively storing only a subset of intermediate activations (checkpoints) and recomputing the others as needed during the backward pass. This allows training of larger models or using larger batch sizes without exceeding memory limits.
+
+## **2. Why Use Gradient Checkpointing?**
+* **Reduce Memory Usage:** By storing fewer activations, memory requirements are reduced, enabling training of deeper or larger models.
+* **Enable Larger Batches/Models:** Makes it possible to fit larger models or use larger batch sizes on limited hardware.
+* **Tradeoff:** The main tradeoff is increased computation time, as some activations must be recomputed during the backward pass.
+
+## **3. Gradient Checkpointing Mechanism**
+Suppose a model consists of $N$ layers, each represented by a function $f_i$. Normally, the forward pass stores all intermediate activations:
+
+$$
+A_0 = x \\
+A_1 = f_1(A_0) \\
+A_2 = f_2(A_1) \\
+\ldots \\
+A_N = f_N(A_{N-1})
+$$
+
+With gradient checkpointing, only a subset of the $A_i$ (the checkpoints) is stored. The rest are recomputed as needed during backpropagation. In the simplest case, you can store only the input and output, and recompute all intermediates when needed.
+
+**Example:**
+If you have three functions $f_1, f_2, f_3$ and input $x$:
+* Forward: $A_1 = f_1(x)$, $A_2 = f_2(A_1)$, $A_3 = f_3(A_2)$
+* With checkpointing, you might only store $x$ and $A_3$, and recompute $A_1$ and $A_2$ as needed.
+
+## **4. Applications of Gradient Checkpointing**
+Gradient checkpointing is widely used in training:
+* **Very Deep Neural Networks:** Transformers, ResNets, and other architectures with many layers.
+* **Large-Scale Models:** Language models, vision models, and more.
+* **Memory-Constrained Environments:** When hardware cannot fit all activations in memory.
+* **Any optimization problem** where memory is a bottleneck during training.
+
+Gradient checkpointing is a powerful tool to enable training of large models on limited hardware, at the cost of extra computation.

From e54af2b1513e6048dbc1fbd16040a1cf3bb3c5c0 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:18:26 +0200
Subject: [PATCH 04/12] add solution

---
 .../195_gradient-checkpointing/solution.py | 20 ++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/questions/195_gradient-checkpointing/solution.py b/questions/195_gradient-checkpointing/solution.py
index b1ff1c5b..94bb6b97 100644
--- a/questions/195_gradient-checkpointing/solution.py
+++ b/questions/195_gradient-checkpointing/solution.py
@@ -1,3 +1,17 @@
-def your_function(...):
-    # reference implementation
-    ...
+import numpy as np
+
+def checkpoint_forward(funcs, input_arr):
+    """
+    Applies a list of functions in sequence to the input array, simulating gradient checkpointing by not storing intermediates.
+
+    Args:
+        funcs (list of callables): List of functions to apply in sequence.
+        input_arr (np.ndarray): Input numpy array.
+
+    Returns:
+        np.ndarray: The output after applying all functions, same shape as output of last function.
+    """
+    x = input_arr
+    for f in funcs:
+        x = f(x)
+    return x.astype(float)
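The reference solution above covers only the forward pass. To make the recompute-instead-of-store idea from learn.md concrete, here is a minimal numpy sketch of the extreme case described there (store only the input, recompute every intermediate during the backward pass). The `(forward, vjp)` layer pairs and all names here are illustrative assumptions, not part of the question's required API:

```python
import numpy as np

# Each layer is a (forward, vjp) pair: forward maps x -> f(x), and
# vjp(x, g) maps the upstream gradient g at f(x) back to a gradient at x.
layers = [
    (lambda x: x + 1.0, lambda x, g: g),            # d(x+1)/dx = 1
    (lambda x: 2.0 * x, lambda x, g: 2.0 * g),      # d(2x)/dx  = 2
    (lambda x: x ** 2,  lambda x, g: 2.0 * x * g),  # d(x^2)/dx = 2x
]

def grad_with_recompute(layers, x):
    """Gradient of sum(output) w.r.t. x while caching no activations:
    the forward pass keeps only a running value, and the backward pass
    recomputes the activation feeding each layer from the input."""
    out = x
    for f, _ in layers:              # forward pass, nothing stored
        out = f(out)
    g = np.ones_like(out)            # seed gradient of sum(output)
    for i in range(len(layers) - 1, -1, -1):
        a = x
        for f, _ in layers[:i]:      # recompute A_i from the input
            a = f(a)
        g = layers[i][1](a, g)       # apply layer i's VJP
    return g

x = np.array([1.0, 2.0])
# The composition is (2(x+1))^2, so the gradient is 8(x+1).
print(grad_with_recompute(layers, x))  # [16. 24.]
```

The memory saving is what checkpointing buys; the price is visible in the nested loop, which re-runs earlier layers $O(N^2)$ times in this no-checkpoint extreme.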
From e7c4ac50644cf232a6e60b4e5f66c31dfb5a32c9 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:18:37 +0200
Subject: [PATCH 05/12] add starter code

---
 .../195_gradient-checkpointing/starter_code.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/questions/195_gradient-checkpointing/starter_code.py b/questions/195_gradient-checkpointing/starter_code.py
index 564b3118..8523a69c 100644
--- a/questions/195_gradient-checkpointing/starter_code.py
+++ b/questions/195_gradient-checkpointing/starter_code.py
@@ -1,4 +1,15 @@
+import numpy as np
+
 # Implement your function below.
+def checkpoint_forward(funcs, input_arr):
+    """
+    Applies a list of functions in sequence to the input array, simulating gradient checkpointing by not storing intermediates.
+
+    Args:
+        funcs (list of callables): List of functions to apply in sequence.
+        input_arr (np.ndarray): Input numpy array.
 
-def your_function(...):
+    Returns:
+        np.ndarray: The output after applying all functions, same shape as output of last function.
+    """
     pass

From 2fb78032967e43d4778abade51071d912d2ae51e Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:18:45 +0200
Subject: [PATCH 06/12] add metadata

---
 questions/195_gradient-checkpointing/meta.json | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/questions/195_gradient-checkpointing/meta.json b/questions/195_gradient-checkpointing/meta.json
index 9db2e26a..ae18d2f2 100644
--- a/questions/195_gradient-checkpointing/meta.json
+++ b/questions/195_gradient-checkpointing/meta.json
@@ -1,12 +1,10 @@
-{
-  "id": "XXX",
-  "title": "TITLE GOES HERE",
+c
+  "id": "195",
+  "title": "Gradient Checkpointing",
   "difficulty": "medium",
   "category": "Machine Learning",
   "video": "",
   "likes": "0",
   "dislikes": "0",
-  "contributor": [],
-  "tinygrad_difficulty": "",
-  "pytorch_difficulty": ""
+  "contributor": []
 }

From 44c78d0f37eb6c4d29e6850e2082766866000b69 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:18:54 +0200
Subject: [PATCH 07/12] add example

---
 questions/195_gradient-checkpointing/example.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/questions/195_gradient-checkpointing/example.json b/questions/195_gradient-checkpointing/example.json
index 4e7fdd99..c14f56b8 100644
--- a/questions/195_gradient-checkpointing/example.json
+++ b/questions/195_gradient-checkpointing/example.json
@@ -1,5 +1,5 @@
 {
-  "input": "...",
-  "output": "...",
-  "reasoning": "Explain why the output follows from the input."
+  "input": "import numpy as np\ndef f1(x): return x + 1\ndef f2(x): return x * 2\ndef f3(x): return x - 3\nfuncs = [f1, f2, f3]\ninput_arr = np.array([1.0, 2.0])\noutput = checkpoint_forward(funcs, input_arr)\nprint(output)",
+  "output": "[1. 3.]",
+  "reasoning": "The input [1.0, 2.0] is passed through f1: [2.0, 3.0], then f2: [4.0, 6.0], then f3: [1.0, 3.0], so the final output is [1. 3.]."
 }

From 168b4bef84dc748ce11050b1f52daae2896c6a28 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:19:01 +0200
Subject: [PATCH 08/12] add tests

---
 .../195_gradient-checkpointing/tests.json | 20 +++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/questions/195_gradient-checkpointing/tests.json b/questions/195_gradient-checkpointing/tests.json
index e4e4b180..f459ffd7 100644
--- a/questions/195_gradient-checkpointing/tests.json
+++ b/questions/195_gradient-checkpointing/tests.json
@@ -1,6 +1,22 @@
 [
   {
-    "test": "print(your_function(...))",
-    "expected_output": "..."
+    "test": "import numpy as np\ndef f1(x): return x + 1\ndef f2(x): return x * 2\ndef f3(x): return x - 3\nfuncs = [f1, f2, f3]\ninput_arr = np.array([1.0, 2.0])\nprint(checkpoint_forward(funcs, input_arr))",
+    "expected_output": "[1. 3.]"
+  },
+  {
+    "test": "import numpy as np\ndef f1(x): return x * 0\ndef f2(x): return x + 10\nfuncs = [f1, f2]\ninput_arr = np.array([5.0, 7.0])\nprint(checkpoint_forward(funcs, input_arr))",
+    "expected_output": "[10. 10.]"
+  },
+  {
+    "test": "import numpy as np\ndef f1(x): return x / 2\ndef f2(x): return x ** 2\nfuncs = [f1, f2]\ninput_arr = np.array([4.0, 8.0])\nprint(checkpoint_forward(funcs, input_arr))",
+    "expected_output": "[ 4. 16.]"
+  },
+  {
+    "test": "import numpy as np\ndef f1(x): return x - 1\nfuncs = [f1]\ninput_arr = np.array([10.0, 20.0])\nprint(checkpoint_forward(funcs, input_arr))",
+    "expected_output": "[ 9. 19.]"
+  },
+  {
+    "test": "import numpy as np\nfuncs = []\ninput_arr = np.array([1.0, 2.0])\nprint(checkpoint_forward(funcs, input_arr))",
+    "expected_output": "[1. 2.]"
+  }
 ]

From 7932a40d2bf781c06c986bf48ca30f1e2fc9d421 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Tue, 30 Sep 2025 14:23:18 +0200
Subject: [PATCH 09/12] add contributor and change difficulty

---
 questions/195_gradient-checkpointing/meta.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/questions/195_gradient-checkpointing/meta.json b/questions/195_gradient-checkpointing/meta.json
index ae18d2f2..ead5b04c 100644
--- a/questions/195_gradient-checkpointing/meta.json
+++ b/questions/195_gradient-checkpointing/meta.json
@@ -1,10 +1,10 @@
-c
+{
   "id": "195",
   "title": "Gradient Checkpointing",
-  "difficulty": "medium",
+  "difficulty": "easy",
   "category": "Machine Learning",
   "video": "",
   "likes": "0",
   "dislikes": "0",
-  "contributor": []
+  "contributor": ["https://github.com/komaksym"]
 }

From 26c3b713b12cc44e4fae7088c620bf5bbd87ec43 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Thu, 16 Oct 2025 16:13:15 +0200
Subject: [PATCH 10/12] change question number

---
 questions/195_gradient-checkpointing/meta.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/questions/195_gradient-checkpointing/meta.json b/questions/195_gradient-checkpointing/meta.json
index ead5b04c..2ad9e6a2 100644
--- a/questions/195_gradient-checkpointing/meta.json
+++ b/questions/195_gradient-checkpointing/meta.json
@@ -1,5 +1,5 @@
 {
-  "id": "195",
+  "id": "188",
   "title": "Gradient Checkpointing",
   "difficulty": "easy",
   "category": "Machine Learning",
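The tests above only exercise the sequential forward pass. As a sketch of what "recomputed if needed" from the description could mean with real checkpoints, the helper below stores every second activation and rebuilds any other one from the nearest stored checkpoint; `make_activation_getter` and its `every` parameter are invented names for illustration and are not part of the graded interface:

```python
import numpy as np

def f1(x): return x + 1
def f2(x): return x * 2
def f3(x): return x - 3
funcs = [f1, f2, f3]

def make_activation_getter(funcs, x, every=2):
    """Store only every `every`-th activation; recompute the rest on demand."""
    stored = {0: x}                           # checkpoint 0: the input itself
    a = x
    for i, f in enumerate(funcs, start=1):
        a = f(a)
        if i % every == 0:
            stored[i] = a                     # keep a checkpoint every `every` layers
    def get(i):
        j = max(k for k in stored if k <= i)  # nearest checkpoint at or before i
        a = stored[j]
        for f in funcs[j:i]:                  # recompute the gap forward from it
            a = f(a)
        return a
    return get

get = make_activation_getter(funcs, np.array([1.0, 2.0]))
print(get(3))  # [1. 3.] -- the final output, matching the first test above
print(get(1))  # [2. 3.] -- not stored; recomputed from the input checkpoint
```

With `every=2` the dict holds two arrays instead of four, which is exactly the memory-for-compute trade the learn.md section describes.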
From cd72d9477479e5d1722305936d2e36c0dc0b4fa0 Mon Sep 17 00:00:00 2001
From: komaksym
Date: Fri, 17 Oct 2025 13:24:09 +0200
Subject: [PATCH 11/12] rename

---
 .../description.md                            | 0
 .../example.json                              | 0
 .../learn.md                                  | 0
 .../meta.json                                 | 0
 .../pytorch/solution.py                       | 0
 .../pytorch/starter_code.py                   | 0
 .../pytorch/tests.json                        | 0
 .../solution.py                               | 0
 .../starter_code.py                           | 0
 .../tests.json                                | 0
 .../tinygrad/solution.py                      | 0
 .../tinygrad/starter_code.py                  | 0
 .../tinygrad/tests.json                       | 0
 13 files changed, 0 insertions(+), 0 deletions(-)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/description.md (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/example.json (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/learn.md (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/meta.json (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/pytorch/solution.py (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/pytorch/starter_code.py (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/pytorch/tests.json (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/solution.py (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/starter_code.py (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/tests.json (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/tinygrad/solution.py (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/tinygrad/starter_code.py (100%)
 rename questions/{195_gradient-checkpointing => 188_gradient-checkpointing}/tinygrad/tests.json (100%)

diff --git a/questions/195_gradient-checkpointing/description.md b/questions/188_gradient-checkpointing/description.md
similarity index 100%
rename from questions/195_gradient-checkpointing/description.md
rename to questions/188_gradient-checkpointing/description.md
diff --git a/questions/195_gradient-checkpointing/example.json b/questions/188_gradient-checkpointing/example.json
similarity index 100%
rename from questions/195_gradient-checkpointing/example.json
rename to questions/188_gradient-checkpointing/example.json
diff --git a/questions/195_gradient-checkpointing/learn.md b/questions/188_gradient-checkpointing/learn.md
similarity index 100%
rename from questions/195_gradient-checkpointing/learn.md
rename to questions/188_gradient-checkpointing/learn.md
diff --git a/questions/195_gradient-checkpointing/meta.json b/questions/188_gradient-checkpointing/meta.json
similarity index 100%
rename from questions/195_gradient-checkpointing/meta.json
rename to questions/188_gradient-checkpointing/meta.json
diff --git a/questions/195_gradient-checkpointing/pytorch/solution.py b/questions/188_gradient-checkpointing/pytorch/solution.py
similarity index 100%
rename from questions/195_gradient-checkpointing/pytorch/solution.py
rename to questions/188_gradient-checkpointing/pytorch/solution.py
diff --git a/questions/195_gradient-checkpointing/pytorch/starter_code.py b/questions/188_gradient-checkpointing/pytorch/starter_code.py
similarity index 100%
rename from questions/195_gradient-checkpointing/pytorch/starter_code.py
rename to questions/188_gradient-checkpointing/pytorch/starter_code.py
diff --git a/questions/195_gradient-checkpointing/pytorch/tests.json b/questions/188_gradient-checkpointing/pytorch/tests.json
similarity index 100%
rename from questions/195_gradient-checkpointing/pytorch/tests.json
rename to questions/188_gradient-checkpointing/pytorch/tests.json
diff --git a/questions/195_gradient-checkpointing/solution.py b/questions/188_gradient-checkpointing/solution.py
similarity index 100%
rename from questions/195_gradient-checkpointing/solution.py
rename to questions/188_gradient-checkpointing/solution.py
diff --git a/questions/195_gradient-checkpointing/starter_code.py b/questions/188_gradient-checkpointing/starter_code.py
similarity index 100%
rename from questions/195_gradient-checkpointing/starter_code.py
rename to questions/188_gradient-checkpointing/starter_code.py
diff --git a/questions/195_gradient-checkpointing/tests.json b/questions/188_gradient-checkpointing/tests.json
similarity index 100%
rename from questions/195_gradient-checkpointing/tests.json
rename to questions/188_gradient-checkpointing/tests.json
diff --git a/questions/195_gradient-checkpointing/tinygrad/solution.py b/questions/188_gradient-checkpointing/tinygrad/solution.py
similarity index 100%
rename from questions/195_gradient-checkpointing/tinygrad/solution.py
rename to questions/188_gradient-checkpointing/tinygrad/solution.py
diff --git a/questions/195_gradient-checkpointing/tinygrad/starter_code.py b/questions/188_gradient-checkpointing/tinygrad/starter_code.py
similarity index 100%
rename from questions/195_gradient-checkpointing/tinygrad/starter_code.py
rename to questions/188_gradient-checkpointing/tinygrad/starter_code.py
diff --git a/questions/195_gradient-checkpointing/tinygrad/tests.json b/questions/188_gradient-checkpointing/tinygrad/tests.json
similarity index 100%
rename from questions/195_gradient-checkpointing/tinygrad/tests.json
rename to questions/188_gradient-checkpointing/tinygrad/tests.json

From 073ce12437dafd8ec4f852467aab74874862ff2b Mon Sep 17 00:00:00 2001
From: komaksym
Date: Fri, 17 Oct 2025 13:28:17 +0200
Subject: [PATCH 12/12] fix contributor

---
 questions/188_gradient-checkpointing/meta.json | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/questions/188_gradient-checkpointing/meta.json b/questions/188_gradient-checkpointing/meta.json
index 2ad9e6a2..f61261fd 100644
--- a/questions/188_gradient-checkpointing/meta.json
+++ b/questions/188_gradient-checkpointing/meta.json
@@ -6,5 +6,10 @@
   "video": "",
   "likes": "0",
   "dislikes": "0",
-  "contributor": ["https://github.com/komaksym"]
+  "contributor": [
+    {
+      "profile_link": "https://github.com/komaksym",
+      "name": "komaksym"
+    }
+  ]
 }
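The pytorch/ and tinygrad/ files in this series are still the placeholder stubs from the initial commit. If a pytorch variant were filled in, the library-level form of the same idea is `torch.utils.checkpoint.checkpoint`; the sketch below is one plausible shape (the `nn.Sequential` block and tensor sizes are arbitrary assumptions), not the question's official PyTorch solution:

```python
import torch
from torch.utils.checkpoint import checkpoint

# A small block standing in for a segment of layers whose inner
# activations we are willing to recompute instead of store.
net = torch.nn.Sequential(
    torch.nn.Linear(8, 8), torch.nn.ReLU(),
    torch.nn.Linear(8, 8), torch.nn.ReLU(),
)

x = torch.randn(4, 8, requires_grad=True)

# Activations inside `net` are discarded after the forward pass and
# recomputed during backward; use_reentrant=False is the variant
# recommended by recent PyTorch releases.
y = checkpoint(net, x, use_reentrant=False)
y.sum().backward()
print(x.grad.shape)  # torch.Size([4, 8])
```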