Commit
[feat] add split_dim arg to reversible, remove retain_grad, add benchmark_reversible (#45)

* perf(revnet): remove/fuse cat-split
* style(reversible): run pre-commit, remove unused variables
* feat(benchmark): add basic revnet time benchmark
* style(benchmark): run black over revnet bench
* feat(reversible): make split dim a parameter
* revert(reversible): readd original code/remove split fusion
* feat(reversible): readd split dim arg
* perf(reversible): remove retain_graph. It is not needed, and keeping it makes execution up to 20% slower.
* style(reversible): remove one singular newline (seriously?)
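For context, the split_dim argument named in the title controls which dimension the reversible blocks split their input over. The diff for the reversible component itself is not part of this view, so the snippet below is only a minimal sketch of the usual RevNet coupling pattern with a configurable split dimension; the class name and structure are illustrative, not the actual xformers API.

import torch


class ToyReversibleBlock(torch.nn.Module):
    """Illustrative RevNet-style coupling block; not the xformers implementation."""

    def __init__(self, f: torch.nn.Module, g: torch.nn.Module, split_dim: int = -1):
        super().__init__()
        self.f = f
        self.g = g
        self.split_dim = split_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Split the input into two streams along the configurable dimension.
        x1, x2 = torch.chunk(x, 2, dim=self.split_dim)
        y1 = x1 + self.f(x2)
        y2 = x2 + self.g(y1)
        # Concatenate back along the same dimension, so the output keeps the input shape.
        return torch.cat([y1, y2], dim=self.split_dim)

With f = g = torch.nn.Linear(K, K), such a block maps a (1, B, 2 * K) tensor to a tensor of the same shape, which is the setup the benchmark below exercises.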
Showing 2 changed files with 91 additions and 7 deletions.
@@ -0,0 +1,83 @@
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.


from typing import Any, Dict

import torch
import triton

from xformers.benchmarks.utils import TestCase, pretty_plot, pretty_print
from xformers.components.reversible import ReversibleSequence

SHAPES = [(16384, 32), (2048, 256), (128, 4096)]

DEPTH = [4, 32, 256]


def bench_revnet(backward: bool):
    device = torch.device("cuda")
    bw = "+bw" if backward else ""

    for dtype in [torch.float16, torch.float32]:
        results: Dict[str, Any] = {}

        for B, K in SHAPES:
            for depth in DEPTH:
                f = torch.nn.Linear(K, K).to(device=device, dtype=dtype)
                g = torch.nn.Linear(K, K).to(device=device, dtype=dtype)
                revseq = ReversibleSequence(
                    torch.nn.ModuleList([torch.nn.ModuleList([f, g])] * depth)
                )
                revseq = revseq.to(device=device, dtype=dtype)

                a = torch.rand(
                    1, B, K, device=device, dtype=dtype, requires_grad=backward
                )
                b = torch.rand(
                    1, B, K * 2, device=device, dtype=dtype, requires_grad=backward
                )

                def normal_step():
                    y = a
                    for _ in range(depth):
                        y = y + f(y)
                        y = y + g(y)
                    if backward:
                        torch.norm(y).backward()
                    return y

                def reversible_step():
                    y = revseq(b)
                    if backward:
                        torch.norm(y).backward()
                    return y

                for testcase in [
                    TestCase(normal_step, f"residual - fw{bw}"),
                    TestCase(reversible_step, f"reversible - fw{bw}"),
                ]:
                    time = triton.testing.do_bench(testcase.function)[0]
                    key = f"Batch={B}, Features={K}, Depth={depth}"
                    if key not in results:
                        results[key] = {}

                    results[key][testcase.name] = f"{time:.2f}"

        pretty_print(
            results,
            title=f"\n --- Type: {dtype} --- ",
            units="runtime in ms, lower is better",
        )
        pretty_plot(
            results,
            title=f"RevNet-FW{bw}-{dtype}",
            units="runtime in ms, lower is better",
            dash_key="pytorch",
        )


for bw in [False, True]:
    bench_revnet(bw)
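A note on the two input shapes above: the residual baseline runs on a (1, B, K) tensor, while the reversible sequence is fed (1, B, K * 2), because a RevNet-style sequence splits its input into two K-wide streams and concatenates them again on output; the split_dim argument added by this commit controls which dimension that chunk/cat happens over. A minimal shape check, assuming the split happens along the last dimension by default:

import torch

B, K = 2048, 256
b = torch.rand(1, B, K * 2)
# The reversible path works on two K-wide halves of the input...
x1, x2 = torch.chunk(b, 2, dim=-1)
assert x1.shape == x2.shape == (1, B, K)
# ...and its output has the same shape as the input after re-concatenation.
assert torch.cat([x1, x2], dim=-1).shape == b.shape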