
Commit 48a77cc

danthe3rd authored and xFormers Bot committed
Move autogenerated files to 'autogen' folder
ghstack-source-id: e8fd348
Pull Request resolved: https://github.com/fairinternal/xformers/pull/456
__original_commit__ = fairinternal/xformers@dfbacbc69253f36c3cf3a13df9b2eb66348447c3
1 parent 82d5881 commit 48a77cc
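
This commit moves the generated CUTLASS FMHA kernel files out of fmha/kernels/ into a dedicated fmha/autogen/ folder, with the generated headers at the top level and the per-kernel instantiation .cu files under autogen/impl/. A rough sketch of the resulting layout, reconstructed from the renames below (only a few of the 53 touched files are shown):

    xformers/csrc/attention/cuda/fmha/
        attention_backward_generic.cu
        attention_forward_generic.cu
        kernel_backward.h
        kernel_forward.h
        autogen/
            cutlassB.h
            cutlassF.h
            impl/
                cutlassB_bf16_aligned_k128.cu
                cutlassB_bf16_aligned_k128_dropout.cu
                cutlassB_bf16_aligned_k32.cu
                ...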

File tree: 53 files changed (+110, -156 lines)


.github/workflows/linters_reusable.yml

Lines changed: 1 addition & 1 deletion
@@ -46,4 +46,4 @@ jobs:
 clang-format --version

 # apply to our files - excluding autogenerated files
-./.circleci/run-clang-format.py -e "*fmha/kernels" -r xformers/csrc
+./.circleci/run-clang-format.py -e "*fmha/autogen" -r xformers/csrc

xformers/csrc/attention/cuda/fmha/attention_backward_generic.cu

Lines changed: 1 addition & 1 deletion
@@ -10,9 +10,9 @@
 #include <torch/library.h>
 #include "ATen/ops/empty_like.h"

+#include "autogen/cutlassB.h"
 #include "gemm_kernel_utils.h"
 #include "kernel_backward.h"
-#include "kernels/cutlassB.h"
 #include "pytorch_utils.h"

 namespace {

xformers/csrc/attention/cuda/fmha/attention_forward_generic.cu

Lines changed: 1 addition & 1 deletion
@@ -12,8 +12,8 @@
 #include <torch/library.h>
 #include <ATen/cuda/CUDAGraphsUtils.cuh>

+#include "autogen/cutlassF.h"
 #include "kernel_forward.h"
-#include "kernels/cutlassF.h"
 #include "pytorch_utils.h"

 namespace {

xformers/csrc/attention/cuda/fmha/kernels/cutlassB.h renamed to xformers/csrc/attention/cuda/fmha/autogen/cutlassB.h

Lines changed: 1 addition & 2 deletions
@@ -1,8 +1,7 @@
 // This file is auto-generated. See "generate_kernels.py"
-#include "../kernel_backward.h"
-
 #pragma once
 #ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
+#include "../kernel_backward.h"
 // ======== f16 / sm50 ========
 __global__ void __launch_bounds__(
 AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, false, false, 64, 64, 32>::kNumThreads,
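
For orientation, here is how the top of the renamed header reads after this hunk, reconstructed from the diff above. The #include now sits inside the XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD guard, so builds that define that macro no longer pull in kernel_backward.h through this header:

    // This file is auto-generated. See "generate_kernels.py"
    #pragma once
    #ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
    #include "../kernel_backward.h"  // only included when backward kernels are enabled
    // ======== f16 / sm50 ========
    // ... generated kernel declarations follow ...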

xformers/csrc/attention/cuda/fmha/kernels/cutlassF.h renamed to xformers/csrc/attention/cuda/fmha/autogen/cutlassF.h

Lines changed: 1 addition & 2 deletions
@@ -1,8 +1,7 @@
 // This file is auto-generated. See "generate_kernels.py"
-#include "../kernel_forward.h"
-
 #pragma once
 #ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_FORWARD
+#include "../kernel_forward.h"
 // ======== bf16 / sm80 ========
 __global__ void __launch_bounds__(
 AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, true, true, true>::kNumThreads,

xformers/csrc/attention/cuda/fmha/kernels/cutlassB_bf16_aligned_k128.cu renamed to xformers/csrc/attention/cuda/fmha/autogen/impl/cutlassB_bf16_aligned_k128.cu

Lines changed: 2 additions & 3 deletions
@@ -1,7 +1,6 @@
-#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
 // This file is auto-generated. See "generate_kernels.py"
-#include "../kernel_backward.h"
-
+#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
+#include "../../kernel_backward.h"
 __global__ void __launch_bounds__(
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::kNumThreads,
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 128, 128>::kMinBlocksPerSm)
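
The renamed instantiation files all follow this same pattern; reconstructed from the hunk above, the top of each now reads roughly as sketched below. Because these files moved one directory deeper (kernels/ to autogen/impl/), the relative include gains an extra ../:

    // This file is auto-generated. See "generate_kernels.py"
    #ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
    #include "../../kernel_backward.h"  // two levels up now that the file lives in autogen/impl/
    // ... __global__ kernel instantiation follows ...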

xformers/csrc/attention/cuda/fmha/kernels/cutlassB_bf16_aligned_k128_dropout.cu renamed to xformers/csrc/attention/cuda/fmha/autogen/impl/cutlassB_bf16_aligned_k128_dropout.cu

Lines changed: 2 additions & 3 deletions
@@ -1,7 +1,6 @@
-#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
 // This file is auto-generated. See "generate_kernels.py"
-#include "../kernel_backward.h"
-
+#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
+#include "../../kernel_backward.h"
 __global__ void __launch_bounds__(
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)

xformers/csrc/attention/cuda/fmha/kernels/cutlassB_bf16_aligned_k32.cu renamed to xformers/csrc/attention/cuda/fmha/autogen/impl/cutlassB_bf16_aligned_k32.cu

Lines changed: 2 additions & 3 deletions
@@ -1,7 +1,6 @@
-#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
 // This file is auto-generated. See "generate_kernels.py"
-#include "../kernel_backward.h"
-
+#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
+#include "../../kernel_backward.h"
 __global__ void __launch_bounds__(
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kNumThreads,
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kMinBlocksPerSm)

xformers/csrc/attention/cuda/fmha/kernels/cutlassB_bf16_aligned_k32_dropout.cu renamed to xformers/csrc/attention/cuda/fmha/autogen/impl/cutlassB_bf16_aligned_k32_dropout.cu

Lines changed: 2 additions & 3 deletions
@@ -1,7 +1,6 @@
-#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
 // This file is auto-generated. See "generate_kernels.py"
-#include "../kernel_backward.h"
-
+#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
+#include "../../kernel_backward.h"
 __global__ void __launch_bounds__(
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kNumThreads,
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kMinBlocksPerSm)

xformers/csrc/attention/cuda/fmha/kernels/cutlassB_bf16_aligned_k64.cu renamed to xformers/csrc/attention/cuda/fmha/autogen/impl/cutlassB_bf16_aligned_k64.cu

Lines changed: 2 additions & 3 deletions
@@ -1,7 +1,6 @@
-#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
 // This file is auto-generated. See "generate_kernels.py"
-#include "../kernel_backward.h"
-
+#ifndef XFORMERS_MEM_EFF_ATTENTION_DISABLE_BACKWARD
+#include "../../kernel_backward.h"
 __global__ void __launch_bounds__(
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kNumThreads,
 AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
