Refactor the codegen directory, pt 10 #2479

Closed
wants to merge 2 commits
14 changes: 8 additions & 6 deletions fbgemm_gpu/codegen/genscript/generate_backward_split.py
@@ -92,7 +92,9 @@ def generate_backward_split_gpu(**kwargs: Any) -> None:
)

# Generate optimizer kernel
- CodeTemplate.load("embedding_optimizer_split_device_kernel_template.cuh").write(
+ CodeTemplate.load(
+     "training/optimizer/embedding_optimizer_split_device_kernel_template.cuh"
+ ).write(
f"gen_embedding_optimizer_{optimizer}_split_device_kernel.cuh", **kwargs
)

@@ -107,11 +109,11 @@ def generate_backward_split_gpu(**kwargs: Any) -> None:
f"gen_embedding_backward_split_{optimizer}.cpp",
),
(
- "embedding_split_host_pt2_autograd_template.cpp",
+ "training/pt2/embedding_split_host_pt2_autograd_template.cpp",
f"gen_embedding_split_{optimizer}_pt2_autograd.cpp",
),
(
- "embedding_split_host_pt2_cuda_wrapper_template.cpp",
+ "training/pt2/embedding_split_host_pt2_cuda_wrapper_template.cpp",
f"gen_embedding_backward_split_{optimizer}_pt2_cuda_wrapper.cpp",
),
]:
@@ -122,7 +124,7 @@ def generate_backward_split_gpu(**kwargs: Any) -> None:
if kwargs.get("has_cpu_support") or kwargs.get("has_gpu_support"):
# Generates Python invoker for CUDA + CPU, and PT2
template = CodeTemplate.load(
- "split_embedding_codegen_lookup_invoker.template"
+ "training/python/split_embedding_codegen_lookup_invoker.template"
)
for filename in [
f"lookup_{optimizer}.py",
@@ -154,7 +156,7 @@ def generate_backward_split_cpu(**kwargs: Any) -> None:
f"gen_embedding_backward_split_{optimizer}_cpu.cpp",
),
(
- "embedding_split_host_pt2_cpu_wrapper_template.cpp",
+ "training/pt2/embedding_split_host_pt2_cpu_wrapper_template.cpp",
f"gen_embedding_backward_split_{optimizer}_pt2_cpu_wrapper.cpp",
),
]:
@@ -217,7 +219,7 @@ def generate_backward_indices() -> None:

@staticmethod
def generate_init_py() -> None:
- CodeTemplate.load("__init__.template").write("__init__.py")
+ CodeTemplate.load("training/python/__init__.template").write("__init__.py")

@staticmethod
def generate() -> None:
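The calls above load templates by a path relative to the codegen template root, so this change only moves templates into `training/optimizer`, `training/pt2`, and `training/python` subdirectories and updates the relative paths. As a rough illustration only — the real `CodeTemplate` lives in `fbgemm_gpu/codegen/genscript` and is not reproduced here — a minimal Jinja2-style loader with the same load/write shape might look like the sketch below (class name, template root, and rendering details are assumptions):

```python
# Hypothetical sketch, NOT the actual fbgemm_gpu CodeTemplate implementation.
# It only illustrates resolving paths like "training/optimizer/..." against a
# template root and rendering them into generated source files.
from pathlib import Path

import jinja2


class SimpleCodeTemplate:
    # Assumed layout: templates live under <repo>/fbgemm_gpu/codegen,
    # e.g. codegen/training/optimizer/*.cu, codegen/training/pt2/*.cpp.
    template_root = Path("fbgemm_gpu/codegen")
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(str(template_root)))

    def __init__(self, template: jinja2.Template) -> None:
        self.template = template

    @classmethod
    def load(cls, relative_path: str) -> "SimpleCodeTemplate":
        # e.g. load("training/pt2/embedding_split_host_pt2_cuda_wrapper_template.cpp")
        return cls(cls.env.get_template(relative_path))

    def write(self, output_filename: str, **kwargs) -> None:
        # Render the template with the generator's kwargs and emit the file.
        Path(output_filename).write_text(self.template.render(**kwargs))
```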
53 changes: 27 additions & 26 deletions fbgemm_gpu/codegen/genscript/generate_embedding_optimizer.py
@@ -40,32 +40,33 @@ def generate(**kwargs: Any) -> None:
)
kwargs["args"] = kwargs["args"].cuda

- # Generate CUDA host code
- CodeTemplate.load("embedding_optimizer_split_template.cu").write(
-     f"gen_embedding_optimizer_{optimizer}_split_cuda.cu", **kwargs
- )
-
- # Generate CUDA kernel code
- CodeTemplate.load("embedding_optimizer_split_kernel_template.cu").write(
-     f"gen_embedding_optimizer_{optimizer}_split_kernel.cu", **kwargs
- )
-
- # Generate host code
- CodeTemplate.load("embedding_optimizer_split_host_template.cpp").write(
-     f"gen_embedding_optimizer_{optimizer}_split.cpp", **kwargs
- )
-
- # Generates Python invoker for CUDA
- CodeTemplate.load("split_embedding_optimizer_codegen.template").write(
-     f"split_embedding_optimizer_{optimizer}.py",
-     is_fbcode=args.is_fbcode,
-     **kwargs,
- )
-
- # Generate optimizer kernel headers
- CodeTemplate.load("embedding_optimizer_split_device_kernel_template.cuh").write(
-     f"gen_embedding_optimizer_{optimizer}_split_device_kernel.cuh", **kwargs
- )
+ PREFIX = "training/optimizer"
+
+ for template_filepath, filename in [
+     ( # CUDA host code
+         f"{PREFIX}/embedding_optimizer_split_template.cu",
+         f"gen_embedding_optimizer_{optimizer}_split_cuda.cu",
+     ),
+     ( # CUDA kernel code
+         f"{PREFIX}/embedding_optimizer_split_kernel_template.cu",
+         f"gen_embedding_optimizer_{optimizer}_split_kernel.cu",
+     ),
+     ( # CPU code
+         f"{PREFIX}/embedding_optimizer_split_host_template.cpp",
+         f"gen_embedding_optimizer_{optimizer}_split.cpp",
+     ),
+     ( # Optimizer kernel headers
+         f"{PREFIX}/embedding_optimizer_split_device_kernel_template.cuh",
+         f"gen_embedding_optimizer_{optimizer}_split_device_kernel.cuh",
+     ),
+     ( # Python kernel invokers
+         "training/python/split_embedding_optimizer_codegen.template",
+         f"split_embedding_optimizer_{optimizer}.py",
+     ),
+ ]:
+     CodeTemplate.load(template_filepath).write(
+         filename, is_fbcode=args.is_fbcode, **kwargs
+     )


def main() -> None:
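The change above replaces five near-identical load/write calls with one table of (template_filepath, filename) pairs rendered in a single loop. A minimal standalone sketch of that table-driven pattern follows; the helper name and signature here are illustrative, not part of the fbgemm_gpu API:

```python
# Illustrative only: render every (template_path, output_name) pair with one
# shared set of keyword arguments, so adding a generated file is a one-line
# table change rather than another copy of the load/write call.
from typing import Any, Callable, Iterable, Tuple


def render_all(
    load: Callable[[str], Any],       # e.g. CodeTemplate.load
    jobs: Iterable[Tuple[str, str]],  # (template_filepath, output_filename)
    **kwargs: Any,
) -> None:
    for template_filepath, output_filename in jobs:
        load(template_filepath).write(output_filename, **kwargs)
```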
4 changes: 2 additions & 2 deletions fbgemm_gpu/codegen/genscript/generate_forward_split.py
@@ -57,7 +57,7 @@ def render_forward_templates(
def generate_pt2_wrappers() -> None:
# Generate PT2 forward wrapper (CUDA)
CodeTemplate.load(
- "embedding_split_host_pt2_cuda_wrapper_template.cpp",
+ "training/pt2/embedding_split_host_pt2_cuda_wrapper_template.cpp",
).write(
f"gen_embedding_forward_split_pt2_cuda_wrapper.cpp",
has_gpu_support=True,
@@ -67,7 +67,7 @@ def generate_pt2_wrappers() -> None:

# Generate PT2 forward wrapper (CPU)
CodeTemplate.load(
- "embedding_split_host_pt2_cpu_wrapper_template.cpp",
+ "training/pt2/embedding_split_host_pt2_cpu_wrapper_template.cpp",
).write(
f"gen_embedding_forward_split_pt2_cpu_wrapper.cpp",
has_cpu_support=True,