Skip to content

Commit

Permalink
Fix hip imports in fbgemm
Browse files Browse the repository at this point in the history
Summary: We have consolidated the HIP and CUDA dependencies. This place was missed and still has separate imports; this change fixes it.

Reviewed By: jianyuh

Differential Revision: D56543680

fbshipit-source-id: 74889fbf81f8f328daa3f1566faa8a6ff78a2b42
  • Loading branch information
xw285cornell authored and facebook-github-bot committed May 12, 2024
1 parent ee1e850 commit ef263d5
Show file tree
Hide file tree
Showing 2 changed files with 6 additions and 20 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -24,23 +24,13 @@ try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cuda_training")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu_training")
except Exception:
if torch.version.hip:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_hip")
else:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu")

if torch.version.hip:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils_hip")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_hip")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings_hip")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update_hip")
else:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update")

torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:embedding_inplace_update_cpu")

Expand Down
6 changes: 1 addition & 5 deletions fbgemm_gpu/fbgemm_gpu/quantize_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,7 @@
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
if torch.version.hip:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_hip")
else:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")

torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")

TORCH_HALF_MIN: float = torch.finfo(torch.float16).min
Expand Down

0 comments on commit ef263d5

Please sign in to comment.