2 changes: 1 addition & 1 deletion caffe2/core/hip/context_hip.h
@@ -175,7 +175,7 @@ class HIPContext final : public BaseContext {
     return hip_objects_.GetStream(gpu_id, stream_id);
   }
 
-  rocblas_handle rocblas_handle() {
+  rocblas_handle rocblashandle() {
    return hip_objects_.GetHandle(gpu_id_, stream_id_);
   }
 
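Note on the rename (background not shown in the diff): rocBLAS already uses rocblas_handle as the typedef name for its opaque handle, so a HIPContext accessor with the same name hides that typedef inside the class, and GCC, for example, rejects the declaration as changing the meaning of the name. A minimal sketch of the clash, with a hypothetical class name and the typedef approximated from the rocBLAS headers:

// Sketch only, not the actual Caffe2 code.
typedef struct _rocblas_handle* rocblas_handle;  // opaque handle, roughly as declared by rocBLAS

class HIPContextSketch {
 public:
  // Ill-formed: the member name rocblas_handle would hide the typedef that is
  // also used as its return type; GCC reports that the declaration "changes
  // meaning of 'rocblas_handle'".
  // rocblas_handle rocblas_handle() { return handle_; }

  // The renamed accessor used throughout this PR avoids the clash:
  rocblas_handle rocblashandle() {
    return handle_;
  }

 private:
  rocblas_handle handle_ = nullptr;
};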
28 changes: 14 additions & 14 deletions caffe2/utils/hip/math_hip.cc
@@ -759,7 +759,7 @@ void Gemm<float, HIPContext>(
       ? rocblas_operation_none
       : rocblas_operation_transpose;
   ROCBLAS_ENFORCE(rocblas_sgemm(
-      context->rocblas_handle(),
+      context->rocblashandle(),
       cuTransB,
       cuTransA,
       N,
@@ -803,7 +803,7 @@ void Gemm<at::Half, HIPContext>(
       : rocblas_operation_transpose;
   if (math_type == TensorProto_DataType_FLOAT) {
     ROCBLAS_CHECK(rocblas_sgemmEx(
-        context->rocblas_handle(),
+        context->rocblashandle(),
         cuTransB,
         cuTransA,
         N,
@@ -828,7 +828,7 @@ void Gemm<at::Half, HIPContext>(
 
     // call cublasHgemm
     ROCBLAS_CHECK(cublasHgemm(
-        context->rocblas_handle(),
+        context->rocblashandle(),
         cuTransB,
         cuTransA,
         N,
@@ -933,7 +933,7 @@ void GemmStridedBatched<float, HIPContext>(
       ? rocblas_operation_none
       : rocblas_operation_transpose;
   ROCBLAS_ENFORCE(rocblas_sgemm_strided_batched(
-      context->rocblas_handle(),
+      context->rocblashandle(),
       cuTransB,
       cuTransA,
       N,
@@ -1004,7 +1004,7 @@ void GemmStridedBatched<at::Half, HIPContext>(
   __half alpha_fp16 = at::Half(alpha);
   __half beta_fp16 = at::Half(beta);
   ROCBLAS_ENFORCE(cublasHgemmStridedBatched(
-      context->rocblas_handle(),
+      context->rocblashandle(),
       cuTransB,
       cuTransA,
       N,
@@ -1051,7 +1051,7 @@ void GemmEx<float, HIPContext>(
       ? rocblas_operation_none
       : rocblas_operation_transpose;
   ROCBLAS_ENFORCE(rocblas_sgemm(
-      context->rocblas_handle(),
+      context->rocblashandle(),
       cuTransB,
       cuTransA,
       N,
@@ -1083,7 +1083,7 @@ void Gemv<float, HIPContext>(
       ? rocblas_operation_transpose
       : rocblas_operation_none;
   ROCBLAS_ENFORCE(rocblas_sgemv(
-      context->rocblas_handle(),
+      context->rocblashandle(),
       cuTransA,
       N,
       M,
@@ -1170,7 +1170,7 @@ void Gemv<at::Half, HIPContext>(
 
   if (math_type == TensorProto_DataType_FLOAT) {
     ROCBLAS_CHECK(cublasSgemmEx(
-        context->rocblas_handle(),
+        context->rocblashandle(),
         cuTransA,
         rocblas_operation_none,
         m,
@@ -1192,7 +1192,7 @@ void Gemv<at::Half, HIPContext>(
     __half beta_fp16 = at::Half(beta);
 
     ROCBLAS_CHECK(cublasHgemm(
-        context->rocblas_handle(),
+        context->rocblashandle(),
         cuTransA,
         rocblas_operation_none,
         m,
@@ -1390,7 +1390,7 @@ void Dot<float, HIPContext>(
     HIPContext* context) {
   float result;
   ROCBLAS_ENFORCE(
-      rocblas_sdot(context->rocblas_handle(), n, a, 1, b, 1, &result));
+      rocblas_sdot(context->rocblashandle(), n, a, 1, b, 1, &result));
   context->CopyFromCPU<float>(1, &result, y);
 }
 
@@ -1406,7 +1406,7 @@ void Dot<at::Half, HIPContext>(
   at::Half result;
   // execute with 32-bit math
   ROCBLAS_CHECK(cublasDotEx(
-      context->rocblas_handle(),
+      context->rocblashandle(),
       n,
       a,
       CUDA_R_16F,
@@ -1879,7 +1879,7 @@ void Axpy<float, HIPContext>(
     float* Y,
     HIPContext* context) {
   ROCBLAS_ENFORCE(
-      rocblas_saxpy(context->rocblas_handle(), N, &alpha, X, 1, Y, 1));
+      rocblas_saxpy(context->rocblashandle(), N, &alpha, X, 1, Y, 1));
 }
 
 template <>
@@ -1891,7 +1891,7 @@ void Axpy<double, HIPContext>(
     HIPContext* context) {
   double alpha_d{alpha};
   ROCBLAS_ENFORCE(
-      rocblas_daxpy(context->rocblas_handle(), N, &alpha_d, X, 1, Y, 1));
+      rocblas_daxpy(context->rocblashandle(), N, &alpha_d, X, 1, Y, 1));
 }
 
 template <>
@@ -1904,7 +1904,7 @@ void Axpy<at::Half, HIPContext>(
   CAFFE_THROW("Unsupported math type");
 #if ROCBLAS_FP16
   ROCBLAS_CHECK(cublasAxpyEx(
-      context->rocblas_handle(),
+      context->rocblashandle(),
       N,
       &alpha,
       CUDA_R_16F,
2 changes: 1 addition & 1 deletion tools/amd_build/pyHIPIFY/cuda_to_hip_mappings.py
@@ -2230,7 +2230,7 @@
 ("HasCudaGPU" , ("HasHipGPU", API_CAFFE2)),
 ("__expf" , ("expf", API_CAFFE2)),
 ("CUBLAS_ENFORCE" , ("ROCBLAS_ENFORCE", API_CAFFE2)),
-("cublas_handle" , ("rocblas_handle", API_CAFFE2)),
+("cublas_handle" , ("rocblashandle", API_CAFFE2)),
 ("CURAND_ENFORCE" ,("HIPRAND_ENFORCE", API_CAFFE2)),
 ("curandGenerateUniform" , ("hiprandGenerateUniform", API_CAFFE2)),
 ("curand_generator" , ("hiprand_generator", API_CAFFE2)),