CUDA BF16 backwards
zasdfgbnm committed Dec 3, 2020
1 parent e7038a7 commit f1cb78b
Showing 1 changed file with 27 additions and 33 deletions.
aten/src/ATen/native/cuda/BinaryMiscBackwardOpsKernels.cu (60 changes: 27 additions & 33 deletions)
@@ -16,10 +16,8 @@ namespace native {
 
 void sigmoid_backward_kernel_cuda(TensorIterator& iter) {
   AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "sigmoid_backward_cuda", [&]() {
-    AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "sigmoid_backward_cuda", [&] {
-      gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
-        return a * (scalar_t(1.) - b) * b;
-      });
+    gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
+      return a * (scalar_t(1.) - b) * b;
     });
   });
 }
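For reference, the lambda kept by this hunk is the usual sigmoid gradient: with y = sigmoid(x) and upstream gradient g, dL/dx = g * (1 - y) * y, which is exactly a * (scalar_t(1.) - b) * b. Below is a minimal standalone CUDA sketch of the same elementwise computation for bfloat16, written against cuda_bf16.h rather than ATen's gpu_kernel/TensorIterator machinery; the kernel name and launch shape are illustrative only, not part of the commit.

// Illustrative standalone kernel (not the ATen implementation):
// grad_in = grad_out * (1 - y) * y elementwise, with y = sigmoid(x).
#include <cuda_bf16.h>

__global__ void sigmoid_backward_bf16(const __nv_bfloat16* grad_out,
                                      const __nv_bfloat16* y,
                                      __nv_bfloat16* grad_in,
                                      int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // Compute in float, the usual practice for reduced-precision storage types.
    float g = __bfloat162float(grad_out[i]);
    float s = __bfloat162float(y[i]);
    grad_in[i] = __float2bfloat16(g * (1.f - s) * s);
  }
}

// Hypothetical launch over n contiguous elements:
//   sigmoid_backward_bf16<<<(n + 255) / 256, 256>>>(grad_out, y, grad_in, n);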
@@ -31,31 +29,29 @@ void logit_backward_kernel_cuda(TensorIterator& iter, Scalar eps_scalar) {
       iter.dtype(),
       "logit_cuda",
       [&]() {
-        AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "logit_cuda", [&] {
-          using T_ACC = acc_type<scalar_t, true>;
-          const T_ACC eps = eps_scalar.to<T_ACC>();
-          if (eps < T_ACC(0)) {
-            gpu_kernel(
-                iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
-                  const T_ACC dy_acc = static_cast<T_ACC>(dy);
-                  const T_ACC x_acc = static_cast<T_ACC>(x);
-                  return (x_acc < T_ACC(0) || x_acc > T_ACC(1))
-                      ? std::numeric_limits<T_ACC>::quiet_NaN()
-                      : dy_acc / (x_acc * (T_ACC(1) - x_acc));
-                });
-          } else {
-            const T_ACC lo = eps;
-            const T_ACC hi = T_ACC(1) - eps;
-            gpu_kernel(
-                iter, [lo, hi] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
-                  const T_ACC dy_acc = static_cast<T_ACC>(dy);
-                  const T_ACC x_acc = static_cast<T_ACC>(x);
-                  return (x_acc < lo || x_acc > hi)
-                      ? T_ACC(0)
-                      : dy_acc / (x_acc * (T_ACC(1) - x_acc));
-                });
-          }
-        });
+        using T_ACC = acc_type<scalar_t, true>;
+        const T_ACC eps = eps_scalar.to<T_ACC>();
+        if (eps < T_ACC(0)) {
+          gpu_kernel(
+              iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
+                const T_ACC dy_acc = static_cast<T_ACC>(dy);
+                const T_ACC x_acc = static_cast<T_ACC>(x);
+                return (x_acc < T_ACC(0) || x_acc > T_ACC(1))
+                    ? std::numeric_limits<T_ACC>::quiet_NaN()
+                    : dy_acc / (x_acc * (T_ACC(1) - x_acc));
+              });
+        } else {
+          const T_ACC lo = eps;
+          const T_ACC hi = T_ACC(1) - eps;
+          gpu_kernel(
+              iter, [lo, hi] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
+                const T_ACC dy_acc = static_cast<T_ACC>(dy);
+                const T_ACC x_acc = static_cast<T_ACC>(x);
+                return (x_acc < lo || x_acc > hi)
+                    ? T_ACC(0)
+                    : dy_acc / (x_acc * (T_ACC(1) - x_acc));
+              });
+        }
       });
 }
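As context for the two branches above: logit(x) = log(x / (1 - x)), so d/dx logit(x) = 1 / (x * (1 - x)). With eps < 0 the forward does not clamp its input, and x outside [0, 1] yields NaN; with eps >= 0 the forward clamps x into [eps, 1 - eps], so the gradient is zeroed outside that range. A scalar restatement of the same branch logic, in plain single-precision C++ instead of the dispatched acc_type code, purely for illustration:

#include <limits>

// Reference-only scalar version of the logit backward branches (float in place of T_ACC).
float logit_backward_ref(float dy, float x, float eps) {
  if (eps < 0.f) {
    // No clamping in the forward: x outside [0, 1] gets NaN,
    // otherwise dy / (x * (1 - x)).
    return (x < 0.f || x > 1.f) ? std::numeric_limits<float>::quiet_NaN()
                                : dy / (x * (1.f - x));
  }
  const float lo = eps;        // the forward clamped x into [eps, 1 - eps]
  const float hi = 1.f - eps;
  return (x < lo || x > hi) ? 0.f : dy / (x * (1.f - x));
}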

@@ -68,10 +64,8 @@ void tanh_backward_kernel_cuda(TensorIterator& iter) {
     });
   } else {
     AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "tanh_backward_cuda", [&]() {
-      AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "tanh_backward_cuda", [&] {
-        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
-          return a * (scalar_t{1.} - b * b);
-        });
+      gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
+        return a * (scalar_t{1.} - b * b);
       });
     });
   }
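Same pattern for tanh: with y = tanh(x), d/dx tanh(x) = 1 - y^2, so a * (scalar_t{1.} - b * b) is g * (1 - y^2); the complex-type branch earlier in this function is not touched by this hunk. A standalone, illustrative elementwise kernel, templated over the storage type and accumulating in float (the usual practice for Half/BFloat16); names and launch configuration are hypothetical, not part of the commit:

// Illustrative only: grad_in = grad_out * (1 - y * y), with y = tanh(x).
// The implicit float conversions for __nv_bfloat16 require the CUDA 11+ headers.
#include <cuda_bf16.h>

template <typename T>
__global__ void tanh_backward_elementwise(const T* grad_out, const T* y, T* grad_in, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    float g = static_cast<float>(grad_out[i]);
    float t = static_cast<float>(y[i]);
    grad_in[i] = static_cast<T>(g * (1.f - t * t));
  }
}

// Hypothetical launch for n bfloat16 elements:
//   tanh_backward_elementwise<__nv_bfloat16><<<(n + 255) / 256, 256>>>(grad_out, y, grad_in, n);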