*_CHECK_THROW: don't shadow result
Tom94 committed Sep 5, 2023
1 parent 6f018a9 commit ead4158
Showing 6 changed files with 37 additions and 22 deletions.
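The substance of the commit: each of these error-checking macros used to declare a local status variable named result inside its do { ... } while(0) block, which shadows any result variable already in scope at the call site (and trips -Wshadow); the macros now call it _result instead. A minimal sketch of the pre-patch problem, using a hypothetical CUDA_CHECK_THROW_OLD and plain std::string instead of the repo's FILE_LINE/fmt::format helpers:

#include <cuda_runtime.h>
#include <stdexcept>
#include <string>

// Pre-patch flavor of the macro (hypothetical name): the local status
// variable is called `result`, exactly what this commit renames.
#define CUDA_CHECK_THROW_OLD(x) \
	do { \
		cudaError_t result = x; /* shadows any `result` at the call site */ \
		if (result != cudaSuccess) \
			throw std::runtime_error{std::string(#x " failed: ") + cudaGetErrorString(result)}; \
	} while(0)

void example(size_t bytes) {
	float* ptr = nullptr;
	cudaError_t result = cudaMalloc(&ptr, bytes); // the caller's own `result`
	if (result != cudaSuccess) {
		return; // e.g. fall back to a smaller allocation instead
	}

	// The macro expands to a do-while block that declares a second `result`,
	// shadowing the one above and triggering -Wshadow; with the rename to
	// `_result` in this commit, the two names no longer collide.
	CUDA_CHECK_THROW_OLD(cudaDeviceSynchronize());
	cudaFree(ptr);
}

Block-scope names like _result (underscore followed by a lowercase letter) are not reserved; only identifiers containing a double underscore, or starting with an underscore plus an uppercase letter, are reserved everywhere, so the new name is safe.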
4 changes: 4 additions & 0 deletions .editorconfig
@@ -9,3 +9,7 @@ trim_trailing_whitespace = true

[*.md]
trim_trailing_whitespace = false

+[*.clangd]
+indent_style = space
+indent_size = 2
5 changes: 3 additions & 2 deletions .gitignore
@@ -1,8 +1,9 @@
.DS_Store
*.egg-info
-__pycache__
*.o
-/*.jpg
+__pycache__
build*
dist
+/.cache
/.vscode
+/*.jpg
25 changes: 13 additions & 12 deletions include/tiny-cuda-nn/common_host.h
@@ -74,39 +74,40 @@ void set_verbose(bool verbose);
/// Checks the result of a cuXXXXXX call and throws an error on failure
#define CU_CHECK_THROW(x) \
	do { \
-		CUresult result = x; \
-		if (result != CUDA_SUCCESS) { \
+		CUresult _result = x; \
+		if (_result != CUDA_SUCCESS) { \
			const char *msg; \
-			cuGetErrorName(result, &msg); \
+			cuGetErrorName(_result, &msg); \
			throw std::runtime_error{fmt::format(FILE_LINE " " #x " failed: {}", msg)}; \
		} \
	} while(0)

/// Checks the result of a cuXXXXXX call and prints an error on failure
#define CU_CHECK_PRINT(x) \
	do { \
-		CUresult result = x; \
-		if (result != CUDA_SUCCESS) { \
+		CUresult _result = x; \
+		if (_result != CUDA_SUCCESS) { \
			const char *msg; \
-			cuGetErrorName(result, &msg); \
+			cuGetErrorName(_result, &msg); \
			log_error(FILE_LINE " " #x " failed: {}", msg); \
		} \
	} while(0)

/// Checks the result of a cudaXXXXXX call and throws an error on failure
#define CUDA_CHECK_THROW(x) \
	do { \
-		cudaError_t result = x; \
-		if (result != cudaSuccess) \
-			throw std::runtime_error{fmt::format(FILE_LINE " " #x " failed: {}", cudaGetErrorString(result))}; \
+		cudaError_t _result = x; \
+		if (_result != cudaSuccess) \
+			throw std::runtime_error{fmt::format(FILE_LINE " " #x " failed: {}", cudaGetErrorString(_result))}; \
	} while(0)

/// Checks the result of a cudaXXXXXX call and prints an error on failure
#define CUDA_CHECK_PRINT(x) \
	do { \
-		cudaError_t result = x; \
-		if (result != cudaSuccess) \
-			log_error(FILE_LINE " " #x " failed: {}", cudaGetErrorString(result)); \
+		cudaError_t _result = x; \
+		if (_result != cudaSuccess) \
+			log_error(FILE_LINE " " #x " failed: {}", cudaGetErrorString(_result)); \
	} while(0)

//////////////////////////////
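An aside on the shape all of these *_CHECK_* macros share: the do { ... } while(0) wrapper makes a multi-statement macro expand to a single statement, so it nests under an unbraced if/else and swallows the trailing semicolon cleanly. A small self-contained sketch (CHECK is a made-up stand-in, not a macro from this repo):

// Without the do/while(0) wrapper, a multi-statement macro used under an
// unbraced `if` would leave the `else` below dangling; with it, the expansion
// is one statement and the code parses as written.
#define CHECK(x) do { if (!(x)) { return -1; } } while(0)

int probe(int value) {
	if (value > 0)
		CHECK(value < 100); // expands to a single statement
	else
		return 0;
	return 1;
}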
6 changes: 3 additions & 3 deletions include/tiny-cuda-nn/cutlass_matmul.h
@@ -51,9 +51,9 @@ namespace tcnn {

#define CUTLASS_CHECK_THROW(x) \
	do { \
-		cutlass::Status error = x; \
-		if (error != cutlass::Status::kSuccess) \
-			throw std::runtime_error(std::string(FILE_LINE " " #x " failed with error ") + cutlassGetStatusString(error)); \
+		cutlass::Status _result = x; \
+		if (_result != cutlass::Status::kSuccess) \
+			throw std::runtime_error(std::string(FILE_LINE " " #x " failed with error ") + cutlassGetStatusString(_result)); \
	} while(0)

using SmArch = std::conditional_t<MIN_GPU_ARCH >= 80,
6 changes: 3 additions & 3 deletions include/tiny-cuda-nn/optimizers/shampoo.h
@@ -63,9 +63,9 @@ inline std::string cublasGetError(cublasStatus_t error) {

#define CUBLAS_CHECK_THROW(x) \
	do { \
-		cublasStatus_t result = x; \
-		if (result != CUBLAS_STATUS_SUCCESS) \
-			throw std::runtime_error(std::string("CUBLAS Error: " #x " failed with error ") + cublasGetError(result)); \
+		cublasStatus_t _result = x; \
+		if (_result != CUBLAS_STATUS_SUCCESS) \
+			throw std::runtime_error(std::string("CUBLAS Error: " #x " failed with error ") + cublasGetError(_result)); \
	} while(0)

template <typename T>
13 changes: 11 additions & 2 deletions include/tiny-cuda-nn/vec.h
@@ -508,6 +508,15 @@ DEF_NON_TEMPLATED_VECTOR_TYPES(u16vec, uint16_t)
DEF_NON_TEMPLATED_VECTOR_TYPES(hvec, __half)
#endif

+#if defined(__CUDACC__)
+inline TCNN_HOST_DEVICE float4 to_float4(const vec4& x) { return {x.x, x.y, x.z, x.w}; }
+inline TCNN_HOST_DEVICE float3 to_float3(const vec3& x) { return {x.x, x.y, x.z}; }
+inline TCNN_HOST_DEVICE float2 to_float2(const vec2& x) { return {x.x, x.y}; }
+inline TCNN_HOST_DEVICE vec4 to_vec4(const float4& x) { return {x.x, x.y, x.z, x.w}; }
+inline TCNN_HOST_DEVICE vec3 to_vec3(const float3& x) { return {x.x, x.y, x.z}; }
+inline TCNN_HOST_DEVICE vec2 to_vec2(const float2& x) { return {x.x, x.y}; }
+#endif

template <typename T, uint32_t N, uint32_t M>
struct tmat {
tmat() = default;
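A quick usage sketch for the conversion helpers added above (the kernel and the scalar multiply are illustrative assumptions, not code from this commit): they bridge tcnn's vec types and CUDA's built-in float2/float3/float4, e.g. so a kernel can keep its global-memory traffic in 16-byte float4 loads and stores while doing the arithmetic with tcnn's vector operators.

#include <tiny-cuda-nn/vec.h>

// Hypothetical kernel: scale RGBA samples. float4 is used at the memory
// boundary; tcnn::vec4 is used for the math in between.
__global__ void scale_rgba(const float4* __restrict__ in, float4* __restrict__ out, int n, float s) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n) return;
	tcnn::vec4 c = tcnn::to_vec4(in[i]); // float4 -> vec4 (component-wise copy)
	c *= s;                              // assumes vec4 supports scalar *=, as elsewhere in vec.h
	out[i] = tcnn::to_float4(c);         // vec4 -> float4 for the store
}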
@@ -1067,14 +1076,14 @@ struct tquat {
		} else if (m[0][0] > m[1][1] && m[0][0] > m[2][2]) {
			T S = sqrt((T)1 + m[0][0] - m[1][1] - m[2][2]) * (T)2; // S=4*x
			w = (m[1][2] - m[2][1]) / S;
-			x = 0.25 * S;
+			x = (T)0.25 * S;
			y = (m[1][0] + m[0][1]) / S;
			z = (m[2][0] + m[0][2]) / S;
		} else if (m[1][1] > m[2][2]) {
			T S = sqrt((T)1 + m[1][1] - m[0][0] - m[2][2]) * (T)2; // S=4*y
			w = (m[2][0] - m[0][2]) / S;
			x = (m[1][0] + m[0][1]) / S;
-			y = 0.25 * S;
+			y = (T)0.25 * S;
			z = (m[2][1] + m[1][2]) / S;
		} else {
			T S = sqrt((T)1 + m[2][2] - m[0][0] - m[1][1]) * (T)2; // S=4*z
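The other vec.h change, 0.25 * S becoming (T)0.25 * S in the matrix-to-quaternion branches, presumably keeps the arithmetic in T rather than promoting through double (0.25 is a double literal), which matters when T is float or __half. A stand-alone illustration of the promotion, not taken from the repo:

#include <type_traits>

int main() {
	float S = 2.0f;
	auto a = 0.25 * S;        // 0.25 is a double literal, so S is promoted: `a` is double
	auto b = (float)0.25 * S; // the cast keeps the multiply in single precision: `b` is float
	static_assert(std::is_same_v<decltype(a), double>, "promoted to double");
	static_assert(std::is_same_v<decltype(b), float>, "stays float");
	return (int)(a + b) == 1 ? 0 : 1;
}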
