Skip to content

Commit

Permalink
add ifdef to make it compatible with QNN older versions
Browse files Browse the repository at this point in the history
  • Loading branch information
HectorSVC committed May 1, 2024
1 parent 832ed76 commit e256485
Showing 1 changed file with 132 additions and 44 deletions.
176 changes: 132 additions & 44 deletions onnxruntime/core/providers/qnn/builder/qnn_def.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,39 +23,55 @@ size_t memscpy(void* dst, size_t dst_size, const void* src, size_t copy_size) {
// Sets the tensor type field, dispatching on the Qnn_Tensor_t struct version.
// QNN_TENSOR_V2_INIT is only defined by newer QNN SDK headers; the #ifdef is
// placed inside the brace chain so every `else` keeps braces on both sides
// (cpplint readability/braces) whether or not the v2 branch is compiled in.
void SetQnnTensorType(Qnn_Tensor_t& qnn_tensor, Qnn_TensorType_t tensor_type) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    qnn_tensor.v1.type = tensor_type;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    qnn_tensor.v2.type = tensor_type;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Sets the tensor name pointer (not copied) for the active struct version.
// NOTE: caller must keep `name` alive for the lifetime of the tensor.
// #ifdef sits inside the brace chain so the else keeps both braces under
// either preprocessor outcome (fixes cpplint readability/braces).
void SetQnnTensorName(Qnn_Tensor_t& qnn_tensor, const char* name) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    qnn_tensor.v1.name = name;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    qnn_tensor.v2.name = name;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Sets the tensor data-format field for the active struct version.
// v2 branch only compiles on SDKs that define QNN_TENSOR_V2_INIT; the guard
// is placed inside the braces to keep `else` well-formed for cpplint.
void SetQnnTensorDataFormat(Qnn_Tensor_t& qnn_tensor, Qnn_TensorDataFormat_t data_format) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    qnn_tensor.v1.dataFormat = data_format;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    qnn_tensor.v2.dataFormat = data_format;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Sets the tensor element data type for the active struct version.
// #ifdef inside the brace chain keeps both preprocessed variants lint-clean.
void SetQnnTensorDataType(Qnn_Tensor_t& qnn_tensor, Qnn_DataType_t data_type) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    qnn_tensor.v1.dataType = data_type;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    qnn_tensor.v2.dataType = data_type;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}
Expand All @@ -64,20 +80,28 @@ void SetQnnTensorDim(Qnn_Tensor_t& qnn_tensor, const std::vector<uint32_t>& dime
if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
qnn_tensor.v1.rank = static_cast<uint32_t>(dimensions.size());
qnn_tensor.v1.dimensions = const_cast<uint32_t*>(dimensions.data());
} else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
}
#ifdef QNN_TENSOR_V2_INIT
else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {

Check warning on line 85 in onnxruntime/core/providers/qnn/builder/qnn_def.cc

View workflow job for this annotation

GitHub Actions / Lint C++

[cpplint] reported by reviewdog 🐶 If an else has a brace on one side, it should have it on both [readability/braces] [5] Raw Output: onnxruntime/core/providers/qnn/builder/qnn_def.cc:85: If an else has a brace on one side, it should have it on both [readability/braces] [5]
qnn_tensor.v2.rank = static_cast<uint32_t>(dimensions.size());
qnn_tensor.v2.dimensions = const_cast<uint32_t*>(dimensions.data());
} else {
}
#endif // QNN_TENSOR_V2_INIT
else {

Check warning on line 90 in onnxruntime/core/providers/qnn/builder/qnn_def.cc

View workflow job for this annotation

GitHub Actions / Lint C++

[cpplint] reported by reviewdog 🐶 If an else has a brace on one side, it should have it on both [readability/braces] [5] Raw Output: onnxruntime/core/providers/qnn/builder/qnn_def.cc:90: If an else has a brace on one side, it should have it on both [readability/braces] [5]
ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
}
}

// Sets the tensor memory type (client buffer vs. mem handle) for the
// active struct version; throws on unknown versions.
void SetQnnTensorMemType(Qnn_Tensor_t& qnn_tensor, Qnn_TensorMemType_t mem_type) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    qnn_tensor.v1.memType = mem_type;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    qnn_tensor.v2.memType = mem_type;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}
Expand All @@ -87,11 +111,15 @@ void SetQnnTensorClientBuf(Qnn_Tensor_t& qnn_tensor, const std::vector<uint8_t>&
auto size = client_buf.size() * sizeof(uint8_t);
qnn_tensor.v1.clientBuf.data = const_cast<void*>(static_cast<const void*>(client_buf.data()));
qnn_tensor.v1.clientBuf.dataSize = static_cast<uint32_t>(size);
} else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
}
#ifdef QNN_TENSOR_V2_INIT
else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
auto size = client_buf.size() * sizeof(uint8_t);
qnn_tensor.v2.clientBuf.data = const_cast<void*>(static_cast<const void*>(client_buf.data()));
qnn_tensor.v2.clientBuf.dataSize = static_cast<uint32_t>(size);
} else {
}
#endif // QNN_TENSOR_V2_INIT
else {
ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
}
}
Expand All @@ -101,11 +129,15 @@ void SetQnnTensorClientBuf(Qnn_Tensor_t& qnn_tensor, const std::vector<uint32_t>
auto size = client_buf.size() * sizeof(uint32_t);
qnn_tensor.v1.clientBuf.data = const_cast<void*>(static_cast<const void*>(client_buf.data()));
qnn_tensor.v1.clientBuf.dataSize = static_cast<uint32_t>(size);
} else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
}
#ifdef QNN_TENSOR_V2_INIT
else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
auto size = client_buf.size() * sizeof(uint32_t);
qnn_tensor.v2.clientBuf.data = const_cast<void*>(static_cast<const void*>(client_buf.data()));
qnn_tensor.v2.clientBuf.dataSize = static_cast<uint32_t>(size);
} else {
}
#endif // QNN_TENSOR_V2_INIT
else {
ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
}
}
Expand All @@ -114,140 +146,196 @@ void SetQnnTensorClientBuf(Qnn_Tensor_t& qnn_tensor, void* buf_data, uint32_t bu
if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
qnn_tensor.v1.clientBuf.data = buf_data;
qnn_tensor.v1.clientBuf.dataSize = buf_size;
} else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
}
#ifdef QNN_TENSOR_V2_INIT
else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
qnn_tensor.v2.clientBuf.data = buf_data;
qnn_tensor.v2.clientBuf.dataSize = buf_size;
} else {
}
#endif // QNN_TENSOR_V2_INIT
else {
ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
}
}

// Updates only clientBuf.dataSize (bytes) for the active struct version.
void SetQnnTensorClientBufSize(Qnn_Tensor_t& qnn_tensor, uint32_t client_buf_size) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    qnn_tensor.v1.clientBuf.dataSize = client_buf_size;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    qnn_tensor.v2.clientBuf.dataSize = client_buf_size;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Updates only clientBuf.data for the active struct version; the existing
// dataSize is left untouched, so the caller must ensure it still matches.
void SetQnnTensorClientBufData(Qnn_Tensor_t& qnn_tensor, void* client_buf_data) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    qnn_tensor.v1.clientBuf.data = client_buf_data;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    qnn_tensor.v2.clientBuf.data = client_buf_data;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

void SetQnnTensorQParams(Qnn_Tensor_t& qnn_tensor, const Qnn_QuantizeParams_t& quantize_params) {
if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
qnn_tensor.v1.quantizeParams = quantize_params;
} else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
}
#ifdef QNN_TENSOR_V2_INIT
else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
qnn_tensor.v2.quantizeParams = quantize_params;
} else {
}
#endif // QNN_TENSOR_V2_INIT
else {
ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
}
}

// Returns the tensor id for the active struct version; throws on unknown
// versions (ORT_THROW does not return, so no trailing return is needed).
uint32_t GetQnnTensorID(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.id;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.id;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns the tensor type for the active struct version; throws otherwise.
Qnn_TensorType_t GetQnnTensorType(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.type;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.type;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns the tensor's name pointer for the active struct version.
const char* GetQnnTensorName(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.name;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.name;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns the tensor data format for the active struct version.
Qnn_TensorDataFormat_t GetQnnTensorDataFormat(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.dataFormat;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.dataFormat;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns the element data type for the active struct version.
Qnn_DataType_t GetQnnTensorDataType(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.dataType;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.dataType;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns the memory type for the active struct version.
Qnn_TensorMemType_t GetQnnTensorMemType(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.memType;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.memType;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns the tensor rank (number of dimensions) for the active version.
uint32_t GetQnnTensorRank(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.rank;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.rank;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns the dimensions array pointer (owned elsewhere) for the active
// struct version; length is GetQnnTensorRank(qnn_tensor).
uint32_t* GetQnnTensorDims(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.dimensions;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.dimensions;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns a reference to the tensor's client buffer descriptor for the
// active struct version.
const Qnn_ClientBuffer_t& GetQnnTensorClientBuf(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.clientBuf;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.clientBuf;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}

// Returns a reference to the tensor's quantization parameters for the
// active struct version.
const Qnn_QuantizeParams_t& GetQnnTensorQParams(const Qnn_Tensor_t& qnn_tensor) {
  if (QNN_TENSOR_VERSION_1 == qnn_tensor.version) {
    return qnn_tensor.v1.quantizeParams;
#ifdef QNN_TENSOR_V2_INIT
  } else if (QNN_TENSOR_VERSION_2 == qnn_tensor.version) {
    return qnn_tensor.v2.quantizeParams;
#endif  // QNN_TENSOR_V2_INIT
  } else {
    ORT_THROW("QNN tensor version not supported, QNN tensor version: ", qnn_tensor.version);
  }
}
Expand Down

0 comments on commit e256485

Please sign in to comment.