
Eliminate compilation warnings

Po Yen Chen committed Mar 10, 2019
1 parent 368d26a commit 730ce862e05feb3f9f906dafda2a14b49e0057e5
@@ -40,7 +40,7 @@ class Glob
Result match(PathList& pPathnames);

/// return the pattern used to match
-const Path& pattern() const { return m_Pattern.native(); }
+Path pattern() const { return m_Pattern.native(); }

private:
friend class GlobImpl;
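The Glob hunk fixes a returning-reference-to-temporary warning: assuming Path::native() returns the underlying native string rather than a Path, the expression m_Pattern.native() materializes a temporary Path, and a const Path& to it dangles as soon as pattern() returns. Returning by value is the safe form. A minimal sketch of the pattern, with hypothetical Path and native() definitions standing in for the real ones:

#include <string>
#include <utility>

struct Path {
  Path(std::string s) : m_str(std::move(s)) {}
  const std::string& native() const { return m_str; }
  std::string m_str;
};

struct Glob {
  // Before: the string from native() converts to a temporary Path, and the
  // returned reference dangles once the call ends:
  //   const Path& pattern() const { return m_Pattern.native(); }

  // After: return by value; the converted temporary is copied/moved out safely.
  Path pattern() const { return m_Pattern.native(); }

  Path m_Pattern;
};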
@@ -122,17 +122,21 @@ void ONNC_RUNTIME_conv_float(
int32_t ndim = input_X_ndim;

if (ndim == 4) {
+typedef const float (*input_tensor_type )[input_X_dims[1] ][input_X_dims[2] ][input_X_dims[3] ];
+typedef const float (*weight_tensor_type)[input_W_dims[1] ][input_W_dims[2] ][input_W_dims[3] ];
+typedef float (*output_tensor_type)[output_Y_dims[1]][output_Y_dims[2]][output_Y_dims[3]];
+
ONNC_RUNTIME_conv_2d_float(onnc_runtime_context,
input_X_dims[0], input_X_dims[1],
input_X_dims[2], input_X_dims[3],
-input_X,
+(input_tensor_type)input_X,
input_W_dims[0], input_W_dims[1],
input_W_dims[2], input_W_dims[3],
-input_W,
+(weight_tensor_type)input_W,
input_B,
output_Y_dims[0], output_Y_dims[1],
output_Y_dims[2], output_Y_dims[3],
-output_Y,
+(output_tensor_type)output_Y,
0,
dilations,
group,
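The conv wrapper introduces pointer-to-array typedefs and casts its flat float* tensors to them, presumably because ONNC_RUNTIME_conv_2d_float declares those parameters as pointers to variably modified (C99 VLA-typed) arrays, so passing a plain float* draws an incompatible-pointer-type warning. The cast only changes the static view of the buffer; there is no runtime cost. A sketch of the idea with fixed dimensions (C++ has no variably modified types, so compile-time constants stand in for input_X_dims[1..3]):

#include <cstdio>

// Hypothetical fixed shape standing in for the runtime dims above.
typedef const float (*input_tensor_type)[3][4][4];

// A consumer that wants the 4-D view, indexable as x[n][c][h][w].
static float first_element(input_tensor_type x) { return x[0][0][0][0]; }

int main() {
  float flat[2 * 3 * 4 * 4] = {42.0f};
  // Passing `flat` directly would not match the pointer-to-array parameter;
  // the cast makes the multidimensional view explicit.
  std::printf("%g\n", first_element((input_tensor_type)flat));
  return 0;
}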
@@ -6,7 +6,7 @@
static void forLoop(
int32_t axisIndex, int32_t ndim
, const int32_t * restrict lowerBound, const int32_t * restrict uperBound
-, float * input, int32_t inputIndex
+, const float * input, int32_t inputIndex
, float * output, int32_t* outputIndex
,int32_t * axisDistance
){
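This hunk is a qualifier fix rather than a shape fix: forLoop only reads input, and its callers presumably now hold const float* tensors, so a non-const parameter would discard the qualifier at the call site (-Wdiscarded-qualifiers in C). Marking the parameter const also documents that the function never writes through it. A small sketch of the contract:

// Sketch: a read-only walk over the input; const on the parameter lets
// const buffers be passed without dropping a qualifier.
static float sum(const float* input, int n) {
  float s = 0.0f;
  for (int i = 0; i < n; ++i)
    s += input[i];  // reads only; writing through `input` would not compile
  return s;
}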
@@ -4,11 +4,11 @@
#include <stdbool.h>

static void forLoop(
-float * restrict input_input, int32_t input_index,
+const float * restrict input_input, int32_t input_index,
int32_t input_input_ndim, const int32_t * restrict input_input_dims,
int32_t dimIndex, int32_t * restrict axisDistance,
int32_t axis, int32_t axisLower, int32_t axisHigher,
-float ** restrict output, int32_t output_row, int32_t * restrict output_col
+float * const * restrict output, int32_t output_row, int32_t * restrict output_col
){
if(dimIndex == input_input_ndim){
int32_t col = *output_col;
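The float * const * restrict change is subtler: output is a table of row pointers. The const on the middle level promises that the function rewrites the floats but never reseats the row pointers themselves, which resolves the qualifier mismatch the compiler flagged and documents the contract. A sketch (restrict is a C keyword and is omitted here):

// `rows` is a read-only array of writable row pointers.
static void zero_rows(float* const* rows, int nrows, int ncols) {
  for (int r = 0; r < nrows; ++r)
    for (int c = 0; c < ncols; ++c)
      rows[r][c] = 0.0f;  // writing the pointed-to floats is allowed
  // rows[0] = nullptr;   // error: the pointers themselves are const
}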
@@ -1046,7 +1046,7 @@ void CodeEmitVisitor::visit(const Softmax& pOp)
op_buf->src_data.channel = input_input_dims[1];
op_buf->src_data.line_stride = iinfo.stride_line;
op_buf->src_data.surf_stride = iinfo.stride_surface;
NVDLA_DBG("softmax in(sz:%d w:%d h:%d c:%d ls:%d ss%d)\n", input_mle.size, input_input_dims[3], input_input_dims[2],
NVDLA_DBG("softmax in(sz:%lu w:%d h:%d c:%d ls:%d ss%d)\n", input_mle.size, input_input_dims[3], input_input_dims[2],
input_input_dims[1], iinfo.stride_line, iinfo.stride_surface);

op_buf->dst_data.addressIndex = issueEmuAddr(output_mid);
@@ -1057,7 +1057,7 @@ void CodeEmitVisitor::visit(const Softmax& pOp)
op_buf->dst_data.channel = output_output_dims[1];
op_buf->dst_data.line_stride = oinfo.stride_line;
op_buf->dst_data.surf_stride = oinfo.stride_surface;
NVDLA_DBG("softmax out(sz:%d w:%d h:%d c:%d ls:%d ss%d)\n", output_mle.size, output_output_dims[3],
NVDLA_DBG("softmax out(sz:%lu w:%d h:%d c:%d ls:%d ss%d)\n", output_mle.size, output_output_dims[3],
output_output_dims[2], output_output_dims[1], oinfo.stride_line, oinfo.stride_surface);
issueEmuOp(softmax_op);
}
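This and the remaining NVDLA_DBG hunks are all the same -Wformat fix: fields like input_mle.size and ale.size are evidently 64-bit unsigned types (unsigned long on the build platform), and printing them with %d mismatches the argument, which is undefined behavior and draws a format warning. %lu matches the actual type here; %zu is the fully portable specifier when the value is a size_t. Sketch:

#include <cstdio>
#include <cstddef>

int main() {
  size_t size = 1024;  // stand-in for input_mle.size / ale.size
  // std::printf("sz:%d\n", size);            // -Wformat: int vs. size_t
  std::printf("sz:%lu\n", (unsigned long)size); // explicit cast matches %lu
  std::printf("sz:%zu\n", size);                // portable size_t form
  return 0;
}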
@@ -1198,7 +1198,7 @@ int CodeEmitVisitor::issueEmuAddr(int mid)
ale.mem_id = mid;
ale.id = aid;

NVDLA_DBG("AddressEntry s:%9d o:%9d mid:%3d id:%3d\n", ale.size, ale.offset, ale.mem_id, ale.id);
NVDLA_DBG("AddressEntry s:%9lu o:%9lu mid:%3d id:%3d\n", ale.size, ale.offset, ale.mem_id, ale.id);
m_pMeta.m_AddressListEntries.push_back(ale);
return aid;
}
@@ -1224,7 +1224,7 @@ int CodeEmitVisitor::issueDlaAddr(int mid, NvDlaCubeInfo cube, int groups, int g
ale.id = aid;
NVDLA_DBG("cube(%d %d %d %d %d), group(%d/%d) ofs %d\n", cube.dim_n, cube.dim_c, cube.dim_h, cube.dim_w,
cube.element_size, groups, gidx, ofs);
NVDLA_DBG("AddressEntry s:%9d o:%9d mid:%3d id:%3d\n", ale.size, ale.offset, ale.mem_id, ale.id);
NVDLA_DBG("AddressEntry s:%9lu o:%9lu mid:%3d id:%3d\n", ale.size, ale.offset, ale.mem_id, ale.id);

m_pMeta.m_AddressListEntries.push_back(ale);
return aid;
@@ -90,7 +90,7 @@ Pass::ReturnType NvDlaMemInfoPass::runOnModule(Module& pModule)
// for weight, memory buffers are allocated & blob files are also generated in ComputeOperator.

FloatTensor* t = static_cast<FloatTensor*>(v);
NVDLA_DBG("weight size:%d %d\n", mem->length(), t->getValues().size());
NVDLA_DBG("weight size:%u %zu\n", mem->length(), t->getValues().size());

} else {
NVDLA_DBG("operand size:%d\n", mem->length());
@@ -339,7 +339,7 @@ int NvDlaTaskSubmitPass::submitMemAllocAddress(int size, std::string blob_name)
ale.mem_id = mle.id;
ale.id = aid;

NVDLA_DBG("AddressEntry s:%9d o:%9d mid:%3d id:%3d\n", ale.size, ale.offset, ale.mem_id, ale.id);
NVDLA_DBG("AddressEntry s:%9lu o:%9lu mid:%3d id:%3d\n", ale.size, ale.offset, ale.mem_id, ale.id);
m_pMeta->m_AddressListEntries.push_back(ale);
return aid;
}
@@ -34,7 +34,9 @@
#endif

#ifdef NDEBUG
-#define ENABLE_DEBUG()
+# ifndef ENABLE_DEBUG
+# define ENABLE_DEBUG()
+# endif
#endif

using namespace onnc;
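The final hunk guards the no-op fallback: unconditionally defining ENABLE_DEBUG() under NDEBUG redefines the macro, with a warning, whenever an earlier header already provided a real definition. Wrapping it in #ifndef makes the empty version a default rather than an override, the same idiom as an include guard:

// Guarded fallback: only supply the empty definition if nothing upstream
// has defined the debug hook already.
#ifdef NDEBUG
# ifndef ENABLE_DEBUG
#  define ENABLE_DEBUG() /* no-op in release builds */
# endif
#endif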
