Permalink
Browse files

C++ Warning Reduction

Pivotal Tracker: #72898288

Additional author: Feng, Xixuan (Aaron) <xfeng@gopivotal.com>

Changes:
- add many static_cast
- fix mlogregr_result to use bigint for num_processed
  (branch information not captured)
1 parent 751d42b commit 5f104148cdab48f515bfc4826a58dbfdca3aedd3 Shengwen Yang committed with haying Jul 1, 2014
Showing 32 changed files with 223 additions and 206 deletions.
  1. +4 −2 doc/src/sql.ll
  2. +5 −2 methods/kmeans/src/pg_gp/kmeans.c
  3. +3 −3 methods/sketch/src/pg_gp/sketch_support.c
  4. +2 −1 methods/svec_util/src/pg_gp/generate_svec.c
  5. +2 −2 src/modules/convex/utils_regularization.cpp
  6. +1 −1 src/modules/crf/viterbi.cpp
  7. +1 −1 src/modules/elastic_net/elastic_net_binomial_fista.cpp
  8. +1 −1 src/modules/elastic_net/elastic_net_gaussian_fista.cpp
  9. +4 −4 src/modules/elastic_net/elastic_net_optimizer_fista.hpp
  10. +3 −3 src/modules/elastic_net/elastic_net_optimizer_igd.hpp
  11. +16 −13 src/modules/lda/lda.cpp
  12. +7 −6 src/modules/linalg/crossprod.cpp
  13. +2 −2 src/modules/linalg/dim_conversion.cpp
  14. +13 −13 src/modules/linalg/matrix_op.cpp
  15. +11 −8 src/modules/linalg/svd.cpp
  16. +1 −1 src/modules/linear_systems/dense_linear_systems_states.hpp
  17. +2 −2 src/modules/linear_systems/sparse_linear_systems.cpp
  18. +3 −3 src/modules/regress/LinearRegression_impl.hpp
  19. +1 −1 src/modules/regress/clustered_errors.cpp
  20. +1 −1 src/modules/regress/clustered_errors_state.hpp
  21. +2 −2 src/modules/regress/logistic.cpp
  22. +35 −34 src/modules/regress/marginal.cpp
  23. +9 −9 src/modules/regress/mlogr_margins.cpp
  24. +3 −3 src/modules/regress/multilogistic.cpp
  25. +1 −1 src/modules/stats/clustered_variance_coxph.cpp
  26. +5 −5 src/modules/stats/cox_prop_hazards.cpp
  27. +3 −3 src/modules/stats/coxph_improved.cpp
  28. +10 −10 src/modules/stats/marginal_cox.cpp
  29. +2 −2 src/modules/stats/robust_variance_coxph.cpp
  30. +58 −55 src/modules/tsa/arima.cpp
  31. +11 −11 src/ports/postgres/modules/regress/multilogistic.sql_in
  32. +1 −1 src/ports/postgres/modules/regress/test/logistic.sql_in
View
@@ -292,8 +292,10 @@ void SQLScanner::preScannerAction(SQLParser::semantic_type * /* yylval */,
yylloc->step();
// Start at oldLength: We don't want to count preserved text more than once
- for (int i = oldLength; i < yyleng; i++) {
- if (yytext[i] == '\r' && i + 1 < yyleng && yytext[i + 1] == '\n') {
+ for (size_t i = oldLength; i < yyleng; i++) {
+ if (yytext[i] == '\r' &&
+ static_cast<size_t>(i + 1) < yyleng &&
+ yytext[i + 1] == '\n') {
i++; yylloc->lines(1);
} else if (yytext[i] == '\r' || yytext[i] == '\n') {
yylloc->lines(1);
@@ -277,11 +277,14 @@ get_metric_fn_for_array(KMeansMetric inMetric)
calc_tanimoto_distance
};
- if (inMetric < 1 || inMetric > sizeof(metrics)/sizeof(PGFunction))
+ if (inMetric < 1 || inMetric > sizeof(metrics)/sizeof(PGFunction)) {
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid metric")));
- return metrics[inMetric - 1];
+ return NULL;
+ } else {
+ return metrics[inMetric - 1];
+ }
}
PG_FUNCTION_INFO_V1(internal_kmeans_closest_centroid);
@@ -342,15 +342,15 @@ Datum sketch_leftmost_zero(PG_FUNCTION_ARGS);
* the C99 standard.
*/
#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-compare"
#endif
+#pragma GCC diagnostic ignored "-Wsign-compare"
Datum sketch_rightmost_one(PG_FUNCTION_ARGS)
{
bytea *bitmap = (bytea *)PG_GETARG_BYTEA_P(0);
size_t sketchsz = PG_GETARG_INT32(1); /* size in bits */
size_t sketchnum = PG_GETARG_INT32(2); /* from the left! */
char * bits = VARDATA(bitmap);
- size_t len = (size_t)VARSIZE_ANY_EXHDR(bitmap);
+ size_t len = (size_t)(VARSIZE_ANY_EXHDR(bitmap));
return rightmost_one((uint8 *)bits, len, sketchsz, sketchnum);
}
@@ -361,7 +361,7 @@ Datum sketch_leftmost_zero(PG_FUNCTION_ARGS)
size_t sketchsz = PG_GETARG_INT32(1); /* size in bits */
size_t sketchnum = PG_GETARG_INT32(2); /* from the left! */
char * bits = VARDATA(bitmap);
- size_t len = (size_t)VARSIZE_ANY_EXHDR(bitmap);
+ size_t len = (size_t)(VARSIZE_ANY_EXHDR(bitmap));
return leftmost_zero((uint8 *)bits, len, sketchsz, sketchnum);
}
@@ -58,7 +58,8 @@ Datum generate_sparse_vector(PG_FUNCTION_ARGS)
/* Check if term index array has indexes in proper order or not */
for(int i = 0; i < term_index_nelems; i++)
{
- if ((term_index_data[i] < 0) || (term_index_data[i] >= dict_size))
+ if (DatumGetInt64(term_index_data[i]) < 0 ||
+ DatumGetInt64(term_index_data[i]) >= dict_size)
elog(ERROR, "Term indexes must range from 0 to total number of elements in the dictonary - 1.");
}
@@ -131,8 +131,8 @@ AnyType utils_var_scales_final::run (AnyType& args)
if (state.numRows == 0) return Null();
- state.mean /= state.numRows;
- state.std /= state.numRows;
+ state.mean /= static_cast<double>(state.numRows);
+ state.std /= static_cast<double>(state.numRows);
for (uint32_t i = 0; i < state.dimension; i++)
state.std(i) = sqrt(state.std(i) - state.mean(i) * state.mean(i));
@@ -44,7 +44,7 @@ AnyType vcrf_top1_label::run(AnyType& args) {
if (numLabels == 0)
throw std::invalid_argument("Number of labels cannot be zero");
- int doc_len = rArray.size()/numLabels;
+ int doc_len = static_cast<int>(rArray.size() / numLabels);
double* prev_top1_array = new double[numLabels];
double* curr_top1_array = new double[numLabels];
@@ -41,7 +41,7 @@ class BinomialFista
inline void BinomialFista::update_y_intercept_final(FistaState<MutableArrayHandle<double> >& state)
{
- state.gradient_intercept = state.gradient_intercept / state.totalRows;
+ state.gradient_intercept = state.gradient_intercept / static_cast<double>(state.totalRows);
}
// -----------------------------------------------------------------------------
@@ -50,7 +50,7 @@ class GaussianFista
inline void GaussianFista::update_y_intercept_final (
FistaState<MutableArrayHandle<double> >& state)
{
- state.gradient_intercept = state.gradient_intercept / state.totalRows;
+ state.gradient_intercept = state.gradient_intercept / static_cast<double>(state.totalRows);
}
// -----------------------------------------------------------------------------
@@ -202,7 +202,7 @@ AnyType Fista<Model>::fista_final (AnyType& args)
if (state.numRows == 0) return Null();
if (state.backtracking == 0) {
- state.gradient = state.gradient / state.totalRows;
+ state.gradient = state.gradient / static_cast<double>(state.totalRows);
double la = state.lambda * (1 - state.alpha);
for (uint32_t i = 0; i < state.dimension; i++)
if (state.coef_y(i) != 0)
@@ -236,11 +236,11 @@ AnyType Fista<Model>::fista_final (AnyType& args)
}
else {
// finish computing fn and Qfn if needed
- state.fn = state.fn / state.totalRows + 0.5 * state.lambda * (1 - state.alpha)
+ state.fn = state.fn / static_cast<double>(state.totalRows) + 0.5 * state.lambda * (1 - state.alpha)
* sparse_dot(state.b_coef, state.b_coef);
if (state.backtracking == 1)
- state.Qfn = state.Qfn / state.totalRows + 0.5 * state.lambda * (1 - state.alpha)
+ state.Qfn = state.Qfn / static_cast<double>(state.totalRows) + 0.5 * state.lambda * (1 - state.alpha)
* sparse_dot(state.coef_y, state.coef_y);
ColumnVector r = state.b_coef - state.coef_y;
@@ -292,7 +292,7 @@ AnyType Fista<Model>::fista_final (AnyType& args)
// compute the final loglikelihood value from the accumulated loss value
double loss_value;
- loss_value = state.loglikelihood / (state.numRows * 2);
+ loss_value = state.loglikelihood / static_cast<double>(state.numRows * 2);
double sum_sqr_coef = 0;
double sum_abs_coef = 0;
for (uint32_t i = 0; i < state.dimension; i++){
@@ -97,9 +97,9 @@ AnyType Igd<Model>::igd_transition (AnyType& args, const Allocator& inAllocator)
ColumnVector gradient(state.dimension); // gradient for coef only, not for intercept
Model::compute_gradient(gradient, state, x, y);
- double a = state.stepsize / state.totalRows;
+ double a = state.stepsize / static_cast<double>(state.totalRows);
double b = state.stepsize * state.alpha * state.lambda
- / state.totalRows;
+ / static_cast<double>(state.totalRows);
for (uint32_t i = 0; i < state.dimension; i++)
{
// step 1
@@ -180,7 +180,7 @@ AnyType Igd<Model>::igd_final (AnyType& args)
// compute the final loglikelihood value from the accumulated loss value
double loss_value;
- loss_value = state.loglikelihood / (state.numRows * 2);
+ loss_value = state.loglikelihood / static_cast<double>(state.numRows * 2);
double sum_sqr_coef = 0;
double sum_abs_coef = 0;
for (uint32_t i = 0; i < state.dimension; i++){
View
@@ -147,7 +147,7 @@ template<class T> static T __max(ArrayHandle<T> ah){
**/
static int32_t __sum(ArrayHandle<int32_t> ah){
const int32_t * array = ah.ptr();
- int32_t size = ah.size();
+ size_t size = ah.size();
return std::accumulate(array, array + size, static_cast<int32_t>(0));
}
@@ -244,8 +244,8 @@ AnyType lda_gibbs_sample::run(AnyType & args)
throw std::runtime_error("args.mSysInfo->user_fctx is null");
}
- int32_t unique_word_count = words.size();
- for(int it = 0; it < iter_num; it++){
+ int32_t unique_word_count = static_cast<int32_t>(words.size());
+ for(int32_t it = 0; it < iter_num; it++){
int32_t word_index = topic_num;
for(int32_t i = 0; i < unique_word_count; i++) {
int32_t wordid = words[i];
@@ -296,7 +296,7 @@ AnyType lda_random_assign::run(AnyType & args)
INT4TI.align));
for(int32_t i = 0; i < word_count; i++){
- int32_t topic = random() % topic_num;
+ int32_t topic = static_cast<int32_t>(random() % topic_num);
doc_topic[topic] += 1;
doc_topic[topic_num + i] = topic;
}
@@ -363,7 +363,7 @@ AnyType lda_count_topic_sfunc::run(AnyType & args)
state = args[0].getAs<MutableArrayHandle<int64_t> >();
}
- int32_t unique_word_count = words.size();
+ int32_t unique_word_count = static_cast<int32_t>(words.size());
int32_t word_index = 0;
for(int32_t i = 0; i < unique_word_count; i++){
int32_t wordid = words[i];
@@ -410,8 +410,8 @@ AnyType lda_transpose::run(AnyType & args)
if(matrix.dims() != 2)
throw std::domain_error("invalid dimension");
- int32_t row_num = matrix.sizeOfDim(0);
- int32_t col_num = matrix.sizeOfDim(1);
+ int32_t row_num = static_cast<int32_t>(matrix.sizeOfDim(0));
+ int32_t col_num = static_cast<int32_t>(matrix.sizeOfDim(1));
int dims[2] = {col_num, row_num};
int lbs[2] = {1, 1};
@@ -453,8 +453,8 @@ void * lda_unnest::SRF_init(AnyType &args)
sr_ctx * ctx = new sr_ctx;
ctx->inarray = inarray.ptr();
- ctx->maxcall = inarray.sizeOfDim(0);
- ctx->dim = inarray.sizeOfDim(1);
+ ctx->maxcall = static_cast<int32_t>(inarray.sizeOfDim(0));
+ ctx->dim = static_cast<int32_t>(inarray.sizeOfDim(1));
ctx->curcall = 0;
return ctx;
@@ -552,9 +552,12 @@ AnyType lda_perplexity_sfunc::run(AnyType & args){
if(__min(model) < 0)
throw std::invalid_argument("invalid topic counts in model");
- state = madlib_construct_array(
- NULL, model.size() + 1, INT8TI.oid, INT8TI.len, INT8TI.byval,
- INT8TI.align);
+ state = madlib_construct_array(NULL,
+ static_cast<int>(model.size()) + 1,
+ INT8TI.oid,
+ INT8TI.len,
+ INT8TI.byval,
+ INT8TI.align);
memcpy(state.ptr(), model.ptr(), model.size() * sizeof(int64_t));
}else{
@@ -625,7 +628,7 @@ AnyType l1_norm_with_smoothing::run(AnyType & args){
double sum = 0.0;
for(size_t i = 0; i < arr.size(); i++)
sum += fabs(arr[i]);
- sum += smooth * arr.size();
+ sum += smooth * static_cast<double>(arr.size());
double inverse_sum = 0.0;
if (sum != 0.0)
@@ -14,7 +14,8 @@ AnyType __pivotalr_crossprod_transition::run (AnyType& args)
{
ArrayHandle<double> left = args[1].getAs<ArrayHandle<double> >();
ArrayHandle<double> right = args[2].getAs<ArrayHandle<double> >();
- int m = left.size(), n = right.size();
+ size_t m = left.size();
+ size_t n = right.size();
MutableArrayHandle<double> state(NULL);
if (args[0].isNull()) {
@@ -25,8 +26,8 @@ AnyType __pivotalr_crossprod_transition::run (AnyType& args)
state = args[0].getAs<MutableArrayHandle<double> >();
int count = 0;
- for (int i = 0; i < m; i++)
- for (int j = 0; j < n; j++)
+ for (size_t i = 0; i < m; i++)
+ for (size_t j = 0; j < n; j++)
state[count++] += left[i] * right[j];
return state;
@@ -51,7 +52,7 @@ AnyType __pivotalr_crossprod_merge::run (AnyType& args)
AnyType __pivotalr_crossprod_sym_transition::run (AnyType& args)
{
ArrayHandle<double> arr = args[1].getAs<ArrayHandle<double> >();
- int n = arr.size();
+ size_t n = arr.size();
MutableArrayHandle<double> state(NULL);
if (args[0].isNull()) {
@@ -62,8 +63,8 @@ AnyType __pivotalr_crossprod_sym_transition::run (AnyType& args)
state = args[0].getAs<MutableArrayHandle<double> >();
int count = 0;
- for (int i = 0; i < n; i++)
- for (int j = 0; j <= i; j++)
+ for (size_t i = 0; i < n; i++)
+ for (size_t j = 0; j <= i; j++)
state[count++] += arr[i] * arr[j];
return state;
@@ -36,8 +36,8 @@ AnyType array_to_1d::run(AnyType & args) {
allocateArray<double, dbal::FunctionContext,
dbal::DoZero, dbal::ThrowBadAlloc>(in_array.size() + 2);
// The fist two elements encode the dimension info
- out_array[0] = in_array.sizeOfDim(0);
- out_array[1] = in_array.sizeOfDim(1);
+ out_array[0] = static_cast<double>(in_array.sizeOfDim(0));
+ out_array[1] = static_cast<double>(in_array.sizeOfDim(1));
memcpy(out_array.ptr() + 2, in_array.ptr(), sizeof(double) * in_array.size());
return out_array;
@@ -80,8 +80,8 @@ AnyType matrix_mem_sum_sfunc::run(AnyType & args)
"invalid argument - 2-d array expected");
}
- int row_m = m.sizeOfDim(0);
- int col_m = m.sizeOfDim(1);
+ int row_m = static_cast<int>(m.sizeOfDim(0));
+ int col_m = static_cast<int>(m.sizeOfDim(1));
MutableArrayHandle<double> state(NULL);
if (args[0].isNull()){
@@ -110,7 +110,7 @@ AnyType matrix_blockize_sfunc::run(AnyType & args)
int32_t row_id = args[1].getAs<int32_t>();
ArrayHandle<double> row_vec = args[2].getAs<ArrayHandle<double> >();
- int32_t csize = row_vec.sizeOfDim(0);
+ int32_t csize = static_cast<int32_t>(row_vec.sizeOfDim(0));
int32_t rsize = args[3].getAs<int32_t>();
if(rsize < 1){
throw std::invalid_argument(
@@ -146,10 +146,10 @@ AnyType matrix_mem_mult::run(AnyType & args)
"invalid argument - 2-d array expected");
}
- int row_a = a.sizeOfDim(0);
- int col_a = a.sizeOfDim(1);
- int row_b = b.sizeOfDim(0);
- int col_b = b.sizeOfDim(1);
+ int row_a = static_cast<int>(a.sizeOfDim(0));
+ int col_a = static_cast<int>(a.sizeOfDim(1));
+ int row_b = static_cast<int>(b.sizeOfDim(0));
+ int col_b = static_cast<int>(b.sizeOfDim(1));
if ((!trans_b && col_a != row_b) || (trans_b && col_a != col_b)){
throw std::invalid_argument(
@@ -192,8 +192,8 @@ AnyType matrix_mem_trans::run(AnyType & args)
"invalid argument - 2-d array expected");
}
- int row_m = m.sizeOfDim(0);
- int col_m = m.sizeOfDim(1);
+ int row_m = static_cast<int>(m.sizeOfDim(0));
+ int col_m = static_cast<int>(m.sizeOfDim(1));
int dims[2] = {col_m, row_m};
int lbs[2] = {1, 1};
@@ -267,7 +267,7 @@ void * row_split::SRF_init(AnyType &args)
sr_ctx1 * ctx = new sr_ctx1;
ctx->inarray = inarray.ptr();
- ctx->dim = inarray.sizeOfDim(0);
+ ctx->dim = static_cast<int32_t>(inarray.sizeOfDim(0));
ctx->size = size;
ctx->maxcall = static_cast<int32_t>(
ceil(static_cast<double>(ctx->dim) / size));
@@ -315,7 +315,7 @@ AnyType matrix_unblockize_sfunc::run(AnyType & args)
int32_t total_col_dim = args[1].getAs<int32_t>();
int32_t col_id = args[2].getAs<int32_t>();
ArrayHandle<double> row_vec = args[3].getAs<ArrayHandle<double> >();
- int32_t col_dim = row_vec.sizeOfDim(0);
+ int32_t col_dim = static_cast<int32_t>(row_vec.sizeOfDim(0));
if(total_col_dim < 1){
throw std::invalid_argument(
@@ -361,8 +361,8 @@ void * unnest_block::SRF_init(AnyType &args)
sr_ctx2 * ctx = new sr_ctx2;
ctx->inarray = inarray.ptr();
- ctx->maxcall = inarray.sizeOfDim(0);
- ctx->dim = inarray.sizeOfDim(1);
+ ctx->maxcall = static_cast<int32_t>(inarray.sizeOfDim(0));
+ ctx->dim = static_cast<int32_t>(inarray.sizeOfDim(1));
ctx->curcall = 0;
return ctx;
(Remaining file diffs did not load; the listing above covers only the first 14 of 32 changed files.)

0 comments on commit 5f10414

Please sign in to comment.