C++ Warning Reduction

Pivotal Tracker: #72898288

Additional author: Feng, Xixuan (Aaron) <xfeng@gopivotal.com>

Changes:
- add many static_cast calls to silence sign/width conversion warnings (a hedged sketch of the dominant pattern follows the file list below)
- fix mlogregr_result to use bigint for num_processed
commit 5f104148cdab48f515bfc4826a58dbfdca3aedd3 (parent: 751d42b)
Authored by Shengwen Yang; committed by haying.
Showing with 223 additions and 206 deletions.
  1. +4 −2 doc/src/sql.ll
  2. +5 −2 methods/kmeans/src/pg_gp/kmeans.c
  3. +3 −3 methods/sketch/src/pg_gp/sketch_support.c
  4. +2 −1  methods/svec_util/src/pg_gp/generate_svec.c
  5. +2 −2 src/modules/convex/utils_regularization.cpp
  6. +1 −1  src/modules/crf/viterbi.cpp
  7. +1 −1  src/modules/elastic_net/elastic_net_binomial_fista.cpp
  8. +1 −1  src/modules/elastic_net/elastic_net_gaussian_fista.cpp
  9. +4 −4 src/modules/elastic_net/elastic_net_optimizer_fista.hpp
  10. +3 −3 src/modules/elastic_net/elastic_net_optimizer_igd.hpp
  11. +16 −13 src/modules/lda/lda.cpp
  12. +7 −6 src/modules/linalg/crossprod.cpp
  13. +2 −2 src/modules/linalg/dim_conversion.cpp
  14. +13 −13 src/modules/linalg/matrix_op.cpp
  15. +11 −8 src/modules/linalg/svd.cpp
  16. +1 −1  src/modules/linear_systems/dense_linear_systems_states.hpp
  17. +2 −2 src/modules/linear_systems/sparse_linear_systems.cpp
  18. +3 −3 src/modules/regress/LinearRegression_impl.hpp
  19. +1 −1  src/modules/regress/clustered_errors.cpp
  20. +1 −1  src/modules/regress/clustered_errors_state.hpp
  21. +2 −2 src/modules/regress/logistic.cpp
  22. +35 −34 src/modules/regress/marginal.cpp
  23. +9 −9 src/modules/regress/mlogr_margins.cpp
  24. +3 −3 src/modules/regress/multilogistic.cpp
  25. +1 −1  src/modules/stats/clustered_variance_coxph.cpp
  26. +5 −5 src/modules/stats/cox_prop_hazards.cpp
  27. +3 −3 src/modules/stats/coxph_improved.cpp
  28. +10 −10 src/modules/stats/marginal_cox.cpp
  29. +2 −2 src/modules/stats/robust_variance_coxph.cpp
  30. +58 −55 src/modules/tsa/arima.cpp
  31. +11 −11 src/ports/postgres/modules/regress/multilogistic.sql_in
  32. +1 −1  src/ports/postgres/modules/regress/test/logistic.sql_in
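
Most of the hunks below follow a single pattern: code that mixes size_t, Eigen Index, int, uint16_t, and double values gets an explicit static_cast at each narrowing or sign-changing conversion, so that -Wconversion and -Wsign-compare stay quiet without changing behavior. A minimal before/after sketch of the pattern (names are illustrative, not from the tree):

// Compile with: g++ -std=c++11 -Wall -Wextra -Wconversion -Wsign-compare
#include <cstdint>
#include <vector>

double normalized_sum(const std::vector<double> &v, uint64_t numRows) {
    double sum = 0.0;
    // before: for (int i = 0; i < v.size(); i++)   // -Wsign-compare
    for (size_t i = 0; i < v.size(); i++)           // after: unsigned index
        sum += v[i];
    // before: return sum / numRows;                // implicit uint64 -> double
    return sum / static_cast<double>(numRows);      // after: conversion is explicit
}

int32_t dim_of(const std::vector<double> &v) {
    // before: return v.size();                     // size_t -> int32_t narrows
    return static_cast<int32_t>(v.size());          // after: intent is visible
}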
6 doc/src/sql.ll
@@ -292,8 +292,10 @@ void SQLScanner::preScannerAction(SQLParser::semantic_type * /* yylval */,
yylloc->step();
// Start at oldLength: We don't want to count preserved text more than once
- for (int i = oldLength; i < yyleng; i++) {
- if (yytext[i] == '\r' && i + 1 < yyleng && yytext[i + 1] == '\n') {
+ for (size_t i = oldLength; i < yyleng; i++) {
+ if (yytext[i] == '\r' &&
+ static_cast<size_t>(i + 1) < yyleng &&
+ yytext[i + 1] == '\n') {
i++; yylloc->lines(1);
} else if (yytext[i] == '\r' || yytext[i] == '\n') {
yylloc->lines(1);
7 methods/kmeans/src/pg_gp/kmeans.c
@@ -277,11 +277,14 @@ get_metric_fn_for_array(KMeansMetric inMetric)
calc_tanimoto_distance
};
- if (inMetric < 1 || inMetric > sizeof(metrics)/sizeof(PGFunction))
+ if (inMetric < 1 || inMetric > sizeof(metrics)/sizeof(PGFunction)) {
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid metric")));
- return metrics[inMetric - 1];
+ return NULL;
+ } else {
+ return metrics[inMetric - 1];
+ }
}
PG_FUNCTION_INFO_V1(internal_kmeans_closest_centroid);
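
The kmeans.c hunk above addresses -Wreturn-type rather than a cast: ereport(ERROR, ...) never returns, but unless the compiler sees a noreturn attribute it assumes the error branch can fall through, and warns that a value-returning function may reach its end. Restructuring into if/else with an unreachable return on the error path silences that. A hedged C++ sketch of the same shape, with stand-in names and a plain exit() in place of ereport:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

using PGFunction = double (*)(double, double);

static double dist_l1(double a, double b) { return a > b ? a - b : b - a; }
static double dist_l2sq(double a, double b) { double d = a - b; return d * d; }

static void report_error(const char *msg) {  // stand-in for ereport(ERROR, ...)
    std::fprintf(stderr, "%s\n", msg);
    std::exit(1);
}

PGFunction get_metric_fn(int inMetric) {
    static const PGFunction metrics[] = {dist_l1, dist_l2sq};
    if (inMetric < 1 ||
        static_cast<size_t>(inMetric) > sizeof(metrics) / sizeof(PGFunction)) {
        report_error("invalid metric");
        return nullptr;  // unreachable, but keeps -Wreturn-type quiet
    } else {
        return metrics[inMetric - 1];
    }
}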
6 methods/sketch/src/pg_gp/sketch_support.c
@@ -342,15 +342,15 @@ Datum sketch_leftmost_zero(PG_FUNCTION_ARGS);
* the C99 standard.
*/
#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wsign-compare"
#endif
+#pragma GCC diagnostic ignored "-Wsign-compare"
Datum sketch_rightmost_one(PG_FUNCTION_ARGS)
{
bytea *bitmap = (bytea *)PG_GETARG_BYTEA_P(0);
size_t sketchsz = PG_GETARG_INT32(1); /* size in bits */
size_t sketchnum = PG_GETARG_INT32(2); /* from the left! */
char * bits = VARDATA(bitmap);
- size_t len = (size_t)VARSIZE_ANY_EXHDR(bitmap);
+ size_t len = (size_t)(VARSIZE_ANY_EXHDR(bitmap));
return rightmost_one((uint8 *)bits, len, sketchsz, sketchnum);
}
@@ -361,7 +361,7 @@ Datum sketch_leftmost_zero(PG_FUNCTION_ARGS)
size_t sketchsz = PG_GETARG_INT32(1); /* size in bits */
size_t sketchnum = PG_GETARG_INT32(2); /* from the left! */
char * bits = VARDATA(bitmap);
- size_t len = (size_t)VARSIZE_ANY_EXHDR(bitmap);
+ size_t len = (size_t)(VARSIZE_ANY_EXHDR(bitmap));
return leftmost_zero((uint8 *)bits, len, sketchsz, sketchnum);
}
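
The sketch_support.c change moves the ignored pragma from inside the conditional block to after the #endif, while push stays guarded; presumably so the suppression still takes effect on compilers where the guarded push/pop pair is compiled out. A sketch of the resulting shape, with a hypothetical guard condition (the real #if is not visible in the hunk):

#include <cstddef>

#if defined(__GNUC__) && !defined(__clang__)   // hypothetical guard
#pragma GCC diagnostic push
#endif
#pragma GCC diagnostic ignored "-Wsign-compare"  // now applies either way

int rightmost_one_demo(int n, size_t len) {
    return n < len;  // signed/unsigned compare, deliberately tolerated here
}

#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif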
3  methods/svec_util/src/pg_gp/generate_svec.c
@@ -58,7 +58,8 @@ Datum generate_sparse_vector(PG_FUNCTION_ARGS)
/* Check if term index array has indexes in proper order or not */
for(int i = 0; i < term_index_nelems; i++)
{
- if ((term_index_data[i] < 0) || (term_index_data[i] >= dict_size))
+ if (DatumGetInt64(term_index_data[i]) < 0 ||
+ DatumGetInt64(term_index_data[i]) >= dict_size)
elog(ERROR, "Term indexes must range from 0 to total number of elements in the dictionary - 1.");
}
4 src/modules/convex/utils_regularization.cpp
@@ -131,8 +131,8 @@ AnyType utils_var_scales_final::run (AnyType& args)
if (state.numRows == 0) return Null();
- state.mean /= state.numRows;
- state.std /= state.numRows;
+ state.mean /= static_cast<double>(state.numRows);
+ state.std /= static_cast<double>(state.numRows);
for (uint32_t i = 0; i < state.dimension; i++)
state.std(i) = sqrt(state.std(i) - state.mean(i) * state.mean(i));
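
This hunk, and many like it in the elastic_net and regress modules below, normalizes accumulated sums by an integral row counter; dividing directly converts uint64_t to double implicitly, which -Wconversion flags because the conversion can lose precision. A minimal sketch of the finalization step, assuming Eigen is available and a 64-bit counter (names illustrative):

#include <cstdint>
#include <Eigen/Dense>

// Finalize mean and std from accumulated sums; the explicit cast makes
// the integer-to-double conversion intentional rather than implicit.
void finalize(Eigen::VectorXd &mean, Eigen::VectorXd &std_, uint64_t numRows) {
    mean /= static_cast<double>(numRows);
    std_ /= static_cast<double>(numRows);
    std_ = (std_.array() - mean.array().square()).sqrt().matrix();
}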
2  src/modules/crf/viterbi.cpp
@@ -44,7 +44,7 @@ AnyType vcrf_top1_label::run(AnyType& args) {
if (numLabels == 0)
throw std::invalid_argument("Number of labels cannot be zero");
- int doc_len = rArray.size()/numLabels;
+ int doc_len = static_cast<int>(rArray.size() / numLabels);
double* prev_top1_array = new double[numLabels];
double* curr_top1_array = new double[numLabels];
2  src/modules/elastic_net/elastic_net_binomial_fista.cpp
@@ -41,7 +41,7 @@ class BinomialFista
inline void BinomialFista::update_y_intercept_final(FistaState<MutableArrayHandle<double> >& state)
{
- state.gradient_intercept = state.gradient_intercept / state.totalRows;
+ state.gradient_intercept = state.gradient_intercept / static_cast<double>(state.totalRows);
}
// -----------------------------------------------------------------------------
2  src/modules/elastic_net/elastic_net_gaussian_fista.cpp
@@ -50,7 +50,7 @@ class GaussianFista
inline void GaussianFista::update_y_intercept_final (
FistaState<MutableArrayHandle<double> >& state)
{
- state.gradient_intercept = state.gradient_intercept / state.totalRows;
+ state.gradient_intercept = state.gradient_intercept / static_cast<double>(state.totalRows);
}
// -----------------------------------------------------------------------------
8 src/modules/elastic_net/elastic_net_optimizer_fista.hpp
@@ -202,7 +202,7 @@ AnyType Fista<Model>::fista_final (AnyType& args)
if (state.numRows == 0) return Null();
if (state.backtracking == 0) {
- state.gradient = state.gradient / state.totalRows;
+ state.gradient = state.gradient / static_cast<double>(state.totalRows);
double la = state.lambda * (1 - state.alpha);
for (uint32_t i = 0; i < state.dimension; i++)
if (state.coef_y(i) != 0)
@@ -236,11 +236,11 @@ AnyType Fista<Model>::fista_final (AnyType& args)
}
else {
// finish computing fn and Qfn if needed
- state.fn = state.fn / state.totalRows + 0.5 * state.lambda * (1 - state.alpha)
+ state.fn = state.fn / static_cast<double>(state.totalRows) + 0.5 * state.lambda * (1 - state.alpha)
* sparse_dot(state.b_coef, state.b_coef);
if (state.backtracking == 1)
- state.Qfn = state.Qfn / state.totalRows + 0.5 * state.lambda * (1 - state.alpha)
+ state.Qfn = state.Qfn / static_cast<double>(state.totalRows) + 0.5 * state.lambda * (1 - state.alpha)
* sparse_dot(state.coef_y, state.coef_y);
ColumnVector r = state.b_coef - state.coef_y;
@@ -292,7 +292,7 @@ AnyType Fista<Model>::fista_final (AnyType& args)
// compute the final loglikelihood value from the accumulated loss value
double loss_value;
- loss_value = state.loglikelihood / (state.numRows * 2);
+ loss_value = state.loglikelihood / static_cast<double>(state.numRows * 2);
double sum_sqr_coef = 0;
double sum_abs_coef = 0;
for (uint32_t i = 0; i < state.dimension; i++){
6 src/modules/elastic_net/elastic_net_optimizer_igd.hpp
@@ -97,9 +97,9 @@ AnyType Igd<Model>::igd_transition (AnyType& args, const Allocator& inAllocator)
ColumnVector gradient(state.dimension); // gradient for coef only, not for intercept
Model::compute_gradient(gradient, state, x, y);
- double a = state.stepsize / state.totalRows;
+ double a = state.stepsize / static_cast<double>(state.totalRows);
double b = state.stepsize * state.alpha * state.lambda
- / state.totalRows;
+ / static_cast<double>(state.totalRows);
for (uint32_t i = 0; i < state.dimension; i++)
{
// step 1
@@ -180,7 +180,7 @@ AnyType Igd<Model>::igd_final (AnyType& args)
// compute the final loglikelihood value from the accumulated loss value
double loss_value;
- loss_value = state.loglikelihood / (state.numRows * 2);
+ loss_value = state.loglikelihood / static_cast<double>(state.numRows * 2);
double sum_sqr_coef = 0;
double sum_abs_coef = 0;
for (uint32_t i = 0; i < state.dimension; i++){
29 src/modules/lda/lda.cpp
@@ -147,7 +147,7 @@ template<class T> static T __max(ArrayHandle<T> ah){
**/
static int32_t __sum(ArrayHandle<int32_t> ah){
const int32_t * array = ah.ptr();
- int32_t size = ah.size();
+ size_t size = ah.size();
return std::accumulate(array, array + size, static_cast<int32_t>(0));
}
@@ -244,8 +244,8 @@ AnyType lda_gibbs_sample::run(AnyType & args)
throw std::runtime_error("args.mSysInfo->user_fctx is null");
}
- int32_t unique_word_count = words.size();
- for(int it = 0; it < iter_num; it++){
+ int32_t unique_word_count = static_cast<int32_t>(words.size());
+ for(int32_t it = 0; it < iter_num; it++){
int32_t word_index = topic_num;
for(int32_t i = 0; i < unique_word_count; i++) {
int32_t wordid = words[i];
@@ -296,7 +296,7 @@ AnyType lda_random_assign::run(AnyType & args)
INT4TI.align));
for(int32_t i = 0; i < word_count; i++){
- int32_t topic = random() % topic_num;
+ int32_t topic = static_cast<int32_t>(random() % topic_num);
doc_topic[topic] += 1;
doc_topic[topic_num + i] = topic;
}
@@ -363,7 +363,7 @@ AnyType lda_count_topic_sfunc::run(AnyType & args)
state = args[0].getAs<MutableArrayHandle<int64_t> >();
}
- int32_t unique_word_count = words.size();
+ int32_t unique_word_count = static_cast<int32_t>(words.size());
int32_t word_index = 0;
for(int32_t i = 0; i < unique_word_count; i++){
int32_t wordid = words[i];
@@ -410,8 +410,8 @@ AnyType lda_transpose::run(AnyType & args)
if(matrix.dims() != 2)
throw std::domain_error("invalid dimension");
- int32_t row_num = matrix.sizeOfDim(0);
- int32_t col_num = matrix.sizeOfDim(1);
+ int32_t row_num = static_cast<int32_t>(matrix.sizeOfDim(0));
+ int32_t col_num = static_cast<int32_t>(matrix.sizeOfDim(1));
int dims[2] = {col_num, row_num};
int lbs[2] = {1, 1};
@@ -453,8 +453,8 @@ void * lda_unnest::SRF_init(AnyType &args)
sr_ctx * ctx = new sr_ctx;
ctx->inarray = inarray.ptr();
- ctx->maxcall = inarray.sizeOfDim(0);
- ctx->dim = inarray.sizeOfDim(1);
+ ctx->maxcall = static_cast<int32_t>(inarray.sizeOfDim(0));
+ ctx->dim = static_cast<int32_t>(inarray.sizeOfDim(1));
ctx->curcall = 0;
return ctx;
@@ -552,9 +552,12 @@ AnyType lda_perplexity_sfunc::run(AnyType & args){
if(__min(model) < 0)
throw std::invalid_argument("invalid topic counts in model");
- state = madlib_construct_array(
- NULL, model.size() + 1, INT8TI.oid, INT8TI.len, INT8TI.byval,
- INT8TI.align);
+ state = madlib_construct_array(NULL,
+ static_cast<int>(model.size()) + 1,
+ INT8TI.oid,
+ INT8TI.len,
+ INT8TI.byval,
+ INT8TI.align);
memcpy(state.ptr(), model.ptr(), model.size() * sizeof(int64_t));
}else{
@@ -625,7 +628,7 @@ AnyType l1_norm_with_smoothing::run(AnyType & args){
double sum = 0.0;
for(size_t i = 0; i < arr.size(); i++)
sum += fabs(arr[i]);
- sum += smooth * arr.size();
+ sum += smooth * static_cast<double>(arr.size());
double inverse_sum = 0.0;
if (sum != 0.0)
13 src/modules/linalg/crossprod.cpp
@@ -14,7 +14,8 @@ AnyType __pivotalr_crossprod_transition::run (AnyType& args)
{
ArrayHandle<double> left = args[1].getAs<ArrayHandle<double> >();
ArrayHandle<double> right = args[2].getAs<ArrayHandle<double> >();
- int m = left.size(), n = right.size();
+ size_t m = left.size();
+ size_t n = right.size();
MutableArrayHandle<double> state(NULL);
if (args[0].isNull()) {
@@ -25,8 +26,8 @@ AnyType __pivotalr_crossprod_transition::run (AnyType& args)
state = args[0].getAs<MutableArrayHandle<double> >();
int count = 0;
- for (int i = 0; i < m; i++)
- for (int j = 0; j < n; j++)
+ for (size_t i = 0; i < m; i++)
+ for (size_t j = 0; j < n; j++)
state[count++] += left[i] * right[j];
return state;
@@ -51,7 +52,7 @@ AnyType __pivotalr_crossprod_merge::run (AnyType& args)
AnyType __pivotalr_crossprod_sym_transition::run (AnyType& args)
{
ArrayHandle<double> arr = args[1].getAs<ArrayHandle<double> >();
- int n = arr.size();
+ size_t n = arr.size();
MutableArrayHandle<double> state(NULL);
if (args[0].isNull()) {
@@ -62,8 +63,8 @@ AnyType __pivotalr_crossprod_sym_transition::run (AnyType& args)
state = args[0].getAs<MutableArrayHandle<double> >();
int count = 0;
- for (int i = 0; i < n; i++)
- for (int j = 0; j <= i; j++)
+ for (size_t i = 0; i < n; i++)
+ for (size_t j = 0; j <= i; j++)
state[count++] += arr[i] * arr[j];
return state;
4 src/modules/linalg/dim_conversion.cpp
@@ -36,8 +36,8 @@ AnyType array_to_1d::run(AnyType & args) {
allocateArray<double, dbal::FunctionContext,
dbal::DoZero, dbal::ThrowBadAlloc>(in_array.size() + 2);
// The fist two elements encode the dimension info
- out_array[0] = in_array.sizeOfDim(0);
- out_array[1] = in_array.sizeOfDim(1);
+ out_array[0] = static_cast<double>(in_array.sizeOfDim(0));
+ out_array[1] = static_cast<double>(in_array.sizeOfDim(1));
memcpy(out_array.ptr() + 2, in_array.ptr(), sizeof(double) * in_array.size());
return out_array;
26 src/modules/linalg/matrix_op.cpp
@@ -80,8 +80,8 @@ AnyType matrix_mem_sum_sfunc::run(AnyType & args)
"invalid argument - 2-d array expected");
}
- int row_m = m.sizeOfDim(0);
- int col_m = m.sizeOfDim(1);
+ int row_m = static_cast<int>(m.sizeOfDim(0));
+ int col_m = static_cast<int>(m.sizeOfDim(1));
MutableArrayHandle<double> state(NULL);
if (args[0].isNull()){
@@ -110,7 +110,7 @@ AnyType matrix_blockize_sfunc::run(AnyType & args)
int32_t row_id = args[1].getAs<int32_t>();
ArrayHandle<double> row_vec = args[2].getAs<ArrayHandle<double> >();
- int32_t csize = row_vec.sizeOfDim(0);
+ int32_t csize = static_cast<int32_t>(row_vec.sizeOfDim(0));
int32_t rsize = args[3].getAs<int32_t>();
if(rsize < 1){
throw std::invalid_argument(
@@ -146,10 +146,10 @@ AnyType matrix_mem_mult::run(AnyType & args)
"invalid argument - 2-d array expected");
}
- int row_a = a.sizeOfDim(0);
- int col_a = a.sizeOfDim(1);
- int row_b = b.sizeOfDim(0);
- int col_b = b.sizeOfDim(1);
+ int row_a = static_cast<int>(a.sizeOfDim(0));
+ int col_a = static_cast<int>(a.sizeOfDim(1));
+ int row_b = static_cast<int>(b.sizeOfDim(0));
+ int col_b = static_cast<int>(b.sizeOfDim(1));
if ((!trans_b && col_a != row_b) || (trans_b && col_a != col_b)){
throw std::invalid_argument(
@@ -192,8 +192,8 @@ AnyType matrix_mem_trans::run(AnyType & args)
"invalid argument - 2-d array expected");
}
- int row_m = m.sizeOfDim(0);
- int col_m = m.sizeOfDim(1);
+ int row_m = static_cast<int>(m.sizeOfDim(0));
+ int col_m = static_cast<int>(m.sizeOfDim(1));
int dims[2] = {col_m, row_m};
int lbs[2] = {1, 1};
@@ -267,7 +267,7 @@ void * row_split::SRF_init(AnyType &args)
sr_ctx1 * ctx = new sr_ctx1;
ctx->inarray = inarray.ptr();
- ctx->dim = inarray.sizeOfDim(0);
+ ctx->dim = static_cast<int32_t>(inarray.sizeOfDim(0));
ctx->size = size;
ctx->maxcall = static_cast<int32_t>(
ceil(static_cast<double>(ctx->dim) / size));
@@ -315,7 +315,7 @@ AnyType matrix_unblockize_sfunc::run(AnyType & args)
int32_t total_col_dim = args[1].getAs<int32_t>();
int32_t col_id = args[2].getAs<int32_t>();
ArrayHandle<double> row_vec = args[3].getAs<ArrayHandle<double> >();
- int32_t col_dim = row_vec.sizeOfDim(0);
+ int32_t col_dim = static_cast<int32_t>(row_vec.sizeOfDim(0));
if(total_col_dim < 1){
throw std::invalid_argument(
@@ -361,8 +361,8 @@ void * unnest_block::SRF_init(AnyType &args)
sr_ctx2 * ctx = new sr_ctx2;
ctx->inarray = inarray.ptr();
- ctx->maxcall = inarray.sizeOfDim(0);
- ctx->dim = inarray.sizeOfDim(1);
+ ctx->maxcall = static_cast<int32_t>(inarray.sizeOfDim(0));
+ ctx->dim = static_cast<int32_t>(inarray.sizeOfDim(1));
ctx->curcall = 0;
return ctx;
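
The row_split hunk above keeps the existing ceil(static_cast<double>(ctx->dim) / size) shape while casting the dimension lookup itself. The cast before the division is the load-bearing part: integer division would truncate before ceil ever ran. Stand-alone illustrative form:

#include <cmath>
#include <cstdint>

// Number of set-returning-function calls needed to emit `dim` rows in
// blocks of `size` (hypothetical helper, mirroring the hunk's arithmetic).
int32_t num_calls(int32_t dim, int32_t size) {
    return static_cast<int32_t>(std::ceil(static_cast<double>(dim) / size));
}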
19 src/modules/linalg/svd.cpp
@@ -235,9 +235,12 @@ AnyType svd_gram_schmidt_orthogonalize_sfunc::run(AnyType & args){
MutableArrayHandle<double> state(NULL);
if(args[0].isNull()){
state = MutableArrayHandle<double>(
- madlib_construct_array(
- NULL, u.size() * 2, FLOAT8TI.oid, FLOAT8TI.len, FLOAT8TI.byval,
- FLOAT8TI.align));
+ madlib_construct_array(NULL,
+ static_cast<int>(u.size()) * 2,
+ FLOAT8TI.oid,
+ FLOAT8TI.len,
+ FLOAT8TI.byval,
+ FLOAT8TI.align));
// Save v into the state variable
memcpy(state.ptr() + u.size(), v.data(), v.size() * sizeof(double));
@@ -376,7 +379,7 @@ AnyType svd_decompose_bidiagonal_prefunc::run(AnyType & args){
**/
AnyType svd_decompose_bidiagonal_ffunc::run(AnyType & args){
MappedColumnVector state = args[0].getAs<MappedColumnVector>();
- size_t k = static_cast<size_t>(sqrt(state.size()));
+ size_t k = static_cast<size_t>(sqrt(static_cast<double>(state.size())));
// Note that Eigen Matrix deserializes the vector in the column order
// Thus transpose() is needed after resize()
@@ -451,8 +454,8 @@ AnyType svd_block_lanczos_sfunc::run(AnyType & args){
// Note that m is constructed in the column-first order
Matrix m = block;
- int row_size = block.cols();
- int col_size = block.rows();
+ size_t row_size = block.cols();
+ size_t col_size = block.rows();
Matrix v = block.transpose() * vec.segment(col_id * col_size, col_size);
for(int32_t i = 0; i < v.size(); i++)
@@ -506,7 +509,7 @@ AnyType svd_vec_mult_matrix::run(AnyType & args){
// Any integer is ok
if(k <= 0 || k > mat.rows()){
- k = mat.rows();
+ k = static_cast<int32_t>(mat.rows());
}
// Note mat is constructed in the column-first order
@@ -554,7 +557,7 @@ void * svd_vec_trans_mult_matrix::SRF_init(AnyType &args){
"invalid parameter - k should be in the range of (0, mat.cols()]");
}
- ctx->max_call = ctx->vec.size();
+ ctx->max_call = static_cast<int32_t>(ctx->vec.size());
ctx->cur_call = 0;
return ctx;
2  src/modules/linear_systems/dense_linear_systems_states.hpp
@@ -44,7 +44,7 @@ inline void ResidualState<Container>::bind(ByteStream_type& inStream){
>> widthOfA
>> residual_norm
>> b_norm;
- uint16_t actualWidthOfA = widthOfA.isNull() ? 0 : static_cast<uint16_t>(widthOfA);
+ uint16_t actualWidthOfA = widthOfA.isNull() ? static_cast<uint16_t>(0) : static_cast<uint16_t>(widthOfA);
inStream >> solution.rebind(actualWidthOfA);
}
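
This change in ResidualState::bind recurs below in LinearRegression_impl.hpp and clustered_errors_state.hpp: in cond ? 0 : u16, the int literal 0 drags the common type of the conditional expression up to int, and assigning that int back into a uint16_t is a narrowing that -Wconversion complains about, at least on the GCC releases this commit targets. Casting the zero keeps both arms at uint16_t. Illustrative reduction:

#include <cstdint>

uint16_t actual_width(bool is_null, uint16_t width) {
    // may warn: uint16_t w = is_null ? 0 : width;
    uint16_t w = is_null ? static_cast<uint16_t>(0) : width;
    return w;
}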
4 src/modules/linear_systems/sparse_linear_systems.cpp
@@ -679,8 +679,8 @@ sparse_inmem_iterative_linear_system_final::run(AnyType &args) {
}
- int iters;
- double error;
+ int iters = 0;
+ double error = 0.;
// Switch case needs scoping in C++ if you want to declare inside it
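
The sparse_linear_systems hunk initializes iters and error at declaration: when a variable is assigned only inside some branches of a following switch, GCC's -Wmaybe-uninitialized fires at the later use, and zero-initializing up front is the conventional fix. A hypothetical reduction:

// Without the `= 0` initializers, -Wmaybe-uninitialized fires at the
// return, since the compiler cannot prove one of the cases was taken.
double residual_for(int algorithm) {
    int iters = 0;
    double error = 0.0;
    switch (algorithm) {
        case 1: iters = 10; error = 1e-6; break;
        case 2: iters = 25; error = 1e-8; break;
    }
    return error / (iters + 1);
}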
6 src/modules/regress/LinearRegression_impl.hpp
@@ -291,7 +291,7 @@ inline
void
RobustLinearRegressionAccumulator<Container>::bind(ByteStream_type& inStream) {
inStream >> numRows >> widthOfX;
- uint16_t actualWidthOfX = widthOfX.isNull() ? 0 : static_cast<uint16_t>(widthOfX);
+ uint16_t actualWidthOfX = widthOfX.isNull() ? static_cast<uint16_t>(0) : static_cast<uint16_t>(widthOfX);
inStream >> ols_coef.rebind(actualWidthOfX)
>> X_transp_X.rebind(actualWidthOfX, actualWidthOfX)
>> X_transp_r2_X.rebind(actualWidthOfX, actualWidthOfX);
@@ -494,7 +494,7 @@ HeteroLinearRegressionAccumulator<Container>::bind(ByteStream_type& inStream) {
inStream
>> numRows >> widthOfX >> a_sum >> a_square_sum;
uint16_t actualWidthOfX = widthOfX.isNull()
- ? 0
+ ? static_cast<uint16_t>(0)
: static_cast<uint16_t>(widthOfX);
inStream
>> X_transp_A.rebind(actualWidthOfX)
@@ -636,7 +636,7 @@ HeteroLinearRegression::compute(
if (ess > tss) ess = tss;
// Test statistic: numRows*Coefficient of determination
- test_statistic = inState.numRows*(tss == 0 ? 1 : ess / tss);
+ test_statistic = static_cast<double>(inState.numRows) * (tss == 0 ? 1 : ess / tss);
pValue = prob::cdf(complement(prob::chi_squared(
static_cast<double>(inState.widthOfX-1)), test_statistic));
2  src/modules/regress/clustered_errors.cpp
@@ -202,7 +202,7 @@ AnyType clustered_compute_stats (AnyType& args,
const MappedColumnVector& breadvec = args[2].getAs<MappedColumnVector>();
int mcluster = args[3].getAs<int>();
int numRows = args[4].getAs<int>();
- int k = coef.size();
+ int k = static_cast<int>(coef.size());
Matrix bread(k,k);
Matrix meat(k,k);
int count = 0;
2  src/modules/regress/clustered_errors_state.hpp
@@ -45,7 +45,7 @@ template <class Container>
inline void ClusteredState<Container>::bind(ByteStream_type& inStream)
{
inStream >> numRows >> widthOfX >> numCategories >> refCategory;
- uint16_t actualWidthOfX = widthOfX.isNull() ? 0 : static_cast<uint16_t>(widthOfX);
+ uint16_t actualWidthOfX = widthOfX.isNull() ? static_cast<uint16_t>(0) : static_cast<uint16_t>(widthOfX);
inStream >> coef.rebind(actualWidthOfX)
>> meat_half.rebind(1, actualWidthOfX)
>> bread.rebind(actualWidthOfX, actualWidthOfX);
4 src/modules/regress/logistic.cpp
@@ -1666,14 +1666,14 @@ marginal_logregr_step_final::run(AnyType &args) {
Matrix variance = decomposition.pseudoInverse();
// Standard error according to the delta method
Matrix std_err;
- std_err = state.delta * variance * trans(state.delta) / (state.numRows*state.numRows);
+ std_err = state.delta * variance * trans(state.delta) / static_cast<double>(state.numRows*state.numRows);
// Computing the marginal effects
return marginalstateToResult(*this,
state.coef,
std_err.diagonal(),
state.marginal_effects_per_observation,
- state.numRows);
+ static_cast<double>(state.numRows));
}
// ------------------------ End of Marginal ------------------------------------
69 src/modules/regress/marginal.cpp
@@ -42,7 +42,7 @@ AnyType margins_stateToResult(
const ColumnVector &inmarginal_effects_per_observation,
const double numRows) {
- uint16_t n_basis_terms = inmarginal_effects_per_observation.size();
+ uint16_t n_basis_terms = static_cast<uint16_t>(inmarginal_effects_per_observation.size());
MutableNativeColumnVector marginal_effects(
inAllocator.allocateArray<double>(n_basis_terms));
MutableNativeColumnVector stdErr(
@@ -310,11 +310,11 @@ margins_linregr_int_final::run(AnyType &args) {
// we only need the diagonal elements of the variance, so we perform a dot
// product of each row with itself to compute each diagonal element.
ColumnVector variance_diagonal =
- variance.cwiseProduct(state.delta).rowwise().sum() / (state.numRows * state.numRows);
+ variance.cwiseProduct(state.delta).rowwise().sum() / static_cast<double>(state.numRows * state.numRows);
// Computing the marginal effects
return margins_stateToResult(*this, variance_diagonal,
- state.marginal_effects, state.numRows);
+ state.marginal_effects, static_cast<double>(state.numRows));
}
// ---------------------------------------------------------------------------
@@ -499,15 +499,15 @@ margins_logregr_int_transition::run(AnyType &args) {
MappedColumnVector basis_indices = args[4].getAs<MappedColumnVector>();
// below symbols match the ones used in the design doc
- const uint16_t N = beta.size();
- const uint16_t M = basis_indices.size();
+ const uint16_t N = static_cast<uint16_t>(beta.size());
+ const uint16_t M = static_cast<uint16_t>(basis_indices.size());
assert(N >= M);
Matrix J; // J: N * M
if (args[5].isNull()){
J = Matrix::Zero(N, M);
for (Index i = 0; i < M; ++i)
- J(basis_indices(i), i) = 1;
+ J(static_cast<Index>(basis_indices(i)), i) = 1;
} else{
J = args[5].getAs<MappedMatrix>();
}
@@ -527,7 +527,7 @@ margins_logregr_int_transition::run(AnyType &args) {
warning("The categorical indices contain NULL values");
return Null();
}
- numCategoricalVars = categorical_indices.size();
+ numCategoricalVars = static_cast<uint16_t>(categorical_indices.size());
}
if (state.numRows == 0) {
@@ -544,12 +544,12 @@ margins_logregr_int_transition::run(AnyType &args) {
for (Index i = 0; i < basis_indices.size(); ++i){
for (Index j = 0; j < categorical_indices.size(); ++j){
if (basis_indices(i) == categorical_indices(j)){
- tmp_cat_basis_indices.push_back(i);
+ tmp_cat_basis_indices.push_back(static_cast<uint16_t>(i));
continue;
}
}
}
- state.numCategoricalVarsInSubset = tmp_cat_basis_indices.size();
+ state.numCategoricalVarsInSubset = static_cast<uint16_t>(tmp_cat_basis_indices.size());
}
state.initialize(*this,
static_cast<uint16_t>(N),
@@ -615,8 +615,8 @@ margins_logregr_int_transition::run(AnyType &args) {
f_set = f;
f_unset = f;
for (Index j=0; j < shortened_f_set.size(); ++j){
- f_set(categorical_indices(j)) = shortened_f_set(j);
- f_unset(categorical_indices(j)) = shortened_f_unset(j);
+ f_set(static_cast<Index>(categorical_indices(j))) = shortened_f_set(j);
+ f_unset(static_cast<Index>(categorical_indices(j))) = shortened_f_unset(j);
}
} else {
f_set = shortened_f_set;
@@ -682,11 +682,11 @@ margins_logregr_int_final::run(AnyType &args) {
// product of each row with state.delta to compute each diagonal element.
// We divide by numRows^2 since we need the average variance
ColumnVector variance_diagonal =
- variance.cwiseProduct(state.delta).rowwise().sum() / (state.numRows * state.numRows);
+ variance.cwiseProduct(state.delta).rowwise().sum() / static_cast<double>(state.numRows * state.numRows);
// Computing the final results
return margins_stateToResult(*this, variance_diagonal,
- state.marginal_effects, state.numRows);
+ state.marginal_effects, static_cast<double>(state.numRows));
}
// ------------------------ End of Logistic Marginal ---------------------------
@@ -828,16 +828,16 @@ class MarginsMLogregrInteractionState {
marginal_effects.rebind(&mStorage[5], M, L-1);
- uint16_t current_length = 5 + M*(L-1);
+ int current_length = 5 + M * (L - 1);
training_data_vcov.rebind(&mStorage[current_length], N*(L-1), N*(L-1));
- current_length += N*(L-1)*N*(L-1);
+ current_length += N * (L - 1) * N * (L - 1);
delta.rebind(&mStorage[current_length], M*(L-1), N*(L-1));
- current_length += N*(L-1)*M*(L-1);
+ current_length += N * (L - 1) * M * (L - 1);
if (inNumCategoricalVars > 0)
- categorical_basis_indices.rebind(&mStorage[current_length], inNumCategoricalVars);
+ categorical_basis_indices.rebind(&mStorage[static_cast<uint16_t>(current_length)], inNumCategoricalVars);
}
Handle mStorage;
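
The rebind hunk above also widens current_length from uint16_t to int: the storage offset grows like N*(L-1)*N*(L-1), which overflows a 16-bit counter at quite moderate sizes. An illustrative check (n and l hypothetical):

#include <cstdint>
#include <iostream>

int main() {
    const int n = 100, l = 5;                  // e.g. 100 terms, 5 categories
    uint16_t bad  = static_cast<uint16_t>(n * (l - 1) * n * (l - 1));
    int      good = n * (l - 1) * n * (l - 1);
    std::cout << bad << " vs " << good << "\n";  // 28928 vs 160000
}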
@@ -901,15 +901,15 @@ margins_mlogregr_int_transition::run(AnyType &args) {
MappedColumnVector basis_indices = args[4].getAs<MappedColumnVector>();
// all variable symbols correspond to the design document
- const uint16_t N = beta.rows();
- const uint16_t M = basis_indices.size();
+ const uint16_t N = static_cast<uint16_t>(beta.rows());
+ const uint16_t M = static_cast<uint16_t>(basis_indices.size());
assert(N >= M);
Matrix J; // J: N x M
if (args[5].isNull()){
J = Matrix::Zero(N, M);
for (Index i = 0; i < M; ++i)
- J(basis_indices(i), i) = 1;
+ J(static_cast<Index>(basis_indices(i)), i) = 1;
} else{
J = args[5].getAs<MappedMatrix>();
}
@@ -929,7 +929,7 @@ margins_mlogregr_int_transition::run(AnyType &args) {
warning("The categorical indices contain NULL values");
return Null();
}
- numCategoricalVars = categorical_indices.size();
+ numCategoricalVars = static_cast<uint16_t>(categorical_indices.size());
}
if (state.numRows == 0) {
@@ -946,12 +946,12 @@ margins_mlogregr_int_transition::run(AnyType &args) {
for (Index i = 0; i < basis_indices.size(); ++i){
for (Index j = 0; j < categorical_indices.size(); ++j){
if (basis_indices(i) == categorical_indices(j)){
- tmp_cat_basis_indices.push_back(i);
+ tmp_cat_basis_indices.push_back(static_cast<uint16_t>(i));
continue;
}
}
}
- state.numCategoricalVarsInSubset = tmp_cat_basis_indices.size();
+ state.numCategoricalVarsInSubset = static_cast<uint16_t>(tmp_cat_basis_indices.size());
}
state.initialize(*this,
static_cast<uint16_t>(J.rows()),
@@ -972,15 +972,15 @@ margins_mlogregr_int_transition::run(AnyType &args) {
// all variable symbols correspond to the design document
const uint16_t & L = state.numCategories;
- ColumnVector prob = trans(beta) * f;
- Matrix J_trans_beta = trans(J) * beta;
+ ColumnVector prob(trans(beta) * f);
+ Matrix J_trans_beta(trans(J) * beta);
// Calculate the odds ratio
prob = prob.array().exp();
double prob_sum = prob.sum();
prob = prob / (1 + prob_sum);
- ColumnVector JBP = J_trans_beta * prob;
+ ColumnVector JBP(J_trans_beta * prob);
Matrix curr_margins = J_trans_beta * prob.asDiagonal() - JBP * trans(prob);
// compute delta using 2nd derivatives
@@ -988,7 +988,8 @@ margins_mlogregr_int_transition::run(AnyType &args) {
// row_index = [0, (L-1)M), col_index = [0, (L-1)N)
// row_index(m, l) = m * (L-1) + l
// col_index(n, l1) = n * (L-1) + l1
- int row_index, col_index, delta_l_l1;
+ Index row_index, col_index;
+ int delta_l_l1;
for (int m = 0; m < M; m++){
// Skip the categorical variables
if (state.numCategoricalVarsInSubset > 0) {
@@ -1051,22 +1052,22 @@ margins_mlogregr_int_transition::run(AnyType &args) {
f_set = f;
f_unset = f;
for (Index j=0; j < shortened_f_set.size(); ++j){
- f_set(categorical_indices(j)) = shortened_f_set(j);
- f_unset(categorical_indices(j)) = shortened_f_unset(j);
+ f_set(static_cast<Index>(categorical_indices(j))) = shortened_f_set(j);
+ f_unset(static_cast<Index>(categorical_indices(j))) = shortened_f_unset(j);
}
} else {
f_set = shortened_f_set;
f_unset = shortened_f_unset;
}
- RowVector p_set = trans(f_set) * beta;
+ RowVector p_set(trans(f_set) * beta);
{
p_set = p_set.array().exp();
double p_sum = p_set.sum();
p_set = p_set / (1 + p_sum);
}
- RowVector p_unset = trans(f_unset) * beta;
+ RowVector p_unset(trans(f_unset) * beta);
{
p_unset = p_unset.array().exp();
double p_sum = p_unset.sum();
@@ -1135,17 +1136,17 @@ margins_mlogregr_int_final::run(AnyType &args) {
if (state.numRows == 0)
return Null();
- state.marginal_effects /= state.numRows;
+ state.marginal_effects /= static_cast<double>(state.numRows);
Matrix marginal_effects_trans = trans(state.marginal_effects);
AnyType tuple;
tuple << marginal_effects_trans;
// Variance for marginal effects according to the delta method
- Matrix variance = state.delta * state.training_data_vcov;
+ Matrix variance(state.delta * state.training_data_vcov);
// // we only need the diagonal elements of the variance, so we perform a dot
// // product of each row with itself to compute each diagonal element.
// // We divide by numRows^2 since we need the average variance
- Matrix std_err = variance.cwiseProduct(state.delta).rowwise().sum() / (state.numRows * state.numRows);
+ Matrix std_err = variance.cwiseProduct(state.delta).rowwise().sum() / static_cast<double>(state.numRows * state.numRows);
std_err = std_err.array().sqrt();
std_err.resize(state.numCategories-1, state.numBasis);
tuple << std_err;
18 src/modules/regress/mlogr_margins.cpp
@@ -200,9 +200,9 @@ mlogregr_marginal_step_transition::run(AnyType &args) {
}
// Get the category & numCategories as integer
- int16_t category = args[1].getAs<int>();
+ int16_t category = static_cast<int16_t>(args[1].getAs<int>());
// Number of categories after pivoting (We pivot around ref_category)
- int16_t numCategories = (args[2].getAs<int>() - 1);
+ int16_t numCategories = static_cast<int16_t>(args[2].getAs<int>() - 1);
int32_t ref_category = args[3].getAs<int32_t>();
MappedMatrix coefMat = args[5].getAs<MappedMatrix>();
@@ -297,7 +297,7 @@ mlogregr_marginal_step_transition::run(AnyType &args) {
}
triangularView<Lower>(state.X_transp_AX) += X_transp_AX;
- int numIndepVars = state.coef.size() / state.numCategories;
+ int numIndepVars = static_cast<int>(state.coef.size() / state.numCategories);
// Marginal effects (reference calculated separately)
ColumnVector coef_trans_prob;
@@ -356,7 +356,7 @@ mlogregr_marginal_step_merge_states::run(AnyType &args) {
AnyType mlogregr_marginalstateToResult(
const Allocator &inAllocator,
- const int numRows,
+ const double numRows,
const ColumnVector &inCoef,
const ColumnVector &inMargins,
const ColumnVector &inVariance
@@ -424,7 +424,7 @@ mlogregr_marginal_step_final::run(AnyType &args) {
// Include marginal effects of reference variable:
// FIXME: They have been taken out of the output for now
//const int size = state.coef.size() + numIndepVars;
- const int size = state.coef.size();
+ const size_t size = state.coef.size();
// Variance-covariance calculation
// ----------------------------------------------------------
@@ -434,7 +434,7 @@ mlogregr_marginal_step_final::run(AnyType &args) {
// Precompute -(X^T * A * X)^-1
Matrix V = decomposition.pseudoInverse();
- int numIndepVars = state.coef.size() / state.numCategories;
+ int numIndepVars = static_cast<int>(state.coef.size() / state.numCategories);
int numCategories = state.numCategories;
Matrix coef = state.coef;
@@ -445,7 +445,7 @@ mlogregr_marginal_step_final::run(AnyType &args) {
ColumnVector variance(size);
variance.setOnes();
- variance = (state.delta * V * trans(state.delta) / (state.numRows*state.numRows)).diagonal();
+ variance = (state.delta * V * trans(state.delta) / static_cast<double>(state.numRows*state.numRows)).diagonal();
// Add in reference variables to all the calculations
// ----------------------------------------------------------
@@ -457,12 +457,12 @@ mlogregr_marginal_step_final::run(AnyType &args) {
for (int j=0; j < numCategories; j++){
int index = k * numCategories + j;
coef_with_ref(index) = coef(j,k);
- margins_with_ref(index) = state.margins_matrix(j,k) / state.numRows;
+ margins_with_ref(index) = state.margins_matrix(j,k) / static_cast<double>(state.numRows);
}
}
return mlogregr_marginalstateToResult(*this,
- state.numRows,
+ static_cast<double>(state.numRows),
coef_with_ref,
margins_with_ref,
variance);
6 src/modules/regress/multilogistic.cpp
@@ -566,7 +566,7 @@ AnyType mLogstateToResult(
int ref_category = state.ref_category;
const HandleMap<const ColumnVector, TransparentHandle<double> > &inCoef = state.coef;
double logLikelihood = state.logLikelihood;
- int num_processed = state.numRows;
+ uint64_t num_processed = state.numRows;
// Per the hack at the end of the final function we place the inverse
// of the X_tranp_AX into the state.X_transp_AX
@@ -667,9 +667,9 @@ mlogregr_robust_step_transition::run(AnyType &args) {
}
// Get the category & numCategories as integer
- int16_t category = args[1].getAs<int>();
+ int16_t category = static_cast<int16_t>(args[1].getAs<int>());
// Number of categories after pivoting (We pivot around the first category)
- int16_t numCategories = (args[2].getAs<int>() - 1);
+ int16_t numCategories = static_cast<int16_t>(args[2].getAs<int>() - 1);
int32_t ref_category = args[3].getAs<int32_t>();
MappedMatrix coefMat = args[5].getAs<MappedMatrix>();
2  src/modules/stats/clustered_variance_coxph.cpp
@@ -197,7 +197,7 @@ AnyType coxph_compute_clustered_stats::run(AnyType& args)
hessian.transpose(), EigenvaluesOnly, ComputePseudoInverse);
Matrix inverse_of_hessian = decomposition.pseudoInverse();
- Matrix sandwich = inverse_of_hessian * (matA * matA.transpose()) * inverse_of_hessian;
+ Matrix sandwich(inverse_of_hessian * (matA * matA.transpose()) * inverse_of_hessian);
ColumnVector sig = sandwich.diagonal();
MutableNativeColumnVector std_err(
10 src/modules/stats/cox_prop_hazards.cpp
@@ -87,7 +87,7 @@ AnyType zph_transition::run(AnyType &args){
if (!dbal::eigen_integration::isfinite(x))
throw std::domain_error("Design matrix is not finite.");
- int data_dim = x.size();
+ int data_dim = static_cast<int>(x.size());
if (data_dim > std::numeric_limits<uint16_t>::max())
throw std::domain_error(
"Number of independent variables cannot be larger than 65535.");
@@ -354,10 +354,10 @@ AnyType array_elem_corr_final::run(AnyType &args) {
return Null();
ColumnVector S_xy =
- state.numRows * state.sum_xy - state.sum_x * state.sum_y;
+ static_cast<double>(state.numRows) * state.sum_xy - state.sum_x * state.sum_y;
ColumnVector S_xx =
- state.numRows * state.sum_xx - state.sum_x.cwiseProduct(state.sum_x);
- double S_yy = state.numRows * state.sum_yy - state.sum_y * state.sum_y;
+ static_cast<double>(state.numRows) * state.sum_xx - state.sum_x.cwiseProduct(state.sum_x);
+ double S_yy = static_cast<double>(state.numRows) * state.sum_yy - state.sum_y * state.sum_y;
ColumnVector correlation = S_xy.cwiseQuotient(S_xx.cwiseSqrt() * sqrt(S_yy));
return correlation;
@@ -371,7 +371,7 @@ AnyType coxph_resid_stat_transition::run(AnyType &args) {
MutableArrayHandle<double> state(NULL);
if(args[0].isNull()){
- int n = residual.size();
+ int n = static_cast<int>(residual.size());
// state[0]: m
// state[1]: n
// state[2]: w_t * w
6 src/modules/stats/coxph_improved.cpp
@@ -182,7 +182,7 @@ AnyType compute_coxph_result::run(AnyType &args) {
int nIter = args[3].getAs<int>();
MappedColumnVector stds = args[4].getAs<MappedColumnVector>();
- int m = coef.size();
+ int m = static_cast<int>(coef.size());
Matrix hessian = d2L;
hessian.resize(m, m);
@@ -386,10 +386,10 @@ AnyType array_avg_transition::run(AnyType& args)
state[0] += 1;
if (use_abs)
- for (size_t i = 1; i <= x.size(); i++)
+ for (int i = 1; i <= x.size(); i++)
state[i] += fabs(x(i-1));
else
- for (size_t i = 1; i <= x.size(); i++)
+ for (int i = 1; i <= x.size(); i++)
state[i] += x(i-1);
return state;
20 src/modules/stats/marginal_cox.cpp
@@ -208,15 +208,15 @@ margins_coxph_int_transition::run(AnyType &args) {
MappedColumnVector basis_indices = args[4].getAs<MappedColumnVector>();
// all variable symbols correspond to the design document
- const uint16_t N = beta.size();
- const uint16_t M = basis_indices.size();
+ const uint16_t N = static_cast<uint16_t>(beta.size());
+ const uint16_t M = static_cast<uint16_t>(basis_indices.size());
assert(N >= M);
Matrix J; // J: N x M
if (args[5].isNull()){
J = Matrix::Zero(N, M);
for (Index i = 0; i < M; ++i)
- J(basis_indices(i), i) = 1;
+ J(static_cast<Index>(basis_indices(i)), i) = 1;
} else{
J = args[5].getAs<MappedMatrix>();
}
@@ -234,7 +234,7 @@ margins_coxph_int_transition::run(AnyType &args) {
} catch (const ArrayWithNullException &e) {
throw std::runtime_error("The categorical indices contain NULL values");
}
- numCategoricalVars = categorical_indices.size();
+ numCategoricalVars = static_cast<uint16_t>(categorical_indices.size());
}
if (state.numRows == 0) {
@@ -249,12 +249,12 @@ margins_coxph_int_transition::run(AnyType &args) {
for (Index i = 0; i < basis_indices.size(); ++i){
for (Index j = 0; j < categorical_indices.size(); ++j){
if (basis_indices(i) == categorical_indices(j)){
- tmp_cat_basis_indices.push_back(i);
+ tmp_cat_basis_indices.push_back(static_cast<uint16_t>(i));
continue;
}
}
}
- state.numCategoricalVarsInSubset = tmp_cat_basis_indices.size();
+ state.numCategoricalVarsInSubset = static_cast<uint16_t>(tmp_cat_basis_indices.size());
}
state.initialize(*this,
@@ -324,8 +324,8 @@ margins_coxph_int_transition::run(AnyType &args) {
f_set = f;
f_unset = f;
for (Index j=0; j < shortened_f_set.size(); ++j){
- f_set(categorical_indices(j)) = shortened_f_set(j);
- f_unset(categorical_indices(j)) = shortened_f_unset(j);
+ f_set(static_cast<Index>(categorical_indices(j))) = shortened_f_set(j);
+ f_unset(static_cast<Index>(categorical_indices(j))) = shortened_f_unset(j);
}
} else {
f_set = shortened_f_set;
@@ -389,7 +389,7 @@ margins_coxph_int_final::run(AnyType &args) {
// We divide by numRows^2 since we need the average variance
ColumnVector std_err =
variance.cwiseProduct(state.delta).rowwise().sum();
- std_err = std_err.array().sqrt() / state.numRows;
+ std_err = std_err.array().sqrt() / static_cast<double>(state.numRows);
MutableNativeColumnVector tStats(this->allocateArray<double>(state.numBasis));
MutableNativeColumnVector pValues(this->allocateArray<double>(state.numBasis));
@@ -426,7 +426,7 @@ margins_compute_stats::run(AnyType &args) {
MappedColumnVector marginal_effects = args[0].getAs<MappedColumnVector>();
MappedColumnVector std_err = args[1].getAs<MappedColumnVector>();
- uint16_t n_basis_terms = marginal_effects.size();
+ uint16_t n_basis_terms = static_cast<uint16_t>(marginal_effects.size());
MutableNativeColumnVector tStats(
(*this).allocateArray<double>(n_basis_terms));
MutableNativeColumnVector pValues(
4 src/modules/stats/robust_variance_coxph.cpp
@@ -250,7 +250,7 @@ AnyType rb_coxph_step_final::run(AnyType& args)
state.hessian, EigenvaluesOnly, ComputePseudoInverse);
Matrix inverse_of_hessian = decomposition.pseudoInverse();
- Matrix sandwich = inverse_of_hessian * state.M * inverse_of_hessian;
+ Matrix sandwich(inverse_of_hessian * state.M * inverse_of_hessian);
ColumnVector sig = sandwich.diagonal();
MutableNativeColumnVector std_err(
@@ -475,7 +475,7 @@ AnyType rb_sum_strata_final::run(AnyType& args)
state.hessian, EigenvaluesOnly, ComputePseudoInverse);
Matrix inverse_of_hessian = decomposition.pseudoInverse();
- Matrix sandwich = inverse_of_hessian * state.M * inverse_of_hessian;
+ Matrix sandwich(inverse_of_hessian * state.M * inverse_of_hessian);
ColumnVector sig = sandwich.diagonal();
MutableNativeColumnVector std_err(
113 src/modules/tsa/arima.cpp
@@ -2,7 +2,7 @@
*
* @file arima.cpp
*
- * @brief ARIMA
+ * @brief ARIMA
*
* @date Aug 21, 2013
*//* ----------------------------------------------------------------------- */
@@ -47,7 +47,7 @@ static type_info FLOAT8TI(FLOAT8OID);
AnyType arima_residual::run (AnyType & args)
{
- int32_t distid = args[0].getAs<int32_t>();
+ int32_t distid = args[0].getAs<int32_t>();
ArrayHandle<double> tvals = args[1].getAs<ArrayHandle<double> >();
int p = args[2].getAs<int>();
int d = args[3].getAs<int>();
@@ -65,7 +65,8 @@ AnyType arima_residual::run (AnyType & args)
if(q > 0)
prez = args[8].getAs<ArrayHandle<double> >();
- int ret_size = (distid == 1) ? (tvals.size()+d) : (tvals.size()-p);
+ int ret_size = static_cast<int>((distid == 1) ? \
+ (tvals.size()+d) : (tvals.size()-p));
MutableArrayHandle<double> res(
madlib_construct_array(
NULL, ret_size, FLOAT8TI.oid,
@@ -77,7 +78,7 @@ AnyType arima_residual::run (AnyType & args)
double err = tvals[i] - mean;
for(int j = 0; j < p; j++)
err -= phi[j] * (tvals[i - j - 1] - mean);
- // note that for distid = 1, the first p residuals
+ // note that for distid = 1, the first p residuals
// will always be 0
res[(distid == 1) ? i+d : (i - p)] = err;
}
@@ -104,7 +105,7 @@ AnyType arima_residual::run (AnyType & args)
err -= theta[j] * errs[t-p+q-j-1];
errs[(distid == 1) ? (t+q+d) : (t - p + q)] = err;
}
- memcpy(res.ptr(), errs + q, ret_size * sizeof(double));
+ memcpy(res.ptr(), errs + q, ret_size * sizeof(double));
delete[] errs;
}
@@ -134,23 +135,23 @@ static int * diff_coef (int d)
AnyType arima_diff::run (AnyType & args)
{
ArrayHandle<double> tvals = args[0].getAs<ArrayHandle<double> >();
- int32_t d = args[1].getAs<int32_t>();
- int sz = tvals.size() - d;
+ uint32_t d = args[1].getAs<uint32_t>();
+ int sz = static_cast<int>(tvals.size() - d);
MutableArrayHandle<double> diffs(
madlib_construct_array(
NULL, sz, FLOAT8TI.oid, FLOAT8TI.len, FLOAT8TI.byval, FLOAT8TI.align));
-
+
// get diff coef
- int * coef = diff_coef(d);
+ int* coef = diff_coef(d);
- // in-place diff
- for(int i = tvals.size() - 1; i >= d; i--){
+ // in-place diff
+ for(size_t i = tvals.size() - 1; i >= d; i--){
diffs[i-d] = 0;
- for(int j = 0; j <=d; j++)
- diffs[i-d] += coef[j] * tvals[i - j];
+ for(size_t j = 0; j <=d; j++)
+ diffs[i-d] += coef[j] * tvals[i - j];
// tvals[i] = diff;
}
-
+
// // set the first d elements to zero
// for(int i = 0; i < d; i++)
// tvals[i] = 0;
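
One hedged caveat on the unsigned-loop rewrite above: with size_t i, the condition i >= d is vacuously true when d == 0, and i-- past zero wraps around, so the rewrite is only safe while d >= 1. A guarded reduction (simplified to a plain first difference rather than the binomial-coefficient form diff_coef builds):

#include <cstddef>

void diff_demo(double *out, const double *in, size_t n, size_t d) {
    if (d == 0 || n <= d) return;          // avoid size_t wrap-around
    for (size_t i = n - 1; i >= d; i--)    // exits once i reaches d - 1
        out[i - d] = in[i] - in[i - d];
}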
@@ -162,21 +163,24 @@ AnyType arima_diff::run (AnyType & args)
// ----------------------------------------------------------------------
AnyType arima_adjust::run (AnyType & args)
-{
+{
int distid = args[0].getAs<int>();
if (distid == 1) return args[1];
-
+
ArrayHandle<double> cur_tvals = args[1].getAs<ArrayHandle<double> >();
ArrayHandle<double> pre_tvals = args[2].getAs<ArrayHandle<double> >();
int32_t p = args[3].getAs<int32_t>();
- // note that curr_tvals.size() could be different with prez_tvals.size()
+ // note that curr_tvals.size() could be different with prez_tvals.size()
MutableArrayHandle<double> res(
- madlib_construct_array(
- NULL, cur_tvals.size() + p, FLOAT8TI.oid,
- FLOAT8TI.len, FLOAT8TI.byval, FLOAT8TI.align));
-
+ madlib_construct_array(NULL,
+ static_cast<int>(cur_tvals.size() + p),
+ FLOAT8TI.oid,
+ FLOAT8TI.len,
+ FLOAT8TI.byval,
+ FLOAT8TI.align));
+
// fill in the last p values from the previous tvals
for(int i = 0; i < p; i++)
res[i] = pre_tvals[pre_tvals.size() - p + i];
@@ -195,13 +199,13 @@ AnyType arima_lm_delta::run (AnyType & args)
MappedColumnVector g = args[1].getAs<MappedColumnVector>();
double u = args[2].getAs<double>();
- int l = g.size();
+ size_t l = g.size();
Matrix m_jj(jj);
m_jj.resize(l, l);
-
+
Matrix a = m_jj.diagonal().asDiagonal();
a = m_jj.transpose() + u * a;
-
+
ColumnVector x = a.lu().solve(g);
return x;
}
@@ -220,12 +224,12 @@ AnyType arima_lm::run (AnyType & args)
ArrayHandle<double> theta(NULL);
if (q > 0)
theta = args[5].getAs<ArrayHandle<double> >();
-
+
bool include_mean = true;
double mean = 0.0;
if (args[6].isNull())
include_mean = false;
- else
+ else
mean = args[6].getAs<double>();
int l = p + q;
@@ -247,10 +251,10 @@ AnyType arima_lm::run (AnyType & args)
prez = args[7].getAs<MutableArrayHandle<double> >();
prej = args[8].getAs<MutableArrayHandle<double> >();
}
- }
-
+ }
+
// minus the mean
- if (include_mean)
+ if (include_mean)
for(size_t i = 0; i < tvals.size(); i++)
tvals[i] -= mean;
@@ -294,7 +298,7 @@ AnyType arima_lm::run (AnyType & args)
jacob[p + i] -= theta[j] * prej[(q - j - 1) * l + p + i];
}
- // compute the partial derivatives over mean
+ // compute the partial derivatives over mean
if (include_mean) {
jacob[p + q] = 1;
for(int i = 0; i < p; i++)
@@ -324,9 +328,9 @@ AnyType arima_lm::run (AnyType & args)
for(int j = 0; j < l; j++)
jj[i * l + j] += jacob[i] * jacob[j];
- // update jz
+ // update jz
for(int i = 0; i < l; i++)
- jz[i] += jacob[i] * err;
+ jz[i] += jacob[i] * err;
// delete jacob
if(jacob) delete[] jacob;
@@ -344,7 +348,7 @@ AnyType arima_lm_result_sfunc::run (AnyType& args)
ArrayHandle<double> jj = args[1].getAs<ArrayHandle<double> >();
ArrayHandle<double> jz = args[2].getAs<ArrayHandle<double> >();
double z2 = args[3].getAs<double>();
- int l = jz.size();
+ int l = static_cast<int>(jz.size());
int l2 = l*l;
MutableArrayHandle<double> state(NULL);
@@ -356,7 +360,7 @@ AnyType arima_lm_result_sfunc::run (AnyType& args)
// state[l*l+l+1] - l
state = madlib_construct_array(NULL, l2+l+2, FLOAT8TI.oid,
FLOAT8TI.len, FLOAT8TI.byval, FLOAT8TI.align);
-
+
for (int i = 0; i < l2; i++) state[i] = jj[i];
for (int i = 0; i < l; i++) state[l2 + i] = jz[i];
state[l2 + l] = z2;
@@ -404,7 +408,7 @@ AnyType arima_lm_result_ffunc::run (AnyType& args)
if (jj[ll] > mx)
mx = jj[ll];
}
-
+
MutableArrayHandle<double> arr_jj(
madlib_construct_array(
NULL, l*l, FLOAT8TI.oid, FLOAT8TI.len, FLOAT8TI.byval, FLOAT8TI.align));
@@ -422,7 +426,7 @@ AnyType arima_lm_result_ffunc::run (AnyType& args)
// ----------------------------------------------------------------------
-static double error(int tid, const double * tvals, int p, int q,
+static double error(int tid, const double * tvals, int p, int q,
const double * phi, const double * theta, const double * prez)
{
double err = 0.0;
@@ -431,7 +435,7 @@ static double error(int tid, const double * tvals, int p, int q,
for(int i = 0; i < p; i++)
err -= phi[i] * tvals[p - i - 1];
for(int i = 0; i < q; i++)
- err -= theta[i] * prez[q - i - 1];
+ err -= theta[i] * prez[q - i - 1];
}
return err;
@@ -439,7 +443,7 @@ static double error(int tid, const double * tvals, int p, int q,
// ----------------------------------------------------------------------
-static double error(int tid, const double * tvals, int p, int q,
+static double error(int tid, const double * tvals, int p, int q,
double * phi, double * theta, const double * prez,
double delta, int pos1, int pos2, int sign1, int sign2)
{
@@ -450,7 +454,7 @@ static double error(int tid, const double * tvals, int p, int q,
phi[pos1] += sign1 * delta;
else if (pos1 < p + q)
theta[pos1-p] += sign1 * delta;
- else
+ else
dmean = sign1 * delta;
} else {
if (pos1 < p)
@@ -474,7 +478,7 @@ static double error(int tid, const double * tvals, int p, int q,
for (int i = 0; i < p; i++)
err -= phi[i] * (tvals[p - i - 1] - dmean);
for (int i = 0; i < q; i++)
- err -= theta[i] * prez[q - i - 1];
+ err -= theta[i] * prez[q - i - 1];
}
// Restore the original coefficients
@@ -537,11 +541,11 @@ AnyType arima_lm_stat_sfunc::run (AnyType& args)
tvals[i] -= mean;
MutableArrayHandle<double> state(NULL);
-
+
if (args[0].isNull()) {
// Eqs. (1.2.21, 1.2.22) tells how many Z^2 are needed
// Also Hessian is symmetric
- int sz = (2 * l * l + 1) * (1 + q) + 4;
+ int sz = (2 * l * l + 1) * (1 + q) + 4;
// state[0] -- l
// state[1] -- delta
// state[2] -- N
@@ -572,14 +576,14 @@ AnyType arima_lm_stat_sfunc::run (AnyType& args)
// The one without delta
int prez_offset = 4 + 2 * l * l;
for (size_t t = p; t < tvals.size(); t++) {
- int dtid = (distid == 1) ? (1+t) : (p+1);
- int dtv = t - p;
+ int dtid = static_cast<int>((distid == 1) ? (1+t) : (p+1));
+ int dtv = static_cast<int>(t - p);
double * prez = state.ptr() + prez_offset;
double err = error(dtid, tvals.ptr()+dtv, p, q, phi.ptr(), theta.ptr(), prez);
state[3] += err * err;
- update_prez(prez, q, err);
-
+ update_prez(prez, q, err);
+
// The others with delta
int count = 0;
for (int i = 0; i < l; i++) {
@@ -601,15 +605,14 @@ AnyType arima_lm_stat_sfunc::run (AnyType& args)
update_prez(prez, q, err);
}
count += 4;
- }
+ }
}
}
}
- if (distid == 1)
- state[2] += tvals.size();
- else
- state[2] += tvals.size() - p;
+ if (distid == 1) { state[2] += static_cast<double>(tvals.size()); }
+ else { state[2] += static_cast<double>(tvals.size() - p); }
+
return state;
}
@@ -638,7 +641,7 @@ AnyType arima_lm_stat_ffunc::run (AnyType& args)
if(j == i) {
hessian[l * i + i] = (state[4 + i * 2] - 2 * z2 + state[4 + i * 2 + 1]) / delta2;
} else {
- hessian[l * i + j] = (state[offset + count] - state[offset + count + 1]
+ hessian[l * i + j] = (state[offset + count] - state[offset + count + 1]
- state[offset + count + 2] + state[offset + count + 3]) / delta2;
hessian[l * j + i] = hessian[l * i + j];
count += 4;
@@ -651,7 +654,7 @@ AnyType arima_lm_stat_ffunc::run (AnyType& args)
SymmetricPositiveDefiniteEigenDecomposition<Matrix> decomposition(
m.transpose(), EigenvaluesOnly, ComputePseudoInverse);
ColumnVector diag = decomposition.pseudoInverse().diagonal();
-
+
MutableArrayHandle<double> std_err(
madlib_construct_array(
NULL, l, FLOAT8TI.oid, FLOAT8TI.len, FLOAT8TI.byval,
@@ -659,9 +662,9 @@ AnyType arima_lm_stat_ffunc::run (AnyType& args)
for (int i = 0; i < l; i++) std_err[i] = sqrt(diag[i]);
delete [] hessian;
-
+
AnyType tuple;
- tuple << std_err << sigma2 << loglik;
+ tuple << std_err << sigma2 << loglik;
return tuple;
}
22 src/ports/postgres/modules/regress/multilogistic.sql_in
@@ -416,23 +416,23 @@ File multilogistic.sql_in documenting the multinomial logistic regression functi
DROP TYPE IF EXISTS MADLIB_SCHEMA.mlogregr_result CASCADE;
CREATE TYPE MADLIB_SCHEMA.mlogregr_result AS
(
- ref_category INTEGER,
- coef DOUBLE PRECISION[],
- log_likelihood DOUBLE PRECISION,
- std_err DOUBLE PRECISION[],
- z_stats DOUBLE PRECISION[],
- p_values DOUBLE PRECISION[],
- odds_ratios DOUBLE PRECISION[],
- condition_no DOUBLE PRECISION,
- num_iterations INTEGER,
- num_processed INTEGER,
+ ref_category INTEGER,
+ coef DOUBLE PRECISION[],
+ log_likelihood DOUBLE PRECISION,
+ std_err DOUBLE PRECISION[],
+ z_stats DOUBLE PRECISION[],
+ p_values DOUBLE PRECISION[],
+ odds_ratios DOUBLE PRECISION[],
+ condition_no DOUBLE PRECISION,
+ num_iterations INTEGER,
+ num_processed BIGINT,
variance_covariance DOUBLE PRECISION[]
);
DROP TYPE IF EXISTS MADLIB_SCHEMA.mlogregr_summary_result CASCADE;
CREATE TYPE MADLIB_SCHEMA.mlogregr_summary_result AS
(
- coef DOUBLE PRECISION[],
+ coef DOUBLE PRECISION[],
variance_covariance DOUBLE PRECISION[]
);
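
This is the SQL half of the num_processed fix: the C++ side (multilogistic.cpp above) now reads the row count as uint64_t, so the composite type's old INTEGER field would truncate counts beyond INT4 range on large tables; BIGINT matches the 64-bit counter. A quick illustrative bound check:

#include <cstdint>
#include <iostream>

int main() {
    const int64_t rows = 3000000000LL;         // a plausible large table
    std::cout << (rows > INT32_MAX) << "\n";   // 1: too big for INTEGER
    std::cout << (rows <= INT64_MAX) << "\n";  // 1: fits in BIGINT
}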
2  src/ports/postgres/modules/regress/test/logistic.sql_in
@@ -683,7 +683,7 @@ select logregr_train(
'admit',
'ARRAY[1, gre, gpa, (rank = 2)::INT::FLOAT8, (rank = 3)::INT::FLOAT8, (rank = 4)::INT::FLOAT8]',
Null,
- 2000,
+ 20,
'cg',
0
);