diff --git a/benchmarks/hasheddoc_benchmarks.cpp b/benchmarks/hasheddoc_benchmarks.cpp index f48204499be..3046db768c1 100644 --- a/benchmarks/hasheddoc_benchmarks.cpp +++ b/benchmarks/hasheddoc_benchmarks.cpp @@ -22,13 +22,14 @@ int main(int argv, char** argc) int32_t num_strings = 5000; int32_t max_str_length = 10000; SGStringList string_list(num_strings, max_str_length); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist('A', 'Z'); SG_SPRINT("Creating features...\n"); for (index_t i=0; i(max_str_length); for (index_t j=0; jrandom('A', 'Z'); + string_list.strings[i].string[j] = (char)dist(prng); } SG_SPRINT("Features were created.\n"); diff --git a/benchmarks/rf_feats_benchmark.cpp b/benchmarks/rf_feats_benchmark.cpp index 9daf777223d..0947f1d99e1 100644 --- a/benchmarks/rf_feats_benchmark.cpp +++ b/benchmarks/rf_feats_benchmark.cpp @@ -16,7 +16,8 @@ int main(int argv, char** argc) int32_t dims[] = {100, 300, 600}; CTime* timer = new CTime(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t d=0; d<3; d++) { int32_t num_dim = dims[d]; @@ -28,7 +29,7 @@ int main(int argv, char** argc) { for (index_t j=0; jrandom(0, 1) + 0.5; + mat(j, i) = dist(prng) + 0.5; } } diff --git a/benchmarks/rf_feats_kernel_comp.cpp b/benchmarks/rf_feats_kernel_comp.cpp index 52bc49cf336..5126c5a41cd 100644 --- a/benchmarks/rf_feats_kernel_comp.cpp +++ b/benchmarks/rf_feats_kernel_comp.cpp @@ -29,7 +29,8 @@ int main(int argv, char** argc) float64_t lin_C = 0.1; float64_t non_lin_C = 0.1; CPRCEvaluation* evaluator = new CPRCEvaluation(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); CSqrtDiagKernelNormalizer* normalizer = new CSqrtDiagKernelNormalizer(true); SG_REF(normalizer); for (index_t d=0; d<4; d++) @@ -49,12 +50,12 @@ int main(int argv, char** argc) if ((i+j)%2==0) { labs[i] = -1; - mat(j, i) = m_rng->random(0, 1) + 0.5; + mat(j, i) = dist(prng) + 0.5; } else { labs[i] = 1; - mat(j, i) = m_rng->random(0, 1) - 0.5; + mat(j, i) = dist(prng) - 0.5; } } } diff --git a/examples/undocumented/libshogun/classifier_larank.cpp b/examples/undocumented/libshogun/classifier_larank.cpp index a554ea3e1b5..492b24964c5 100644 --- a/examples/undocumented/libshogun/classifier_larank.cpp +++ b/examples/undocumented/libshogun/classifier_larank.cpp @@ -24,14 +24,15 @@ void test() SGMatrix matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - matrix_test(j, i) = m_rng->std_normal_distrib(); + matrix(j, i) = dist(prng); + matrix_test(j, i) = dist(prng); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/examples/undocumented/libshogun/classifier_latent_svm.cpp b/examples/undocumented/libshogun/classifier_latent_svm.cpp index 38e3039fa6a..63407e80d75 100644 --- a/examples/undocumented/libshogun/classifier_latent_svm.cpp +++ b/examples/undocumented/libshogun/classifier_latent_svm.cpp @@ -110,7 +110,7 @@ static void read_dataset(char* fname, CLatentFeatures*& feats, CLatentLabels*& l SG_REF(labels); CBinaryLabels* ys = new CBinaryLabels(num_examples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); feats = new 
CLatentFeatures(num_examples); SG_REF(feats); @@ -144,10 +144,11 @@ static void read_dataset(char* fname, CLatentFeatures*& feats, CLatentLabels*& l while ((*pchar)!='\n') pchar++; *pchar = '\0'; height = atoi(last_pchar); - + std::uniform_int_distribution dist_w(0, width - 1); + std::uniform_int_distribution dist_h(0, height - 1); /* create latent label */ - int x = m_rng->random(0, width - 1); - int y = m_rng->random(0, height - 1); + int x = dist_w(prng); + int y = dist_h(prng); CBoundingBox* bb = new CBoundingBox(x,y); labels->add_latent_label(bb); diff --git a/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp b/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp index ab96153d89f..4ab6f2a6c7e 100644 --- a/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp +++ b/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp @@ -10,7 +10,8 @@ using namespace shogun; //generates data points (of different classes) randomly void gen_rand_data(SGMatrix features, SGVector labels, float64_t distance) { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0, 1.0); index_t num_samples = labels.vlen; index_t dimensions = features.num_rows; for (int32_t i = 0; i < num_samples; i++) @@ -19,13 +20,13 @@ void gen_rand_data(SGMatrix features, SGVector labels, flo { labels[i] = -1.0; for (int32_t j = 0; j < dimensions; j++) - features(j, i) = m_rng->random(0.0, 1.0) + distance; + features(j, i) = dist(prng) + distance; } else { labels[i] = 1.0; for (int32_t j = 0; j < dimensions; j++) - features(j, i) = m_rng->random(0.0, 1.0) - distance; + features(j, i) = dist(prng) - distance; } } labels.display_vector("labels"); diff --git a/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp b/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp index b672d6e1fd2..3d55b6a223a 100644 --- a/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp +++ b/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp @@ -66,9 +66,10 @@ void test() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(); diff --git a/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp b/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp index 091d9095292..8b85bc166ea 100644 --- a/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp +++ b/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp @@ -27,24 +27,26 @@ void test_svmlight() float64_t p_x=0.5; // probability for class A float64_t mostly_prob=0.8; CDenseLabels* labels=new CBinaryLabels(num_train+num_test); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_real_distribution dist_real(0.0, 1.0); + std::uniform_int_distribution dist_int(1, max_length); SGStringList data(num_train+num_test, max_length); for (index_t i=0; irandom(1, max_length); + index_t length = dist_int(prng); /* allocate string */ data.strings[i]=SGString(length); /* fill 
with elements and set label */ - if (p_x < m_rng->random(0.0, 1.0)) + if (p_x < dist_real(prng)) { labels->set_label(i, 1); for (index_t j=0; jrandom(0.0, 1.0) ? '0' : '1'; + char c = mostly_prob < dist_real(prng) ? '0' : '1'; data.strings[i].string[j]=c; } } @@ -53,7 +55,7 @@ void test_svmlight() labels->set_label(i, -1); for (index_t j=0; jrandom(0.0, 1.0) ? '1' : '0'; + char c = mostly_prob < dist_real(prng) ? '1' : '0'; data.strings[i].string[j]=c; } } diff --git a/examples/undocumented/libshogun/clustering_kmeans.cpp b/examples/undocumented/libshogun/clustering_kmeans.cpp index e01c3ce86ea..aea63f3032c 100644 --- a/examples/undocumented/libshogun/clustering_kmeans.cpp +++ b/examples/undocumented/libshogun/clustering_kmeans.cpp @@ -36,7 +36,7 @@ int main(int argc, char **argv) int32_t dim_features=3; int32_t num_vectors_per_cluster=5; float64_t cluster_std_dev=2.0; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* build random cluster centers */ SGMatrix cluster_centers(dim_features, num_clusters); @@ -57,7 +57,9 @@ int main(int argc, char **argv) idx+=j; idx+=k*dim_features; float64_t entry=cluster_centers.matrix[i*dim_features+j]; - data.matrix[idx] = m_rng->normal_random(entry, cluster_std_dev); + std::normal_distribution dist( + entry, cluster_std_dev); + data.matrix[idx] = dist(prng); } } } diff --git a/examples/undocumented/libshogun/converter_jade_bss.cpp b/examples/undocumented/libshogun/converter_jade_bss.cpp index c6950098a9a..ae7642536f0 100644 --- a/examples/undocumented/libshogun/converter_jade_bss.cpp +++ b/examples/undocumented/libshogun/converter_jade_bss.cpp @@ -28,7 +28,8 @@ using namespace Eigen; void test() { // Generate sample data - auto m_rng = std::unique_ptr(new CRandom(0)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); int n_samples = 2000; VectorXd time(n_samples, true); time.setLinSpaced(n_samples,0,10); @@ -39,11 +40,11 @@ void test() { // Sin wave S(0,i) = sin(2*time[i]); - S(0, i) += 0.2 * m_rng->std_normal_distrib(); + S(0, i) += 0.2 * dist(prng); // Square wave S(1,i) = sin(3*time[i]) < 0 ? 
-1 : 1; - S(1, i) += 0.2 * m_rng->std_normal_distrib(); + S(1, i) += 0.2 * dist(prng); } // Standardize data diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp index f21eb40a801..b88a2b633a0 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp @@ -26,7 +26,7 @@ void test_cross_validation() /* data matrix dimensions */ index_t num_vectors=40; index_t num_features=5; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* data means -1, 1 in all components, std deviation of 3 */ SGVector mean_1(num_features); SGVector mean_2(num_features); @@ -44,8 +44,8 @@ void test_cross_validation() for (index_t j=0; jnormal_random(mean, sigma); + std::normal_distribution dist(mean, sigma); + train_dat.matrix[i * num_features + j] = dist(prng); } } diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp index 89d96eb391e..1a052d9f7c7 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp @@ -35,7 +35,8 @@ void test_cross_validation() SGVector::fill_vector(mean_1.vector, mean_1.vlen, -1.0); SGVector::fill_vector(mean_2.vector, mean_2.vlen, 1.0); float64_t sigma=1.5; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); /* fill data matrix around mean */ SGMatrix train_dat(num_features, num_vectors); @@ -44,8 +45,7 @@ void test_cross_validation() for (index_t j=0; jnormal_random(mean, sigma); + train_dat.matrix[i * num_features + j] = dist(prng); } } diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp index 3efdf06ea76..ba99709cd0e 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp @@ -24,7 +24,8 @@ void gen_rand_data(SGVector lab, SGMatrix feat, { index_t dims=feat.num_rows; index_t num=lab.vlen; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution uniform_dist(0.0, 1.0); for (int32_t i=0; i lab, SGMatrix feat, lab[i]=-1.0; for (int32_t j=0; jrandom(0.0, 1.0) + dist; + feat(j, i) = uniform_dist(prng) + dist; } else { lab[i]=1.0; for (int32_t j=0; jrandom(0.0, 1.0) - dist; + feat(j, i) = uniform_dist(prng) - dist; } } lab.display_vector("lab"); diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp index 9ef0e2d85f5..0bb40bf0ee5 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp @@ -30,7 +30,8 @@ void test_cross_validation() /* training label data */ SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -38,7 +39,7 @@ void 
test_cross_validation() for (index_t i=0; inormal_random(0, 1.0); + lab.vector[i] = i + dist(prng); } /* training features */ diff --git a/examples/undocumented/libshogun/features_subset_labels.cpp b/examples/undocumented/libshogun/features_subset_labels.cpp index f9145e24a41..7cb6059fa62 100644 --- a/examples/undocumented/libshogun/features_subset_labels.cpp +++ b/examples/undocumented/libshogun/features_subset_labels.cpp @@ -20,8 +20,9 @@ const int32_t num_classes=3; void test() { - auto m_rng = std::unique_ptr(new CRandom()); - const int32_t num_subset_idx = m_rng->random(1, num_labels); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, num_labels); + const int32_t num_subset_idx = dist(prng); /* create labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); diff --git a/examples/undocumented/libshogun/features_subset_simple_features.cpp b/examples/undocumented/libshogun/features_subset_simple_features.cpp index ceed29bbe2f..14f57ce97dd 100644 --- a/examples/undocumented/libshogun/features_subset_simple_features.cpp +++ b/examples/undocumented/libshogun/features_subset_simple_features.cpp @@ -46,17 +46,19 @@ const int32_t dim_features=6; void test() { - auto m_rng = std::unique_ptr(new CRandom()); - const int32_t num_subset_idx = m_rng->random(1, num_vectors); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, num_vectors); + const int32_t num_subset_idx = dist(prng); /* create feature data matrix */ SGMatrix data(dim_features, num_vectors); /* fill matrix with random data */ + std::uniform_int_distribution dist_s(-5, 5); for (index_t i=0; irandom(-5, 5); + data.matrix[i * dim_features + j] = dist_s(prng); } /* create simple features */ diff --git a/examples/undocumented/libshogun/hashed_features_example.cpp b/examples/undocumented/libshogun/hashed_features_example.cpp index 930c27e931d..237585bc4ca 100644 --- a/examples/undocumented/libshogun/hashed_features_example.cpp +++ b/examples/undocumented/libshogun/hashed_features_example.cpp @@ -12,12 +12,13 @@ int main() int32_t num_vectors = 5; int32_t dim = 20; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-dim, dim); SGMatrix mat(dim, num_vectors); for (index_t v=0; vrandom(-dim, dim); + mat(d, v) = dist(prng); } int32_t hashing_dim = 12; diff --git a/examples/undocumented/libshogun/kernel_custom.cpp b/examples/undocumented/libshogun/kernel_custom.cpp index f6b5526d14b..adde82a25b4 100644 --- a/examples/undocumented/libshogun/kernel_custom.cpp +++ b/examples/undocumented/libshogun/kernel_custom.cpp @@ -30,11 +30,11 @@ void test_custom_kernel_subsets() /* create a random permutation */ SGVector subset(m); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t run=0; run<100; ++run) { subset.range_fill(); - CMath::permute(subset, prng.get()); + CMath::permute(subset, prng); // subset.display_vector("permutation"); features->add_subset(subset); k->init(features, features); diff --git a/examples/undocumented/libshogun/kernel_custom_kernel.cpp b/examples/undocumented/libshogun/kernel_custom_kernel.cpp index 39625877d69..aa59bce952e 100644 --- a/examples/undocumented/libshogun/kernel_custom_kernel.cpp +++ b/examples/undocumented/libshogun/kernel_custom_kernel.cpp @@ -28,11 +28,11 @@ void test_custom_kernel_subsets() /* create a random permutation */ SGVector subset(m); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t run=0; run<100; ++run) { subset.range_fill(); - 
CMath::permute(subset, prng.get()); + CMath::permute(subset, prng); // subset.display_vector("permutation"); features->add_subset(subset); k->init(features, features); diff --git a/examples/undocumented/libshogun/kernel_machine_train_locked.cpp b/examples/undocumented/libshogun/kernel_machine_train_locked.cpp index 19ad7841a17..e0ab44b33b1 100644 --- a/examples/undocumented/libshogun/kernel_machine_train_locked.cpp +++ b/examples/undocumented/libshogun/kernel_machine_train_locked.cpp @@ -34,7 +34,7 @@ void test() SGVector::display_vector(mean_1.vector, mean_1.vlen, "mean 1"); SGVector::display_vector(mean_2.vector, mean_2.vlen, "mean 2"); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* fill data matrix around mean */ SGMatrix train_dat(num_features, num_vectors); @@ -43,8 +43,8 @@ void test() for (index_t j=0; jnormal_random(mean, sigma); + std::normal_distribution dist(mean, sigma); + train_dat.matrix[i * num_features + j] = dist(prng); } } diff --git a/examples/undocumented/libshogun/library_serialization.cpp b/examples/undocumented/libshogun/library_serialization.cpp index d924f6920ba..61abcdf9d60 100644 --- a/examples/undocumented/libshogun/library_serialization.cpp +++ b/examples/undocumented/libshogun/library_serialization.cpp @@ -12,14 +12,15 @@ int main(int argc, char** argv) /* create feature data matrix */ SGMatrix data(3, 20); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 9); /* fill matrix with random data */ for (index_t i=0; i<20*3; ++i) { if (i%2==0) data.matrix[i]=0; else - data.matrix[i] = m_rng->random(1, 9); + data.matrix[i] = dist(prng); } /* create sparse features */ diff --git a/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp b/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp index dacd83f87d4..84f47351ba4 100644 --- a/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp +++ b/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp @@ -97,10 +97,11 @@ void modelselection_combined_kernel() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(matrix); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp b/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp index 6a8144bb222..5ad4ca903cd 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp @@ -100,10 +100,11 @@ int main(int argc, char **argv) /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(matrix); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp b/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp index 
c5172e4b099..019ecf5e648 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp @@ -74,7 +74,8 @@ void test_cross_validation() /* training label data */ SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -82,7 +83,7 @@ void test_cross_validation() for (index_t i=0; inormal_random(0, 1.0); + lab.vector[i] = i + dist(prng); } /* training features */ diff --git a/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp b/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp index 8b793a451f1..584e85934d3 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp @@ -61,9 +61,10 @@ void test() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create feature object */ CDenseFeatures* features=new CDenseFeatures (); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp b/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp index d7a47cb3e60..bc19c598fbe 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp @@ -51,13 +51,14 @@ void test() /* create data: some easy multiclass data */ SGMatrix feat=SGMatrix(dim_vectors, num_vectors); SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t j=0; jstd_normal_distrib(); + feat(i, j) = dist(prng); /* make sure classes are (alomst) linearly seperable against each other */ feat(lab[j],j)+=distance; diff --git a/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp b/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp index 90bef7e0f87..39c0119da7e 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp @@ -73,17 +73,20 @@ int main(int argc, char **argv) index_t num_subsets=num_strings/3; SGStringList strings(num_strings, max_string_length); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_len( + min_string_length, max_string_length); + std::uniform_int_distribution dist_asc('A', 'Z'); for (index_t i=0; irandom(min_string_length, max_string_length); + index_t len = dist_len(prng); SGString current(len); SG_SPRINT("string %i: \"", i); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); + current.string[j] = (char)dist_asc(prng); char* string=new char[2]; string[0]=current.string[j]; diff --git a/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp b/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp index 52a1a092592..369fca1a325 100644 --- a/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp +++ b/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp @@ -45,7 +45,8 @@ int 
main(int, char*[]) init_shogun_with_defaults(); // initialize the random number generator with a fixed seed, for repeatability - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-1.0, 1.0); // Prepare the training data const int num_features = 5; @@ -67,11 +68,14 @@ int main(int, char*[]) } for (int32_t i=0; irandom(-1.0, 1.0); + means[i] = dist(prng); for (int32_t i=0; inormal_random(means[i], 1.0); + { + std::normal_distribution dist_x(means[i], 1.0); + X(i, j) = dist_x(prng); + } CDenseFeatures* features = new CDenseFeatures(X); diff --git a/examples/undocumented/libshogun/parameter_iterate_float64.cpp b/examples/undocumented/libshogun/parameter_iterate_float64.cpp index 41f729caa6b..0c60848a2a2 100644 --- a/examples/undocumented/libshogun/parameter_iterate_float64.cpp +++ b/examples/undocumented/libshogun/parameter_iterate_float64.cpp @@ -29,9 +29,13 @@ int main(int argc, char** argv) /* create some random data */ SGMatrix matrix(n,n); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for(int32_t i=0; irandom((float64_t)-n, (float64_t)n); + { + std::uniform_real_distribution dist( + (float64_t)-n, (float64_t)n); + matrix.matrix[i] = dist(prng); + } SGMatrix::display_matrix(matrix.matrix, n, n); diff --git a/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp b/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp index cc78092a216..5aa6bc4722f 100644 --- a/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp +++ b/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp @@ -26,11 +26,15 @@ int main(int argc, char** argv) const int32_t n=7; init_shogun(&print_message); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* create some random data and hand it to each kernel */ SGMatrix matrix(n,n); for (int32_t k=0; krandom((float64_t)-n, (float64_t)n); + { + std::uniform_real_distribution dist( + (float64_t)-n, (float64_t)n); + matrix.matrix[k] = dist(prng); + } SG_SPRINT("feature data:\n"); SGMatrix::display_matrix(matrix.matrix, n, n); @@ -41,8 +45,8 @@ int main(int argc, char** argv) CGaussianKernel** kernels=SG_MALLOC(CGaussianKernel*, n); for (int32_t i=0; irandom(0.0, (float64_t)n * n)); + std::uniform_real_distribution dist(0.0, (float64_t)n * n); + kernels[i] = new CGaussianKernel(10, dist(prng)); /* hand data to kernel */ kernels[i]->init(features, features); diff --git a/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp b/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp index 3b54dfd2ed9..a61f2b4eb31 100644 --- a/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp +++ b/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp @@ -27,7 +27,8 @@ void gen_rand_data(float64_t* & feat, float64_t* & lab,const int32_t num,const i { lab=SG_MALLOC(float64_t, num); feat=SG_MALLOC(float64_t, num*dims); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) + dist; + feat[i * dims + j] = dist(prng) + dist; } else { lab[i]=1.0; for (int32_t j=0; jrandom(0.0, 1.0) - dist; + feat[i * dims + j] = dist(prng) - dist; } } CMath::display_vector(lab,num); diff --git a/examples/undocumented/libshogun/random_fourier_features.cpp b/examples/undocumented/libshogun/random_fourier_features.cpp index 3546777b138..f931611e679 100644 --- a/examples/undocumented/libshogun/random_fourier_features.cpp +++ 
b/examples/undocumented/libshogun/random_fourier_features.cpp @@ -18,7 +18,8 @@ void load_data(int32_t num_dim, int32_t num_vecs, { SGMatrix mat(num_dim, num_vecs); SGVector labs(num_vecs); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t i=0; irandom(0, 1) + 0.5; + mat(j, i) = dist(prng) + 0.5; } else { labs[i] = 1; - mat(j, i) = m_rng->random(0, 1) - 0.5; + mat(j, i) = dist(prng) - 0.5; } } } diff --git a/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp b/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp index 877158cd51d..d40b1b6d4ca 100644 --- a/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp +++ b/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp @@ -28,10 +28,11 @@ void test() SGMatrix X(1, n); SGMatrix X_test(1, n); SGVector Y(n); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, x_range); for (index_t i=0; irandom(0.0, x_range); + X[i] = dist(prng); X_test[i]=(float64_t)i / n*x_range; Y[i]=CMath::sin(X[i]); } diff --git a/examples/undocumented/libshogun/regression_libsvr.cpp b/examples/undocumented/libshogun/regression_libsvr.cpp index e22de541478..8c387554ec9 100644 --- a/examples/undocumented/libshogun/regression_libsvr.cpp +++ b/examples/undocumented/libshogun/regression_libsvr.cpp @@ -22,7 +22,8 @@ void test_libsvr() /* create some easy regression data: 1d noisy sine wave */ index_t n=100; float64_t x_range=6; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, x_range); SGMatrix feat_train(1, n); SGMatrix feat_test(1, n); SGVector lab_train(n); @@ -30,8 +31,8 @@ void test_libsvr() for (index_t i=0; irandom(0.0, x_range); - feat_test[i]=(float64_t)i/n*x_range; + feat_train[i] = dist(prng); + feat_test[i] = (float64_t)i / n * x_range; lab_train[i]=CMath::sin(feat_train[i]); lab_test[i]=CMath::sin(feat_test[i]); } diff --git a/examples/undocumented/libshogun/serialization_multiclass_labels.cpp b/examples/undocumented/libshogun/serialization_multiclass_labels.cpp index 2dbafb4d3ed..33a6c998a86 100644 --- a/examples/undocumented/libshogun/serialization_multiclass_labels.cpp +++ b/examples/undocumented/libshogun/serialization_multiclass_labels.cpp @@ -25,11 +25,12 @@ void test() labels->allocate_confidences_for(n_class); SGVector conf(n_class); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + conf[i] = dist(prng); - for (index_t i=0; iset_multiclass_confidences(i, conf); /* create serialized copy */ diff --git a/examples/undocumented/libshogun/so_fg_model.cpp b/examples/undocumented/libshogun/so_fg_model.cpp index 5202a2c136d..36ef4dd2403 100644 --- a/examples/undocumented/libshogun/so_fg_model.cpp +++ b/examples/undocumented/libshogun/so_fg_model.cpp @@ -43,6 +43,8 @@ void test(int32_t num_samples) CFactorGraphLabels* labels = new CFactorGraphLabels(num_samples); SG_REF(labels); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t n = 0; n < num_samples; ++n) { // factor graph @@ -50,11 +52,10 @@ void test(int32_t num_samples) SGVector::fill_vector(vc.vector, vc.vlen, 2); CFactorGraph* fg = new CFactorGraph(vc); - auto m_rng = std::unique_ptr(new CRandom()); // add factors SGVector data1(2); - data1[0] = 2.0 * 
m_rng->random(0.0, 1.0) - 1.0; - data1[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data1[0] = 2.0 * dist(prng) - 1.0; + data1[1] = 2.0 * dist(prng) - 1.0; SGVector var_index1(2); var_index1[0] = 0; var_index1[1] = 1; @@ -62,8 +63,8 @@ void test(int32_t num_samples) fg->add_factor(fac1); SGVector data2(2); - data2[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; - data2[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data2[0] = 2.0 * dist(prng) - 1.0; + data2[1] = 2.0 * dist(prng) - 1.0; SGVector var_index2(2); var_index2[0] = 1; var_index2[1] = 2; diff --git a/examples/undocumented/libshogun/so_multiclass.cpp b/examples/undocumented/libshogun/so_multiclass.cpp index ef4b4878565..561e7579031 100644 --- a/examples/undocumented/libshogun/so_multiclass.cpp +++ b/examples/undocumented/libshogun/so_multiclass.cpp @@ -34,7 +34,9 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) { float64_t means[DIMS]; float64_t stds[DIMS]; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_m(-100, 100); + std::uniform_int_distribution dist_s(1, 5); FILE* pfile = fopen(FNAME, "w"); @@ -42,8 +44,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) { for ( int32_t j = 0 ; j < DIMS ; ++j ) { - means[j] = m_rng->random(-100, 100); - stds[j] = m_rng->random(1, 5); + means[j] = dist_m(prng); + stds[j] = dist_s(prng); } for ( int32_t i = 0 ; i < NUM_SAMPLES ; ++i ) @@ -54,8 +56,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) for ( int32_t j = 0 ; j < DIMS ; ++j ) { - feats[(c * NUM_SAMPLES + i) * DIMS + j] = - m_rng->normal_random(means[j], stds[j]); + std::normal_distribution dist(means[j], stds[j]); + feats[(c * NUM_SAMPLES + i) * DIMS + j] = dist(prng); fprintf(pfile, " %f", feats[(c*NUM_SAMPLES+i)*DIMS + j]); } diff --git a/examples/undocumented/libshogun/so_multiclass_BMRM.cpp b/examples/undocumented/libshogun/so_multiclass_BMRM.cpp index 6dcbd3caac5..b4dc5be9deb 100644 --- a/examples/undocumented/libshogun/so_multiclass_BMRM.cpp +++ b/examples/undocumented/libshogun/so_multiclass_BMRM.cpp @@ -88,14 +88,16 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) FILE* pfile = fopen(FNAME, "w"); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist_m(-100, 100); + std::uniform_int_distribution dist_s(1, 5); for ( int32_t c = 0 ; c < NUM_CLASSES ; ++c ) { for ( int32_t j = 0 ; j < DIMS ; ++j ) { - means[j] = m_rng->random(-100, 100); - stds[j] = m_rng->random(1, 5); + means[j] = dist_m(prng); + stds[j] = dist_s(prng); } for ( int32_t i = 0 ; i < NUM_SAMPLES ; ++i ) @@ -106,8 +108,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) for ( int32_t j = 0 ; j < DIMS ; ++j ) { - feats[(c * NUM_SAMPLES + i) * DIMS + j] = - m_rng->normal_random(means[j], stds[j]); + std::normal_distribution dist(means[j], stds[j]); + feats[(c * NUM_SAMPLES + i) * DIMS + j] = dist(prng); fprintf(pfile, " %d:%f", j+1, feats[(c*NUM_SAMPLES+i)*DIMS + j]); } diff --git a/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp index 17a79a26357..2f97308af31 100644 --- a/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp @@ -16,19 +16,21 @@ int main(int argc, char **argv) index_t num_labels; index_t runs=10; - auto m_rng = std::unique_ptr(new 
CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist_sl(-10.0, 10.0); + std::uniform_int_distribution dist_l(10, 50); while (runs-->0) { - num_labels = m_rng->random(10, 50); + num_labels = dist_l(prng); - //SG_SPRINT("num_labels=%d\n\n", num_labels); + // SG_SPRINT("num_labels=%d\n\n", num_labels); - /* build labels */ - CRegressionLabels* labels=new CRegressionLabels(num_labels); + /* build labels */ + CRegressionLabels* labels = new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_sl(prng)); // SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } diff --git a/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp index b7dfa806e37..47e052a1198 100644 --- a/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp @@ -22,12 +22,15 @@ int main(int argc, char **argv) index_t num_labels; index_t num_subsets; index_t runs=100; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist_l(-10.0, 10.0); + std::uniform_int_distribution dist_nl(10, 150); + std::uniform_int_distribution dist_ns(1, 5); while (runs-->0) { - num_labels = m_rng->random(10, 150); - num_subsets = m_rng->random(1, 5); + num_labels = dist_nl(prng); + num_subsets = dist_ns(prng); index_t desired_size=CMath::round( (float64_t)num_labels/(float64_t)num_subsets); @@ -41,7 +44,7 @@ int main(int argc, char **argv) CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_l(prng)); SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } SG_SPRINT("\n"); diff --git a/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp index 400b666f8c7..fae373a904d 100644 --- a/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp @@ -21,13 +21,16 @@ int main(int argc, char **argv) index_t num_labels, num_classes, num_subsets; index_t runs=50; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_nl(5, 100); + std::uniform_int_distribution dist_nc(2, 10); + std::uniform_int_distribution dist_ns(1, 10); while (runs-->0) { - num_labels = m_rng->random(5, 100); - num_classes = m_rng->random(2, 10); - num_subsets = m_rng->random(1, 10); + num_labels = dist_nl(prng); + num_classes = dist_nc(prng); + num_subsets = dist_ns(prng); /* this will throw an error */ if (num_labelsset_label(i, m_rng->random_64() % num_classes); + labels->set_label(i, prng() % num_classes); SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } SG_SPRINT("\n"); diff --git a/examples/undocumented/libshogun/streaming_from_dense.cpp b/examples/undocumented/libshogun/streaming_from_dense.cpp index e0855fb141f..d5d3d97ffbc 100644 --- a/examples/undocumented/libshogun/streaming_from_dense.cpp +++ b/examples/undocumented/libshogun/streaming_from_dense.cpp @@ -23,13 +23,14 @@ using namespace shogun; void gen_rand_data(SGMatrix feat, SGVector lab) { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) + DIST; + feat[i
* DIMS + j] = dist(prng) + DIST; if (lab.vector) lab[i]=0; @@ -37,7 +38,7 @@ void gen_rand_data(SGMatrix feat, SGVector lab) else { for (int32_t j=0; jrandom(0.0, 1.0) - DIST; + feat[i * DIMS + j] = dist(prng) - DIST; if (lab.vector) lab[i]=1; diff --git a/src/interfaces/swig/Mathematics.i b/src/interfaces/swig/Mathematics.i index 9fc4993afe0..771f366c68e 100644 --- a/src/interfaces/swig/Mathematics.i +++ b/src/interfaces/swig/Mathematics.i @@ -23,32 +23,15 @@ namespace shogun { #ifdef USE_INT32 %rename(pow_int32) CMath::pow(int32_t,int32_t); -%rename(random_int32) CMath::random(int32_t,int32_t); -#endif - -#ifdef USE_UINT32 -%rename(random_uint32) CMath::random(uint32_t,uint32_t); -#endif - -#ifdef USE_INT64 -%rename(random_int64) CMath::random(int64_t,int64_t); -#endif - -#ifdef USE_UINT64 -%rename(random_uint64) CMath::random(uint64_t,uint64_t); #endif #ifdef USE_FLOAT32 -%rename(normal_random_float32) CMath::normal_random(float32_t,float32_t); -%rename(random_float32) CMath::random(float32_t,float32_t); %rename(sqrt_float32) CMath::sqrt(float32_t); #endif #ifdef USE_FLOAT64 -%rename(normal_random_float64) CMath::normal_random(float64_t,float64_t); %rename(pow_float64_int32) CMath::pow(float64_t,int32_t); %rename(pow_float64_float64) CMath::pow(float64_t,float64_t); -%rename(random_float64) CMath::random(float64_t,float64_t); %rename(sqrt_float64) CMath::sqrt(float64_t); } #endif diff --git a/src/shogun/base/DynArray.h b/src/shogun/base/DynArray.h index a0adc7fa11b..d33087d8157 100644 --- a/src/shogun/base/DynArray.h +++ b/src/shogun/base/DynArray.h @@ -445,18 +445,25 @@ template class DynArray /** randomizes the array (not thread safe!) */ void shuffle() { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t i=0; i<=current_num_elements-1; ++i) - CMath::swap( - array[i], - array[m_rng->random(i, current_num_elements - 1)]); + { + std::uniform_int_distribution dist( + i, current_num_elements - 1); + CMath::swap(array[i], array[dist(prng)]); + } } /** randomizes the array with external random state */ - void shuffle(CRandom * rand) + template + void shuffle(RandomGenerator& prng) { for (index_t i=0; i<=current_num_elements-1; ++i) - CMath::swap(array[i], array[rand->random(i, current_num_elements-1)]); + { + std::uniform_int_distribution dist( + i, current_num_elements - 1); + CMath::swap(array[i], array[dist(prng)]); + } } /** set array with a constant */ diff --git a/src/shogun/base/SGObject.cpp b/src/shogun/base/SGObject.cpp index aa4ffce7ed1..9a73e4a50bc 100644 --- a/src/shogun/base/SGObject.cpp +++ b/src/shogun/base/SGObject.cpp @@ -510,7 +510,6 @@ void CSGObject::init() m_parameters = new Parameter(); m_model_selection_parameters = new Parameter(); m_gradient_parameters=new Parameter(); - m_rng = std::unique_ptr(new CRandom()); m_generic = PT_NOT_GENERIC; m_load_pre_called = false; m_load_post_called = false; diff --git a/src/shogun/base/SGObject.h b/src/shogun/base/SGObject.h index 9df5c8770a6..649c0b7b913 100644 --- a/src/shogun/base/SGObject.h +++ b/src/shogun/base/SGObject.h @@ -23,6 +23,7 @@ #include #include +#include #include #include @@ -39,7 +40,6 @@ class Parallel; class Parameter; class CSerializableFile; class ParameterObserverInterface; -class CRandom; template class CMap; @@ -743,10 +743,6 @@ class CSGObject /** Hash of parameter values*/ uint32_t m_hash; -protected: - /** random generator */ - std::unique_ptr m_rng; - private: EPrimitiveType m_generic; diff --git a/src/shogun/base/init.h b/src/shogun/base/init.h index 
ba4529a33ad..6aaccdbb6c4 100644 --- a/src/shogun/base/init.h +++ b/src/shogun/base/init.h @@ -21,7 +21,6 @@ namespace shogun class CMath; class Version; class Parallel; - class CRandom; class SGLinalg; class CSignal; extern uint32_t sg_random_seed; @@ -140,7 +139,7 @@ namespace shogun */ uint32_t generate_seed(); - template + template T get_prng() { return T(sg_random_seed); } diff --git a/src/shogun/classifier/svm/GNPPLib.cpp b/src/shogun/classifier/svm/GNPPLib.cpp index d3715a6ee8d..7befd439cc6 100644 --- a/src/shogun/classifier/svm/GNPPLib.cpp +++ b/src/shogun/classifier/svm/GNPPLib.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include diff --git a/src/shogun/classifier/svm/LibLinear.cpp b/src/shogun/classifier/svm/LibLinear.cpp index 94a0db644fe..2289f07dad6 100644 --- a/src/shogun/classifier/svm/LibLinear.cpp +++ b/src/shogun/classifier/svm/LibLinear.cpp @@ -325,7 +325,8 @@ void CLibLinear::solve_l2r_l1l2_svc( auto pb = progress(range(10)); CTime start_time; - while (iter < get_max_iterations() && !cancel_computation()) + auto prng = get_prng(); + while (iter < max_iterations && !cancel_computation()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -336,7 +337,9 @@ void CLibLinear::solve_l2r_l1l2_svc( for (i = 0; i < active_size; i++) { - int j = m_rng->random(i, active_size - 1); + std::uniform_int_distribution uniform_int_dist( + i, active_size - 1); + int j = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } @@ -538,7 +541,8 @@ void CLibLinear::solve_l1r_l2_svc( auto pb = progress(range(10)); CTime start_time; - while (iter < get_max_iterations() && !cancel_computation()) + auto prng = get_prng(); + while (iter < max_iterations && !cancel_computation()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -548,7 +552,9 @@ void CLibLinear::solve_l1r_l2_svc( for (j = 0; j < active_size; j++) { - int i = m_rng->random(j, active_size - 1); + std::uniform_int_distribution uniform_int_dist( + j, active_size - 1); + int i = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } @@ -912,7 +918,8 @@ void CLibLinear::solve_l1r_lr( auto pb = progress(range(10)); CTime start_time; - while (iter < get_max_iterations() && !cancel_computation()) + auto prng = get_prng(); + while (iter < max_iterations && !cancel_computation()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -922,7 +929,9 @@ void CLibLinear::solve_l1r_lr( for (j = 0; j < active_size; j++) { - int i = m_rng->random(j, active_size - 1); + std::uniform_int_distribution uniform_int_dist( + j, active_size - 1); + int i = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } @@ -1267,11 +1276,13 @@ void CLibLinear::solve_l2r_lr_dual( } auto pb = progress(range(10)); + auto prng = get_prng(); while (iter < max_iter) { for (i = 0; i < l; i++) { - int j = m_rng->random(i, l - 1); + std::uniform_int_distribution uniform_int_dist(i, l - 1); + int j = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } int newton_iter = 0; diff --git a/src/shogun/clustering/GMM.cpp b/src/shogun/clustering/GMM.cpp index 9891ad4d412..d41d46fdc3b 100644 --- a/src/shogun/clustering/GMM.cpp +++ b/src/shogun/clustering/GMM.cpp @@ -22,6 +22,7 @@ using namespace std; CGMM::CGMM() : CDistribution(), m_components(), m_coefficients() { + m_rng = get_prng(); register_params(); } @@ -36,7 +37,7 @@ CGMM::CGMM(int32_t n, ECovType cov_type) : CDistribution(), m_components(), m_co
SG_REF(m_components[i]); m_components[i]->set_cov_type(cov_type); } - + m_rng = get_prng(); register_params(); } @@ -82,7 +83,7 @@ CGMM::CGMM(vector components, SGVector coefficients, bool m_coefficients[i]=coefficients[i]; } } - + m_rng = get_prng(); register_params(); } @@ -402,14 +403,14 @@ void CGMM::partial_em(int32_t comp1, int32_t comp2, int32_t comp3, float64_t min linalg::add(temp_mean_result, temp_mean, temp_mean_result, alpha1, alpha2); components[1]->set_mean(temp_mean_result); + std::normal_distribution normal_dist(0, 1); + auto prng = get_prng(); for (int32_t i=0; iget_mean().vector[i] = - components[0]->get_mean().vector[i] + - m_rng->std_normal_distrib() * noise_mag; + components[0]->get_mean().vector[i] + normal_dist(prng) * noise_mag; components[0]->get_mean().vector[i] = - components[0]->get_mean().vector[i] + - m_rng->std_normal_distrib() * noise_mag; + components[0]->get_mean().vector[i] + normal_dist(prng) * noise_mag; } coefficients.vector[1]=coefficients.vector[1]+coefficients.vector[2]; @@ -793,7 +794,9 @@ SGVector CGMM::sample() { REQUIRE(m_components.size()>0, "Number of mixture components is %d but " "must be positive\n", m_components.size()); - float64_t rand_num = m_rng->random(float64_t(0), float64_t(1)); + + std::uniform_real_distribution uniform_real_dist(0.0, 1.0); + float64_t rand_num = uniform_real_dist(m_rng); float64_t cum_sum=0; for (int32_t i=0; i #include #include +#include #include namespace shogun @@ -242,6 +243,8 @@ class CGMM : public CDistribution std::vector m_components; /** Mixture coefficients */ SGVector m_coefficients; + + std::mt19937_64 m_rng; }; } #endif //_GMM_H__ diff --git a/src/shogun/clustering/KMeans.cpp b/src/shogun/clustering/KMeans.cpp index 387105c8f65..6d61b99682d 100644 --- a/src/shogun/clustering/KMeans.cpp +++ b/src/shogun/clustering/KMeans.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include using namespace Eigen; diff --git a/src/shogun/clustering/KMeansBase.cpp b/src/shogun/clustering/KMeansBase.cpp index 94bca347ef1..73b98bd8c3a 100644 --- a/src/shogun/clustering/KMeansBase.cpp +++ b/src/shogun/clustering/KMeansBase.cpp @@ -10,7 +10,6 @@ #include #include #include -#include #include using namespace shogun; @@ -268,9 +267,11 @@ SGMatrix CKMeansBase::kmeanspp() centers.zero(); SGVector min_dist=SGVector(lhs_size); min_dist.zero(); - + auto prng = get_prng(); + std::uniform_int_distribution uniform_int_dist( + (index_t)0, lhs_size - 1); /* First center is chosen at random */ - int32_t mu = m_rng->random((int32_t)0, lhs_size - 1); + int32_t mu = uniform_int_dist(prng); SGVector mu_first=lhs->get_feature_vector(mu); for(int32_t j=0; j CKMeansBase::kmeanspp() #endif //HAVE_LINALG int32_t n_rands=2 + int32_t(CMath::log(k)); + std::uniform_real_distribution dist_prob(0.0, 1.0); /* Choose centers with weighted probability */ for(int32_t i=1; i CKMeansBase::kmeanspp() float64_t temp_dist=0.0; SGVector temp_min_dist = SGVector(lhs_size); int32_t new_center = 0; - float64_t prob = m_rng->random(0.0, 1.0); - prob=prob*sum; + float64_t prob = dist_prob(prng); + prob = prob * sum; for(int32_t j=0; j #include #include #include -#include #ifdef _WIN32 #undef far @@ -128,12 +127,14 @@ SGVector CKMeansMiniBatch::mbchoose_rand(int32_t b, int32_t num) { SGVector chosen=SGVector(num); SGVector ret=SGVector(b); - auto rng = std::unique_ptr(new CRandom()); + + std::uniform_int_distribution uniform_int_dist(0, num - 1); chosen.zero(); int32_t ch=0; + auto prng = get_prng(); while (chrandom(0, num - 1); + const int32_t n = uniform_int_dist(prng); if (chosen[n]==0) {
chosen[n]+=1; diff --git a/src/shogun/converter/ica/FastICA.cpp b/src/shogun/converter/ica/FastICA.cpp index e8a3f05bb25..490c587fb20 100644 --- a/src/shogun/converter/ica/FastICA.cpp +++ b/src/shogun/converter/ica/FastICA.cpp @@ -112,15 +112,16 @@ CFeatures* CFastICA::apply(CFeatures* features) WX = EX; } + std::normal_distribution normal_dist(0, 1); // Initial mixing matrix estimate if (m_mixing_matrix.num_rows != m || m_mixing_matrix.num_cols != m) { m_mixing_matrix = SGMatrix(m,m); - + auto prng = get_prng(); for (int i = 0; i < m; i++) { for (int j = 0; j < m; j++) - m_mixing_matrix(i, j) = m_rng->std_normal_distrib(); + m_mixing_matrix(i, j) = normal_dist(prng); } } diff --git a/src/shogun/distributions/Gaussian.cpp b/src/shogun/distributions/Gaussian.cpp index e1a04118787..8325a5eb271 100644 --- a/src/shogun/distributions/Gaussian.cpp +++ b/src/shogun/distributions/Gaussian.cpp @@ -44,6 +44,7 @@ CGaussian::CGaussian( void CGaussian::init() { m_constant=CMath::log(2*M_PI)*m_mean.vlen; + m_rng = get_prng(); switch (m_cov_type) { case FULL: @@ -406,9 +407,9 @@ SGVector CGaussian::sample() } SGVector random_vec(m_mean.vlen); - + std::normal_distribution dist(0, 1); for (int32_t i = 0; i < m_mean.vlen; i++) - random_vec.vector[i] = m_rng->std_normal_distrib(); + random_vec.vector[i] = dist(m_rng); if (m_cov_type == FULL) { diff --git a/src/shogun/distributions/Gaussian.h b/src/shogun/distributions/Gaussian.h index 54b5bdad0d1..ff702624f46 100644 --- a/src/shogun/distributions/Gaussian.h +++ b/src/shogun/distributions/Gaussian.h @@ -240,6 +240,8 @@ class CGaussian : public CDistribution SGVector m_mean; /** covariance type */ ECovType m_cov_type; + + std::mt19937_64 m_rng; }; } #endif //_GAUSSIAN_H__ diff --git a/src/shogun/distributions/HMM.cpp b/src/shogun/distributions/HMM.cpp index 5c5b993d87b..55941826b24 100644 --- a/src/shogun/distributions/HMM.cpp +++ b/src/shogun/distributions/HMM.cpp @@ -21,9 +21,8 @@ #define VAL_MACRO \ [&]() { \ - return log( \ - (default_value == 0) ? (m_rng->random(MIN_RAND, MAX_RAND)) \ - : default_value); \ + std::uniform_real_distribution dist(MIN_RAND, MAX_RAND); \ + return log((default_value == 0) ? 
(dist(prng)) : default_value); \ } #define ARRAY_SIZE 65336 @@ -2448,13 +2447,15 @@ void CHMM::init_model_random() float64_t sum; int32_t i,j; + auto prng = get_prng(); + std::uniform_real_distribution dist(MIN_RAND, 1.0); //initialize a with random values for (i=0; irandom(MIN_RAND, 1.0)); + set_a(i, j, dist(prng)); sum+=get_a(i,j); } @@ -2467,7 +2468,7 @@ void CHMM::init_model_random() sum=0; for (i=0; irandom(MIN_RAND, 1.0)); + set_p(i, dist(prng)); sum+=get_p(i); } @@ -2479,7 +2480,7 @@ void CHMM::init_model_random() sum=0; for (i=0; irandom(MIN_RAND, 1.0)); + set_q(i, dist(prng)); sum+=get_q(i); } @@ -2493,7 +2494,7 @@ void CHMM::init_model_random() sum=0; for (j=0; jrandom(MIN_RAND, 1.0)); + set_b(i, j, dist(prng)); sum+=get_b(i,j); } @@ -2531,11 +2532,12 @@ void CHMM::init_model_defined() for (j=0; j dist(MIN_RAND, 1.0); //initialize a values that have to be learned float64_t *R=SG_MALLOC(float64_t, N); for (r = 0; r < N; r++) - R[r] = m_rng->random(MIN_RAND, 1.0); + R[r] = dist(prng); i=0; sum=0; k=i; j=model->get_learn_a(i,0); while (model->get_learn_a(i,0)!=-1 || krandom(MIN_RAND, 1.0); + R[r] = dist(prng); } } SG_FREE(R); R=NULL ; @@ -2565,7 +2567,7 @@ void CHMM::init_model_defined() //initialize b values that have to be learned R=SG_MALLOC(float64_t, M); for (r = 0; r < M; r++) - R[r] = m_rng->random(MIN_RAND, 1.0); + R[r] = dist(prng); i=0; sum=0; k=0 ; j=model->get_learn_b(i,0); while (model->get_learn_b(i,0)!=-1 || krandom(MIN_RAND, 1.0); + R[r] = dist(prng); } } SG_FREE(R); R=NULL ; @@ -2634,7 +2636,7 @@ void CHMM::init_model_defined() sum=0; while (model->get_learn_p(i)!=-1) { - set_p(model->get_learn_p(i), m_rng->random(MIN_RAND, 1.0)); + set_p(model->get_learn_p(i), dist(prng)); sum+=get_p(model->get_learn_p(i)) ; i++ ; } ; @@ -2650,7 +2652,7 @@ void CHMM::init_model_defined() sum=0; while (model->get_learn_q(i)!=-1) { - set_q(model->get_learn_q(i), m_rng->random(MIN_RAND, 1.0)); + set_q(model->get_learn_q(i), dist(prng)); sum+=get_q(model->get_learn_q(i)) ; i++ ; } ; @@ -5091,7 +5093,7 @@ void CHMM::add_states(int32_t num_states, float64_t default_value) for (j=0; j CGaussianDistribution::sample(int32_t num_samples, - SGMatrix pre_samples) const +SGMatrix CGaussianDistribution::sample( + int32_t num_samples, SGMatrix pre_samples) { REQUIRE(num_samples>0, "Number of samples (%d) must be positive\n", num_samples); @@ -85,10 +85,11 @@ SGMatrix CGaussianDistribution::sample(int32_t num_samples, } else { + std::normal_distribution normal_dist(0, 1); /* allocate memory and sample from std normal */ samples=SGMatrix(m_dimension, num_samples); for (index_t i=0; istd_normal_distrib(); + samples.matrix[i] = normal_dist(m_rng); } /* map into desired Gaussian covariance */ @@ -164,6 +165,7 @@ SGVector CGaussianDistribution::log_pdf_multiple(SGMatrix void CGaussianDistribution::init() { + m_rng = get_prng(); SG_ADD(&m_mean, "mean", "Mean of the Gaussian.", MS_NOT_AVAILABLE); SG_ADD(&m_L, "L", "Lower factor of covariance matrix, " "depending on the factorization type.", MS_NOT_AVAILABLE); diff --git a/src/shogun/distributions/classical/GaussianDistribution.h b/src/shogun/distributions/classical/GaussianDistribution.h index 2370d56f9b6..5239027b10f 100644 --- a/src/shogun/distributions/classical/GaussianDistribution.h +++ b/src/shogun/distributions/classical/GaussianDistribution.h @@ -33,9 +33,9 @@ #ifndef GAUSSIANDISTRIBUTION_H #define GAUSSIANDISTRIBUTION_H +#include #include - #include #include #include @@ -86,8 +86,9 @@ class CGaussianDistribution: public CProbabilityDistribution 
* will be modified. * @return matrix with samples (column vectors) */ - virtual SGMatrix sample(int32_t num_samples, - SGMatrix pre_samples=SGMatrix()) const; + virtual SGMatrix sample( + int32_t num_samples, + SGMatrix pre_samples = SGMatrix()); /** Computes the log-pdf for all provided samples. That is * @@ -139,6 +140,8 @@ class CGaussianDistribution: public CProbabilityDistribution /** Lower factor of covariance matrix (depends on factorization type). * Covariance (approximation) is given by \f$\Sigma=LL^T\f$ */ SGMatrix m_L; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/distributions/classical/ProbabilityDistribution.cpp b/src/shogun/distributions/classical/ProbabilityDistribution.cpp index 72607694dfc..8fd2fb6c0c7 100644 --- a/src/shogun/distributions/classical/ProbabilityDistribution.cpp +++ b/src/shogun/distributions/classical/ProbabilityDistribution.cpp @@ -33,14 +33,14 @@ CProbabilityDistribution::~CProbabilityDistribution() } -SGMatrix CProbabilityDistribution::sample(int32_t num_samples, - SGMatrix pre_samples) const +SGMatrix CProbabilityDistribution::sample( + int32_t num_samples, SGMatrix pre_samples) { SG_ERROR("Not implemented in sub-class\n"); return SGMatrix(); } -SGVector CProbabilityDistribution::sample() const +SGVector CProbabilityDistribution::sample() { SGMatrix s=sample(1); SGVector result(m_dimension); diff --git a/src/shogun/distributions/classical/ProbabilityDistribution.h b/src/shogun/distributions/classical/ProbabilityDistribution.h index 114b09dac93..c6d32090a1e 100644 --- a/src/shogun/distributions/classical/ProbabilityDistribution.h +++ b/src/shogun/distributions/classical/ProbabilityDistribution.h @@ -42,15 +42,16 @@ class CProbabilityDistribution: public CSGObject * CGaussianDistribution. For reproducible results. Ignored by default. * @return matrix with samples (column vectors) */ - virtual SGMatrix sample(int32_t num_samples, - SGMatrix pre_samples=SGMatrix()) const; + virtual SGMatrix sample( + int32_t num_samples, + SGMatrix pre_samples = SGMatrix()); /** Samples from the distribution once. Wrapper method. No pre-sample * passing is possible with this method. 
* * @return vector with single sample */ - virtual SGVector sample() const; + virtual SGVector sample(); /** Computes the log-pdf for all provided samples * diff --git a/src/shogun/evaluation/CrossValidationSplitting.cpp b/src/shogun/evaluation/CrossValidationSplitting.cpp index 4a8df09c14a..974f7147bc1 100644 --- a/src/shogun/evaluation/CrossValidationSplitting.cpp +++ b/src/shogun/evaluation/CrossValidationSplitting.cpp @@ -30,7 +30,8 @@ void CCrossValidationSplitting::build_subsets() /* permute indices */ SGVector indices(m_labels->get_num_labels()); indices.range_fill(); - CMath::permute(indices, m_rng.get()); + auto prng = get_prng(); + CMath::permute(indices, prng); index_t num_subsets=m_subset_indices->get_num_elements(); @@ -55,5 +56,5 @@ void CCrossValidationSplitting::build_subsets() /* finally shuffle to avoid that subsets with low indices have more * elements, which happens if the number of class labels is not equal to * the number of subsets (external random state important for threads) */ - m_subset_indices->shuffle(m_rng.get()); + m_subset_indices->shuffle(prng); } diff --git a/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp b/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp index df9bd93a2e8..7f05b7d5225 100644 --- a/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp +++ b/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp @@ -72,7 +72,7 @@ void CStratifiedCrossValidationSplitting::build_subsets() /* ensure that subsets are empty and set flag to filled */ reset_subsets(); m_is_filled=true; - + auto prng = get_prng(); SGVector unique_labels; if (m_labels->get_label_type() == LT_MULTICLASS) @@ -118,7 +118,7 @@ void CStratifiedCrossValidationSplitting::build_subsets() label_indices.get_element(i); // external random state important for threads - current->shuffle(m_rng.get()); + current->shuffle(prng); SG_UNREF(current); } @@ -146,5 +146,5 @@ void CStratifiedCrossValidationSplitting::build_subsets() /* finally shuffle to avoid that subsets with low indices have more * elements, which happens if the number of class labels is not equal to * the number of subsets (external random state important for threads) */ - m_subset_indices->shuffle(m_rng.get()); + m_subset_indices->shuffle(prng); } diff --git a/src/shogun/features/DataGenerator.cpp b/src/shogun/features/DataGenerator.cpp index 08e040951e8..f3e966e1587 100644 --- a/src/shogun/features/DataGenerator.cpp +++ b/src/shogun/features/DataGenerator.cpp @@ -31,7 +31,8 @@ SGMatrix CDataGenerator::generate_checkboard_data(int32_t num_classes int32_t dim, int32_t num_points, float64_t overlap) { int32_t points_per_class = num_points / num_classes; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution uniform_real_dist(0.0, 1.0); int32_t grid_size = (int32_t ) CMath::ceil(CMath::sqrt((float64_t ) num_classes)); float64_t cell_size = (float64_t ) 1 / grid_size; @@ -53,12 +54,13 @@ SGMatrix CDataGenerator::generate_checkboard_data(int32_t num_classes { do { - points(i, p) = m_rng->normal_random( + std::normal_distribution normal_dist( class_dim_centers[i], cell_size * 0.5); + points(i, p) = normal_dist(prng); if ((points(i, p)>(grid_idx[i]+1)*cell_size) || (points(i, p)random(0.0, 1.0) < overlap)) + if (!(uniform_real_dist(prng) < overlap)) continue; } break; @@ -86,13 +88,14 @@ SGMatrix CDataGenerator::generate_mean_data(index_t m, /* evtl. 
allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( dim, 2*m, target); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); /* fill matrix with normal data */ for (index_t i=0; i<2*m; ++i) { for (index_t j=0; jstd_normal_distrib(); + result(j, i) = normal_dist(prng); /* mean shift for second half */ if (i>=m) @@ -108,7 +111,9 @@ SGMatrix CDataGenerator::generate_sym_mix_gauss(index_t m, /* evtl. allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( 2, m, target); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); + std::uniform_int_distribution uniform_int_dist(0, 1); /* rotation matrix */ SGMatrix rot=SGMatrix(2,2); rot(0, 0)=CMath::cos(angle); @@ -120,10 +125,8 @@ SGMatrix CDataGenerator::generate_sym_mix_gauss(index_t m, * Gaussians */ for (index_t i=0; istd_normal_distrib() + (m_rng->random(0, 1) ? d : -d); - result(1, i) = - m_rng->std_normal_distrib() + (m_rng->random(0, 1) ? d : -d); + result(0, i) = normal_dist(prng) + (uniform_int_dist(prng) ? d : -d); + result(1, i) = normal_dist(prng) + (uniform_int_dist(prng) ? d : -d); } /* rotate result */ @@ -140,6 +143,7 @@ SGMatrix CDataGenerator::generate_gaussians(index_t m, index_t n, ind SGMatrix::get_allocated_matrix(dim, n*m); float64_t grid_distance = 5.0; + auto prng = get_prng(); for (index_t i = 0; i < n; ++i) { SGVector mean(dim); diff --git a/src/shogun/features/RandomFourierDotFeatures.cpp b/src/shogun/features/RandomFourierDotFeatures.cpp index 046102bc32c..6286f0c03ed 100644 --- a/src/shogun/features/RandomFourierDotFeatures.cpp +++ b/src/shogun/features/RandomFourierDotFeatures.cpp @@ -80,17 +80,21 @@ float64_t CRandomFourierDotFeatures::post_dot(float64_t dot_result, index_t par_ SGVector CRandomFourierDotFeatures::generate_random_parameter_vector() { + std::uniform_real_distribution uniform_real_dist( + 0.0, 2 * CMath::PI); + std::normal_distribution normal_dist(0, 1); SGVector vec(feats->get_dim_feature_space()+1); + auto prng = get_prng(); switch (kernel) { case GAUSSIAN: for (index_t i=0; inormal_random(0.0, 1); + CMath::sqrt(2.0) * uniform_real_dist(prng); } - vec[vec.vlen - 1] = m_rng->random(0.0, 2 * CMath::PI); + vec[vec.vlen - 1] = normal_dist(prng); break; default: diff --git a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp index d69c3f41190..23f04216931 100644 --- a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp +++ b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp @@ -70,6 +70,7 @@ void CGaussianBlobsDataGenerator::init() m_stretch=1; m_angle=0; m_cholesky=SGMatrix(2, 2); + m_rng = get_prng(); m_cholesky(0, 0)=1; m_cholesky(0, 1)=0; m_cholesky(1, 0)=0; @@ -85,13 +86,16 @@ bool CGaussianBlobsDataGenerator::get_next_example() /* allocate space */ SGVector result=SGVector(2); + std::uniform_int_distribution uniform_int_dist( + 0, m_sqrt_num_blobs - 1); + std::normal_distribution normal_dist(0, 1); /* sample latent distribution to compute offsets */ - index_t x_offset = m_rng->random(0, m_sqrt_num_blobs - 1) * m_distance; - index_t y_offset = m_rng->random(0, m_sqrt_num_blobs - 1) * m_distance; + index_t x_offset = uniform_int_dist(m_rng) * m_distance; + index_t y_offset = uniform_int_dist(m_rng) * m_distance; /* sample from std Gaussian */ - float64_t x = m_rng->std_normal_distrib(); - float64_t y = 
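For the Gaussian kernel, the random Fourier construction draws each frequency entry from a standard normal and the trailing phase offset uniformly from [0, 2*pi). A standalone sketch of how the original normal_random / random(0, 2*PI) calls map onto <random> (kernel-width scaling omitted, pi written out since CMath::PI is not available here):

#include <cmath>
#include <random>
#include <vector>

// Sketch: parameter vector [w_1, ..., w_d, b] for a Gaussian-kernel random
// Fourier feature, with w_i ~ N(0, 1) and phase b ~ U[0, 2*pi).
std::vector<double> sample_rff_params(std::size_t dim, std::mt19937_64& prng)
{
    const double pi = 3.14159265358979323846;
    std::normal_distribution<double> normal_dist(0.0, 1.0);
    std::uniform_real_distribution<double> phase_dist(0.0, 2.0 * pi);

    std::vector<double> vec(dim + 1);
    for (std::size_t i = 0; i < dim; ++i)
        vec[i] = std::sqrt(2.0) * normal_dist(prng); // frequency entries
    vec[dim] = phase_dist(prng);                     // phase offset
    return vec;
}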
m_rng->std_normal_distrib(); + float64_t x = normal_dist(m_rng); + float64_t y = normal_dist(m_rng); /* transform through cholesky and add offset */ result[0]=m_cholesky(0, 0)*x+m_cholesky(0, 1)*y+x_offset; diff --git a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h index bf71ef7867a..b167714fdeb 100644 --- a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h +++ b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h @@ -83,6 +83,8 @@ class CGaussianBlobsDataGenerator: public CStreamingDenseFeatures /** Cholesky factor of covariance matrix of single Gaussians. Stored to * increase sampling performance */ SGMatrix m_cholesky; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp index 4759c74fa26..ecf1452a386 100644 --- a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp +++ b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp @@ -47,10 +47,10 @@ void CMeanShiftDataGenerator::init() SG_ADD(&m_dimension_shift, "m_dimension_shift", "Dimension of mean shift", MS_NOT_AVAILABLE); + m_rng = get_prng(); m_dimension=0; m_mean_shift=0; m_dimension_shift=0; - unset_generic(); } @@ -61,9 +61,10 @@ bool CMeanShiftDataGenerator::get_next_example() /* allocate space */ SGVector result=SGVector(m_dimension); + std::normal_distribution normal_dist(0, 1); /* fill with std normal data */ for (index_t i=0; istd_normal_distrib(); + result[i] = normal_dist(m_rng); /* mean shift in selected dimension */ result[m_dimension_shift]+=m_mean_shift; diff --git a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h index 87e3f73ddac..c91c12dd2c5 100644 --- a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h +++ b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h @@ -80,6 +80,8 @@ class CMeanShiftDataGenerator: public CStreamingDenseFeatures /** Dimension that is shifted */ index_t m_dimension_shift; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/kernel/PyramidChi2.cpp b/src/shogun/kernel/PyramidChi2.cpp index bc18c49b06f..a7ccdb069ca 100644 --- a/src/shogun/kernel/PyramidChi2.cpp +++ b/src/shogun/kernel/PyramidChi2.cpp @@ -132,7 +132,7 @@ float64_t CPyramidChi2::compute(int32_t idx_a, int32_t idx_b) int32_t dims=alen/num_cells; - + auto prng = get_prng(); if(width<=0) { if(width_computation_type >0) @@ -154,10 +154,13 @@ float64_t CPyramidChi2::compute(int32_t idx_a, int32_t idx_b) if (num_randfeats_forwidthcomputation >0) { for(int32_t i=0; i< numind;++i) - featindices[i] = m_rng->random( + { + std::uniform_int_distribution dist( 0, ((CDenseFeatures*)lhs)->get_num_vectors() - 1); + featindices[i] = dist(prng); + } } else { diff --git a/src/shogun/lib/DynamicArray.h b/src/shogun/lib/DynamicArray.h index 29fdc01bd1e..8bc75beb174 100644 --- a/src/shogun/lib/DynamicArray.h +++ b/src/shogun/lib/DynamicArray.h @@ -541,7 +541,11 @@ template class CDynamicArray :public CSGObject inline void shuffle() { m_array.shuffle(); } /** shuffles the array with external random state */ - inline void shuffle(CRandom * rand) { m_array.shuffle(rand); } + template + inline void shuffle(RandomGenerator& rand) + { + m_array.shuffle(rand); + } /** display this array */ inline void display_array() diff --git 
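Templating shuffle() on the generator type lets the container accept any standard uniform random bit generator instead of a CRandom pointer. A minimal sketch of that design:

#include <algorithm>
#include <random>
#include <vector>

// Accepts std::mt19937_64, std::minstd_rand, or any other URBG, matching the
// new template <class RandomGenerator> shuffle(RandomGenerator&) above.
template <class T, class RandomGenerator>
void shuffle_array(std::vector<T>& array, RandomGenerator& prng)
{
    std::shuffle(array.begin(), array.end(), prng);
}

// Usage:
//   std::mt19937_64 prng{std::random_device{}()};
//   std::vector<int> v{1, 2, 3, 4};
//   shuffle_array(v, prng);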
a/src/shogun/lib/DynamicObjectArray.h b/src/shogun/lib/DynamicObjectArray.h index 4f2020310ff..d5be1f0b977 100644 --- a/src/shogun/lib/DynamicObjectArray.h +++ b/src/shogun/lib/DynamicObjectArray.h @@ -402,7 +402,11 @@ class CDynamicObjectArray : public CSGObject inline void shuffle() { m_array.shuffle(); } /** shuffles the array with external random state */ - inline void shuffle(CRandom * rand) { m_array.shuffle(rand); } + template + inline void shuffle(RandomGenerator& rand) + { + m_array.shuffle(rand); + } /** @return object name */ virtual const char* get_name() const diff --git a/src/shogun/lib/SGVector.cpp b/src/shogun/lib/SGVector.cpp index 7eac2a90d09..53742a3871d 100644 --- a/src/shogun/lib/SGVector.cpp +++ b/src/shogun/lib/SGVector.cpp @@ -644,9 +644,10 @@ void SGVector::vec1_plus_scalar_times_vec2(float32_t* vec1, template void SGVector::random_vector(T* vec, int32_t len, T min_value, T max_value) { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(min_value, max_value); for (int32_t i=0; irandom(min_value, max_value); + vec[i] = dist(prng); } template <> diff --git a/src/shogun/lib/SGVector.h b/src/shogun/lib/SGVector.h index db33ba20c85..e18c9c82b71 100644 --- a/src/shogun/lib/SGVector.h +++ b/src/shogun/lib/SGVector.h @@ -33,7 +33,6 @@ namespace shogun template class SGSparseVector; template class SGMatrix; class CFile; - class CRandom; /** @brief shogun vector */ template class SGVector : public SGReferencedData diff --git a/src/shogun/lib/tapkee/tapkee_shogun.cpp b/src/shogun/lib/tapkee/tapkee_shogun.cpp index b5d750704ed..69bcc4a9836 100644 --- a/src/shogun/lib/tapkee/tapkee_shogun.cpp +++ b/src/shogun/lib/tapkee/tapkee_shogun.cpp @@ -7,22 +7,24 @@ #define CUSTOM_UNIFORM_RANDOM_INDEX_FUNCTION \ []() -> uint64_t { \ - auto rng = std::unique_ptr(new CRandom()); \ - return rng->random_64(); \ + auto prng = get_prng(); \ + return prng(); \ } #define CUSTOM_UNIFORM_RANDOM_FUNCTION \ - []() { \ - auto rng = std::unique_ptr(new CRandom()); \ - return rng->random( \ + []() -> float64_t { \ + auto prng = get_prng(); \ + std::uniform_real_distribution dist( \ static_cast(0), \ static_cast(1)); \ + return dist(prng); \ } #define CUSTOM_GAUSSIAN_RANDOM_FUNCTION \ - []() { \ - auto rng = std::unique_ptr(new CRandom()); \ - return rng->normal_random( \ + []() -> float64_t { \ + auto prng = get_prng(); \ + std::normal_distribution dist( \ static_cast(0), \ static_cast(1)); \ + return dist(prng); \ } #define TAPKEE_EIGEN_INCLUDE_FILE diff --git a/src/shogun/machine/BaggingMachine.cpp b/src/shogun/machine/BaggingMachine.cpp index 33e3841669c..05a4d308d64 100644 --- a/src/shogun/machine/BaggingMachine.cpp +++ b/src/shogun/machine/BaggingMachine.cpp @@ -149,6 +149,7 @@ bool CBaggingMachine::train_machine(CFeatures* data) { REQUIRE(m_machine != NULL, "Machine is not set!"); REQUIRE(m_num_bags > 0, "Number of bag is not set!"); + auto prng = get_prng(); if (data) { @@ -172,10 +173,12 @@ bool CBaggingMachine::train_machine(CFeatures* data) SG_UNREF(m_oob_indices); m_oob_indices = new CDynamicObjectArray(); - SGMatrix rnd_indicies(m_bag_size, m_num_bags); for (index_t i = 0; i < m_num_bags*m_bag_size; ++i) - rnd_indicies.matrix[i] = m_rng->random(0, m_bag_size - 1); + { + std::uniform_int_distribution dist(0, m_bag_size - 1); + rnd_indicies.matrix[i] = dist(prng); + } #pragma omp parallel for for (int32_t i = 0; i < m_num_bags; ++i) diff --git a/src/shogun/machine/gp/EPInferenceMethod.cpp b/src/shogun/machine/gp/EPInferenceMethod.cpp 
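The bagging loop above fills a bag_size-by-num_bags index matrix by sampling with replacement. A standalone sketch of drawing one bootstrap bag (function and parameter names are illustrative; the patch above bounds the draw by m_bag_size - 1, as the original did):

#include <random>
#include <vector>

// Draw bag_size indices uniformly from [0, num_vectors) with replacement.
std::vector<int> bootstrap_bag(int num_vectors, int bag_size, std::mt19937_64& prng)
{
    std::uniform_int_distribution<int> dist(0, num_vectors - 1);
    std::vector<int> bag(bag_size);
    for (auto& idx : bag)
        idx = dist(prng);
    return bag;
}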
index e76a1b9ad3f..cdd2d3384fc 100644 --- a/src/shogun/machine/gp/EPInferenceMethod.cpp +++ b/src/shogun/machine/gp/EPInferenceMethod.cpp @@ -230,7 +230,7 @@ void CEPInferenceMethod::update() float64_t nlZ_old=CMath::INFTY; uint32_t sweep=0; - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); while ((CMath::abs(m_nlZ-nlZ_old)>m_tol && sweep mean=get_posterior_mean(); - CGaussianDistribution* post_approx=new CGaussianDistribution(mean, cov); SGMatrix samples=post_approx->sample(num_importance_samples); diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h index fa863040328..4692ab77631 100644 --- a/src/shogun/mathematics/Math.h +++ b/src/shogun/mathematics/Math.h @@ -12,12 +12,13 @@ #define __MATHEMATICS_H_ #include +#include #include +#include #include #include #include #include -#include #ifndef _USE_MATH_DEFINES #define _USE_MATH_DEFINES @@ -810,19 +811,28 @@ class CMath : public CSGObject return 0 == a ? b : a; } - template - static void permute(SGVector v, CRandom* rand = NULL) + template < + class T, + class RandomGenerator = std::uniform_int_distribution> + static void permute(SGVector v) { - if (rand) + auto prng = get_prng(); + for (index_t i = 0; i < v.vlen; ++i) { - for (index_t i = 0; i < v.vlen; ++i) - swap(v[i], v[rand->random(i, v.vlen - 1)]); + RandomGenerator dist(i, v.vlen - 1); + swap(v[i], v[dist(prng)]); } - else + } + + template < + class T, class RandomGenerator, + class Distribution = std::uniform_int_distribution> + static void permute(SGVector v, RandomGenerator prng) + { + for (index_t i = 0; i < v.vlen; ++i) { - auto m_rng = std::unique_ptr(new CRandom()); - for (index_t i = 0; i < v.vlen; ++i) - swap(v[i], v[m_rng->random(i, v.vlen - 1)]); + Distribution dist(i, v.vlen - 1); + swap(v[i], v[dist(prng)]); } } diff --git a/src/shogun/mathematics/Random.cpp b/src/shogun/mathematics/Random.cpp deleted file mode 100644 index 91541129acc..00000000000 --- a/src/shogun/mathematics/Random.cpp +++ /dev/null @@ -1,375 +0,0 @@ -/* - * This software is distributed under BSD 3-clause license (see LICENSE file). - * - * Authors: Viktor Gal, Björn Esser, Thoralf Klein, Heiko Strathmann, - * Soeren Sonnenburg - */ -#ifdef _WIN32 -#define _CRT_RAND_S -#include -#endif - -#include -#include -#include -#include -#include -#include -#include - -#ifdef DEV_RANDOM -#include -#endif - -using namespace shogun; - -CRandom::CRandom() - : m_sfmt_32(NULL), - m_sfmt_64(NULL), - m_dsfmt(NULL) -{ - m_seed = sg_random_seed; - init(); -} - -CRandom::CRandom(uint32_t seed) - : m_seed(seed), - m_sfmt_32(NULL), - m_sfmt_64(NULL), - m_dsfmt(NULL) -{ - init(); -} - -CRandom::~CRandom() -{ - SG_FREE(m_x); - SG_FREE(m_y); - SG_FREE(m_xComp); - SG_FREE(m_sfmt_32); - SG_FREE(m_sfmt_64); - SG_FREE(m_dsfmt); -} - -void CRandom::set_seed(uint32_t seed) -{ - reinit(seed); -} - -uint32_t CRandom::get_seed() const -{ - return m_seed; -} - -void CRandom::init() -{ - /** init ziggurat variables */ - m_blockCount = 128; - m_R = 3.442619855899; - m_A = 9.91256303526217e-3; - m_uint32ToU = 1.0 / (float64_t)std::numeric_limits::max(); - - m_x = SG_MALLOC(float64_t, m_blockCount + 1); - m_y = SG_MALLOC(float64_t, m_blockCount); - m_xComp = SG_MALLOC(uint32_t, m_blockCount); - - // Initialise rectangle position data. - // m_x[i] and m_y[i] describe the top-right position ox Box i. - - // Determine top right position of the base rectangle/box (the rectangle with the Gaussian tale attached). - // We call this Box 0 or B0 for short. - // Note. 
x[0] also describes the right-hand edge of B1. (See diagram). - m_x[0] = m_R; - m_y[0] = GaussianPdfDenorm(m_R); - - // The next box (B1) has a right hand X edge the same as B0. - // Note. B1's height is the box area divided by its width, hence B1 has a smaller height than B0 because - // B0's total area includes the attached distribution tail. - m_x[1] = m_R; - m_y[1] = m_y[0] + (m_A / m_x[1]); - - // Calc positions of all remaining rectangles. - for(int i=2; i < m_blockCount; i++) - { - m_x[i] = GaussianPdfDenormInv(m_y[i-1]); - m_y[i] = m_y[i-1] + (m_A / m_x[i]); - } - - // For completeness we define the right-hand edge of a notional box 6 as being zero (a box with no area). - m_x[m_blockCount] = 0.0; - - // Useful precomputed values. - m_A_div_y0 = m_A / m_y[0]; - - // Special case for base box. m_xComp[0] stores the area of B0 as a proportion of R - // (recalling that all segments have area A, but that the base segment is the combination of B0 and the distribution tail). - // Thus -m_xComp[0] is the probability that a sample point is within the box part of the segment. - m_xComp[0] = (uint32_t)(((m_R * m_y[0]) / m_A) * (float64_t)std::numeric_limits::max()); - - for(int32_t i=1; i < m_blockCount-1; i++) - { - m_xComp[i] = (uint32_t)((m_x[i+1] / m_x[i]) * (float64_t)std::numeric_limits::max()); - } - m_xComp[m_blockCount-1] = 0; // Shown for completeness. - - // Sanity check. Test that the top edge of the topmost rectangle is at y=1.0. - // Note. We expect there to be a tiny drift away from 1.0 due to the inexactness of floating - // point arithmetic. - ASSERT(CMath::abs(1.0 - m_y[m_blockCount-1]) < 1e-10); - - /** init SFMT and dSFMT */ - m_sfmt_32 = SG_MALLOC(sfmt_t, 1); - m_sfmt_64 = SG_MALLOC(sfmt_t, 1); - m_dsfmt = SG_MALLOC(dsfmt_t, 1); - reinit(m_seed); -} - -uint32_t CRandom::random_32() const -{ - m_state_lock.lock(); - uint32_t v = sfmt_genrand_uint32(m_sfmt_32); - m_state_lock.unlock(); - return v; -} - -uint64_t CRandom::random_64() const -{ - m_state_lock.lock(); - uint64_t v = sfmt_genrand_uint64(m_sfmt_64); - m_state_lock.unlock(); - return v; -} - -void CRandom::fill_array(uint32_t* array, int32_t size) const -{ -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= sfmt_get_min_array_size32(m_sfmt_32)) && (size % 4) == 0) - { - m_state_lock.lock(); - sfmt_fill_array32(m_sfmt_32, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = random_32(); -} - -void CRandom::fill_array(uint64_t* array, int32_t size) const -{ -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= sfmt_get_min_array_size64(m_sfmt_64)) && (size % 2) == 0) - { - m_state_lock.lock(); - sfmt_fill_array64(m_sfmt_64, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = random_64(); -} - -void CRandom::fill_array_oc(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_open_close(m_dsfmt, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_open_close(m_dsfmt); - m_state_lock.unlock(); -} - -void CRandom::fill_array_co(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_close_open(m_dsfmt, array, size); - 
m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_close_open(m_dsfmt); - m_state_lock.unlock(); -} - -void CRandom::fill_array_oo(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_open_open(m_dsfmt, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_open_open(m_dsfmt); - m_state_lock.unlock(); -} - -void CRandom::fill_array_c1o2(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_close1_open2(m_dsfmt, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_close1_open2(m_dsfmt); - m_state_lock.unlock(); -} - -float64_t CRandom::random_close() const -{ - m_state_lock.lock(); - float64_t v = sfmt_genrand_real1(m_sfmt_32); - m_state_lock.unlock(); - return v; -} - -float64_t CRandom::random_open() const -{ - m_state_lock.lock(); - float64_t v = dsfmt_genrand_open_open(m_dsfmt); - m_state_lock.unlock(); - return v; -} - -float64_t CRandom::random_half_open() const -{ - m_state_lock.lock(); - float64_t v = dsfmt_genrand_close_open(m_dsfmt); - m_state_lock.unlock(); - return v; -} - -float64_t CRandom::normal_distrib(float64_t mu, float64_t sigma) const -{ - return mu + (std_normal_distrib() * sigma); -} - -float64_t CRandom::std_normal_distrib() const -{ - for (;;) - { - // Select box at random. - uint8_t u = random_32(); - int32_t i = (int32_t)(u & 0x7F); - float64_t sign = ((u & 0x80) == 0) ? -1.0 : 1.0; - - // Generate uniform random value with range [0,0xffffffff]. - uint32_t u2 = random_32(); - - // Special case for the base segment. - if(0 == i) - { - if(u2 < m_xComp[0]) - { - // Generated x is within R0. - return u2 * m_uint32ToU * m_A_div_y0 * sign; - } - // Generated x is in the tail of the distribution. - return sample_tail() * sign; - } - - // All other segments. - if(u2 < m_xComp[i]) - { // Generated x is within the rectangle. - return u2 * m_uint32ToU * m_x[i] * sign; - } - - // Generated x is outside of the rectangle. - // Generate a random y coordinate and test if our (x,y) is within the distribution curve. - // This execution path is relatively slow/expensive (makes a call to Math.Exp()) but relatively rarely executed, - // although more often than the 'tail' path (above). - float64_t x = u2 * m_uint32ToU * m_x[i]; - if(m_y[i-1] + ((m_y[i] - m_y[i-1]) * random_half_open()) < GaussianPdfDenorm(x) ) { - return x * sign; - } - } -} - -float64_t CRandom::sample_tail() const -{ - float64_t x, y; - float64_t m_R_reciprocal = 1.0 / m_R; - do - { - x = -CMath::log(random_half_open()) * m_R_reciprocal; - y = -CMath::log(random_half_open()); - } while(y+y < x*x); - return m_R + x; -} - -float64_t CRandom::GaussianPdfDenorm(float64_t x) const -{ - return CMath::exp(-(x*x * 0.5)); -} - -float64_t CRandom::GaussianPdfDenormInv(float64_t y) const -{ - // Operates over the y range (0,1], which happens to be the y range of the pdf, - // with the exception that it does not include y=0, but we would never call with - // y=0 so it doesn't matter. 
Remember that a Gaussian effectively has a tail going - // off into x == infinity, hence asking what is x when y=0 is an invalid question - // in the context of this class. - return CMath::sqrt(-2.0 * CMath::log(y)); -} - -void CRandom::reinit(uint32_t seed) -{ - m_state_lock.lock(); - m_seed = seed; - sfmt_init_gen_rand(m_sfmt_32, m_seed); - sfmt_init_gen_rand(m_sfmt_64, m_seed); - dsfmt_init_gen_rand(m_dsfmt, m_seed); - m_state_lock.unlock(); -} - -float32_t CRandom::normal_random(float32_t mean, float32_t std_dev) -{ - // sets up variables & makes sure rand_s.range == (0,1) - float32_t ret; - float32_t rand_u; - float32_t rand_v; - float32_t rand_s; - do - { - rand_u = static_cast(random(-1.0, 1.0)); - rand_v = static_cast(random(-1.0, 1.0)); - rand_s = rand_u * rand_u + rand_v * rand_v; - } while ((rand_s == 0) || (rand_s >= 1)); - - // the meat & potatos, and then the mean & standard deviation - // shifting... - ret = static_cast( - rand_u * CMath::sqrt(-2.0 * CMath::log(rand_s) / rand_s)); - ret = std_dev * ret + mean; - return ret; -} - -float64_t CRandom::normal_random(float64_t mean, float64_t std_dev) -{ - float64_t result = normal_distrib(mean, std_dev); - return result; -} - -float32_t CRandom::randn_float() -{ - return static_cast(normal_random(0.0, 1.0)); -} diff --git a/src/shogun/mathematics/Random.h b/src/shogun/mathematics/Random.h deleted file mode 100644 index 00a911adcf6..00000000000 --- a/src/shogun/mathematics/Random.h +++ /dev/null @@ -1,369 +0,0 @@ -/* - * This software is distributed under BSD 3-clause license (see LICENSE file). - * - * Authors: Viktor Gal, Soeren Sonnenburg, Heiko Strathmann, Yuyu Zhang, - * Björn Esser - */ - -#ifndef __RANDOM_H__ -#define __RANDOM_H__ - -#include - -#include -#include -#include -#include - -/* opaque pointers */ -struct SFMT_T; -struct DSFMT_T; - -namespace shogun -{ - extern uint32_t sg_random_seed; - class CLock; - class CMath; - /** @brief: Pseudo random number geneartor - * - * It is based on SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom - * number generator. - * - * */ - class CRandom - { - public: - /** default ctor */ - CRandom(); - - /** ctor - * @param seed the seed for the PRNG - */ - CRandom(uint32_t seed); - - /** dtor */ - virtual ~CRandom(); - - /** set seed - * - * @param seed seed for PRNG - */ - void set_seed(uint32_t seed); - - /** get seed - * - * @return seed - */ - uint32_t get_seed() const; - - /** - * Generate an unsigned 32-bit random integer - * - * @return the random 32-bit unsigned integer - */ - uint32_t random_32() const; - - /** - * Generate an unsigned 64-bit random integer - * - * @return the random 64-bit unsigned integer - */ - uint64_t random_64() const; - - /** - * Generate a signed 32-bit random integer - * - * @return the random 32-bit signed integer - */ - inline int32_t random_s32() const - { - return random_32() & ((uint32_t(-1)<<1)>>1); - } - - /** - * Generate a signed 64-bit random integer - * - * @return the random 64-bit signed integer - */ - inline int64_t random_s64() const - { - return random_64() & ((uint64_t(-1)<<1)>>1); - } - - /** generate an unsigned 64bit integer in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline uint64_t random(uint64_t min_value, uint64_t max_value) - { - return min_value + random_64() % (max_value-min_value+1); - } - - /** generate an signed 64bit integer in the range - * [min_value, max_value] (closed interval!) 
- * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline int64_t random(int64_t min_value, int64_t max_value) - { - return min_value + random_s64() % (max_value-min_value+1); - } - - /** generate an unsigned signed 32bit integer in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline uint32_t random(uint32_t min_value, uint32_t max_value) - { - return min_value + random_32() % (max_value-min_value+1); - } - - /** generate an signed 32bit integer in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline int32_t random(int32_t min_value, int32_t max_value) - { - return min_value + random_s32() % (max_value-min_value+1); - } - - /** generate an 32bit floating point number in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline float32_t random(float32_t min_value, float32_t max_value) - { - return min_value + ((max_value-min_value) * static_cast(random_close())); - } - - /** generate an 64bit floating point number in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline float64_t random(float64_t min_value, float64_t max_value) - { - return min_value + ((max_value-min_value) * random_close()); - } - - /** generate an 96-128bit floating point number (depending on the - * size of floatmax_t) in the range - * [min_value, max_value] (closed interval!) 
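The removed random(min, max) overloads are documented as closed on both ends for every type. std::uniform_int_distribution keeps those closed-interval semantics (and avoids the modulo bias of the removed implementation), while std::uniform_real_distribution produces values in the half-open interval [min, max), a small behavioural difference worth keeping in mind when porting callers. A short sketch of both replacements:

#include <random>

// Closed interval [lo, hi] for integers, as before.
int random_int(int lo, int hi, std::mt19937_64& prng)
{
    std::uniform_int_distribution<int> dist(lo, hi);
    return dist(prng);
}

// Half-open interval [lo, hi) for reals; the old CRandom overload was
// documented as closed [lo, hi], so hi itself is no longer reachable.
double random_real(double lo, double hi, std::mt19937_64& prng)
{
    std::uniform_real_distribution<double> dist(lo, hi);
    return dist(prng);
}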
- * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline floatmax_t random(floatmax_t min_value, floatmax_t max_value) - { - return min_value + ((max_value-min_value) * random_close()); - } - - /** - * Fill an array of unsinged 32 bit integer - * - * @param array 32-bit unsigened int array to be filled - * @param size size of the array - */ - void fill_array(uint32_t* array, int32_t size) const; - - /** - * Fill an array of unsinged 64 bit integer - * - * @param array 64-bit unsigened int array to be filled - * @param size size of the array - */ - void fill_array(uint64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the (0,1] interval - * - * @param array - * @param size - */ - void fill_array_oc(float64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the [0,1) interval - * - * @param array - * @param size - */ - void fill_array_co(float64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the (0,1) interval - * - * @param array - * @param size - */ - void fill_array_oo(float64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the [1,2) interval - * - * @param array - * @param size - */ - - void fill_array_c1o2(float64_t* array, int32_t size) const; - - /** - * Get random - * @return a float64_t random from [0,1] interval - */ - float64_t random_close() const; - - /** - * Get random - * @return a float64_t random from (0,1) interval - */ - float64_t random_open() const; - - /** - * Get random - * - * @return a float64_t random from [0,1) interval - */ - float64_t random_half_open() const; - - /** - * Sample a normal distrbution. - * Using Ziggurat algorithm - * - * @param mu mean - * @param sigma variance - * @return sample from the desired normal distrib - */ - float64_t normal_distrib(float64_t mu, float64_t sigma) const; - - /** - * Sample a standard normal distribution, - * i.e. mean = 0, var = 1.0 - * - * @return sample from the std normal distrib - */ - float64_t std_normal_distrib() const; - - /** - *Returns a Gaussian or Normal random number. - *Using the polar form of the Box-Muller transform. - *http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form - */ - float32_t normal_random(float32_t mean, float32_t std_dev); - - /** - *Returns a Gaussian or Normal random number. - *Using the polar form of the Box-Muller transform. - *http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form - */ - float64_t normal_random(float64_t mean, float64_t std_dev); - - /* - *Convenience method for generating Standard Normal random numbers - *Float: Mean = 0 and Standard Deviation = 1 - */ - float32_t randn_float(); - - /** - * Generate a seed for PRNG - * - * @return entropy for PRNG - */ - static uint32_t generate_seed(); - - virtual const char* get_name() const - { - return "Random"; - } - - private: - /** initialise the object */ - void init(); - - /** reinit PRNG - * - * @param seed seed for the PRNG - */ - void reinit(uint32_t seed); - - /** - * Sample from the distribution tail (defined as having x >= R). - * - * @return - */ - float64_t sample_tail() const; - - /** - * Gaussian probability density function, denormailised, that is, y = e^-(x^2/2). 
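The Ziggurat tables and Box-Muller fallback deleted here are superseded by std::normal_distribution, which gives equivalent N(mu, sigma) sampling in a few lines; note that the exact sequence of variates differs, so seeded results are not bit-identical to the old generator. Sketch:

#include <random>

// Replacement for normal_distrib(mu, sigma) / std_normal_distrib().
double normal_sample(double mu, double sigma, std::mt19937_64& prng)
{
    std::normal_distribution<double> dist(mu, sigma);
    return dist(prng);
}

double std_normal_sample(std::mt19937_64& prng)
{
    return normal_sample(0.0, 1.0, prng);
}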
- */ - float64_t GaussianPdfDenorm(float64_t x) const; - - /** - * Inverse function of GaussianPdfDenorm(x) - */ - float64_t GaussianPdfDenormInv(float64_t y) const; - - /** seed */ - uint32_t m_seed; - - /** SFMT struct for 32-bit random */ - SFMT_T* m_sfmt_32; - - /** SFMT struct for 64-bit random */ - SFMT_T* m_sfmt_64; - - /** dSFMT struct */ - DSFMT_T* m_dsfmt; - - /** Number of blocks */ - int32_t m_blockCount; //= 128; - - /** Right hand x coord of the base rectangle, thus also the left hand x coord of the tail */ - float64_t m_R;//= 3.442619855899; - - /** Area of each rectangle (pre-determined/computed for 128 blocks). */ - float64_t m_A;// = 9.91256303526217e-3; - - /** Scale factor for converting a UInt with range [0,0xffffffff] to a double with range [0,1]. */ - float64_t m_uint32ToU;// = 1.0 / (float64_t)UINT32_MAX; - - /** Area A divided by the height of B0 */ - float64_t m_A_div_y0; - - /** top-right position ox rectangle i */ - float64_t* m_x; - float64_t* m_y; - - /** The proprtion of each segment that is entirely within the distribution, expressed as uint where - a value of 0 indicates 0% and uint.MaxValue 100%. Expressing this as an integer allows some floating - points operations to be replaced with integer ones. - */ - uint32_t* m_xComp; - - /** state lock */ - mutable CLock m_state_lock; - }; -} - -#endif /* __RANDOM_H__ */ diff --git a/src/shogun/mathematics/Statistics.cpp b/src/shogun/mathematics/Statistics.cpp index 3b59223e2e6..79573da6fda 100644 --- a/src/shogun/mathematics/Statistics.cpp +++ b/src/shogun/mathematics/Statistics.cpp @@ -323,7 +323,7 @@ SGVector CStatistics::sample_indices(int32_t sample_size, int32_t N) int32_t* idxs=SG_MALLOC(int32_t,N); int32_t i, rnd; int32_t* permuted_idxs=SG_MALLOC(int32_t,sample_size); - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); // reservoir sampling for (i=0; i CStatistics::sample_indices(int32_t sample_size, int32_t N) permuted_idxs[i]=idxs[i]; for (i=sample_size; irandom(1, i); + std::uniform_int_distribution uniform_int_dist(1, i); + rnd = uniform_int_dist(prng); if (rnd CStatistics::sample_from_gaussian(SGVector mean, int32_t dim=mean.vlen; Map mu(mean.vector, mean.vlen); Map c(cov.matrix, cov.num_rows, cov.num_cols); - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); // generate samples, z, from N(0, I), DxN SGMatrix S(dim, N); for( int32_t j=0; jstd_normal_distrib(); + S(i, j) = normal_dist(prng); // the cholesky factorization c=L*U MatrixXd U=c.llt().matrixU(); @@ -773,7 +775,8 @@ SGMatrix CStatistics::sample_from_gaussian(SGVector mean, typedef SparseMatrix MatrixType; const MatrixType &c=EigenSparseUtil::toEigenSparse(cov); - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); SimplicialLLT llt; @@ -781,7 +784,7 @@ SGMatrix CStatistics::sample_from_gaussian(SGVector mean, SGMatrix S(dim, N); for( int32_t j=0; jstd_normal_distrib(); + S(i, j) = normal_dist(prng); Map s(S.matrix, S.num_rows, S.num_cols); diff --git a/src/shogun/mathematics/ajd/QDiag.cpp b/src/shogun/mathematics/ajd/QDiag.cpp index 38ace2d5568..dd88762a108 100644 --- a/src/shogun/mathematics/ajd/QDiag.cpp +++ b/src/shogun/mathematics/ajd/QDiag.cpp @@ -16,7 +16,8 @@ SGMatrix CQDiag::diagonalize(SGNDArray C, SGMatrix V; - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); if (V0.num_rows == N && V0.num_cols == N) { V = V0.clone(); @@ -28,7 
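sample_from_gaussian keeps the same recipe after the port: draw S ~ N(0, I), map it through a Cholesky factor of the covariance, and shift by the mean. A small 2-D sketch without the Eigen plumbing (plain arrays, illustrative only):

#include <array>
#include <random>

// Sample x = mu + L * z with z ~ N(0, I), where L is the lower Cholesky
// factor of the 2x2 covariance; mirrors the S(i, j) = normal_dist(prng) fill above.
std::array<double, 2> sample_gaussian_2d(
    const std::array<double, 2>& mu,
    const std::array<std::array<double, 2>, 2>& L,
    std::mt19937_64& prng)
{
    std::normal_distribution<double> normal_dist(0.0, 1.0);
    const double z0 = normal_dist(prng);
    const double z1 = normal_dist(prng);
    return {mu[0] + L[0][0] * z0,
            mu[1] + L[1][0] * z0 + L[1][1] * z1};
}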
+29,7 @@ SGMatrix CQDiag::diagonalize(SGNDArray C, SGMatrixstd_normal_distrib(); + V(i, j) = dist(prng); } } diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp index 1cc61738302..35c0c49605c 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp @@ -6,7 +6,6 @@ #include #include -#include #include namespace shogun @@ -34,13 +33,14 @@ void CNormalSampler::precompute() m_num_samples=1; } -SGVector CNormalSampler::sample(index_t idx) const +SGVector CNormalSampler::sample(index_t idx) { // ignore idx since it doesnt matter, all samples are independent SGVector s(m_dimension); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + s[i] = dist(m_rng); return s; } diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h index 9d6b1105fcb..9d6396f3cf1 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h @@ -34,7 +34,7 @@ class CNormalSampler : public CTraceSampler * @param idx the index (this is effectively ignored) * @return the sample vector */ - virtual SGVector sample(index_t idx) const; + virtual SGVector sample(index_t idx); /** precompute method that sets the num_samples of the base */ virtual void precompute(); diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp index ee6459f3371..400d7441994 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -186,7 +185,7 @@ void CProbingSampler::precompute() SG_DEBUG("Leaving\n"); } -SGVector CProbingSampler::sample(index_t idx) const +SGVector CProbingSampler::sample(index_t idx) { REQUIRE(idx CProbingSampler::sample(index_t idx) const SGVector s(m_dimension); s.set_const(0.0); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + float64_t x = dist(m_rng); s[i]=(x>0)-(x<0); } } diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h index 4d4116594bd..58ff70782da 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h @@ -86,7 +86,7 @@ class CProbingSampler : public CTraceSampler * @param idx the index * @return the sample vector */ - virtual SGVector sample(index_t idx) const; + virtual SGVector sample(index_t idx); /** precompute method that sets the num_samples of the base */ virtual void precompute(); diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h b/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h index 2731f1a1d44..e78a0265c62 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h @@ -56,7 +56,7 @@ class CTraceSampler : public CSGObject * @param idx the index which determines which sample to draw * @return the sample vector */ - virtual SGVector 
sample(index_t idx) const = 0; + virtual SGVector sample(index_t idx) = 0; /** * abstract method for initializing the sampler, number of samples etc, @@ -89,12 +89,15 @@ class CTraceSampler : public CSGObject /** the number of samples this sampler will generate, set by implementation */ index_t m_num_samples; + std::mt19937_64 m_rng; + private: /** initialize with default values and register params */ void init() { m_num_samples=0; m_dimension=0; + m_rng = get_prng(); SG_ADD(&m_num_samples, "num_samples", "Number of samples this sampler can generate", MS_NOT_AVAILABLE); diff --git a/src/shogun/modelselection/ModelSelectionParameters.cpp b/src/shogun/modelselection/ModelSelectionParameters.cpp index e69f52347cb..ae58390155f 100644 --- a/src/shogun/modelselection/ModelSelectionParameters.cpp +++ b/src/shogun/modelselection/ModelSelectionParameters.cpp @@ -191,15 +191,17 @@ void CModelSelectionParameters::build_values(EMSParamType value_type, void* min, CParameterCombination* CModelSelectionParameters::get_single_combination( bool is_rand) { + auto prng = get_prng(); /* If this is a value node, then randomly pick a value from the built * range */ if (m_values) { - + std::uniform_int_distribution normal_init_dist( + 0, m_values_length - 1); index_t i = 0; if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); Parameter* p=new Parameter(); @@ -212,7 +214,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < param_vect->vlen; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (*param_vect)[j] = ((float64_t*)m_values)[i]; } p->add(param_vect, m_node_name); @@ -225,7 +227,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < *m_vector_length; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (param_vect)[j] = ((float64_t*)m_values)[i]; } p->add_vector(¶m_vect, m_vector_length, m_node_name); @@ -240,7 +242,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < param_vect->vlen; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (*param_vect)[j] = ((int32_t*)m_values)[i]; } p->add(param_vect, m_node_name); @@ -253,7 +255,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < *m_vector_length; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (param_vect)[j] = ((int32_t*)m_values)[i]; } p->add_vector(¶m_vect, m_vector_length, m_node_name); diff --git a/src/shogun/multiclass/LaRank.cpp b/src/shogun/multiclass/LaRank.cpp index 744412fe258..c8846efb522 100644 --- a/src/shogun/multiclass/LaRank.cpp +++ b/src/shogun/multiclass/LaRank.cpp @@ -743,6 +743,7 @@ int32_t CLaRank::add (int32_t x_id, int32_t yi) n_pro++; w_pro = 0.05 * coeff + (1 - 0.05) * w_pro; + auto prng = get_prng(); // ProcessOld & Optimize until ready for a new processnew // (Adaptative schedule here) for (;;) @@ -756,7 +757,8 @@ int32_t CLaRank::add (int32_t x_id, int32_t yi) if (w_opt < prop_min) w_opt = prop_min; w_sum = w_pro + w_rep + w_opt; - float64_t r = m_rng->random(0.0, w_sum); + std::uniform_real_distribution dist(0.0, w_sum); + float64_t r = dist(prng); if (r <= w_pro) { break; diff --git a/src/shogun/multiclass/LaRank.h b/src/shogun/multiclass/LaRank.h index 972bfe40b6a..067d6c63d2a 100644 --- a/src/shogun/multiclass/LaRank.h 
+++ b/src/shogun/multiclass/LaRank.h @@ -249,12 +249,13 @@ namespace shogun LaRankPattern & sample () { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist( + uint32_t(0), uint32_t(patterns.size() - 1)); ASSERT(!empty()) while (true) { - uint32_t r = m_rng->random( - uint32_t(0), uint32_t(patterns.size() - 1)); + uint32_t r = dist(prng); if (patterns[r].exists()) return patterns[r]; } diff --git a/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp index 1db5ebc6f58..e2c16f2636e 100644 --- a/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp @@ -133,7 +133,9 @@ float64_t CECOCDiscriminantEncoder::sffs_iteration(float64_t MI, vector if (part1.size() <= 1) return MI; - int32_t iclas = m_rng->random(0, int32_t(part1.size() - 1)); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, int32_t(part1.size() - 1)); + int32_t iclas = dist(prng); int32_t clas = part1[iclas]; // move clas from part1 to part2 diff --git a/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp index c783afb4f3b..03e81491c99 100644 --- a/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp @@ -48,15 +48,18 @@ SGMatrix CECOCRandomDenseEncoder::create_codebook(int32_t num_classes) SGMatrix codebook(codelen, num_classes); int32_t n_iter = 0; - while (true) - { - // fill codebook - codebook.zero(); - for (int32_t i=0; i < codelen; ++i) - { - for (int32_t j=0; j < num_classes; ++j) - { - float64_t randval = m_rng->random(0.0, 1.0); + + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); + while (true) + { + // fill codebook + codebook.zero(); + for (int32_t i = 0; i < codelen; ++i) + { + for (int32_t j = 0; j < num_classes; ++j) + { + float64_t randval = dist(prng); if (randval > m_pposone) codebook(i, j) = -1; else diff --git a/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp index 5278ee48aaf..a7d20a48b8b 100644 --- a/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp @@ -56,17 +56,19 @@ SGMatrix CECOCRandomSparseEncoder::create_codebook(int32_t num_classes) std::vector random_sel(num_classes); int32_t n_iter = 0; - while (true) - { - // fill codebook - codebook.zero(); - for (int32_t i=0; i < codelen; ++i) - { - // randomly select two positions - for (int32_t j=0; j < num_classes; ++j) - random_sel[j] = j; - std::random_shuffle(random_sel.begin(), random_sel.end()); - if (m_rng->random(0.0, 1.0) > 0.5) + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); + while (true) + { + // fill codebook + codebook.zero(); + for (int32_t i = 0; i < codelen; ++i) + { + // randomly select two positions + for (int32_t j = 0; j < num_classes; ++j) + random_sel[j] = j; + std::random_shuffle(random_sel.begin(), random_sel.end()); + if (dist(prng) > 0.5) { codebook(i, random_sel[0]) = +1; codebook(i, random_sel[1]) = -1; @@ -80,7 +82,7 @@ SGMatrix CECOCRandomSparseEncoder::create_codebook(int32_t num_classes) // assign the remaining positions for (int32_t j = 2; j < num_classes; ++j) { - float64_t randval = m_rng->random(0.0, 1.0); + float64_t randval = dist(prng); if (randval > m_pzero) { if (randval > m_pzero + m_pposone) diff --git 
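The sparse-codebook loop above still relies on std::random_shuffle, which is deprecated in C++14 and removed in C++17; with a generator already in hand, std::shuffle is the drop-in replacement, and the same uniform draw can decide the signs. A loose sketch of one codebook row (names and the exact sign convention are illustrative; assumes at least two classes):

#include <algorithm>
#include <numeric>
#include <random>
#include <vector>

// Pick two distinct class positions for +1/-1 and assign the rest by a
// p_zero / p_posone split, loosely following the sparse ECOC row above.
void fill_codebook_row(std::vector<int>& row, double p_zero, double p_posone,
                       std::mt19937_64& prng)
{
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    std::vector<std::size_t> sel(row.size());
    std::iota(sel.begin(), sel.end(), std::size_t(0));
    std::shuffle(sel.begin(), sel.end(), prng); // replaces std::random_shuffle

    const bool flip = dist(prng) > 0.5;
    row[sel[0]] = flip ? +1 : -1;
    row[sel[1]] = flip ? -1 : +1;

    for (std::size_t j = 2; j < sel.size(); ++j)
    {
        const double randval = dist(prng);
        if (randval > p_zero)
            row[sel[j]] = (randval > p_zero + p_posone) ? -1 : +1;
        else
            row[sel[j]] = 0;
    }
}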
a/src/shogun/multiclass/tree/ConditionalProbabilityTree.h b/src/shogun/multiclass/tree/ConditionalProbabilityTree.h index 35ad6d49ca2..ad0ae90f2ba 100644 --- a/src/shogun/multiclass/tree/ConditionalProbabilityTree.h +++ b/src/shogun/multiclass/tree/ConditionalProbabilityTree.h @@ -9,6 +9,7 @@ #define CONDITIONALPROBABILITYTREE_H__ #include +#include #include @@ -34,6 +35,7 @@ class CConditionalProbabilityTree: public CTreeMachine(); } /** destructor */ @@ -134,6 +136,7 @@ class CConditionalProbabilityTree: public CTreeMachine m_leaves; ///< class => leaf mapping CStreamingDenseFeatures *m_feats; ///< online features + std::mt19937_64 m_rng; }; } /* shogun */ diff --git a/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp b/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp index 3dcb041b916..4c7315b73af 100644 --- a/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp +++ b/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp @@ -11,7 +11,8 @@ using namespace shogun; bool CRandomConditionalProbabilityTree::which_subtree(bnode_t *node, SGVector ex) { - if (m_rng->random(0.0, 1.0) > 0.5) + std::uniform_real_distribution dist(0.0, 1.0); + if (dist(m_rng) > 0.5) return true; return false; } diff --git a/src/shogun/neuralnets/DeepBeliefNetwork.cpp b/src/shogun/neuralnets/DeepBeliefNetwork.cpp index 2a3605a1a63..8bb527e9376 100644 --- a/src/shogun/neuralnets/DeepBeliefNetwork.cpp +++ b/src/shogun/neuralnets/DeepBeliefNetwork.cpp @@ -76,7 +76,7 @@ void CDeepBeliefNetwork::initialize_neural_network(float64_t sigma) { m_bias_index_offsets = SGVector(m_num_layers); m_weights_index_offsets = SGVector(m_num_layers-1); - + auto prng = get_prng(); m_num_params = 0; for (int32_t i=0; i dist(0, sigma); m_params = SGVector(m_num_params); for (int32_t i=0; inormal_random(0.0, sigma); + m_params[i] = dist(prng); pt_cd_num_steps = SGVector(m_num_layers-1); pt_cd_num_steps.set_const(1); @@ -350,10 +351,12 @@ CDenseFeatures* CDeepBeliefNetwork::sample( void CDeepBeliefNetwork::reset_chain() { + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); SGMatrix s = m_states[m_num_layers-2]; for (int32_t i=0; irandom(0.0, 1.0) > 0.5; + s[i] = dist(prng) > 0.5; } CNeuralNetwork* CDeepBeliefNetwork::convert_to_neural_network( @@ -394,7 +397,7 @@ void CDeepBeliefNetwork::down_step(int32_t index, SGVector< float64_t > params, { typedef Eigen::Map EMatrix; typedef Eigen::Map EVector; - + auto prng = get_prng(); EMatrix In(input.matrix, input.num_rows, input.num_cols); EMatrix Out(result.matrix, result.num_rows, result.num_cols); EVector B(get_biases(index,params).vector, m_layer_sizes->element(index)); @@ -433,9 +436,10 @@ void CDeepBeliefNetwork::down_step(int32_t index, SGVector< float64_t > params, if (sample_states && index>0) { + std::uniform_real_distribution dist(0.0, 1.0); int32_t len = m_layer_sizes->element(index)*m_batch_size; for (int32_t i=0; irandom(0.0, 1.0) < result[i]; + result[i] = dist(prng) < result[i]; } } @@ -444,7 +448,7 @@ void CDeepBeliefNetwork::up_step(int32_t index, SGVector< float64_t > params, { typedef Eigen::Map EMatrix; typedef Eigen::Map EVector; - + auto prng = get_prng(); EMatrix In(input.matrix, input.num_rows, input.num_cols); EMatrix Out(result.matrix, result.num_rows, result.num_cols); EVector C(get_biases(index, params).vector, m_layer_sizes->element(index)); @@ -464,8 +468,9 @@ void CDeepBeliefNetwork::up_step(int32_t index, SGVector< float64_t > params, if (sample_states && index>0) { + 
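Sampling binary hidden or visible states compares a U(0, 1) draw against the activation probability; std::bernoulli_distribution expresses the same thing directly. A standalone sketch (assumes probabilities already lie in [0, 1], as sigmoid activations do):

#include <random>
#include <vector>

// result[i] = 1 with probability p[i], else 0; equivalent to
// uniform_real_distribution<double>(0, 1)(prng) < p[i] as used above.
void sample_binary_states(const std::vector<double>& p, std::vector<int>& result,
                          std::mt19937_64& prng)
{
    result.resize(p.size());
    for (std::size_t i = 0; i < p.size(); ++i)
    {
        std::bernoulli_distribution coin(p[i]);
        result[i] = coin(prng) ? 1 : 0;
    }
}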
std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) < result[i]; + result[i] = dist(prng) < result[i]; } } diff --git a/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp b/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp index 0dd103eb131..779f2b24da5 100644 --- a/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp +++ b/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp @@ -128,7 +128,7 @@ void CNeuralConvolutionalLayer::initialize_parameters(SGVector parame { int32_t num_parameters_per_map = 1 + m_input_num_channels*(2*m_radius_x+1)*(2*m_radius_y+1); - + auto prng = get_prng(); for (int32_t m=0; m parame { if (m_initialization_mode == NORMAL) { - map_params[i] = m_rng->normal_random(0.0, sigma); + std::normal_distribution dist(0, sigma); + map_params[i] = dist(prng); // turn off regularization for the bias, on for the rest of the parameters map_param_regularizable[i] = (i != 0); } else // for the case when m_initialization_mode = RE_NORMAL { - map_params[i] = m_rng->normal_random( + std::normal_distribution dist( 0.0, CMath::sqrt( 2.0 / (m_input_height * m_input_width * m_input_num_channels))); + map_params[i] = dist(prng); // initialize b=0 map_param_regularizable[i] = 0; } diff --git a/src/shogun/neuralnets/NeuralInputLayer.cpp b/src/shogun/neuralnets/NeuralInputLayer.cpp index 70e77d27999..644e8c5381e 100644 --- a/src/shogun/neuralnets/NeuralInputLayer.cpp +++ b/src/shogun/neuralnets/NeuralInputLayer.cpp @@ -58,6 +58,7 @@ CNeuralInputLayer::CNeuralInputLayer(int32_t width, int32_t height, void CNeuralInputLayer::compute_activations(SGMatrix< float64_t > inputs) { + auto prng = get_prng(); if (m_start_index == 0) { sg_memcpy(m_activations.matrix, inputs.matrix, @@ -71,9 +72,10 @@ void CNeuralInputLayer::compute_activations(SGMatrix< float64_t > inputs) } if (gaussian_noise > 0) { + std::normal_distribution dist(0.0, gaussian_noise); int32_t len = m_num_neurons*m_batch_size; for (int32_t k=0; knormal_random(0.0, gaussian_noise); + m_activations[k] += dist(prng); } } diff --git a/src/shogun/neuralnets/NeuralLayer.cpp b/src/shogun/neuralnets/NeuralLayer.cpp index ea84fa0ab73..690312dfaf1 100644 --- a/src/shogun/neuralnets/NeuralLayer.cpp +++ b/src/shogun/neuralnets/NeuralLayer.cpp @@ -90,13 +90,14 @@ void CNeuralLayer::set_batch_size(int32_t batch_size) void CNeuralLayer::dropout_activations() { if (dropout_prop==0.0) return; - + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); if (is_training) { int32_t len = m_num_neurons*m_batch_size; for (int32_t i=0; irandom(0.0, 1.0) >= dropout_prop; + m_dropout_mask[i] = dist(prng) >= dropout_prop; m_activations[i] *= m_dropout_mask[i]; } } diff --git a/src/shogun/neuralnets/NeuralLinearLayer.cpp b/src/shogun/neuralnets/NeuralLinearLayer.cpp index 4b3fd070e3e..4d41ffaab0c 100644 --- a/src/shogun/neuralnets/NeuralLinearLayer.cpp +++ b/src/shogun/neuralnets/NeuralLinearLayer.cpp @@ -62,10 +62,12 @@ void CNeuralLinearLayer::initialize_parameters(SGVector parameters, SGVector parameter_regularizable, float64_t sigma) { + auto prng = get_prng(); + std::normal_distribution dist(0, sigma); for (int32_t i=0; inormal_random(0.0, sigma); + parameters[i] = dist(prng); // turn regularization off for the biases, on for the weights parameter_regularizable[i] = (i>=m_num_neurons); diff --git a/src/shogun/neuralnets/NeuralNetwork.cpp b/src/shogun/neuralnets/NeuralNetwork.cpp index 3ff8ce7d6d5..0cd9ea8f00b 100644 --- a/src/shogun/neuralnets/NeuralNetwork.cpp +++ b/src/shogun/neuralnets/NeuralNetwork.cpp @@ 
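Layer parameters are initialized from N(0, sigma), and the RE_NORMAL branch above chooses sigma = sqrt(2 / fan_in), i.e. a He-style initialization for rectified units. A minimal sketch (sigma must be positive):

#include <cmath>
#include <random>
#include <vector>

// Initialize weights from N(0, sigma); pass sigma = std::sqrt(2.0 / fan_in)
// for the RE_NORMAL variant, and keep biases at zero / unregularized.
void init_weights(std::vector<double>& weights, double sigma, std::mt19937_64& prng)
{
    std::normal_distribution<double> dist(0.0, sigma);
    for (auto& w : weights)
        w = dist(prng);
}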
-31,12 +31,13 @@ * Written (W) 2014 Khaled Nasr */ -#include -#include -#include +#include #include #include +#include #include +#include +#include using namespace shogun; @@ -559,14 +560,15 @@ float64_t CNeuralNetwork::check_gradients(float64_t approx_epsilon, float64_t s) // some random inputs and ouputs SGMatrix x(m_num_inputs,1); SGMatrix y(get_num_outputs(),1); - + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); // the outputs are set up in the form of a probability distribution (in case // that is required by the output layer, i.e softmax) for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist(prng); float64_t y_sum = SGVector::sum(y.matrix, y.num_rows); for (int32_t i=0; i(m_num_params); + std::normal_distribution dist(0, sigma); for (int32_t i=0; inormal_random(0.0, sigma); + m_params[i] = dist(m_rng); } void CRBM::set_batch_size(int32_t batch_size) @@ -264,9 +265,10 @@ CDenseFeatures< float64_t >* CRBM::sample_group_with_evidence(int32_t V, void CRBM::reset_chain() { + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) > 0.5; + visible_state(i, j) = dist(m_rng) > 0.5; } float64_t CRBM::free_energy(SGMatrix< float64_t > visible, SGMatrix< float64_t > buffer) @@ -429,9 +431,10 @@ float64_t CRBM::pseudo_likelihood(SGMatrix< float64_t > visible, if (buffer.num_rows==0) buffer = SGMatrix(m_num_hidden, m_batch_size); + std::uniform_int_distribution dist(0, m_num_visible - 1); SGVector indices(m_batch_size); for (int32_t i=0; irandom(0, m_num_visible - 1); + indices[i] = dist(m_rng); float64_t f1 = free_energy(visible, buffer); @@ -517,9 +520,10 @@ void CRBM::mean_visible(SGMatrix< float64_t > hidden, SGMatrix< float64_t > resu void CRBM::sample_hidden(SGMatrix< float64_t > mean, SGMatrix< float64_t > result) { + std::uniform_real_distribution dist(0.0, 1.0); int32_t length = result.num_rows*result.num_cols; for (int32_t i=0; irandom(0.0, 1.0) < mean[i]; + result[i] = dist(m_rng) < mean[i]; } void CRBM::sample_visible(SGMatrix< float64_t > mean, SGMatrix< float64_t > result) @@ -535,12 +539,12 @@ void CRBM::sample_visible(int32_t index, { int32_t offset = m_visible_state_offsets->element(index); + std::uniform_real_distribution dist(0.0, 1.0); if (m_visible_group_types->element(index)==RBMVUT_BINARY) { for (int32_t i=0; ielement(index); i++) for (int32_t j=0; jrandom(0.0, 1.0) < mean(i + offset, j); + result(i + offset, j) = dist(m_rng) < mean(i + offset, j); } if (m_visible_group_types->element(index)==RBMVUT_SOFTMAX) @@ -551,7 +555,7 @@ void CRBM::sample_visible(int32_t index, for (int32_t j=0; jrandom(0.0, 1.0); + int32_t r = dist(m_rng); float64_t sum = 0; for (int32_t i=0; ielement(index); i++) { @@ -619,6 +623,7 @@ void CRBM::init() m_visible_state_offsets = new CDynamicArray(); m_num_params = 0; m_batch_size = 0; + m_rng = get_prng(); SG_ADD(&cd_num_steps, "cd_num_steps", "Number of CD Steps", MS_NOT_AVAILABLE); SG_ADD(&cd_persistent, "cd_persistent", "Whether to use PCD", MS_NOT_AVAILABLE); diff --git a/src/shogun/neuralnets/RBM.h b/src/shogun/neuralnets/RBM.h index 9b7d1116970..521df370723 100644 --- a/src/shogun/neuralnets/RBM.h +++ b/src/shogun/neuralnets/RBM.h @@ -459,6 +459,8 @@ friend class CDeepBeliefNetwork; /** Parameters */ SGVector m_params; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/optimization/liblinear/shogun_liblinear.cpp b/src/shogun/optimization/liblinear/shogun_liblinear.cpp index c00af618d8f..a2789c68795 100644 --- 
a/src/shogun/optimization/liblinear/shogun_liblinear.cpp +++ b/src/shogun/optimization/liblinear/shogun_liblinear.cpp @@ -516,7 +516,7 @@ void Solver_MCSVM_CS::solve() state->inited = true; } - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); // TODO: replace with the new signal // while(iter < max_iter && !CSignal::cancel_computations()) while (iter < max_iter) @@ -524,7 +524,8 @@ void Solver_MCSVM_CS::solve() double stopping = -CMath::INFTY; for(i=0;irandom(i, active_size - 1); + std::uniform_int_distribution dist(i, active_size - 1); + int j = dist(prng); CMath::swap(index[i], index[j]); } for(s=0;s dist_p((float64_t)0.0, 2 * pi); + std::uniform_real_distribution dist_q( + (float64_t)-1.0, (float64_t)1.0); for (int32_t i = 0; i < cur_dim_feature_space; ++i) { - randomcoeff_additive[i] = m_rng->random((float64_t)0.0, 2 * pi); + randomcoeff_additive[i] = dist_p(prng); } for (int32_t i = 0; i < cur_dim_feature_space; ++i) { @@ -251,8 +255,8 @@ bool CRandomFourierGaussPreproc::init_randomcoefficients() { float64_t s = 2; while ((s >= 1) ) { // Marsaglia polar for gaussian - x1 = m_rng->random((float64_t)-1.0, (float64_t)1.0); - x2 = m_rng->random((float64_t)-1.0, (float64_t)1.0); + x1 = dist_q(prng); + x2 = dist_q(prng); s=x1*x1+x2*x2; } diff --git a/src/shogun/regression/svr/LibLinearRegression.cpp b/src/shogun/regression/svr/LibLinearRegression.cpp index 4a0503ff399..2d172c00236 100644 --- a/src/shogun/regression/svr/LibLinearRegression.cpp +++ b/src/shogun/regression/svr/LibLinearRegression.cpp @@ -205,14 +205,17 @@ void CLibLinearRegression::solve_l2r_l1l2_svr(SGVector& w, const libl } auto pb = progress(range(10)); + while(iter < max_iter) { Gmax_new = 0; Gnorm1_new = 0; + auto prng = get_prng(); for(i=0; irandom(i, active_size - 1); + std::uniform_int_distribution dist(i, active_size - 1); + int j = dist(prng); CMath::swap(index[i], index[j]); } diff --git a/src/shogun/statistical_testing/QuadraticTimeMMD.cpp b/src/shogun/statistical_testing/QuadraticTimeMMD.cpp index 98db8ed1ed2..f675e06b1ac 100644 --- a/src/shogun/statistical_testing/QuadraticTimeMMD.cpp +++ b/src/shogun/statistical_testing/QuadraticTimeMMD.cpp @@ -401,13 +401,15 @@ SGVector CQuadraticTimeMMD::Self::sample_null_spectrum() SGVector null_samples(owner.get_num_null_samples()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); /* finally, sample from null distribution */ for (auto i=0; istd_normal_distrib(); + float64_t z_j = dist(prng); float64_t multiple=CMath::sq(z_j); /* take largest EV, scale by 1/(m+n) on the fly and take abs value*/ diff --git a/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h b/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h index 1e9c7e051aa..3686f2f0391 100644 --- a/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h +++ b/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h @@ -114,11 +114,11 @@ struct CrossValidationMMD : PermutationMMD m_permuted_inds=SGVector(m_xy_inds.size()); m_inverted_permuted_inds.set_const(-1); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto n=0; nadd_subset(m_permuted_inds); SGVector inds=m_stack->get_last_subset()->get_subset_idx(); diff --git a/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h b/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h index ad48ee32f61..9a147da6941 100644 --- a/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h +++ 
b/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h @@ -200,11 +200,11 @@ struct PermutationMMD : ComputeMMD { ASSERT(m_num_null_samples>0); allocate_permutation_inds(); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto n=0; n &assignment_expect, float64_t &min_energy_expect, int32_t N) { - m_rng->set_seed(17); - + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); // ftype SGVector card(2); card[0] = 2; @@ -129,8 +129,8 @@ CFactorGraph* CFactorGraphDataGenerator::random_chain_graph(SGVector &assig for (int32_t x = 0; x < N; ++x) { SGVector data(2); - data[0] = m_rng->random(0.0, 1.0); - data[1] = m_rng->random(0.0, 1.0); + data[0] = dist(prng); + data[1] = dist(prng); SGVector var_index(1); var_index[0] = y * N + x; @@ -146,10 +146,10 @@ CFactorGraph* CFactorGraphDataGenerator::random_chain_graph(SGVector &assig if (x > 0) { SGVector data(4); - float64_t A = m_rng->random(0.0, 1.0); // E(0,0)->A - float64_t C = m_rng->random(0.0, 1.0); // E(1,0)->C - float64_t B = m_rng->random(0.0, 1.0); // E(0,1)->B - float64_t D = m_rng->random(0.0, 1.0); // E(1,1)->D + float64_t A = dist(prng); // E(0,0)->A + float64_t C = dist(prng); // E(1,0)->C + float64_t B = dist(prng); // E(0,1)->B + float64_t D = dist(prng); // E(1,1)->D // Add truncation to ensure submodularity truncate_energy(A, B, C, D); @@ -169,10 +169,10 @@ CFactorGraph* CFactorGraphDataGenerator::random_chain_graph(SGVector &assig if (x == 0 && y > 0) { SGVector data(4); - float64_t A = m_rng->random(0.0, 1.0); // E(0,0)->A - float64_t C = m_rng->random(0.0, 1.0); // E(1,0)->C - float64_t B = m_rng->random(0.0, 1.0); // E(0,1)->B - float64_t D = m_rng->random(0.0, 1.0); // E(1,1)->D + float64_t A = dist(prng); // E(0,0)->A + float64_t C = dist(prng); // E(1,0)->C + float64_t B = dist(prng); // E(0,1)->B + float64_t D = dist(prng); // E(1,1)->D // Add truncation to ensure submodularity truncate_energy(A, B, C, D); @@ -339,7 +339,7 @@ void CFactorGraphDataGenerator::generate_data(int32_t len_label, int32_t len_fea feats = SGMatrix(len_feat, size_data); labels = SGMatrix(len_label, size_data); - + auto prng = get_prng(); for (int32_t k = 0; k < size_data; k++) { // generate a label vector @@ -351,7 +351,7 @@ void CFactorGraphDataGenerator::generate_data(int32_t len_label, int32_t len_fea // generate feature vector SGVector random_indices(len_feat); random_indices.range_fill(); - CMath::permute(random_indices, m_rng.get()); + CMath::permute(random_indices, prng); SGVector v_feat(len_feat); v_feat.zero(); @@ -490,7 +490,6 @@ float64_t CFactorGraphDataGenerator::test_sosvm(EMAPInferType infer_type) SGMatrix feats_train; // Generate random data - m_rng->set_seed(10); // fix the random seed generate_data(4, 12, 8, feats_train, labels_train); int32_t num_sample_train = labels_train.num_cols; diff --git a/src/shogun/structure/StochasticSOSVM.cpp b/src/shogun/structure/StochasticSOSVM.cpp index 96715a8aba6..c96855c11e4 100644 --- a/src/shogun/structure/StochasticSOSVM.cpp +++ b/src/shogun/structure/StochasticSOSVM.cpp @@ -105,12 +105,14 @@ bool CStochasticSOSVM::train_machine(CFeatures* data) // Main loop int32_t k = 0; + auto prng = get_prng(); for (int32_t pi = 0; pi < m_num_iter; ++pi) { for (int32_t si = 0; si < N; ++si) { + std::uniform_int_distribution dist(0, N - 1); // 1) Picking random example - int32_t i = m_rng->random(0, N - 1); + int32_t i = dist(prng); // 2) solve the loss-augmented inference for point i CResultSet* result = m_model->argmax(m_w, i); diff --git 
a/src/shogun/structure/TwoStateModel.cpp b/src/shogun/structure/TwoStateModel.cpp index 47ee065bab7..c28ecc562e8 100644 --- a/src/shogun/structure/TwoStateModel.cpp +++ b/src/shogun/structure/TwoStateModel.cpp @@ -265,23 +265,20 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, SGVector< int32_t > ll(num_exm*exm_len); ll.zero(); int32_t rnb, rl, rp; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for ( int32_t i = 0 ; i < num_exm ; ++i) { SGVector< int32_t > lab(exm_len); lab.zero(); rnb = num_blocks[0] + - CMath::ceil( - (num_blocks[1] - num_blocks[0]) * m_rng->random(0.0, 1.0)) - - 1; + CMath::ceil((num_blocks[1] - num_blocks[0]) * dist(prng)) - 1; for ( int32_t j = 0 ; j < rnb ; ++j ) { rl = block_len[0] + - CMath::ceil( - (block_len[1] - block_len[0]) * m_rng->random(0.0, 1.0)) - - 1; - rp = CMath::ceil((exm_len - rl) * m_rng->random(0.0, 1.0)); + CMath::ceil((block_len[1] - block_len[0]) * dist(prng)) - 1; + rp = CMath::ceil((exm_len - rl) * dist(prng)); for ( int32_t idx = rp-1 ; idx < rp+rl ; ++idx ) { @@ -305,11 +302,10 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, SGMatrix< float64_t > signal(num_features, distort.vlen); distort.range_fill(); - auto prng = std::unique_ptr(new CRandom()); for ( int32_t i = 0 ; i < num_features ; ++i ) { lf = ll; - CMath::permute(distort, prng.get()); + CMath::permute(distort, prng); for ( int32_t j = 0 ; j < d1.vlen ; ++j ) d1[j] = distort[j]; @@ -322,8 +318,11 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, int32_t idx = i*signal.num_cols; for ( int32_t j = 0 ; j < signal.num_cols ; ++j ) - signal[idx++] = - lf[j] + noise_std * m_rng->normal_random((float64_t)0.0, 1.0); + { + std::normal_distribution dist_signal( + (float64_t)0.0, 1.0); + signal[idx++] = lf[j] + noise_std * dist_signal(prng); + } } // Substitute some features by pure noise @@ -331,8 +330,11 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, { int32_t idx = i*signal.num_cols; for ( int32_t j = 0 ; j < signal.num_cols ; ++j ) - signal[idx++] = - noise_std * m_rng->normal_random((float64_t)0.0, 1.0); + { + std::normal_distribution dist_signal( + (float64_t)0.0, 1.0); + signal[idx++] = noise_std * dist_signal(prng); + } } CMatrixFeatures< float64_t >* features = diff --git a/src/shogun/transfer/multitask/LibLinearMTL.cpp b/src/shogun/transfer/multitask/LibLinearMTL.cpp index f0851c23a95..2f4eeabf02c 100644 --- a/src/shogun/transfer/multitask/LibLinearMTL.cpp +++ b/src/shogun/transfer/multitask/LibLinearMTL.cpp @@ -251,6 +251,7 @@ void CLibLinearMTL::solve_l2r_l1l2_svc(const liblinear_problem *prob, double eps auto pb = progress(range(10)); CTime start_time; + auto prng = get_prng(); while (iter < max_iterations && !cancel_computation()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -261,7 +262,8 @@ for (i=0; irandom(i, active_size - 1); + std::uniform_int_distribution dist(i, active_size - 1); + int j = dist(prng); CMath::swap(index[i], index[j]); } diff --git a/tests/unit/base/SGObject_unittest.cc b/tests/unit/base/SGObject_unittest.cc index cb633d658c1..f6fc80059b2 100644 --- a/tests/unit/base/SGObject_unittest.cc +++ b/tests/unit/base/SGObject_unittest.cc @@ -278,11 +278,12 @@ TEST(SGObject,equals_complex_equal) SGMatrix X(1, n); SGMatrix X_test(1, n); SGVector 
Y(n); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t i=0; irandom(0.0, x_range); + std::uniform_real_distribution dist(0.0, x_range); + X[i] = dist(prng); X_test[i]=(float64_t)i / n*x_range; Y[i]=CMath::sin(X[i]); } diff --git a/tests/unit/base/Serialization_unittest.cc b/tests/unit/base/Serialization_unittest.cc index 40014ae54ec..5ddfa2ef758 100644 --- a/tests/unit/base/Serialization_unittest.cc +++ b/tests/unit/base/Serialization_unittest.cc @@ -23,7 +23,8 @@ TEST(Serialization,multiclass_labels) index_t n_class=3; CMulticlassLabels* labels=new CMulticlassLabels(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGVector lab(n); for (index_t i=0; iallocate_confidences_for(n_class); SGVector conf(n_class); for (index_t i=0; istd_normal_distrib(); + conf[i] = dist(prng); - for (index_t i=0; iset_multiclass_confidences(i, conf); /* create serialized copy */ diff --git a/tests/unit/classifier/svm/LibLinear_unittest.cc b/tests/unit/classifier/svm/LibLinear_unittest.cc index 93e63d3d80f..a3632bf8be9 100644 --- a/tests/unit/classifier/svm/LibLinear_unittest.cc +++ b/tests/unit/classifier/svm/LibLinear_unittest.cc @@ -1192,8 +1192,8 @@ TEST(LibLinear,simple_set_train_L1R_L2LOSS_SVC_BIAS) liblin_accuracy = eval->evaluate(pred, ground_truth); for(int i=0;iget_w()[i], t_w[i], 1e-5); - EXPECT_NEAR(ll->get_bias(), t_w[2], 1e-5); + EXPECT_NEAR(ll->get_w()[i], t_w[i], 1e-4); + EXPECT_NEAR(ll->get_bias(), t_w[2], 1e-4); EXPECT_NEAR(liblin_accuracy, 1.0, 1e-5); diff --git a/tests/unit/converter/Isomap_unittest.cc b/tests/unit/converter/Isomap_unittest.cc index 21d387ed4c6..cfe91b0a7e3 100644 --- a/tests/unit/converter/Isomap_unittest.cc +++ b/tests/unit/converter/Isomap_unittest.cc @@ -200,14 +200,15 @@ void check_similarity_of_sets(const std::set& first_set,const std::set< void fill_matrix_with_test_data(SGMatrix& matrix_to_fill) { index_t num_cols = matrix_to_fill.num_cols, num_rows = matrix_to_fill.num_rows; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i = 0; i < num_cols; ++i) { for (index_t j = 0; j < num_rows - 1; ++j) { matrix_to_fill(j, i) = i; } - matrix_to_fill(num_rows - 1, i) = m_rng->std_normal_distrib(); + matrix_to_fill(num_rows - 1, i) = dist(prng); } } diff --git a/tests/unit/distribution/MixtureModel_unittest.cc b/tests/unit/distribution/MixtureModel_unittest.cc index c0ef987ea64..f3f90b0d265 100644 --- a/tests/unit/distribution/MixtureModel_unittest.cc +++ b/tests/unit/distribution/MixtureModel_unittest.cc @@ -40,12 +40,14 @@ using namespace shogun; TEST(MixtureModel,gaussian_mixture_model) { - auto m_rng = std::unique_ptr(new CRandom(2)); + set_global_seed(2); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix data(1,400); for (int32_t i=0;i<100;i++) - data(0, i) = m_rng->std_normal_distrib(); + data(0, i) = dist(prng); for (int32_t i=100;i<400;i++) - data(0, i) = m_rng->std_normal_distrib() + 10; + data(0, i) = dist(prng) + 10; CDenseFeatures* feats=new CDenseFeatures(data); @@ -78,8 +80,8 @@ TEST(MixtureModel,gaussian_mixture_model) SGMatrix cov=outg->get_cov(); float64_t eps=1e-8; - EXPECT_NEAR(m[0],9.863760378,eps); - EXPECT_NEAR(cov(0,0),0.956568199,eps); + EXPECT_NEAR(m[0], 10.0139574310753, eps); + EXPECT_NEAR(cov(0, 0), 0.88920007801, eps); SG_UNREF(outg); SG_UNREF(distr); @@ -89,8 +91,8 @@ TEST(MixtureModel,gaussian_mixture_model) m=outg->get_mean(); cov=outg->get_cov(); - 
EXPECT_NEAR(m[0],-0.208122793,eps); - EXPECT_NEAR(cov(0,0),1.095106568,eps); + EXPECT_NEAR(m[0], -0.170370848432, eps); + EXPECT_NEAR(cov(0, 0), 1.15629910281, eps); SG_UNREF(outg); SG_UNREF(distr); diff --git a/tests/unit/ensemble/MajorityVote_unittest.cc b/tests/unit/ensemble/MajorityVote_unittest.cc index 4999fcc998c..b34a3614159 100644 --- a/tests/unit/ensemble/MajorityVote_unittest.cc +++ b/tests/unit/ensemble/MajorityVote_unittest.cc @@ -45,10 +45,11 @@ TEST(MajorityVote, binary_combine_vector) expected.zero(); v.zero(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t i = 0; i < num_classifiers; ++i) { - int32_t r = m_rng->random(0, 1); + int32_t r = dist(prng); v[i] = (r == 0) ? -1 : r; if (max < ++expected[r]) @@ -73,12 +74,13 @@ TEST(MajorityVote, multiclass_combine_vector) v.zero(); hist.zero(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 2); int64_t max_label = -1; int64_t max = -1; for (index_t i = 0; i < num_classifiers; ++i) { - v[i] = m_rng->random(0, 2); + v[i] = dist(prng); if (max < ++hist[index_t(v[i])]) { max = hist[index_t(v[i])]; diff --git a/tests/unit/ensemble/WeightedMajorityVote_unittest.cc b/tests/unit/ensemble/WeightedMajorityVote_unittest.cc index bcdfcfd8c61..7d8196d3ee1 100644 --- a/tests/unit/ensemble/WeightedMajorityVote_unittest.cc +++ b/tests/unit/ensemble/WeightedMajorityVote_unittest.cc @@ -11,7 +11,8 @@ void generate_random_ensemble_matrix(SGMatrix& em, const SGVector& w) { int32_t num_classes = 3; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, num_classes - 1); for (index_t i = 0; i < em.num_rows; ++i) { SGVector hist(num_classes); @@ -19,7 +20,7 @@ void generate_random_ensemble_matrix(SGMatrix& em, float64_t max = CMath::ALMOST_NEG_INFTY; for (index_t j = 0; j < em.num_cols; ++j) { - int32_t r = m_rng->random(0, num_classes - 1); + int32_t r = dist(prng); em(i,j) = r; hist[r] += w[j]; // if there's a tie mark it the first element will be the winner @@ -70,10 +71,11 @@ TEST(WeightedMajorityVote, binary_combine_vector) expected.zero(); v.zero(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t i = 0; i < num_classifiers; ++i) { - int32_t r = m_rng->random(0, 1); + int32_t r = dist(prng); v[i] = (r == 0) ? 
-1 : r; expected[r] += weights[i]; @@ -96,7 +98,8 @@ TEST(WeightedMajorityVote, multiclass_combine_vector) SGVector weights(num_classifiers); weights.random(0.5, 2.0); CWeightedMajorityVote* mv = new CWeightedMajorityVote(weights); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 2); SGVector v(num_classifiers); SGVector hist(3); @@ -107,7 +110,7 @@ TEST(WeightedMajorityVote, multiclass_combine_vector) float64_t max = -1; for (index_t i = 0; i < num_classifiers; ++i) { - v[i] = m_rng->random(0, 2); + v[i] = dist(prng); hist[index_t(v[i])] += weights[i]; if (max < hist[index_t(v[i])]) { diff --git a/tests/unit/evaluation/CrossValidation_multithread_unittest.cc b/tests/unit/evaluation/CrossValidation_multithread_unittest.cc index 7da2b15307e..41bbb51a38a 100644 --- a/tests/unit/evaluation/CrossValidation_multithread_unittest.cc +++ b/tests/unit/evaluation/CrossValidation_multithread_unittest.cc @@ -48,11 +48,11 @@ using namespace shogun; void generate_data(SGMatrix& mat, SGVector &lab) { int32_t num=lab.size(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib() * 4) - : 100 + (m_rng->std_normal_distrib() * 4); + mat(0, i) = i < num / 2 ? 0 + (dist(prng) * 4) : 100 + (dist(prng) * 4); mat(1,i)=i; } diff --git a/tests/unit/evaluation/SplittingStrategy_unittest.cc b/tests/unit/evaluation/SplittingStrategy_unittest.cc index 27a7ad6d3ce..2214780a132 100644 --- a/tests/unit/evaluation/SplittingStrategy_unittest.cc +++ b/tests/unit/evaluation/SplittingStrategy_unittest.cc @@ -23,20 +23,23 @@ TEST(SplittingStrategy,standard) index_t num_labels; index_t num_subsets; index_t runs=100; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_nl(10, 150); + std::uniform_int_distribution dist_nc(1, 5); + std::uniform_real_distribution dist_sl(-10.0, 10.0); while (runs-->0) { fold_sizes=0; - num_labels = m_rng->random(10, 150); - num_subsets = m_rng->random(1, 5); + num_labels = dist_nl(prng); + num_subsets = dist_nc(prng); index_t desired_size=CMath::round( (float64_t)num_labels/(float64_t)num_subsets); /* build labels */ CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_sl(prng)); /* build splitting strategy */ CCrossValidationSplitting* splitting= @@ -90,19 +93,22 @@ TEST(SplittingStrategy,stratified_subsets_disjoint_cover) { index_t num_labels, num_classes, num_subsets, fold_sizes; index_t runs=50; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_nl(11, 100); + std::uniform_int_distribution dist_nc(2, 10); + std::uniform_int_distribution dist_ns(1, 10); while (runs-->0) { fold_sizes=0; - num_labels = m_rng->random(11, 100); - num_classes = m_rng->random(2, 10); - num_subsets = m_rng->random(1, 10); + num_labels = dist_nl(prng); + num_classes = dist_nc(prng); + num_subsets = dist_ns(prng); /* build labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random_64() % num_classes); + labels->set_label(i, prng() % num_classes); SGVector classes=labels->get_unique_labels(); @@ -170,18 +176,21 @@ TEST(SplittingStrategy,stratified_subset_label_ratio) { index_t num_labels, num_classes, num_subsets; index_t runs=50; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = 
get_prng(); + std::uniform_int_distribution dist_nl(11, 100); + std::uniform_int_distribution dist_nc(2, 10); + std::uniform_int_distribution dist_ns(1, 10); while (runs-->0) { - num_labels = m_rng->random(11, 100); - num_classes = m_rng->random(2, 10); - num_subsets = m_rng->random(1, 10); + num_labels = dist_nl(prng); + num_classes = dist_nc(prng); + num_subsets = dist_ns(prng); /* build labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random_64() % num_classes); + labels->set_label(i, prng() % num_classes); /*No. of labels belonging to one class*/ SGVector class_labels(num_classes); @@ -244,17 +253,19 @@ TEST(SplittingStrategy,LOO) { index_t num_labels, fold_sizes; index_t runs=10; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_nl(10, 50); + std::uniform_real_distribution dist_sl(-10.0, 10.0); while (runs-->0) { fold_sizes=0; - num_labels = m_rng->random(10, 50); + num_labels = dist_nl(prng); /* build labels */ CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_sl(prng)); /* build Leave one out splitting strategy */ CLOOCrossValidationSplitting* splitting= diff --git a/tests/unit/features/CombinedFeatures_unittest.cc b/tests/unit/features/CombinedFeatures_unittest.cc index 60b557b6163..34b193f3d6e 100644 --- a/tests/unit/features/CombinedFeatures_unittest.cc +++ b/tests/unit/features/CombinedFeatures_unittest.cc @@ -72,12 +72,13 @@ TEST(CombinedFeaturesTest,create_merged_copy) SGMatrix data_1(dim,n_1); for (index_t i=0; i(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // data_1.display_matrix("data_1"); SGMatrix data_2(dim,n_2); for (index_t i=0; istd_normal_distrib(); + data_2.matrix[i] = dist(prng); // data_1.display_matrix("data_2"); diff --git a/tests/unit/features/DenseFeatures_unittest.cc b/tests/unit/features/DenseFeatures_unittest.cc index d16d24e263d..7f5c4f33982 100644 --- a/tests/unit/features/DenseFeatures_unittest.cc +++ b/tests/unit/features/DenseFeatures_unittest.cc @@ -45,13 +45,14 @@ TEST(DenseFeaturesTest,create_merged_copy) SGMatrix data_1(dim,n_1); for (index_t i=0; i(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); //data_1.display_matrix("data_1"); SGMatrix data_2(dim,n_2); for (index_t i=0; istd_normal_distrib(); + data_2.matrix[i] = dist(prng); //data_2.display_matrix("data_2"); @@ -132,11 +133,12 @@ TEST(DenseFeaturesTest, copy_dimension_subset) data.matrix[i]=i; CDenseFeatures* features=new CDenseFeatures(data); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, dim - 1); SGVector dims(dim/2); for (index_t i=0; irandom(0, dim - 1); + dims[i] = dist(prng); CDenseFeatures* f_reduced=(CDenseFeatures*) features->copy_dimension_subset(dims); @@ -163,17 +165,23 @@ TEST(DenseFeaturesTest, copy_dimension_subset_with_subsets) data.matrix[i]=i; CDenseFeatures* features=new CDenseFeatures(data); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGVector inds(n/2); for (index_t i=0; irandom(0, n - 1); + { + std::uniform_int_distribution dist_inds(0, n - 1); + inds[i] = dist_inds(prng); + } features->add_subset(inds); SGVector dims(dim/2); for (index_t i=0; irandom(0, dim - 1); + { + std::uniform_int_distribution dist_dim(0, dim - 1); + dims[i] = dist_dim(prng); + } CDenseFeatures* 
f_reduced=(CDenseFeatures*) features->copy_dimension_subset(dims); diff --git a/tests/unit/features/HashedDenseFeatures_unittest.cc b/tests/unit/features/HashedDenseFeatures_unittest.cc index 6397415a144..b581b652db8 100644 --- a/tests/unit/features/HashedDenseFeatures_unittest.cc +++ b/tests/unit/features/HashedDenseFeatures_unittest.cc @@ -320,11 +320,12 @@ TEST(HashedDenseFeaturesTest, dense_comparison) int32_t hashing_dim = 300; CHashedDenseFeatures* h_feats = new CHashedDenseFeatures(data, hashing_dim); CDenseFeatures* d_feats = new CDenseFeatures(data); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-hashing_dim, hashing_dim); SGVector dense_vec(hashing_dim); for (index_t i=0; irandom(-hashing_dim, hashing_dim); + dense_vec[i] = dist(prng); for (index_t i=0; idot(i, h_feats, i), d_feats->dot(i, d_feats, i)); diff --git a/tests/unit/features/HashedDocDotFeatures_unittest.cc b/tests/unit/features/HashedDocDotFeatures_unittest.cc index a9ffd88a3cb..961a7ddea57 100644 --- a/tests/unit/features/HashedDocDotFeatures_unittest.cc +++ b/tests/unit/features/HashedDocDotFeatures_unittest.cc @@ -77,7 +77,7 @@ TEST(HashedDocDotFeaturesTest, dense_dot_test) const char* doc_1 = "You're never too old to rock and roll, if you're too young to die"; const char* doc_2 = "Give me some rope, tie me to dream, give me the hope to run out of steam"; const char* doc_3 = "Thank you Jack Daniels, Old Number Seven, Tennessee Whiskey got me drinking in heaven"; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGString string_1(65); for (index_t i=0; i<65; i++) string_1.string[i] = doc_1[i]; @@ -109,10 +109,10 @@ TEST(HashedDocDotFeaturesTest, dense_dot_test) CHashedDocConverter* converter = new CHashedDocConverter(tokenizer, hash_bits, false); CSparseFeatures* converted_docs = (CSparseFeatures* ) converter->apply(doc_collection); - + std::uniform_int_distribution dist(-dimension, dimension); SGVector vec(dimension); for (index_t i=0; irandom(-dimension, dimension); + vec[i] = dist(prng); for (index_t i=0; i<3; i++) { diff --git a/tests/unit/features/StreamingDenseFeatures_unittest.cc b/tests/unit/features/StreamingDenseFeatures_unittest.cc index 6a64b09b7b0..a0eabdd340b 100644 --- a/tests/unit/features/StreamingDenseFeatures_unittest.cc +++ b/tests/unit/features/StreamingDenseFeatures_unittest.cc @@ -25,10 +25,11 @@ TEST(StreamingDenseFeaturesTest, example_reading_from_file) index_t dim=2; char fname[] = "StreamingDenseFeatures_reading.XXXXXX"; generate_temp_filename(fname); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix data(dim,n); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* orig_feats=new CDenseFeatures(data); CCSVFile* saved_features = new CCSVFile(fname, 'w'); @@ -68,10 +69,11 @@ TEST(StreamingDenseFeaturesTest, example_reading_from_features) { index_t n=20; index_t dim=2; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix data(dim,n); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* orig_feats=new CDenseFeatures(data); CStreamingDenseFeatures* feats = new CStreamingDenseFeatures(orig_feats); @@ -100,10 +102,11 @@ TEST(StreamingDenseFeaturesTest, reset_stream) { index_t n=20; index_t dim=2; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); 
SGMatrix data(dim,n); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* orig_feats=new CDenseFeatures(data); CStreamingDenseFeatures* feats=new CStreamingDenseFeatures(orig_feats); diff --git a/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc b/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc index e74a7ab3662..e8ca3629089 100644 --- a/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc +++ b/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc @@ -81,7 +81,7 @@ TEST(StreamingHashedDocFeaturesTest, dot_tests) const char* doc_1 = "You're never too old to rock and roll, if you're too young to die"; const char* doc_2 = "Give me some rope, tie me to dream, give me the hope to run out of steam"; const char* doc_3 = "Thank you Jack Daniels, Old Number Seven, Tennessee Whiskey got me drinking in heaven"; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGString string_1(65); for (index_t i=0; i<65; i++) string_1.string[i] = doc_1[i]; @@ -111,8 +111,9 @@ TEST(StreamingHashedDocFeaturesTest, dot_tests) feats->start_parser(); SGVector dense_vec(32); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t j=0; j<32; j++) - dense_vec[j] = m_rng->random(0.0, 1.0); + dense_vec[j] = dist(prng); index_t i = 0; while (feats->get_next_example()) diff --git a/tests/unit/features/StreamingSparseFeatures_unittest.cc b/tests/unit/features/StreamingSparseFeatures_unittest.cc index 34da51f628d..eec8d528ecf 100644 --- a/tests/unit/features/StreamingSparseFeatures_unittest.cc +++ b/tests/unit/features/StreamingSparseFeatures_unittest.cc @@ -27,7 +27,10 @@ TEST(StreamingSparseFeaturesTest, parse_file) int32_t max_num_entries=20; int32_t max_label_value=1; float64_t max_entry_value=1; - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_p( + -max_label_value, max_label_value); + std::uniform_real_distribution dist_q(0, max_num_entries); int32_t num_vec=10; int32_t num_feat=0; @@ -36,22 +39,21 @@ TEST(StreamingSparseFeaturesTest, parse_file) float64_t* labels=SG_MALLOC(float64_t, num_vec); for (int32_t i=0; i(rand->random(0, max_num_entries)); - labels[i]=(float64_t) rand->random(-max_label_value, max_label_value); - for (int32_t j=0; jnum_feat) - num_feat=feat_index; + data[i] = SGSparseVector(dist_q(prng)); + labels[i] = (float64_t)dist_p(prng); + for (int32_t j = 0; j < data[i].num_feat_entries; j++) + { + int32_t feat_index = (j + 1) * 2; + if (feat_index > num_feat) + num_feat = feat_index; - data[i].features[j].feat_index=feat_index-1; - data[i].features[j].entry=rand->random(0., max_entry_value); - } + data[i].features[j].feat_index = feat_index - 1; + data[i].features[j].entry = dist_q(prng); + } } CLibSVMFile* fout = new CLibSVMFile(fname, 'w', NULL); fout->set_sparse_matrix(data, num_feat, num_vec, labels); SG_UNREF(fout); - SG_FREE(rand); CStreamingAsciiFile *file = new CStreamingAsciiFile(fname); CStreamingSparseFeatures *stream_features = diff --git a/tests/unit/features/StringFeatures_unittest.cc b/tests/unit/features/StringFeatures_unittest.cc index 4ae344af5f2..8bf042e95e1 100644 --- a/tests/unit/features/StringFeatures_unittest.cc +++ b/tests/unit/features/StringFeatures_unittest.cc @@ -17,19 +17,22 @@ using namespace shogun; SGStringList generateRandomData(index_t num_strings=10, index_t max_string_length=20, index_t min_string_length=10) { SGStringList strings(num_strings, max_string_length); - auto m_rng = std::unique_ptr(new CRandom()); + auto 
prng = get_prng(); + std::uniform_int_distribution dist_len( + min_string_length, max_string_length); + std::uniform_int_distribution dist_str('A', 'Z'); //SG_SPRINT("original string data:\n"); for (index_t i=0; irandom(min_string_length, max_string_length); + index_t len = dist_len(prng); SGString current(len); //SG_SPRINT("[%i]: \"", i); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); + current.string[j] = (char)dist_str(prng); /* attach \0 to print letter */ char* string=SG_MALLOC(char, 2); diff --git a/tests/unit/io/CSVFile_unittest.cc b/tests/unit/io/CSVFile_unittest.cc index 43721520ef3..2caf5091d0b 100644 --- a/tests/unit/io/CSVFile_unittest.cc +++ b/tests/unit/io/CSVFile_unittest.cc @@ -1,8 +1,8 @@ +#include #include -#include -#include #include -#include +#include +#include #include #include @@ -13,12 +13,13 @@ using namespace shogun; TEST(CSVFileTest, vector_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); int32_t len=512*512; + std::uniform_int_distribution dist(0, len); SGVector data(len); for (int32_t i=0; irandom(0, len); + data[i] = (int32_t)dist(prng); CCSVFile* fin; CCSVFile* fout; @@ -39,18 +40,18 @@ TEST(CSVFileTest, vector_int32) EXPECT_EQ(data_from_file[i], data[i]); } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_vector_int32_output.txt"); } TEST(CSVFileTest, vector_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t len=128*128; SGVector data(len); for (int32_t i=0; irandom(0., 1.); + data[i] = (float64_t)dist(prng); CCSVFile* fin; CCSVFile* fout; @@ -71,21 +72,21 @@ TEST(CSVFileTest, vector_float64) EXPECT_NEAR(data_from_file[i], data[i], 1E-14); } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_vector_float64_output.txt"); } TEST(CSVFileTest, matrix_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); - int32_t num_rows=512; - int32_t num_cols=512; - SGMatrix data(num_rows, num_cols); - for (int32_t i=0; i data(num_rows, num_cols); + std::uniform_int_distribution dist(0, num_rows); + for (index_t i = 0; i < num_rows; i++) { - for (int32_t j=0; jrandom(0, num_rows); + for (index_t j = 0; j < num_cols; j++) + data(i, j) = (index_t)dist(prng); } CCSVFile* fin; @@ -110,13 +111,13 @@ TEST(CSVFileTest, matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_matrix_int32_output.txt"); } TEST(CSVFileTest, matrix_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t num_rows=128; int32_t num_cols=128; @@ -124,7 +125,7 @@ TEST(CSVFileTest, matrix_float64) for (int32_t i=0; irandom(0., 1.); + data(i, j) = (float64_t)dist(prng); } CCSVFile* fin; @@ -149,7 +150,6 @@ TEST(CSVFileTest, matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_matrix_float64_output.txt"); } diff --git a/tests/unit/io/LibSVMFile_unittest.cc b/tests/unit/io/LibSVMFile_unittest.cc index fddd21b24ce..82d4e539e2c 100644 --- a/tests/unit/io/LibSVMFile_unittest.cc +++ b/tests/unit/io/LibSVMFile_unittest.cc @@ -1,6 +1,6 @@ +#include #include #include -#include #include @@ -13,7 +13,11 @@ TEST(LibSVMFileTest, sparse_matrix_int32) int32_t max_num_entries = 512; int32_t max_label_value = 1; int32_t max_entry_value = 1024; - CRandom * rand = new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_d(0, max_num_entries); + std::uniform_int_distribution dist_l( + -max_label_value, max_label_value); + std::uniform_int_distribution dist_e(0, 
max_entry_value); int32_t num_vec = 10; int32_t num_feat = 0; @@ -32,11 +36,11 @@ TEST(LibSVMFileTest, sparse_matrix_int32) for (int32_t i = 0; i < num_vec; i++) { - data[i] = SGSparseVector(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_d(prng)); if (i > 2) { labels[i] = SGVector(1); - labels[i][0] = rand->random(-max_label_value, max_label_value); + labels[i][0] = dist_l(prng); } for (int32_t j = 0; j < data[i].num_feat_entries; j++) { @@ -47,7 +51,7 @@ TEST(LibSVMFileTest, sparse_matrix_int32) } data[i].features[j].feat_index = feat_index - 1; - data[i].features[j].entry = rand->random(0, max_entry_value); + data[i].features[j].entry = dist_e(prng); } } @@ -85,7 +89,6 @@ TEST(LibSVMFileTest, sparse_matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(labels); SG_FREE(data_from_file); @@ -98,7 +101,11 @@ TEST(LibSVMFileTest, sparse_matrix_float64) { int32_t max_num_entries = 512; int32_t max_label_value = 1; - CRandom * rand = new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_d(0, max_num_entries); + std::uniform_int_distribution dist_l( + -max_label_value, max_label_value); + std::uniform_real_distribution dist_e(0.0, 1.0); int32_t num_vec = 1024; int32_t num_feat = 0; @@ -117,11 +124,11 @@ TEST(LibSVMFileTest, sparse_matrix_float64) for (int32_t i = 0; i < num_vec; i++) { - data[i] = SGSparseVector(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_d(prng)); if (i > 2) { labels[i] = SGVector(1); - labels[i][0] = rand->random(-max_label_value, max_label_value); + labels[i][0] = dist_l(prng); } for (int32_t j = 0; j < data[i].num_feat_entries; j++) @@ -133,7 +140,7 @@ TEST(LibSVMFileTest, sparse_matrix_float64) } data[i].features[j].feat_index = feat_index - 1; - data[i].features[j].entry = rand->random(0., 1.); + data[i].features[j].entry = dist_e(prng); } } @@ -171,7 +178,6 @@ TEST(LibSVMFileTest, sparse_matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(labels); SG_FREE(data_from_file); diff --git a/tests/unit/io/ProtobufFile_unittest.cc b/tests/unit/io/ProtobufFile_unittest.cc index 6f720683191..d582699e48d 100644 --- a/tests/unit/io/ProtobufFile_unittest.cc +++ b/tests/unit/io/ProtobufFile_unittest.cc @@ -1,8 +1,8 @@ -#include +#include #include #include #include -#include +#include #include @@ -18,12 +18,13 @@ using namespace shogun; TEST(ProtobufFileTest, vector_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); int32_t len=1024*1024; + std::uniform_int_distribution dist(0, len); SGVector data(len); for (int32_t i=0; irandom(0, len); + data[i] = (int32_t)dist(prng); CProtobufFile* fin; CProtobufFile* fout; @@ -42,18 +43,18 @@ TEST(ProtobufFileTest, vector_int32) EXPECT_EQ(data_from_file[i], data[i]); } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_vector_int32_output.txt"); } TEST(ProtobufFileTest, vector_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t len=1024*1024; SGVector data(len); for (int32_t i=0; irandom(0, 1); + data[i] = (float64_t)dist(prng); CProtobufFile* fin; CProtobufFile* fout; @@ -72,21 +73,21 @@ TEST(ProtobufFileTest, vector_float64) EXPECT_NEAR(data_from_file[i], data[i], 1E-14); } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_vector_float64_output.txt"); } TEST(ProtobufFileTest, matrix_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); int32_t num_rows=1024; int32_t num_cols=512; + std::uniform_int_distribution dist(0, num_rows); 
SGMatrix data(num_rows, num_cols); for (int32_t i=0; irandom(0, num_rows); + data(i, j) = (int32_t)dist(prng); } CProtobufFile* fin; @@ -109,13 +110,13 @@ TEST(ProtobufFileTest, matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_matrix_int32_output.txt"); } TEST(ProtobufFileTest, matrix_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t num_rows=1024; int32_t num_cols=512; @@ -123,7 +124,7 @@ TEST(ProtobufFileTest, matrix_float64) for (int32_t i=0; irandom(0, 1); + data(i, j) = (float64_t)dist(prng); } CProtobufFile* fin; @@ -146,7 +147,6 @@ TEST(ProtobufFileTest, matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_matrix_float64_output.txt"); } @@ -154,7 +154,9 @@ TEST(ProtobufFileTest, sparse_matrix_int32) { int32_t max_num_entries=512; int32_t max_entry_value=1024; - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(0, max_num_entries); + std::uniform_int_distribution dist_q(0, max_entry_value); int32_t num_vec=1024; int32_t num_feat=0; @@ -162,7 +164,7 @@ TEST(ProtobufFileTest, sparse_matrix_int32) SGSparseVector* data=SG_MALLOC(SGSparseVector, num_vec); for (int32_t i=0; i(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_p(prng)); for (int32_t j=0; jrandom(0, max_entry_value); + data[i].features[j].entry = dist_q(prng); } } @@ -200,7 +202,6 @@ TEST(ProtobufFileTest, sparse_matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(data_from_file); @@ -211,7 +212,9 @@ TEST(ProtobufFileTest, sparse_matrix_int32) TEST(ProtobufFileTest, sparse_matrix_float64) { int32_t max_num_entries=512; - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(0, max_num_entries); + std::uniform_real_distribution dist_q(0.0, 1.0); int32_t num_vec=1024; int32_t num_feat=0; @@ -219,7 +222,7 @@ TEST(ProtobufFileTest, sparse_matrix_float64) SGSparseVector* data=SG_MALLOC(SGSparseVector, num_vec); for (int32_t i=0; i(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_p(prng)); for (int32_t j=0; jrandom(0., 1.); + data[i].features[j].entry = dist_q(prng); } } @@ -257,7 +260,6 @@ TEST(ProtobufFileTest, sparse_matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(data_from_file); @@ -267,16 +269,18 @@ TEST(ProtobufFileTest, sparse_matrix_float64) TEST(ProtobufFileTest, DISABLED_string_list_char) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist_q(0, 255); int32_t num_str=1024; int32_t max_string_len=1024; + std::uniform_int_distribution dist_p(0, max_string_len); SGString* strings=SG_MALLOC(SGString, num_str); for (int32_t i=0; i((int32_t) rand->random(1, max_string_len)); + strings[i] = SGString((int32_t)dist_p(prng)); for (int32_t j=0; jrandom(0, 255); + strings[i].string[j] = (char)dist_q(prng); } CProtobufFile* fin; @@ -300,7 +304,6 @@ TEST(ProtobufFileTest, DISABLED_string_list_char) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(strings); SG_FREE(data_from_file); diff --git a/tests/unit/kernel/CustomKernel_unittest.cc b/tests/unit/kernel/CustomKernel_unittest.cc index 15c31895c12..82626a3501c 100644 --- a/tests/unit/kernel/CustomKernel_unittest.cc +++ b/tests/unit/kernel/CustomKernel_unittest.cc @@ -15,7 +15,6 @@ #include #include #include -#include #include using namespace shogun; @@ -36,10 +35,10 @@ TEST(CustomKernelTest,add_row_subset) inds.range_fill(); index_t num_runs=10; - auto prng = 
std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t i=0; iadd_subset(inds); custom->add_row_subset(inds); diff --git a/tests/unit/kernel/Kernel_unittest.cc b/tests/unit/kernel/Kernel_unittest.cc index 2e0b9e299d3..8e16ae6a4aa 100644 --- a/tests/unit/kernel/Kernel_unittest.cc +++ b/tests/unit/kernel/Kernel_unittest.cc @@ -41,11 +41,12 @@ static SGMatrix generate_std_norm_matrix(const index_t num_feats, const index_t dim) { SGMatrix data(dim, num_feats); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + data(j, i) = dist(prng); } return data; } diff --git a/tests/unit/kernel/SubsequenceStringKernel_unittest.cc b/tests/unit/kernel/SubsequenceStringKernel_unittest.cc index 19caffbd768..c25f1e1b0ee 100644 --- a/tests/unit/kernel/SubsequenceStringKernel_unittest.cc +++ b/tests/unit/kernel/SubsequenceStringKernel_unittest.cc @@ -61,19 +61,23 @@ TEST(SubsequenceStringKernel, psd_random_feat) const index_t min_len=max_len/2; SGStringList list(num_strings, max_len); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_cl(min_len, max_len); + std::uniform_int_distribution dist_str('A', 'Z'); + std::uniform_int_distribution dist_sl(1, min_len); + std::uniform_real_distribution dist_ld(0.0, 1.0); for (index_t i=0; irandom(min_len, max_len); + index_t cur_len = dist_cl(prng); SGString str(cur_len); for (index_t l=0; lrandom('A', 'Z')); + str.string[l] = char(dist_str(prng)); list.strings[i]=str; } CStringFeatures* s_feats=new CStringFeatures(list, ALPHANUM); - int32_t s_len = m_rng->random(1, min_len); - float64_t lambda = m_rng->random(0.0, 1.0); + int32_t s_len = dist_sl(prng); + float64_t lambda = dist_ld(prng); CSubsequenceStringKernel* kernel=new CSubsequenceStringKernel(s_feats, s_feats, s_len, lambda); SGMatrix kernel_matrix=kernel->get_kernel_matrix(); diff --git a/tests/unit/lib/DynamicArray_unittest.cc b/tests/unit/lib/DynamicArray_unittest.cc index a5e0305f020..cc40052e001 100644 --- a/tests/unit/lib/DynamicArray_unittest.cc +++ b/tests/unit/lib/DynamicArray_unittest.cc @@ -62,10 +62,11 @@ TYPED_TEST(CDynamicArrayFixture, set_array) this->wrapper_array->reset_array(); EXPECT_EQ(this->wrapper_array->get_num_elements(), 0); TypeParam* array = SG_MALLOC(TypeParam, 5); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 10); for (int32_t i = 0; i < 5; i++) { - array[i] = (TypeParam)prng->random(1, 10); + array[i] = (TypeParam)dist(prng); } this->wrapper_array->set_array(array, 5); @@ -80,10 +81,11 @@ TYPED_TEST(CDynamicArrayFixture, set_array) TYPED_TEST(CDynamicArrayFixture, const_set_array) { TypeParam* array = SG_MALLOC(TypeParam, 5); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 10); for (int32_t i = 0; i < 5; i++) { - array[i] = (TypeParam)prng->random(1, 10); + array[i] = (TypeParam)dist(prng); } const TypeParam* const_array = array; this->wrapper_array->reset_array(); diff --git a/tests/unit/lib/Memory_unittest.cc b/tests/unit/lib/Memory_unittest.cc index 561ec1e9115..4849fb6550a 100644 --- a/tests/unit/lib/Memory_unittest.cc +++ b/tests/unit/lib/Memory_unittest.cc @@ -67,9 +67,10 @@ TEST(MemoryTest, sg_memcpy) { const index_t size = 10; auto src = SG_CALLOC(float64_t, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); 
for (index_t i=0; istd_normal_distrib(); + src[i] = dist(prng); auto dest = SG_CALLOC(float64_t, size); diff --git a/tests/unit/lib/SGMatrix_unittest.cc b/tests/unit/lib/SGMatrix_unittest.cc index fc10fa59d9e..dd8b1b2d79b 100644 --- a/tests/unit/lib/SGMatrix_unittest.cc +++ b/tests/unit/lib/SGMatrix_unittest.cc @@ -252,14 +252,15 @@ TEST(SGMatrixTest,is_symmetric_float32_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; irandn_float(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -289,14 +290,15 @@ TEST(SGMatrixTest,is_symmetric_float32_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; irandn_float(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -326,12 +328,13 @@ TEST(SGMatrixTest,is_symmetric_float32_true) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; irandn_float(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -342,15 +345,16 @@ TEST(SGMatrixTest,is_symmetric_float64_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(); - mat(j, i)=mat(i, j); + mat(i, j) = dist(prng); + mat(j, i) = mat(i, j); } } @@ -379,15 +383,16 @@ TEST(SGMatrixTest,is_symmetric_float64_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(); - mat(j, i)=mat(i, j); + mat(i, j) = dist(prng); + mat(j, i) = mat(i, j); } } @@ -416,12 +421,13 @@ TEST(SGMatrixTest,is_symmetric_float64_true) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -432,15 +438,15 @@ TEST(SGMatrixTest,is_symmetric_complex128_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); + mat(i, j) = complex128_t(dist(prng), dist(prng)); mat(j, i)=mat(i, j); } } @@ -478,15 +484,15 @@ TEST(SGMatrixTest,is_symmetric_complex128_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); + mat(i, j) = complex128_t(dist(prng), dist(prng)); mat(j, i)=mat(i, j); } } @@ -524,13 +530,13 @@ TEST(SGMatrixTest,is_symmetric_complex128_true) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for 
(index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); + mat(i, j) = complex128_t(dist(prng), dist(prng)); mat(j, i)=mat(i, j); } } @@ -574,22 +580,23 @@ TEST(SGMatrixTest, equals) EXPECT_TRUE(mat.equals(mat)); EXPECT_TRUE(mat.equals(copy)); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); mat=SGMatrix(size, size); for (int64_t i=0; irandn_float(); + mat.matrix[i] = dist(prng); EXPECT_TRUE(mat.equals(mat)); EXPECT_FALSE(mat.equals(copy)); copy=SGMatrix(size, size); EXPECT_FALSE(mat.equals(copy)); - m_rng->set_seed(100); + auto prng_copy = get_prng(); for (int64_t i=0; irandn_float(); + copy.matrix[i] = dist(prng_copy); EXPECT_TRUE(mat.equals(copy)); } @@ -598,9 +605,10 @@ TEST(SGMatrixTest, clone) { const index_t size=10; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int64_t i=0; irandn_float(); + mat.matrix[i] = dist(prng); SGMatrix copy=mat.clone(); @@ -622,8 +630,9 @@ TEST(SGMatrixTest, set_const) { const index_t size=10; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); - const auto value = m_rng->std_normal_distrib(); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); + const auto value = dist(prng); mat.set_const(value); for (int64_t i=0; i mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int64_t i=0; irandn_float(); + mat.matrix[i] = dist(prng); auto max=mat.max_single(); for (int64_t i=0; i mat(n_rows, n_cols); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i = 0; i < n_rows * n_cols; ++i) - mat[i] = m_rng->std_normal_distrib(); + mat[i] = dist(prng); auto vec = mat.get_column_vector(col); @@ -685,9 +696,10 @@ TEST(SGMatrixTest, set_column) SGMatrix mat(n_rows, n_cols); SGVector vec(n_rows); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i = 0; i < n_rows; ++i) - vec[i] = m_rng->std_normal_distrib(); + vec[i] = dist(prng); mat.set_column(col, vec); diff --git a/tests/unit/lib/SGSparseMatrix_unittest.cc b/tests/unit/lib/SGSparseMatrix_unittest.cc index 6f62d40d710..ffc5d5bc9f3 100644 --- a/tests/unit/lib/SGSparseMatrix_unittest.cc +++ b/tests/unit/lib/SGSparseMatrix_unittest.cc @@ -11,13 +11,13 @@ #include #include +#include #include #include #include #include #include #include -#include using namespace shogun; @@ -28,11 +28,12 @@ using namespace Eigen; template void GenerateMatrix(float64_t sparseLevel, int32_t m, int32_t n, int32_t randSeed, MatrixType* matrix) { - CRandom randGenerator(randSeed); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; i(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix mat(n_rows, n_cols); for (index_t i = 0; i < mat.size(); ++i) - mat[i] = m_rng->std_normal_distrib(); + mat[i] = dist(prng); auto vec = SGVector(mat); diff --git a/tests/unit/machine/StochasticGBMachine_unittest.cc b/tests/unit/machine/StochasticGBMachine_unittest.cc index 86c36e4b388..8df4f5375fd 100644 --- a/tests/unit/machine/StochasticGBMachine_unittest.cc +++ b/tests/unit/machine/StochasticGBMachine_unittest.cc @@ -101,16 +101,16 @@ TEST(StochasticGBMachine,sinusoid_curve_fitting) SGVector ret=ret_labels->get_labels(); float64_t epsilon=1e-8; - 
EXPECT_NEAR(ret[0],-0.943157980,epsilon); - EXPECT_NEAR(ret[1],0.769725470,epsilon); - EXPECT_NEAR(ret[2],-0.065691733,epsilon); - EXPECT_NEAR(ret[3],0.251266829,epsilon); - EXPECT_NEAR(ret[4],-0.577155330,epsilon); - EXPECT_NEAR(ret[5],0.113875818,epsilon); - EXPECT_NEAR(ret[6],0.427405429,epsilon); - EXPECT_NEAR(ret[7],-0.098310066,epsilon); - EXPECT_NEAR(ret[8],-0.416565932,epsilon); - EXPECT_NEAR(ret[9],0.542023083,epsilon); + EXPECT_NEAR(ret[0], -0.91580992928965543, epsilon); + EXPECT_NEAR(ret[1], 0.83302568373135366, epsilon); + EXPECT_NEAR(ret[2], 0.42519621523857321, epsilon); + EXPECT_NEAR(ret[3], -0.54396234032218127, epsilon); + EXPECT_NEAR(ret[4], -0.54396234032218127, epsilon); + EXPECT_NEAR(ret[5], 0.64891735887560409, epsilon); + EXPECT_NEAR(ret[6], 0.8330256837313536, epsilon); + EXPECT_NEAR(ret[7], -0.76318443378750656, epsilon); + EXPECT_NEAR(ret[8], -0.52743316035159316, epsilon); + EXPECT_NEAR(ret[9], 0.13643452136869369, epsilon); SG_UNREF(train_feats); SG_UNREF(test_feats); diff --git a/tests/unit/machine/kerneldensity_unittest.cc b/tests/unit/machine/kerneldensity_unittest.cc index a7fdbbb9f30..c5bae890969 100644 --- a/tests/unit/machine/kerneldensity_unittest.cc +++ b/tests/unit/machine/kerneldensity_unittest.cc @@ -169,14 +169,23 @@ TEST(KernelDensity,dual_tree) TEST(KernelDensity,dual_tree_single_tree_equivalence) { - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); SGMatrix data(5,100); - m_rng->fill_array_oo(data.matrix, 500); + for (index_t i = 0; i < 5; ++i) + for (index_t j = 0; j < 100; ++j) + { + data(i, j) = dist(prng); + } CDenseFeatures* feats=new CDenseFeatures(data); SGMatrix test(5,20); - m_rng->fill_array_oo(test.matrix, 100); + for (index_t i = 0; i < 5; ++i) + for (index_t j = 0; j < 20; ++j) + { + test(i, j) = dist(prng); + } CDenseFeatures* testfeats=new CDenseFeatures(test); diff --git a/tests/unit/mathematics/Math_unittest.cc b/tests/unit/mathematics/Math_unittest.cc index 95c85254844..400fdd854c7 100644 --- a/tests/unit/mathematics/Math_unittest.cc +++ b/tests/unit/mathematics/Math_unittest.cc @@ -388,25 +388,26 @@ TEST(CMath, permute) { SGVector v(4); v.range_fill(0); - auto random = std::unique_ptr(new CRandom(2)); - CMath::permute(v, random.get()); - EXPECT_EQ(v[0], 2); - EXPECT_EQ(v[1], 1); + set_global_seed(2); + CMath::permute(v); + EXPECT_EQ(v[0], 0); + EXPECT_EQ(v[1], 2); EXPECT_EQ(v[2], 3); - EXPECT_EQ(v[3], 0); + EXPECT_EQ(v[3], 1); } TEST(CMath, permute_with_random) { SGVector v(4); v.range_fill(0); - auto random = std::unique_ptr(new CRandom(2)); - CMath::permute(v, random.get()); + set_global_seed(2); + auto prng = get_prng(); + CMath::permute(v, prng); - EXPECT_EQ(v[0], 2); - EXPECT_EQ(v[1], 1); + EXPECT_EQ(v[0], 0); + EXPECT_EQ(v[1], 2); EXPECT_EQ(v[2], 3); - EXPECT_EQ(v[3], 0); + EXPECT_EQ(v[3], 1); } TEST(CMath,misc) diff --git a/tests/unit/mathematics/Random_unittest.cc b/tests/unit/mathematics/Random_unittest.cc deleted file mode 100644 index c69b638d6f9..00000000000 --- a/tests/unit/mathematics/Random_unittest.cc +++ /dev/null @@ -1,358 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace shogun; - -const uint32_t n_runs=1200000; -const uint32_t array_len=23; - -/** - * NOTE: these unit tests were generated with MEXP=19937 - * with other exponents it is expected to fail! 
- */ - -TEST(Random, uint32_t) -{ - CRandom* prng = new CRandom(12345); - uint32_t r = prng->random_32(); - SG_FREE(prng); - EXPECT_EQ(1811630862U, r); -} - -TEST(Random, uint64_t) -{ - CRandom* prng = new CRandom(12345); - uint64_t r = prng->random_64(); - SG_FREE(prng); - EXPECT_EQ(18328733385137801998U, r); -} - -TEST(Random, fill_array_uint32) -{ - CRandom* prng = new CRandom(12345); - uint32_t t = 2228230814U; - SGVector rv(2*SFMT_N32+1); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N32]); -} - -#ifdef HAVE_SSE2 -TEST(Random, fill_array_uint32_simd) -{ - CRandom* prng = new CRandom(12345); - uint32_t t = 2228230814U; - SGVector rv(2*SFMT_N32); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N32]); -} -#endif - -TEST(Random, fill_array_uint64) -{ - CRandom* prng = new CRandom(12345); - uint64_t t = 9564086722318310046U; - SGVector rv(2*SFMT_N64+1); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N64]); -} - -#ifdef HAVE_SSE2 -TEST(Random, fill_array_uint64_simd) -{ - CRandom* prng = new CRandom(12345); - uint64_t t = 9564086722318310046U; - SGVector rv(2*SFMT_N64); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N64]); -} -#endif - -TEST(Random, fill_array_oc) -{ - CRandom* prng = new CRandom(12345); - float64_t t = 0.25551924513287405; - SGVector rv(2*dsfmt_get_min_array_size()+1); - prng->fill_array_oc(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_DOUBLE_EQ(t, rv[dsfmt_get_min_array_size()]); -} - -#ifdef HAVE_SSE2 -TEST(Random, fill_array_oc_simd) -{ - CRandom* prng = new CRandom(12345); - float64_t t = 0.25551924513287405; - SGVector rv(2*dsfmt_get_min_array_size()); - prng->fill_array_oc(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_DOUBLE_EQ(t, rv[dsfmt_get_min_array_size()]); -} -#endif - -TEST(Random, normal_distrib) -{ - CRandom* prng = new CRandom(12345); - float64_t t = 75.567130769021162; - float64_t r = prng->normal_distrib(100.0, 10.0); - SG_FREE(prng); - - EXPECT_DOUBLE_EQ(t, r); -} - -TEST(Random, random_uint64_1_2) -{ - auto m_rng = std::unique_ptr(new CRandom(17)); - for (int32_t i=0; i<10000; i++) - { - uint64_t r = m_rng->random((uint64_t)1, (uint64_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } -} - -TEST(Random, random_uint64_0_10) -{ - CRandom* prng = new CRandom(17); - int rnds[10] = {0,0,0,0,0,0}; - for (int32_t i=0; i<10000; i++) - { - uint64_t r = prng->random((uint64_t)0, (uint64_t)9); - rnds[r]++; - } - - for (int32_t i=0; i<10; i++) { - EXPECT_TRUE(rnds[i]>0); - } - SG_FREE(prng); -} - -TEST(Random, random_int64_1_2) -{ - CRandom* prng = new CRandom(17); - for (int32_t i=0; i<10000; i++) - { - int64_t r = prng->random((int64_t)1, (int64_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } -} - -TEST(Random, random_int64_0_10) -{ - CRandom* prng = new CRandom(17); - int rnds[10] = {0,0,0,0,0,0}; - for (int32_t i=0; i<10000; i++) - { - int64_t r = prng->random((int64_t)0, (int64_t)9); - rnds[r]++; - } - - for (int32_t i=0; i<10; i++) { - EXPECT_TRUE(rnds[i]>0); - } - SG_FREE(prng); -} - -TEST(Random, random_uint32_1_2) -{ - CRandom* prng = new CRandom(17); - for (int32_t i=0; i<10000; i++) - { - uint32_t r = prng->random((uint32_t)1, (uint32_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } - SG_FREE(prng); -} - -TEST(Random, random_uint32_0_10) -{ - CRandom* prng = new CRandom(17); - int rnds[10] = {0,0,0,0,0,0}; - for (int32_t i=0; i<10000; i++) - { - uint32_t r = prng->random((uint32_t)0, (uint32_t)9); - rnds[r]++; - } - - for 
(int32_t i=0; i<10; i++) { - EXPECT_TRUE(rnds[i]>0); - } - SG_FREE(prng); -} - -TEST(Random, random_int32_1_2) -{ - CRandom* prng = new CRandom(17); - for (int32_t i=0; i<10000; i++) - { - int32_t r = prng->random((int32_t)1, (int32_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } - SG_FREE(prng); -} - -TEST(Random, random_int64_range) -{ - CRandom* prng = new CRandom(17); - int rnds[array_len]; - for (uint32_t i=0; irandom((int64_t)0, (int64_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; irandom((uint64_t)0, (uint64_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; irandom((int32_t)0, (int32_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; irandom((uint32_t)0, (uint32_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; iset_seed(17); - int rnds[array_len]; - for (uint32_t i=0; irandom_32() % array_len; - rnds[r]++; - } - - for (uint32_t i=0; i(new CRandom(17)); - for (uint32_t i=0; irandom((float64_t)0, (float64_t)array_len); - rnds[r]++; - } - - for (uint32_t i=0; irandom((float64_t)0, (float64_t)1.0); - min=CMath::min(min, r); - max=CMath::max(max, r); - } - EXPECT_GE(max, 0.99999); - EXPECT_LE(min, 0.00001); - SG_FREE(prng); -} - -TEST(Random, random_std_normal_quantiles) -{ - CRandom* prng = new CRandom(); - - int64_t m=10000000; - SGVector counts(10); - counts.zero(); - - for (int64_t i=0; istd_normal_distrib(), 1); - index_t idx=(int32_t)(quantile*counts.vlen); - counts[idx]++; - } - - SG_FREE(prng); - - for (index_t i=0; i C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CFFDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc b/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc index 76a43c30942..1f0a04290fb 100644 --- a/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc +++ b/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc @@ -26,7 +26,8 @@ TEST(CJADiagOrth, diagonalize) C_dims[1] = 10; C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CJADiagOrth, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Building a random orthonormal matrix A diff --git a/tests/unit/mathematics/ajd/JADiag_unittest.cc b/tests/unit/mathematics/ajd/JADiag_unittest.cc index 4189d9ff7bb..80463795522 100644 --- a/tests/unit/mathematics/ajd/JADiag_unittest.cc +++ b/tests/unit/mathematics/ajd/JADiag_unittest.cc @@ -25,8 +25,8 @@ TEST(CJADiag, diagonalize) C_dims[1] = 10; C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +34,7 @@ TEST(CJADiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/JediDiag_unittest.cc b/tests/unit/mathematics/ajd/JediDiag_unittest.cc index a04557e9a86..4562a38badc 100644 --- 
a/tests/unit/mathematics/ajd/JediDiag_unittest.cc +++ b/tests/unit/mathematics/ajd/JediDiag_unittest.cc @@ -26,7 +26,8 @@ TEST(CJediDiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CJediDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/QDiag_unittest.cc b/tests/unit/mathematics/ajd/QDiag_unittest.cc index 0cd3aef57a3..3bc3cb91ddc 100644 --- a/tests/unit/mathematics/ajd/QDiag_unittest.cc +++ b/tests/unit/mathematics/ajd/QDiag_unittest.cc @@ -26,7 +26,8 @@ TEST(CQDiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CQDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/UWedge_unittest.cc b/tests/unit/mathematics/ajd/UWedge_unittest.cc index 8a11a2ed209..e6e52c961a8 100644 --- a/tests/unit/mathematics/ajd/UWedge_unittest.cc +++ b/tests/unit/mathematics/ajd/UWedge_unittest.cc @@ -26,7 +26,8 @@ TEST(CUWedge, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -35,7 +36,7 @@ TEST(CUWedge, diagonalize) for (int j = 0; j < C_dims[0]; j++) { - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } } diff --git a/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc b/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc index 730352d5093..23ec8f57c75 100644 --- a/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc +++ b/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc @@ -28,11 +28,12 @@ TEST(ConjugateOrthogonalCGSolver, solve) // diagonal non-Hermintian matrix with random complex entries SGVector diag(size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - float64_t imag = m_rng->std_normal_distrib(); + float64_t real = dist(prng); + float64_t imag = dist(prng); diag[i]=complex128_t(real, imag); } A->set_diagonal(diag); @@ -40,7 +41,7 @@ TEST(ConjugateOrthogonalCGSolver, solve) // vector b of the system SGVector b(size); for (index_t i=0; istd_normal_distrib(); + b[i] = dist(prng); // Solve with COCG CConjugateOrthogonalCGSolver* cocg_linear_solver diff --git a/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc b/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc index 13ddeb25d66..2acc0a6c33b 100644 --- a/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc +++ b/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -28,11 +27,10 @@ TEST(DirectSparseLinearSolver, solve) CSparseMatrixOperator* A=new CSparseMatrixOperator(sm); 
SGVector diag(size); float64_t difficulty=5; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib()), difficulty) + - 0.0001; + diag[i] = CMath::pow(CMath::abs(dist(prng)), difficulty) + 0.0001; A->set_diagonal(diag); CDirectSparseLinearSolver* linear_solver=new CDirectSparseLinearSolver(); diff --git a/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc b/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc index 9a0ad176486..f87b8724362 100644 --- a/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc +++ b/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc @@ -29,12 +29,14 @@ TEST(LanczosEigenSolver, compute) { const int32_t size=4; SGMatrix m(size, size); - auto m_rng = std::unique_ptr(new CRandom()); - m.set_const(m_rng->random(50.0, 100.0)); + auto prng = get_prng(); + std::uniform_real_distribution dist(50.0, 100.0); + std::uniform_real_distribution dist_t(100.0, 10000.0); + m.set_const(dist(prng)); // Hermintian matrix for (index_t i=0; irandom(100.0, 10000.0); + m(i, i) = dist_t(prng); // Creating sparse linear operator to use with Lanczos CSparseFeatures feat(m); @@ -81,15 +83,15 @@ TEST(LanczosEigenSolver, compute_big_diag_matrix) SGSparseMatrix sm(size, size); CSparseMatrixOperator* op=new CSparseMatrixOperator(sm); SG_REF(op); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // set its diagonal SGVector diag(size); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } op->set_diagonal(diag); diff --git a/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc b/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc index 2fb90523a00..f15d655015d 100644 --- a/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc +++ b/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -40,7 +39,6 @@ TEST(LogDetEstimator, sample) { CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); - const index_t size=2; SGMatrix mat(size, size); mat(0,0)=2.0; @@ -166,16 +164,18 @@ TEST(LogDetEstimator, sample_ratapp_dense) #ifdef HAVE_LAPACK TEST(LogDetEstimator, sample_ratapp_probing_sampler) { + set_global_seed(1); CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); const index_t size=16; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); mat.set_const(0.0); for (index_t i=0; istd_normal_distrib()) * 1000; + float64_t value = CMath::abs(dist(prng)) * 1000; mat(i,i)=value<1.0?10.0:value; } @@ -254,16 +254,18 @@ TEST(LogDetEstimator, sample_ratapp_probing_sampler) TEST(LogDetEstimator, sample_ratapp_probing_sampler_cgm) { + set_global_seed(1); CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); const index_t size=16; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); mat.set_const(0.0); for (index_t i=0; istd_normal_distrib()) * 1000; + float64_t value = CMath::abs(dist(prng)) * 1000; mat(i,i)=value<1.0?10.0:value; } @@ -338,6 +340,7 @@ TEST(LogDetEstimator, sample_ratapp_probing_sampler_cgm) TEST(LogDetEstimator, sample_ratapp_big_diag_matrix) { + set_global_seed(1); CSerialComputationEngine* e=new 
CSerialComputationEngine; SG_REF(e); @@ -351,14 +354,14 @@ TEST(LogDetEstimator, sample_ratapp_big_diag_matrix) CSparseMatrixOperator* op=new CSparseMatrixOperator(sm); SG_REF(op); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // set its diagonal SGVector diag(size); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } op->set_diagonal(diag); @@ -399,6 +402,7 @@ TEST(LogDetEstimator, sample_ratapp_big_diag_matrix) TEST(LogDetEstimator, sample_ratapp_big_matrix) { + set_global_seed(1); CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); @@ -412,12 +416,12 @@ TEST(LogDetEstimator, sample_ratapp_big_matrix) // set its diagonal SGVector diag(size); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } // set its subdiagonal float64_t entry=min_eigenvalue/2; diff --git a/tests/unit/mathematics/linalg/NormalSampler_unittest.cc b/tests/unit/mathematics/linalg/NormalSampler_unittest.cc index 9519607a185..6e7c1e3edfa 100644 --- a/tests/unit/mathematics/linalg/NormalSampler_unittest.cc +++ b/tests/unit/mathematics/linalg/NormalSampler_unittest.cc @@ -20,6 +20,7 @@ using namespace Eigen; TEST(NormalSampler, sample) { + set_global_seed(1); const index_t dimension=2; const index_t num_samples=5000; SGMatrix samples(num_samples, dimension); diff --git a/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc b/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc index ad15e445b30..1090db18cfa 100644 --- a/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc +++ b/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc @@ -84,19 +84,20 @@ TEST(ProbingSampler, probing_samples_big_diag_matrix) float64_t difficulty=3; float64_t min_eigenvalue=0.0001; + set_global_seed(1); // create a sparse matrix const index_t size=10000; SGSparseMatrix sm(size, size); CSparseMatrixOperator* op=new CSparseMatrixOperator(sm); SG_REF(op); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // set its diagonal SGVector diag(size); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } op->set_diagonal(diag); diff --git a/tests/unit/multiclass/BaggingMachine_unittest.cc b/tests/unit/multiclass/BaggingMachine_unittest.cc index 696d9be0a88..1a186bca877 100644 --- a/tests/unit/multiclass/BaggingMachine_unittest.cc +++ b/tests/unit/multiclass/BaggingMachine_unittest.cc @@ -144,10 +144,10 @@ TEST_F(BaggingMachine, classify_CART) EXPECT_EQ(0.0,res_vector[1]); EXPECT_EQ(0.0,res_vector[2]); EXPECT_EQ(1.0,res_vector[3]); - EXPECT_EQ(1.0,res_vector[4]); + EXPECT_EQ(0.0, res_vector[4]); auto eval = some(); - EXPECT_NEAR(0.642857,c->get_oob_error(eval),1e-6); + EXPECT_NEAR(0.5714285, c->get_oob_error(eval), 1e-6); SG_UNREF(result); } diff --git a/tests/unit/multiclass/LaRank_unittest.cc b/tests/unit/multiclass/LaRank_unittest.cc index dd27bb8e9f9..79f267b0dba 100644 --- a/tests/unit/multiclass/LaRank_unittest.cc +++ b/tests/unit/multiclass/LaRank_unittest.cc @@ -18,14 +18,15 @@ TEST(LaRank,train) SGMatrix matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* 
labels_test=new CMulticlassLabels(num_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - matrix_test(j, i) = m_rng->std_normal_distrib(); + matrix(j, i) = dist(prng); + matrix_test(j, i) = dist(prng); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/tests/unit/multiclass/MulticlassLibLinear_unittest.cc b/tests/unit/multiclass/MulticlassLibLinear_unittest.cc index ee73eb5966a..54d33d51508 100644 --- a/tests/unit/multiclass/MulticlassLibLinear_unittest.cc +++ b/tests/unit/multiclass/MulticlassLibLinear_unittest.cc @@ -16,14 +16,15 @@ TEST(MulticlassLibLinearTest,train_and_apply) SGMatrix matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - matrix_test(j, i) = m_rng->std_normal_distrib(); + matrix(j, i) = dist(prng); + matrix_test(j, i) = dist(prng); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/tests/unit/multiclass/tree/RandomCARTree_unittest.cc b/tests/unit/multiclass/tree/RandomCARTree_unittest.cc index 34ff9732923..2099c77ebc6 100644 --- a/tests/unit/multiclass/tree/RandomCARTree_unittest.cc +++ b/tests/unit/multiclass/tree/RandomCARTree_unittest.cc @@ -191,7 +191,7 @@ TEST(RandomCARTree, classify_nominal) EXPECT_EQ(0.0,res_vector[1]); EXPECT_EQ(0.0,res_vector[2]); EXPECT_EQ(1.0,res_vector[3]); - EXPECT_EQ(0.0,res_vector[4]); + EXPECT_EQ(1.0, res_vector[4]); SG_UNREF(test_feats); SG_UNREF(result); diff --git a/tests/unit/multiclass/tree/RandomForest_unittest.cc b/tests/unit/multiclass/tree/RandomForest_unittest.cc index 7cdab87e262..5b7c16c2522 100644 --- a/tests/unit/multiclass/tree/RandomForest_unittest.cc +++ b/tests/unit/multiclass/tree/RandomForest_unittest.cc @@ -112,7 +112,7 @@ TEST_F(RandomForest, classify_nominal_test) EXPECT_EQ(0.0, res_vector[4]); CMulticlassAccuracy* eval=new CMulticlassAccuracy(); - EXPECT_NEAR(0.571428, c->get_oob_error(eval), 1e-6); + EXPECT_NEAR(0.78571428, c->get_oob_error(eval), 1e-6); SG_UNREF(result); SG_UNREF(c); @@ -146,7 +146,7 @@ TEST_F(RandomForest, classify_non_nominal_test) EXPECT_EQ(0.0, res_vector[4]); CMulticlassAccuracy* eval=new CMulticlassAccuracy(); - EXPECT_NEAR(0.571428, c->get_oob_error(eval), 1e-6); + EXPECT_NEAR(0.78571428, c->get_oob_error(eval), 1e-6); SG_UNREF(result); SG_UNREF(c); @@ -205,9 +205,12 @@ TEST_F(RandomForest, score_consistent_with_binary_trivial_data) SGMatrix data_B(1, num_train, false); + auto prng = get_prng(); + std::uniform_int_distribution dist_05(0, 5); + std::uniform_int_distribution dist_510(5, 10); for (auto i = 0; i < num_train; ++i) { - data_B(0, i) = i < 5 ? CMath::random(0, 5) : CMath::random(5, 10); + data_B(0, i) = i < 5 ? dist_05(prng) : dist_510(prng); } CDenseFeatures* features_train = new CDenseFeatures(data_B); @@ -217,10 +220,11 @@ TEST_F(RandomForest, score_consistent_with_binary_trivial_data) CMulticlassLabels* labels_train = new CMulticlassLabels(lab); SGMatrix test_data(1, num_test, false); - + std::uniform_int_distribution dist_04(0, 4); + std::uniform_int_distribution dist_610(6, 10); for (auto i = 0; i < num_test; ++i) { - test_data(0, i) = i < 5 ? CMath::random(0, 4) : CMath::random(6, 10); + test_data(0, i) = i < 5 ? 
dist_04(prng) : dist_610(prng); } CDenseFeatures* features_test = diff --git a/tests/unit/neuralnets/Autoencoder_unittest.cc b/tests/unit/neuralnets/Autoencoder_unittest.cc index af6572edd81..265e102dc48 100644 --- a/tests/unit/neuralnets/Autoencoder_unittest.cc +++ b/tests/unit/neuralnets/Autoencoder_unittest.cc @@ -44,7 +44,8 @@ using namespace shogun; TEST(Autoencoder, train) { - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-1.0, 1.0); int32_t num_features = 10; int32_t num_examples = 100; @@ -52,7 +53,7 @@ TEST(Autoencoder, train) SGMatrix data(num_features, num_examples); for (int32_t i=0; irandom(-1.0, 1.0); + data[i] = dist(prng); CAutoencoder ae(num_features, new CNeuralRectifiedLinearLayer(num_hid)); diff --git a/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc b/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc index b689a85c173..ef4cb83a7ed 100644 --- a/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc +++ b/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc @@ -307,11 +307,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) const int32_t map_index = 1; const int32_t num_maps = 3; - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -319,7 +321,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) // two channels SGMatrix x2(2*w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist_uniform(prng); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -335,7 +337,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); input2->compute_activations(x2); @@ -399,11 +401,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) int32_t w_out = w/stride_x; int32_t h_out = h/stride_y; - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -411,7 +415,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) // two channels SGMatrix x2(2*w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist_uniform(prng); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -427,7 +431,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) CConvolutionalFeatureMap map(w,h,rx,ry,stride_x,stride_y,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); input2->compute_activations(x2); @@ -484,11 +488,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_logistic) const int32_t ry = 1; const int32_t b = 2; - auto m_rng = std::unique_ptr(new 
CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -502,7 +508,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_logistic) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,0, CMAF_LOGISTIC); SGVector params(1+(2*rx+1)*(2*ry+1)); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); @@ -558,11 +564,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_rectified_linear) const int32_t ry = 1; const int32_t b = 2; - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -576,7 +584,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_rectified_linear) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,0, CMAF_RECTIFIED_LINEAR); SGVector params(1+(2*rx+1)*(2*ry+1)); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); @@ -634,7 +642,9 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) const int32_t map_index = 0; const int32_t num_maps = 1; - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); CNeuralLinearLayer* input1 = new CNeuralLinearLayer (w*h); input1->set_batch_size(b); @@ -644,10 +654,10 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) input2->set_batch_size(b); for (int32_t i=0; iget_num_neurons()*b; i++) - input1->get_activations()[i] = m_rng->random(-10.0, 10.0); + input1->get_activations()[i] = dist_uniform(prng); for (int32_t i=0; iget_num_neurons()*b; i++) - input2->get_activations()[i] = m_rng->random(-10.0, 10.0); + input2->get_activations()[i] = dist_uniform(prng); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(input1); @@ -660,7 +670,7 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); SGMatrix A(num_maps*w*h,b); A.zero(); diff --git a/tests/unit/neuralnets/DeepAutoencoder_unittest.cc b/tests/unit/neuralnets/DeepAutoencoder_unittest.cc index abf15e1d2bf..2a30ac4b132 100644 --- a/tests/unit/neuralnets/DeepAutoencoder_unittest.cc +++ b/tests/unit/neuralnets/DeepAutoencoder_unittest.cc @@ -44,14 +44,15 @@ using namespace shogun; TEST(DeepAutoencoder, pre_train) { - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-1.0, 1.0); int32_t num_features = 10; int32_t num_examples = 100; SGMatrix data(num_features, num_examples); for (int32_t i=0; irandom(-1.0, 1.0); + data[i] = dist(prng); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(num_features)); @@ -83,7 +84,8 @@ TEST(DeepAutoencoder, pre_train) TEST(DeepAutoencoder, convert_to_neural_network) { - auto m_rng = 
std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(10)); @@ -98,7 +100,7 @@ TEST(DeepAutoencoder, convert_to_neural_network) SGMatrix x(10, 3); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); CDenseFeatures f(x); diff --git a/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc b/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc index 63cb3ca80de..c3573ea6fd6 100644 --- a/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc +++ b/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc @@ -41,7 +41,9 @@ using namespace shogun; TEST(DeepBeliefNetwork, convert_to_neural_network) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); CDeepBeliefNetwork dbn(5, RBMVUT_BINARY); dbn.add_hidden_layer(6); @@ -54,7 +56,7 @@ TEST(DeepBeliefNetwork, convert_to_neural_network) SGMatrix x(5, 3); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); CDenseFeatures f(x); diff --git a/tests/unit/neuralnets/NeuralInputLayer_unittest.cc b/tests/unit/neuralnets/NeuralInputLayer_unittest.cc index e5c9026654f..2d58676cdd5 100644 --- a/tests/unit/neuralnets/NeuralInputLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralInputLayer_unittest.cc @@ -38,10 +38,12 @@ using namespace shogun; TEST(NeuralInputLayer, compute_activations) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_int_distribution dist(-10, 10); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer layer(5, 4); layer.set_batch_size(x.num_cols); diff --git a/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc index 5ff137870c0..53487014d47 100644 --- a/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc @@ -45,13 +45,15 @@ using namespace shogun; */ TEST(NeuralLeakyRectifiedLinearLayer, compute_activations) { + set_global_seed(100); CNeuralLeakyRectifiedLinearLayer layer(9); float64_t alpha = 0.02; // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_int_distribution dist(-10, 10); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); diff --git a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc index a3ca90f227a..29093929859 100644 --- a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc @@ -46,21 +46,22 @@ using namespace shogun; TEST(NeuralLinearLayer, compute_activations) { CNeuralLinearLayer layer(9); - + set_global_seed(100); // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x1(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist(prng); - CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); + CNeuralInputLayer* input1 = new CNeuralInputLayer(x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = 
dist(prng); - CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); + CNeuralInputLayer* input2 = new CNeuralInputLayer(x2.num_rows); input2->set_batch_size(x2.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -120,19 +121,21 @@ TEST(NeuralLinearLayer, compute_activations) */ TEST(NeuralLinearLayer, compute_error) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x1(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist(prng); - CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); + CNeuralInputLayer* input1 = new CNeuralInputLayer(x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); - CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); + CNeuralInputLayer* input2 = new CNeuralInputLayer(x2.num_rows); input2->set_batch_size(x2.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -144,8 +147,9 @@ TEST(NeuralLinearLayer, compute_error) input_indices[1] = 1; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -178,10 +182,12 @@ TEST(NeuralLinearLayer, compute_error) */ TEST(NeuralLinearLayer, compute_local_gradients) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x.num_rows); input1->set_batch_size(x.num_cols); @@ -193,8 +199,9 @@ TEST(NeuralLinearLayer, compute_local_gradients) input_indices[0] = 0; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -240,17 +247,18 @@ TEST(NeuralLinearLayer, compute_local_gradients) */ TEST(NeuralLinearLayer, compute_parameter_gradients_output) { + set_global_seed(100); SGMatrix x1(12,3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); for (int32_t i=0; irandom(-10.0, 10.0); - + x1[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -264,8 +272,9 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_output) input_indices[1] = 1; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -319,16 +328,17 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_output) TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) { SGMatrix x1(12,3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); CNeuralInputLayer* 
input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -349,8 +359,9 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) input_indices_out[0] = 2; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the hidden layer layer_hid->initialize_neural_layer(layers, input_indices_hid); @@ -407,7 +418,7 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) // compare for (int32_t i=0; i(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); - CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); + CNeuralInputLayer* input = new CNeuralInputLayer(x.num_rows); input->set_batch_size(x.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -108,13 +109,14 @@ TEST(NeuralLogisticLayer, compute_local_gradients) { CNeuralLogisticLayer layer(9); - - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); - CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); + CNeuralInputLayer* input = new CNeuralInputLayer(x.num_rows); input->set_batch_size(x.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -131,8 +133,9 @@ TEST(NeuralLogisticLayer, compute_local_gradients) layer.set_batch_size(x.num_cols); SGMatrix y(layer.get_num_neurons(), x.num_cols); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // compute the layer's local gradients input->compute_activations(x); diff --git a/tests/unit/neuralnets/NeuralNetwork_unittest.cc b/tests/unit/neuralnets/NeuralNetwork_unittest.cc index 2c136c2dd53..fe54d83a2ce 100644 --- a/tests/unit/neuralnets/NeuralNetwork_unittest.cc +++ b/tests/unit/neuralnets/NeuralNetwork_unittest.cc @@ -250,7 +250,7 @@ TEST(NeuralNetwork, backpropagation_convolutional) /** tests a neural network on the binary XOR problem */ TEST(NeuralNetwork, binary_classification) { - set_global_seed(10); + set_global_seed(100); SGMatrix inputs_matrix(2,4); SGVector targets_vector(4); diff --git a/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc index 9010599daa0..e54fdab9f5c 100644 --- a/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc @@ -45,13 +45,15 @@ using namespace shogun; */ TEST(NeuralRectifiedLinearLayer, compute_activations) { + set_global_seed(100); CNeuralRectifiedLinearLayer layer(9); // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -108,19 +110,21 @@ TEST(NeuralRectifiedLinearLayer, compute_activations) */ TEST(NeuralRectifiedLinearLayer, compute_parameter_gradients_hidden) { + set_global_seed(100); SGMatrix x1(12,3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); for (int32_t i=0; irandom(-10.0, 
10.0); + x1[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); - CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); + CNeuralInputLayer* input2 = new CNeuralInputLayer(x2.num_rows); input2->set_batch_size(x2.num_cols); // initialize hidden the layer @@ -139,8 +143,9 @@ TEST(NeuralRectifiedLinearLayer, compute_parameter_gradients_hidden) input_indices_out[0] = 2; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the hidden layer layer_hid->initialize_neural_layer(layers, input_indices_hid); diff --git a/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc b/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc index e2d6f081311..de1e6c98136 100644 --- a/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc @@ -45,13 +45,15 @@ using namespace shogun; */ TEST(NeuralSoftmaxLayer, compute_activations) { + set_global_seed(100); CNeuralSoftmaxLayer layer(9); // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -117,12 +119,14 @@ TEST(NeuralSoftmaxLayer, compute_activations) */ TEST(NeuralSoftmaxLayer, compute_error) { + set_global_seed(100); CNeuralSoftmaxLayer layer(9); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -140,9 +144,10 @@ TEST(NeuralSoftmaxLayer, compute_error) layer.initialize_parameters(params, param_regularizable, 1.0); layer.set_batch_size(x.num_cols); + std::uniform_real_distribution dist_s(0.0, 1.0); SGMatrix y(layer.get_num_neurons(), x.num_cols); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // make sure y is in the form of a probability distribution for (int32_t j=0; j(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); - CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); + CNeuralInputLayer* input = new CNeuralInputLayer(x.num_rows); input->set_batch_size(x.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -201,9 +208,10 @@ TEST(NeuralSoftmaxLayer, compute_local_gradients) layer.initialize_parameters(params, param_regularizable, 1.0); layer.set_batch_size(x.num_cols); + std::uniform_real_distribution dist_s(0.0, 1.0); SGMatrix y(layer.get_num_neurons(), x.num_cols); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // make sure y is in the form of a probability distribution for (int32_t j=0; j(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); int32_t num_visible = 15; int32_t num_hidden = 6; @@ -126,7 +127,7 @@ TEST(RBM, free_energy_gradients) SGMatrix V(num_visible, batch_size); for (int32_t i=0; irandom_64() < 0.7; + V[i] = prng() < 0.7; SGVector gradients(rbm.get_num_parameters()); rbm.free_energy_gradients(V, gradients); diff 
--git a/tests/unit/preprocessor/Preprocessor_unittest.cc b/tests/unit/preprocessor/Preprocessor_unittest.cc index 21f041fddf7..c7327258b18 100644 --- a/tests/unit/preprocessor/Preprocessor_unittest.cc +++ b/tests/unit/preprocessor/Preprocessor_unittest.cc @@ -45,9 +45,10 @@ TEST(Preprocessor, dense_apply) const index_t dim=2; const index_t size=4; SGMatrix data(dim, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* features=new CDenseFeatures(data); CDensePreprocessor* preproc=new CNormOne(); @@ -70,16 +71,19 @@ TEST(Preprocessor, string_apply) const index_t min_string_length=max_string_length/2; SGStringList strings(num_strings, max_string_length); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_len( + min_string_length, max_string_length); + std::uniform_int_distribution dist_asc('A', 'Z'); for (index_t i=0; irandom(min_string_length, max_string_length); + index_t len = dist_len(prng); SGString current(len); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); + current.string[j] = dist_asc(prng); strings.strings[i]=current; } diff --git a/tests/unit/regression/krrnystrom_unittest.cc b/tests/unit/regression/krrnystrom_unittest.cc index 21c58be35cc..b53b46c2afc 100644 --- a/tests/unit/regression/krrnystrom_unittest.cc +++ b/tests/unit/regression/krrnystrom_unittest.cc @@ -54,11 +54,12 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_all_columns) /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); SGMatrix test_dat(num_features, num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; inormal_random(0, 1.0); + lab.vector[i] = i + dist(prng); train_dat.matrix[i]=i; test_dat.matrix[i]=i; } @@ -114,7 +115,8 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_column_subset) /* training label data */ SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -123,7 +125,7 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_column_subset) { /* labels are linear plus noise */ float64_t point=(float64_t)i*10/num_vectors; - lab.vector[i] = point + m_rng->normal_random(0, 1.0); + lab.vector[i] = point + dist(prng); train_dat.matrix[i]=point; test_dat.matrix[i]=point; } diff --git a/tests/unit/regression/lars_unittest.cc b/tests/unit/regression/lars_unittest.cc index 4e6ed514f69..de9f20e4d22 100644 --- a/tests/unit/regression/lars_unittest.cc +++ b/tests/unit/regression/lars_unittest.cc @@ -377,13 +377,14 @@ TEST(LeastAngleRegression, cholesky_insert) SGVector vec(num_vec); vec.random(0.0,1.0); Map map_vec(vec.vector, vec.size()); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; irandom(0.0, 1.0); + mat(i, j) = dist(prng); matnew(i,j)=mat(i,j); } } @@ -415,11 +416,12 @@ TEST(LeastAngleRegression, ols_equivalence) { int32_t n_feat=25, n_vec=100; SGMatrix data(n_feat, n_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; irandom(0.0, 1.0); + data(i, j) = dist(prng); } SGVector 
lab=SGVector(n_vec); diff --git a/tests/unit/statistical_testing/KernelSelection_unittest.cc b/tests/unit/statistical_testing/KernelSelection_unittest.cc index f4433a7a06c..6bed631b700 100644 --- a/tests/unit/statistical_testing/KernelSelection_unittest.cc +++ b/tests/unit/statistical_testing/KernelSelection_unittest.cc @@ -105,7 +105,7 @@ TEST(KernelSelectionMaxMMD, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.0625, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } TEST( @@ -320,7 +320,7 @@ TEST(KernelSelectionMaxCrossValidation, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.125, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } TEST(KernelSelectionMaxCrossValidation, linear_time_single_kernel_dense) @@ -389,7 +389,7 @@ TEST(KernelSelectionMedianHeuristic, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 1.0, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } TEST(KernelSelectionMedianHeuristic, linear_time_single_kernel_dense) diff --git a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc index 6f993881a06..418212c5b4e 100644 --- a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc +++ b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc @@ -354,7 +354,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_biased_full) // assert against local machine computed result mmd->set_statistic_type(ST_BIASED_FULL); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.8, 1E-10); + EXPECT_NEAR(p_value, 0.0, 1E-10); } TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full) @@ -393,7 +393,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_FULL); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.8, 1E-10); + EXPECT_NEAR(p_value, 0.0, 1E-10); } TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete) @@ -432,7 +432,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_INCOMPLETE); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.6, 1E-10); + EXPECT_NEAR(p_value, 0.0, 1E-10); } TEST(QuadraticTimeMMD, perform_test_spectrum) @@ -475,7 +475,7 @@ TEST(QuadraticTimeMMD, perform_test_spectrum) // assert against local machine computed result mmd->set_statistic_type(ST_BIASED_FULL); float64_t p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_spectrum, 0.8, 1E-10); + EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10); // unbiased case @@ -483,7 +483,7 @@ TEST(QuadraticTimeMMD, perform_test_spectrum) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_FULL); p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_spectrum, 0.8, 1E-10); + EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10); } TEST(QuadraticTimeMMD, precomputed_vs_nonprecomputed) @@ -635,7 +635,7 @@ TEST(QuadraticTimeMMD, 
multikernel_compute_test_power) ASSERT_EQ(test_power_multiple.size(), test_power_single.size()); for (auto i=0; i(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); SGMatrix data(dim, num_vec); for (auto i=0; irandom(0.0, 0.1); + data.matrix[i] = dist(prng); auto feats=some >(data); auto kernel=some(10, 2*sigma*sigma); diff --git a/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc b/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc index ddb8b205301..15c0d616476 100644 --- a/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc +++ b/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc @@ -103,13 +103,13 @@ TEST(PermutationMMD, biased_full_single_kernel) Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); set_global_seed(12345); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds, prng.get()); + CMath::permute(perminds, prng); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); @@ -118,11 +118,10 @@ TEST(PermutationMMD, biased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - prng->set_seed(12345); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); @@ -184,13 +183,13 @@ TEST(PermutationMMD, unbiased_full_single_kernel) set_global_seed(12345); Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds, prng.get()); + CMath::permute(perminds, prng); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); @@ -199,11 +198,10 @@ TEST(PermutationMMD, unbiased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - prng->set_seed(12345); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); @@ -265,27 +263,26 @@ TEST(PermutationMMD, unbiased_incomplete_single_kernel) Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); set_global_seed(12345); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGVector result_2(num_null_samples); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds, prng.get()); + CMath::permute(perminds, prng); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); } - prng->set_seed(12345); SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); diff --git a/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc 
b/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc index 211da9a9ee1..73643686da0 100644 --- a/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc +++ b/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc @@ -20,11 +20,12 @@ TEST(HierarchicalMultilabelModel, get_joint_feature_vector_1) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -80,11 +81,12 @@ TEST(HierarchicalMultilabelModel, get_joint_feature_vector_2) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -207,11 +209,12 @@ TEST(HierarchicalMultilabelModel, argmax) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist_p(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -242,9 +245,10 @@ TEST(HierarchicalMultilabelModel, argmax) SGVector w(model->get_dim()); + std::uniform_int_distribution dist_q(-1, 1); for (index_t i = 0; i < w.vlen; i++) { - w[i] = m_rng->random(-1, 1); + w[i] = dist_q(prng); } CResultSet * ret_1 = model->argmax(w, 0, true); @@ -319,11 +323,12 @@ TEST(HierarchicalMultilabelModel, argmax_leaf_nodes_mandatory) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist_p(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -354,9 +359,10 @@ TEST(HierarchicalMultilabelModel, argmax_leaf_nodes_mandatory) SGVector w(model->get_dim()); + std::uniform_int_distribution dist_q(-1, 1); for (index_t i = 0; i < w.vlen; i++) { - w[i] = m_rng->random(-1, 1); + w[i] = dist_q(prng); } CResultSet * ret_1 = model->argmax(w, 0, true); diff --git a/tests/unit/structure/MultilabelCLRModel_unittest.cc b/tests/unit/structure/MultilabelCLRModel_unittest.cc index 312c909e9a1..b2d8e32039d 100644 --- a/tests/unit/structure/MultilabelCLRModel_unittest.cc +++ b/tests/unit/structure/MultilabelCLRModel_unittest.cc @@ -21,10 +21,11 @@ using namespace shogun; TEST(MultilabelCLRModel, get_joint_feature_vector_1) { SGMatrix feats(DIMS, NUM_SAMPLES); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -70,10 +71,11 @@ TEST(MultilabelCLRModel, get_joint_feature_vector_1) TEST(MultilabelCLRModel, get_joint_feature_vector_2) { SGMatrix feats(DIMS, NUM_SAMPLES); - auto m_rng = std::unique_ptr(new CRandom()); + auto 
prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -176,10 +178,11 @@ TEST(MultilabelCLRModel, delta_loss) TEST(MultilabelCLRModel, argmax) { SGMatrix feats(DIMS, NUM_SAMPLES); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(-100, 100); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist_p(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -201,9 +204,10 @@ TEST(MultilabelCLRModel, argmax) SGVector w(model->get_dim()); + std::uniform_int_distribution dist_q(-1, 1); for (index_t i = 0; i < w.vlen; i++) { - w[i] = m_rng->random(-1, 1); + w[i] = dist_q(prng); } CResultSet * ret_1 = model->argmax(w, 0, true); diff --git a/tests/unit/structure/PrimalMosekSOSVM_unittest.cc b/tests/unit/structure/PrimalMosekSOSVM_unittest.cc index f6ec0316e5c..aada3866bb8 100644 --- a/tests/unit/structure/PrimalMosekSOSVM_unittest.cc +++ b/tests/unit/structure/PrimalMosekSOSVM_unittest.cc @@ -18,7 +18,8 @@ using namespace shogun; TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds) { int32_t num_samples = 10; - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); // define factor type SGVector card(2); @@ -53,8 +54,8 @@ TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds) // add factors SGVector data1(2); - data1[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; - data1[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data1[0] = 2.0 * dist(prng) - 1.0; + data1[1] = 2.0 * dist(prng) - 1.0; SGVector var_index1(2); var_index1[0] = 0; var_index1[1] = 1; @@ -62,8 +63,8 @@ TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds) fg->add_factor(fac1); SGVector data2(2); - data2[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; - data2[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data2[0] = 2.0 * dist(prng) - 1.0; + data2[1] = 2.0 * dist(prng) - 1.0; SGVector var_index2(2); var_index2[0] = 1; var_index2[1] = 2;
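Every hunk above applies the same mechanical substitution: drop the per-test CRandom instance and draw values from an explicit standard-library distribution fed by a shared engine, with set_global_seed() where a fixed seed was used before. The following is a minimal, self-contained sketch of that pattern, not part of the patch; it assumes std::mt19937_64 as a stand-in for whatever engine Shogun's get_prng() returns (its definition is not shown in this diff), and seeds it directly where the tests call set_global_seed().

// Sketch only: std::mt19937_64 stands in for the engine behind get_prng().
#include <cstdint>
#include <iostream>
#include <random>

int main()
{
    // Old pattern (removed by this patch):
    //   auto m_rng = std::unique_ptr<CRandom>(new CRandom(17));
    //   float64_t u = m_rng->random(0.0, 1.0);
    //   float64_t g = m_rng->std_normal_distrib();

    std::mt19937_64 prng(17); // plays the role of set_global_seed(17) + get_prng()

    std::uniform_real_distribution<double> dist_real(0.0, 1.0);
    std::uniform_int_distribution<int32_t> dist_int(-100, 100);
    std::normal_distribution<double> dist_normal(0.0, 1.0);

    std::cout << dist_real(prng) << ' '     // replaces m_rng->random(0.0, 1.0)
              << dist_int(prng) << ' '      // replaces m_rng->random(-100, 100)
              << dist_normal(prng) << '\n'; // replaces m_rng->std_normal_distrib()
    return 0;
}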