From 2d3324e5dd25e172f83f5b7712992555914b05b8 Mon Sep 17 00:00:00 2001 From: Tiramisu 1993 Date: Wed, 19 Jul 2017 23:24:14 +0800 Subject: [PATCH 1/9] remove-random-functions-in-CMath --- benchmarks/hasheddoc_benchmarks.cpp | 5 +- benchmarks/rf_feats_benchmark.cpp | 3 +- benchmarks/rf_feats_kernel_comp.cpp | 5 +- benchmarks/sparse_test.cpp | 2 +- .../libshogun/classifier_larank.cpp | 5 +- .../libshogun/classifier_latent_svm.cpp | 6 +- .../classifier_libsvm_probabilities.cpp | 43 ++-- ...sifier_mkl_svmlight_modelselection_bug.cpp | 3 +- ...ght_string_features_precomputed_kernel.cpp | 10 +- .../libshogun/clustering_kmeans.cpp | 3 +- .../libshogun/converter_jade_bss.cpp | 6 +- ...uation_cross_validation_classification.cpp | 5 +- ...ion_cross_validation_locked_comparison.cpp | 4 +- ...on_cross_validation_mkl_weight_storage.cpp | 6 +- ...uation_cross_validation_multiclass_mkl.cpp | 4 +- ...evaluation_cross_validation_regression.cpp | 4 +- .../libshogun/features_subset_labels.cpp | 3 +- .../features_subset_simple_features.cpp | 5 +- .../libshogun/hashed_features_example.cpp | 4 +- .../undocumented/libshogun/kernel_custom.cpp | 2 +- .../libshogun/kernel_custom_kernel.cpp | 2 +- .../libshogun/kernel_machine_train_locked.cpp | 4 +- .../libshogun/library_serialization.cpp | 4 +- ...lection_combined_kernel_sub_parameters.cpp | 3 +- .../modelselection_grid_search_kernel.cpp | 3 +- .../modelselection_grid_search_krr.cpp | 4 +- .../modelselection_grid_search_mkl.cpp | 3 +- ...elselection_grid_search_multiclass_svm.cpp | 3 +- ...delselection_grid_search_string_kernel.cpp | 6 +- .../libshogun/neuralnets_basic.cpp | 2 +- .../libshogun/neuralnets_convolutional.cpp | 2 +- .../libshogun/neuralnets_deep_autoencoder.cpp | 3 +- .../neuralnets_deep_belief_network.cpp | 6 +- .../libshogun/parameter_iterate_float64.cpp | 4 +- .../libshogun/parameter_iterate_sgobject.cpp | 7 +- .../preprocessor_randomfouriergauss.cpp | 6 +- .../libshogun/random_fourier_features.cpp | 5 +- ...gression_gaussian_process_simple_exact.cpp | 4 +- .../libshogun/regression_libsvr.cpp | 4 +- .../serialization_multiclass_labels.cpp | 3 +- .../undocumented/libshogun/so_fg_model.cpp | 12 +- .../undocumented/libshogun/so_multiclass.cpp | 9 +- .../libshogun/so_multiclass_BMRM.cpp | 10 +- .../splitting_LOO_crossvalidation.cpp | 8 +- .../splitting_standard_crossvalidation.cpp | 7 +- .../splitting_stratified_crossvalidation.cpp | 9 +- .../libshogun/streaming_from_dense.cpp | 5 +- src/gpl/shogun/classifier/svm/QPBSVMLib.cpp | 6 +- src/gpl/shogun/classifier/svm/WDSVMOcas.cpp | 13 +- src/shogun/base/DynArray.h | 6 +- src/shogun/base/SGObject.cpp | 8 +- src/shogun/base/SGObject.h | 5 - src/shogun/base/init.cpp | 44 ++++- src/shogun/base/init.h | 13 ++ src/shogun/classifier/svm/GNPPLib.cpp | 4 +- src/shogun/classifier/svm/LibLinear.cpp | 8 +- src/shogun/classifier/vw/VwRegressor.cpp | 2 +- src/shogun/clustering/GMM.cpp | 10 +- src/shogun/clustering/KMeans.cpp | 2 +- src/shogun/clustering/KMeansBase.cpp | 14 +- src/shogun/clustering/KMeansMiniBatch.cpp | 4 +- src/shogun/converter/ica/FastICA.cpp | 2 +- src/shogun/distributions/Gaussian.cpp | 2 +- src/shogun/distributions/HMM.cpp | 43 ++-- src/shogun/features/DataGenerator.cpp | 17 +- .../features/RandomFourierDotFeatures.cpp | 6 +- .../generators/GaussianBlobsDataGenerator.cpp | 8 +- .../generators/MeanShiftDataGenerator.cpp | 2 +- src/shogun/kernel/PyramidChi2.cpp | 5 +- src/shogun/lib/SGVector.cpp | 3 +- src/shogun/lib/tapkee/defines/random.hpp | 7 +- src/shogun/lib/tapkee/tapkee_shogun.cpp | 23 ++- 
src/shogun/machine/BaggingMachine.cpp | 4 +- src/shogun/mathematics/Math.cpp | 2 - src/shogun/mathematics/Math.h | 187 ++---------------- src/shogun/mathematics/Random.cpp | 59 +++--- src/shogun/mathematics/Random.h | 42 +++- src/shogun/mathematics/Statistics.cpp | 9 +- src/shogun/mathematics/ajd/QDiag.cpp | 3 +- .../ModelSelectionParameters.cpp | 10 +- src/shogun/multiclass/LaRank.cpp | 2 +- src/shogun/multiclass/LaRank.h | 83 ++++---- .../ecoc/ECOCDiscriminantEncoder.cpp | 67 ++++--- .../ecoc/ECOCRandomDenseEncoder.cpp | 112 +++++------ .../ecoc/ECOCRandomSparseEncoder.cpp | 104 +++++----- .../tree/RandomConditionalProbabilityTree.cpp | 2 +- src/shogun/neuralnets/DeepBeliefNetwork.cpp | 8 +- .../neuralnets/NeuralConvolutionalLayer.cpp | 8 +- src/shogun/neuralnets/NeuralInputLayer.cpp | 2 +- src/shogun/neuralnets/NeuralLayer.cpp | 2 +- src/shogun/neuralnets/NeuralLinearLayer.cpp | 2 +- src/shogun/neuralnets/NeuralNetwork.cpp | 4 +- src/shogun/neuralnets/RBM.cpp | 14 +- .../liblinear/shogun_liblinear.cpp | 4 +- .../RandomFourierGaussPreproc.cpp | 6 +- .../regression/svr/LibLinearRegression.cpp | 2 +- .../statistical_testing/QuadraticTimeMMD.cpp | 2 +- .../internals/DataFetcher.cpp | 2 +- .../internals/MaxCrossValidation.cpp | 3 - src/shogun/structure/StochasticSOSVM.cpp | 4 - src/shogun/structure/TwoStateModel.cpp | 22 ++- .../transfer/multitask/LibLinearMTL.cpp | 2 +- tests/unit/base/SGObject_unittest.cc | 6 +- tests/unit/base/Serialization_unittest.cc | 5 +- .../unit/classifier/svm/LibLinear_unittest.cc | 4 +- tests/unit/classifier/svm/SVMOcas_unittest.cc | 2 +- tests/unit/converter/Isomap_unittest.cc | 3 +- .../distribution/MixtureModel_unittest.cc | 6 +- .../unit/environments/LinearTestEnvironment.h | 1 + .../environments/MultiLabelTestEnvironment.h | 1 + .../CrossValidation_multithread_unittest.cc | 5 +- .../evaluation/SplittingStrategy_unittest.cc | 30 +-- .../features/CombinedFeatures_unittest.cc | 6 +- tests/unit/features/DenseFeatures_unittest.cc | 11 +- .../features/HashedDenseFeatures_unittest.cc | 3 +- .../features/HashedDocDotFeatures_unittest.cc | 4 +- .../StreamingHashedDocDotFeatures_unittest.cc | 4 +- .../unit/features/StringFeatures_unittest.cc | 5 +- tests/unit/kernel/CustomKernel_unittest.cc | 9 +- tests/unit/kernel/Kernel_unittest.cc | 23 +-- .../SubsequenceStringKernel_unittest.cc | 9 +- tests/unit/lib/Memory_unittest.cc | 3 +- tests/unit/lib/SGMatrix_unittest.cc | 62 +++--- tests/unit/lib/SGVector_unittest.cc | 10 +- .../machine/StochasticGBMachine_unittest.cc | 2 +- tests/unit/mathematics/Math_unittest.cc | 14 +- tests/unit/mathematics/Random_unittest.cc | 7 +- tests/unit/mathematics/ajd/FFDiag_unittest.cc | 5 +- .../mathematics/ajd/JADiagOrth_unittest.cc | 4 +- tests/unit/mathematics/ajd/JADiag_unittest.cc | 5 +- .../unit/mathematics/ajd/JediDiag_unittest.cc | 5 +- tests/unit/mathematics/ajd/QDiag_unittest.cc | 5 +- tests/unit/mathematics/ajd/UWedge_unittest.cc | 4 +- .../linalg/LanczosEigenSolver_unittest.cc | 5 +- .../linalg/LogDetEstimator_unittest.cc | 4 +- .../multiclass/BaggingMachine_unittest.cc | 2 +- tests/unit/multiclass/KNN_unittest.cc | 2 +- tests/unit/multiclass/LaRank_unittest.cc | 5 +- .../MulticlassLibLinear_unittest.cc | 5 +- .../multiclass/MulticlassOCAS_unittest.cc | 2 +- .../unit/multiclass/tree/CARTree_unittest.cc | 2 +- .../multiclass/tree/RandomCARTree_unittest.cc | 2 +- .../multiclass/tree/RandomForest_unittest.cc | 12 +- tests/unit/neuralnets/Autoencoder_unittest.cc | 18 +- .../ConvolutionalFeatureMap_unittest.cc | 36 ++-- 
.../neuralnets/DeepAutoencoder_unittest.cc | 8 +- .../neuralnets/DeepBeliefNetwork_unittest.cc | 4 +- .../neuralnets/NeuralInputLayer_unittest.cc | 4 +- ...euralLeakyRectifiedLinearLayer_unittest.cc | 4 +- .../neuralnets/NeuralLinearLayer_unittest.cc | 34 ++-- .../NeuralLogisticLayer_unittest.cc | 10 +- .../unit/neuralnets/NeuralNetwork_unittest.cc | 20 +- .../NeuralRectifiedLinearLayer_unittest.cc | 11 +- .../neuralnets/NeuralSoftmaxLayer_unittest.cc | 16 +- tests/unit/neuralnets/RBM_unittest.cc | 11 +- .../preprocessor/Preprocessor_unittest.cc | 8 +- .../preprocessor/RescaleFeatures_unittest.cc | 2 +- tests/unit/regression/krrnystrom_unittest.cc | 6 +- tests/unit/regression/lars_unittest.cc | 10 +- .../KernelSelection_unittest.cc | 24 +-- .../LinearTimeMMD_unittest.cc | 12 +- .../QuadraticTimeMMD_unittest.cc | 31 +-- .../TwoDistributionTest_unittest.cc | 6 +- .../internals/CrossValidationMMD_unittest.cc | 38 +--- .../internals/PermutationMMD_unittest.cc | 34 ++-- .../WithinBlockPermutation_unittest.cc | 18 +- .../HierarchicalMultilabelModel_unittest.cc | 16 +- .../structure/MultilabelCLRModel_unittest.cc | 14 +- .../structure/PrimalMosekSOSVM_unittest.cc | 10 +- tests/unit/transfer/MALSAR_unittest.cc | 2 +- 170 files changed, 1014 insertions(+), 993 deletions(-) diff --git a/benchmarks/hasheddoc_benchmarks.cpp b/benchmarks/hasheddoc_benchmarks.cpp index d569723e5b8..06101765884 100644 --- a/benchmarks/hasheddoc_benchmarks.cpp +++ b/benchmarks/hasheddoc_benchmarks.cpp @@ -13,7 +13,6 @@ #include #include #include -#include using namespace shogun; @@ -27,13 +26,13 @@ int main(int argv, char** argc) int32_t num_strings = 5000; int32_t max_str_length = 10000; SGStringList string_list(num_strings, max_str_length); - + auto m_rng = std::unique_ptr(new CRandom()); SG_SPRINT("Creating features...\n"); for (index_t i=0; i(max_str_length); for (index_t j=0; jrandom('A', 'Z'); } SG_SPRINT("Features were created.\n"); diff --git a/benchmarks/rf_feats_benchmark.cpp b/benchmarks/rf_feats_benchmark.cpp index e93d72a8459..9daf777223d 100644 --- a/benchmarks/rf_feats_benchmark.cpp +++ b/benchmarks/rf_feats_benchmark.cpp @@ -16,6 +16,7 @@ int main(int argv, char** argc) int32_t dims[] = {100, 300, 600}; CTime* timer = new CTime(); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t d=0; d<3; d++) { int32_t num_dim = dims[d]; @@ -27,7 +28,7 @@ int main(int argv, char** argc) { for (index_t j=0; jrandom(0, 1) + 0.5; } } diff --git a/benchmarks/rf_feats_kernel_comp.cpp b/benchmarks/rf_feats_kernel_comp.cpp index a1acc106d6f..52bc49cf336 100644 --- a/benchmarks/rf_feats_kernel_comp.cpp +++ b/benchmarks/rf_feats_kernel_comp.cpp @@ -29,6 +29,7 @@ int main(int argv, char** argc) float64_t lin_C = 0.1; float64_t non_lin_C = 0.1; CPRCEvaluation* evaluator = new CPRCEvaluation(); + auto m_rng = std::unique_ptr(new CRandom()); CSqrtDiagKernelNormalizer* normalizer = new CSqrtDiagKernelNormalizer(true); SG_REF(normalizer); for (index_t d=0; d<4; d++) @@ -48,12 +49,12 @@ int main(int argv, char** argc) if ((i+j)%2==0) { labs[i] = -1; - mat(j,i) = CMath::random(0,1) + 0.5; + mat(j, i) = m_rng->random(0, 1) + 0.5; } else { labs[i] = 1; - mat(j,i) = CMath::random(0,1) - 0.5; + mat(j, i) = m_rng->random(0, 1) - 0.5; } } } diff --git a/benchmarks/sparse_test.cpp b/benchmarks/sparse_test.cpp index af5f728eedc..1f637f00d75 100644 --- a/benchmarks/sparse_test.cpp +++ b/benchmarks/sparse_test.cpp @@ -115,7 +115,7 @@ int main(int argc, char** argv) v.set_const(1.0); Map map_v(v.vector, v.vlen); CTime time; - 
CMath::init_random(17); + set_global_seed(17); SG_SPRINT("time\tshogun (s)\teigen3 (s)\n\n"); for (index_t t=0; t matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib(); + matrix_test(j, i) = m_rng->std_normal_distrib(); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/examples/undocumented/libshogun/classifier_latent_svm.cpp b/examples/undocumented/libshogun/classifier_latent_svm.cpp index c39b1faadc4..38e3039fa6a 100644 --- a/examples/undocumented/libshogun/classifier_latent_svm.cpp +++ b/examples/undocumented/libshogun/classifier_latent_svm.cpp @@ -110,7 +110,7 @@ static void read_dataset(char* fname, CLatentFeatures*& feats, CLatentLabels*& l SG_REF(labels); CBinaryLabels* ys = new CBinaryLabels(num_examples); - + auto m_rng = std::unique_ptr(new CRandom()); feats = new CLatentFeatures(num_examples); SG_REF(feats); @@ -146,8 +146,8 @@ static void read_dataset(char* fname, CLatentFeatures*& feats, CLatentLabels*& l height = atoi(last_pchar); /* create latent label */ - int x = CMath::random(0, width-1); - int y = CMath::random(0, height-1); + int x = m_rng->random(0, width - 1); + int y = m_rng->random(0, height - 1); CBoundingBox* bb = new CBoundingBox(x,y); labels->add_latent_label(bb); diff --git a/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp b/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp index e1a875bf9a9..ab96153d89f 100644 --- a/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp +++ b/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp @@ -10,27 +10,28 @@ using namespace shogun; //generates data points (of different classes) randomly void gen_rand_data(SGMatrix features, SGVector labels, float64_t distance) { - index_t num_samples=labels.vlen; - index_t dimensions=features.num_rows; - for (int32_t i=0; i(new CRandom()); + index_t num_samples = labels.vlen; + index_t dimensions = features.num_rows; + for (int32_t i = 0; i < num_samples; i++) + { + if (i < num_samples / 2) + { + labels[i] = -1.0; + for (int32_t j = 0; j < dimensions; j++) + features(j, i) = m_rng->random(0.0, 1.0) + distance; + } + else + { + labels[i] = 1.0; + for (int32_t j = 0; j < dimensions; j++) + features(j, i) = m_rng->random(0.0, 1.0) - distance; + } + } + labels.display_vector("labels"); + std::cout << std::endl; + features.display_matrix("features"); + std::cout << std::endl; } int main(int argc, char** argv) diff --git a/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp b/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp index e55aae42ebb..25ce3c9d0e6 100644 --- a/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp +++ b/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp @@ -68,8 +68,9 @@ void test() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; istd_normal_distrib(); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(); diff --git a/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp b/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp 
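
The hunks above all make the same mechanical change: the removed CMath statics (CMath::random(lo, hi), CMath::randn_double(), CMath::normal_random(mean, sigma)) become calls on a locally constructed CRandom, and CMath::init_random(seed) becomes either set_global_seed(seed) or a CRandom built with an explicit seed. A minimal sketch of that pattern follows; it is illustrative only, and the helper fill_uniform is hypothetical rather than a function from this patch.

#include <memory>
#include <shogun/lib/SGMatrix.h>
#include <shogun/mathematics/Random.h>

using namespace shogun;

// hypothetical helper showing the replacement pattern used in the examples
static void fill_uniform(SGMatrix<float64_t> mat)
{
    // before: mat(i, j) = CMath::random(0.0, 1.0);
    auto m_rng = std::unique_ptr<CRandom>(new CRandom());
    for (index_t j = 0; j < mat.num_cols; ++j)
    {
        for (index_t i = 0; i < mat.num_rows; ++i)
            mat(i, j) = m_rng->random(0.0, 1.0); // uniform draw from the instance
    }
}

Where an example needs Gaussian draws, the same object provides std_normal_distrib() and normal_random(mean, sigma), as the later hunks show.
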
index b03598b40f5..b081caebc76 100644 --- a/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp +++ b/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp @@ -30,24 +30,24 @@ void test_svmlight() float64_t p_x=0.5; // probability for class A float64_t mostly_prob=0.8; CDenseLabels* labels=new CBinaryLabels(num_train+num_test); - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); SGStringList data(num_train+num_test, max_length); for (index_t i=0; irandom(1, max_length); /* allocate string */ data.strings[i]=SGString(length); /* fill with elements and set label */ - if (p_xrandom(0.0, 1.0)) { labels->set_label(i, 1); for (index_t j=0; jrandom(0.0, 1.0) ? '0' : '1'; data.strings[i].string[j]=c; } } @@ -56,7 +56,7 @@ void test_svmlight() labels->set_label(i, -1); for (index_t j=0; jrandom(0.0, 1.0) ? '1' : '0'; data.strings[i].string[j]=c; } } diff --git a/examples/undocumented/libshogun/clustering_kmeans.cpp b/examples/undocumented/libshogun/clustering_kmeans.cpp index ea2889f10bb..4cfe9f68deb 100644 --- a/examples/undocumented/libshogun/clustering_kmeans.cpp +++ b/examples/undocumented/libshogun/clustering_kmeans.cpp @@ -39,6 +39,7 @@ int main(int argc, char **argv) int32_t dim_features=3; int32_t num_vectors_per_cluster=5; float64_t cluster_std_dev=2.0; + auto m_rng = std::unique_ptr(new CRandom()); /* build random cluster centers */ SGMatrix cluster_centers(dim_features, num_clusters); @@ -59,7 +60,7 @@ int main(int argc, char **argv) idx+=j; idx+=k*dim_features; float64_t entry=cluster_centers.matrix[i*dim_features+j]; - data.matrix[idx]=CMath::normal_random(entry, cluster_std_dev); + data.matrix[idx] = m_rng->normal_random(entry, cluster_std_dev); } } } diff --git a/examples/undocumented/libshogun/converter_jade_bss.cpp b/examples/undocumented/libshogun/converter_jade_bss.cpp index 8484cc449be..dd3f7fa9ca3 100644 --- a/examples/undocumented/libshogun/converter_jade_bss.cpp +++ b/examples/undocumented/libshogun/converter_jade_bss.cpp @@ -32,7 +32,7 @@ using namespace Eigen; void test() { // Generate sample data - CMath::init_random(0); + auto m_rng = std::unique_ptr(new CRandom(0)); int n_samples = 2000; VectorXd time(n_samples, true); time.setLinSpaced(n_samples,0,10); @@ -43,11 +43,11 @@ void test() { // Sin wave S(0,i) = sin(2*time[i]); - S(0,i) += 0.2*CMath::randn_double(); + S(0, i) += 0.2 * m_rng->std_normal_distrib(); // Square wave S(1,i) = sin(3*time[i]) < 0 ? 
-1 : 1; - S(1,i) += 0.2*CMath::randn_double(); + S(1, i) += 0.2 * m_rng->std_normal_distrib(); } // Standardize data diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp index afd41cfaa44..8f5b61b4a79 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp @@ -29,7 +29,7 @@ void test_cross_validation() /* data matrix dimensions */ index_t num_vectors=40; index_t num_features=5; - + auto m_rng = std::unique_ptr(new CRandom()); /* data means -1, 1 in all components, std deviation of 3 */ SGVector mean_1(num_features); SGVector mean_2(num_features); @@ -47,7 +47,8 @@ void test_cross_validation() for (index_t j=0; jnormal_random(mean, sigma); } } diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp index 69077d672ce..b8c381c522c 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp @@ -37,6 +37,7 @@ void test_cross_validation() SGVector::fill_vector(mean_1.vector, mean_1.vlen, -1.0); SGVector::fill_vector(mean_2.vector, mean_2.vlen, 1.0); float64_t sigma=1.5; + auto m_rng = std::unique_ptr(new CRandom()); /* fill data matrix around mean */ SGMatrix train_dat(num_features, num_vectors); @@ -45,7 +46,8 @@ void test_cross_validation() for (index_t j=0; jnormal_random(mean, sigma); } } diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp index 7bc3e500f91..4e3029b7242 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp @@ -28,7 +28,7 @@ void gen_rand_data(SGVector lab, SGMatrix feat, { index_t dims=feat.num_rows; index_t num=lab.vlen; - + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; i lab, SGMatrix feat, lab[i]=-1.0; for (int32_t j=0; jrandom(0.0, 1.0) + dist; } else { lab[i]=1.0; for (int32_t j=0; jrandom(0.0, 1.0) - dist; } } lab.display_vector("lab"); diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_multiclass_mkl.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_multiclass_mkl.cpp index 5d151d27aa1..cfb353e8ea3 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_multiclass_mkl.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_multiclass_mkl.cpp @@ -33,7 +33,7 @@ const char fname_labels[]="../data/label_train_multiclass.dat"; void test_multiclass_mkl_cv() { - CMath::init_random(12); + set_global_seed(12); /* dense features from matrix */ CCSVFile* feature_file = new CCSVFile(fname_feats); SGMatrix mat=SGMatrix(); @@ -87,7 +87,7 @@ void test_multiclass_mkl_cv() CMulticlassAccuracy* eval_crit=new CMulticlassAccuracy(); CStratifiedCrossValidationSplitting* splitting= new CStratifiedCrossValidationSplitting(labels, n_folds); - splitting->set_seed(12); + CCrossValidation *cross=new CCrossValidation(mkl, cfeats, labels, splitting, eval_crit); cross->set_autolock(false); diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp 
b/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp index abd7490a82f..287bf36d6db 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp @@ -33,6 +33,7 @@ void test_cross_validation() /* training label data */ SGVector lab(num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -40,8 +41,7 @@ void test_cross_validation() for (index_t i=0; inormal_random(0, 1.0); } /* training features */ diff --git a/examples/undocumented/libshogun/features_subset_labels.cpp b/examples/undocumented/libshogun/features_subset_labels.cpp index 4922694ab38..cf2b29a9992 100644 --- a/examples/undocumented/libshogun/features_subset_labels.cpp +++ b/examples/undocumented/libshogun/features_subset_labels.cpp @@ -24,7 +24,8 @@ const int32_t num_classes=3; void test() { - const int32_t num_subset_idx=CMath::random(1, num_labels); + auto m_rng = std::unique_ptr(new CRandom()); + const int32_t num_subset_idx = m_rng->random(1, num_labels); /* create labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); diff --git a/examples/undocumented/libshogun/features_subset_simple_features.cpp b/examples/undocumented/libshogun/features_subset_simple_features.cpp index 074cb0eccf6..459a953af3b 100644 --- a/examples/undocumented/libshogun/features_subset_simple_features.cpp +++ b/examples/undocumented/libshogun/features_subset_simple_features.cpp @@ -49,7 +49,8 @@ const int32_t dim_features=6; void test() { - const int32_t num_subset_idx=CMath::random(1, num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); + const int32_t num_subset_idx = m_rng->random(1, num_vectors); /* create feature data matrix */ SGMatrix data(dim_features, num_vectors); @@ -58,7 +59,7 @@ void test() for (index_t i=0; irandom(-5, 5); } /* create simple features */ diff --git a/examples/undocumented/libshogun/hashed_features_example.cpp b/examples/undocumented/libshogun/hashed_features_example.cpp index 0bc9c7bec4b..930c27e931d 100644 --- a/examples/undocumented/libshogun/hashed_features_example.cpp +++ b/examples/undocumented/libshogun/hashed_features_example.cpp @@ -12,12 +12,12 @@ int main() int32_t num_vectors = 5; int32_t dim = 20; - + auto m_rng = std::unique_ptr(new CRandom()); SGMatrix mat(dim, num_vectors); for (index_t v=0; vrandom(-dim, dim); } int32_t hashing_dim = 12; diff --git a/examples/undocumented/libshogun/kernel_custom.cpp b/examples/undocumented/libshogun/kernel_custom.cpp index 4e9f23b1f08..ce360e10446 100644 --- a/examples/undocumented/libshogun/kernel_custom.cpp +++ b/examples/undocumented/libshogun/kernel_custom.cpp @@ -38,7 +38,7 @@ void test_custom_kernel_subsets() { subset.range_fill(); CMath::permute(subset); -// subset.display_vector("permutation"); + // subset.display_vector("permutation"); features->add_subset(subset); k->init(features, features); l->add_row_subset(subset); diff --git a/examples/undocumented/libshogun/kernel_custom_kernel.cpp b/examples/undocumented/libshogun/kernel_custom_kernel.cpp index 02ab219c25e..c7f9c8560a5 100644 --- a/examples/undocumented/libshogun/kernel_custom_kernel.cpp +++ b/examples/undocumented/libshogun/kernel_custom_kernel.cpp @@ -36,7 +36,7 @@ void test_custom_kernel_subsets() { subset.range_fill(); CMath::permute(subset); -// subset.display_vector("permutation"); + // subset.display_vector("permutation"); features->add_subset(subset); 
k->init(features, features); l->add_row_subset(subset); diff --git a/examples/undocumented/libshogun/kernel_machine_train_locked.cpp b/examples/undocumented/libshogun/kernel_machine_train_locked.cpp index c930fdcb02a..4c69f4a528e 100644 --- a/examples/undocumented/libshogun/kernel_machine_train_locked.cpp +++ b/examples/undocumented/libshogun/kernel_machine_train_locked.cpp @@ -37,6 +37,7 @@ void test() SGVector::display_vector(mean_1.vector, mean_1.vlen, "mean 1"); SGVector::display_vector(mean_2.vector, mean_2.vlen, "mean 2"); + auto m_rng = std::unique_ptr(new CRandom()); /* fill data matrix around mean */ SGMatrix train_dat(num_features, num_vectors); @@ -45,7 +46,8 @@ void test() for (index_t j=0; jnormal_random(mean, sigma); } } diff --git a/examples/undocumented/libshogun/library_serialization.cpp b/examples/undocumented/libshogun/library_serialization.cpp index 7521126c4c3..d924f6920ba 100644 --- a/examples/undocumented/libshogun/library_serialization.cpp +++ b/examples/undocumented/libshogun/library_serialization.cpp @@ -12,14 +12,14 @@ int main(int argc, char** argv) /* create feature data matrix */ SGMatrix data(3, 20); - + auto m_rng = std::unique_ptr(new CRandom()); /* fill matrix with random data */ for (index_t i=0; i<20*3; ++i) { if (i%2==0) data.matrix[i]=0; else - data.matrix[i]=CMath::random(1, 9); + data.matrix[i] = m_rng->random(1, 9); } /* create sparse features */ diff --git a/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp b/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp index de5d2b4b8de..cd52ecdbf6b 100644 --- a/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp +++ b/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp @@ -99,9 +99,10 @@ void modelselection_combined_kernel() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; istd_normal_distrib(); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(matrix); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp b/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp index 0537d80f052..71d7bfe0572 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp @@ -103,9 +103,10 @@ int main(int argc, char **argv) /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; istd_normal_distrib(); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(matrix); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp b/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp index 8ce24ade9c7..e3b30f0a0b9 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp @@ -77,6 +77,7 @@ void test_cross_validation() /* training label data */ SGVector lab(num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -84,8 +85,7 @@ void test_cross_validation() for (index_t i=0; inormal_random(0, 1.0); } /* 
training features */ diff --git a/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp b/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp index faf5000ef5f..414d727b737 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp @@ -64,8 +64,9 @@ void test() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; istd_normal_distrib(); /* create feature object */ CDenseFeatures* features=new CDenseFeatures (); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp b/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp index f7d38f46568..af4922675b9 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp @@ -53,12 +53,13 @@ void test() /* create data: some easy multiclass data */ SGMatrix feat=SGMatrix(dim_vectors, num_vectors); SGVector lab(num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t j=0; jstd_normal_distrib(); /* make sure classes are (alomst) linearly seperable against each other */ feat(lab[j],j)+=distance; diff --git a/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp b/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp index e771746d889..bbb1558ad4b 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp @@ -76,17 +76,17 @@ int main(int argc, char **argv) index_t num_subsets=num_strings/3; SGStringList strings(num_strings, max_string_length); - + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(min_string_length, max_string_length); SGString current(len); SG_SPRINT("string %i: \"", i); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); char* string=new char[2]; string[0]=current.string[j]; diff --git a/examples/undocumented/libshogun/neuralnets_basic.cpp b/examples/undocumented/libshogun/neuralnets_basic.cpp index ee60c359835..d598f07071f 100644 --- a/examples/undocumented/libshogun/neuralnets_basic.cpp +++ b/examples/undocumented/libshogun/neuralnets_basic.cpp @@ -51,7 +51,7 @@ int main(int, char*[]) #ifdef HAVE_LAPACK // for CDataGenerator::generate_gaussian() // initialize the random number generator with a fixed seed, for repeatability - CMath::init_random(10); + set_global_seed(10); // Prepare the training data const int num_classes = 4; diff --git a/examples/undocumented/libshogun/neuralnets_convolutional.cpp b/examples/undocumented/libshogun/neuralnets_convolutional.cpp index c63c4e85dc1..a10f0c5884b 100644 --- a/examples/undocumented/libshogun/neuralnets_convolutional.cpp +++ b/examples/undocumented/libshogun/neuralnets_convolutional.cpp @@ -54,7 +54,7 @@ int main(int, char*[]) #ifdef HAVE_LAPACK // for CDataGenerator::generate_gaussian() // initialize the random number generator with a fixed seed, for repeatability - CMath::init_random(10); + set_global_seed(10); // Prepare the training data const int width = 4; diff --git a/examples/undocumented/libshogun/neuralnets_deep_autoencoder.cpp b/examples/undocumented/libshogun/neuralnets_deep_autoencoder.cpp index 2fb7340c496..40165c4bc4b 100644 --- 
a/examples/undocumented/libshogun/neuralnets_deep_autoencoder.cpp +++ b/examples/undocumented/libshogun/neuralnets_deep_autoencoder.cpp @@ -49,8 +49,7 @@ int main(int, char*[]) #ifdef HAVE_LAPACK // for CDataGenerator::generate_gaussian() // initialize the random number generator with a fixed seed, for repeatability - CMath::init_random(10); - + set_global_seed(10); // Prepare the training data const int num_features = 20; const int num_classes = 4; diff --git a/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp b/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp index 14959f4a7aa..98389862d2c 100644 --- a/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp +++ b/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp @@ -45,7 +45,7 @@ int main(int, char*[]) init_shogun_with_defaults(); // initialize the random number generator with a fixed seed, for repeatability - CMath::init_random(10); + auto m_rng = std::unique_ptr(new CRandom(10)); // Prepare the training data const int num_features = 5; @@ -67,11 +67,11 @@ int main(int, char*[]) } for (int32_t i=0; irandom(-1.0, 1.0); for (int32_t i=0; inormal_random(means[i], 1.0); CDenseFeatures* features = new CDenseFeatures(X); diff --git a/examples/undocumented/libshogun/parameter_iterate_float64.cpp b/examples/undocumented/libshogun/parameter_iterate_float64.cpp index 72bbe1a710a..c3e719c7ff0 100644 --- a/examples/undocumented/libshogun/parameter_iterate_float64.cpp +++ b/examples/undocumented/libshogun/parameter_iterate_float64.cpp @@ -32,9 +32,9 @@ int main(int argc, char** argv) /* create some random data */ SGMatrix matrix(n,n); - + auto m_rng = std::unique_ptr(new CRandom()); for(int32_t i=0; irandom((float64_t)-n, (float64_t)n); SGMatrix::display_matrix(matrix.matrix, n, n); diff --git a/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp b/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp index 72cabbdf1b4..b68cc1c24a3 100644 --- a/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp +++ b/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp @@ -29,11 +29,11 @@ int main(int argc, char** argv) const int32_t n=7; init_shogun(&print_message); - + auto m_rng = std::unique_ptr(new CRandom()); /* create some random data and hand it to each kernel */ SGMatrix matrix(n,n); for (int32_t k=0; krandom((float64_t)-n, (float64_t)n); SG_SPRINT("feature data:\n"); SGMatrix::display_matrix(matrix.matrix, n, n); @@ -44,7 +44,8 @@ int main(int argc, char** argv) CGaussianKernel** kernels=SG_MALLOC(CGaussianKernel*, n); for (int32_t i=0; irandom(0.0, (float64_t)n * n)); /* hand data to kernel */ kernels[i]->init(features, features); diff --git a/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp b/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp index 8a809d8e347..290d7e42ec2 100644 --- a/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp +++ b/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp @@ -32,7 +32,7 @@ void gen_rand_data(float64_t* & feat, float64_t* & lab,const int32_t num,const i { lab=SG_MALLOC(float64_t, num); feat=SG_MALLOC(float64_t, num*dims); - + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; irandom(0.0, 1.0) + dist; } else { lab[i]=1.0; for (int32_t j=0; jrandom(0.0, 1.0) - dist; } } CMath::display_vector(lab,num); diff --git a/examples/undocumented/libshogun/random_fourier_features.cpp b/examples/undocumented/libshogun/random_fourier_features.cpp 
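
Two seeding styles appear in the examples above: the neural-network examples fix the library-wide seed with set_global_seed(10) before generating data, while others keep determinism local by constructing the generator with an explicit seed, e.g. CRandom(17). A short sketch of both, assuming the usual init_shogun_with_defaults()/exit_shogun() boilerplate these examples use; it is illustrative and not part of the patch.

#include <memory>
#include <shogun/base/init.h>
#include <shogun/io/SGIO.h>
#include <shogun/mathematics/Random.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // style 1: fix the global seed so a whole example run is repeatable
    set_global_seed(10);

    // style 2: a locally seeded generator, independent of the global seed
    auto m_rng = std::unique_ptr<CRandom>(new CRandom(17));
    float64_t u = m_rng->random(0.0, 1.0);        // uniform in [0, 1]
    float64_t g = m_rng->normal_random(0.0, 1.0); // Gaussian, mean 0, sigma 1
    SG_SPRINT("%f %f\n", u, g);

    exit_shogun();
    return 0;
}
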
index 22e99c7c7ce..250bef2ea61 100644 --- a/examples/undocumented/libshogun/random_fourier_features.cpp +++ b/examples/undocumented/libshogun/random_fourier_features.cpp @@ -25,6 +25,7 @@ void load_data(int32_t num_dim, int32_t num_vecs, { SGMatrix mat(num_dim, num_vecs); SGVector labs(num_vecs); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(0, 1) + 0.5; } else { labs[i] = 1; - mat(j,i) = CMath::random(0,1) - 0.5; + mat(j, i) = m_rng->random(0, 1) - 0.5; } } } diff --git a/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp b/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp index a6f2f64a993..ff86686188e 100644 --- a/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp +++ b/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp @@ -31,10 +31,10 @@ void test() SGMatrix X(1, n); SGMatrix X_test(1, n); SGVector Y(n); - + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(0.0, x_range); X_test[i]=(float64_t)i / n*x_range; Y[i]=CMath::sin(X[i]); } diff --git a/examples/undocumented/libshogun/regression_libsvr.cpp b/examples/undocumented/libshogun/regression_libsvr.cpp index b4e62c26163..a9a829ccf52 100644 --- a/examples/undocumented/libshogun/regression_libsvr.cpp +++ b/examples/undocumented/libshogun/regression_libsvr.cpp @@ -25,7 +25,7 @@ void test_libsvr() /* create some easy regression data: 1d noisy sine wave */ index_t n=100; float64_t x_range=6; - + auto m_rng = std::unique_ptr(new CRandom()); SGMatrix feat_train(1, n); SGMatrix feat_test(1, n); SGVector lab_train(n); @@ -33,7 +33,7 @@ void test_libsvr() for (index_t i=0; irandom(0.0, x_range); feat_test[i]=(float64_t)i/n*x_range; lab_train[i]=CMath::sin(feat_train[i]); lab_test[i]=CMath::sin(feat_test[i]); diff --git a/examples/undocumented/libshogun/serialization_multiclass_labels.cpp b/examples/undocumented/libshogun/serialization_multiclass_labels.cpp index 3130a24ac61..c2a40340e0a 100644 --- a/examples/undocumented/libshogun/serialization_multiclass_labels.cpp +++ b/examples/undocumented/libshogun/serialization_multiclass_labels.cpp @@ -28,8 +28,9 @@ void test() labels->allocate_confidences_for(n_class); SGVector conf(n_class); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib(); for (index_t i=0; iset_multiclass_confidences(i, conf); diff --git a/examples/undocumented/libshogun/so_fg_model.cpp b/examples/undocumented/libshogun/so_fg_model.cpp index efbede0fc0a..5202a2c136d 100644 --- a/examples/undocumented/libshogun/so_fg_model.cpp +++ b/examples/undocumented/libshogun/so_fg_model.cpp @@ -18,7 +18,7 @@ using namespace shogun; void test(int32_t num_samples) { - CMath::init_random(17); + set_global_seed(17); // define factor type SGVector card(2); @@ -50,11 +50,11 @@ void test(int32_t num_samples) SGVector::fill_vector(vc.vector, vc.vlen, 2); CFactorGraph* fg = new CFactorGraph(vc); - + auto m_rng = std::unique_ptr(new CRandom()); // add factors SGVector data1(2); - data1[0] = 2.0 * CMath::random(0.0, 1.0) - 1.0; - data1[1] = 2.0 * CMath::random(0.0, 1.0) - 1.0; + data1[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data1[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; SGVector var_index1(2); var_index1[0] = 0; var_index1[1] = 1; @@ -62,8 +62,8 @@ void test(int32_t num_samples) fg->add_factor(fac1); SGVector data2(2); - data2[0] = 2.0 * CMath::random(0.0, 1.0) - 1.0; - data2[1] = 2.0 * CMath::random(0.0, 1.0) - 1.0; + data2[0] = 2.0 * m_rng->random(0.0, 1.0) - 
1.0; + data2[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; SGVector var_index2(2); var_index2[0] = 1; var_index2[1] = 2; diff --git a/examples/undocumented/libshogun/so_multiclass.cpp b/examples/undocumented/libshogun/so_multiclass.cpp index cd3a9f1932b..ef4b4878565 100644 --- a/examples/undocumented/libshogun/so_multiclass.cpp +++ b/examples/undocumented/libshogun/so_multiclass.cpp @@ -34,6 +34,7 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) { float64_t means[DIMS]; float64_t stds[DIMS]; + auto m_rng = std::unique_ptr(new CRandom()); FILE* pfile = fopen(FNAME, "w"); @@ -41,8 +42,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) { for ( int32_t j = 0 ; j < DIMS ; ++j ) { - means[j] = CMath::random(-100, 100); - stds[j] = CMath::random( 1, 5); + means[j] = m_rng->random(-100, 100); + stds[j] = m_rng->random(1, 5); } for ( int32_t i = 0 ; i < NUM_SAMPLES ; ++i ) @@ -53,8 +54,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) for ( int32_t j = 0 ; j < DIMS ; ++j ) { - feats[(c*NUM_SAMPLES+i)*DIMS + j] = - CMath::normal_random(means[j], stds[j]); + feats[(c * NUM_SAMPLES + i) * DIMS + j] = + m_rng->normal_random(means[j], stds[j]); fprintf(pfile, " %f", feats[(c*NUM_SAMPLES+i)*DIMS + j]); } diff --git a/examples/undocumented/libshogun/so_multiclass_BMRM.cpp b/examples/undocumented/libshogun/so_multiclass_BMRM.cpp index a05a3257b22..732ce56a47a 100644 --- a/examples/undocumented/libshogun/so_multiclass_BMRM.cpp +++ b/examples/undocumented/libshogun/so_multiclass_BMRM.cpp @@ -90,14 +90,14 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) FILE* pfile = fopen(FNAME, "w"); - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); for ( int32_t c = 0 ; c < NUM_CLASSES ; ++c ) { for ( int32_t j = 0 ; j < DIMS ; ++j ) { - means[j] = CMath::random(-100, 100); - stds[j] = CMath::random( 1, 5); + means[j] = m_rng->random(-100, 100); + stds[j] = m_rng->random(1, 5); } for ( int32_t i = 0 ; i < NUM_SAMPLES ; ++i ) @@ -108,8 +108,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) for ( int32_t j = 0 ; j < DIMS ; ++j ) { - feats[(c*NUM_SAMPLES+i)*DIMS + j] = - CMath::normal_random(means[j], stds[j]); + feats[(c * NUM_SAMPLES + i) * DIMS + j] = + m_rng->normal_random(means[j], stds[j]); fprintf(pfile, " %d:%f", j+1, feats[(c*NUM_SAMPLES+i)*DIMS + j]); } diff --git a/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp index 8defd4fc7a3..f97e5fe6a52 100644 --- a/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp @@ -19,10 +19,11 @@ int main(int argc, char **argv) index_t num_labels; index_t runs=10; + auto m_rng = std::unique_ptr(new CRandom()); while (runs-->0) { - num_labels=CMath::random(10, 50); + num_labels = m_rng->random(10, 50); //SG_SPRINT("num_labels=%d\n\n", num_labels); @@ -30,9 +31,8 @@ int main(int argc, char **argv) CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, CMath::random(-10.0, 10.0)); - // SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); - + labels->set_label(i, m_rng->random(-10.0, 10.0)); + // SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } //SG_SPRINT("\n"); diff --git a/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp 
b/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp index f3bf37426bc..7d0b54a56fb 100644 --- a/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp @@ -26,11 +26,12 @@ int main(int argc, char **argv) index_t num_labels; index_t num_subsets; index_t runs=100; + auto m_rng = std::unique_ptr(new CRandom()); while (runs-->0) { - num_labels=CMath::random(10, 150); - num_subsets=CMath::random(1, 5); + num_labels = m_rng->random(10, 150); + num_subsets = m_rng->random(1, 5); index_t desired_size=CMath::round( (float64_t)num_labels/(float64_t)num_subsets); @@ -44,7 +45,7 @@ int main(int argc, char **argv) CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, CMath::random(-10.0, 10.0)); + labels->set_label(i, m_rng->random(-10.0, 10.0)); SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } SG_SPRINT("\n"); diff --git a/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp index 2619ac3a3ae..b2dc651d968 100644 --- a/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp @@ -25,12 +25,13 @@ int main(int argc, char **argv) index_t num_labels, num_classes, num_subsets; index_t runs=50; + auto m_rng = std::unique_ptr(new CRandom()); while (runs-->0) { - num_labels=CMath::random(5, 100); - num_classes=CMath::random(2, 10); - num_subsets=CMath::random(1, 10); + num_labels = m_rng->random(5, 100); + num_classes = m_rng->random(2, 10); + num_subsets = m_rng->random(1, 10); /* this will throw an error */ if (num_labelsset_label(i, CMath::random()%num_classes); + labels->set_label(i, m_rng->random_64() % num_classes); SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } SG_SPRINT("\n"); diff --git a/examples/undocumented/libshogun/streaming_from_dense.cpp b/examples/undocumented/libshogun/streaming_from_dense.cpp index aa57c37e946..51d3aeb10a4 100644 --- a/examples/undocumented/libshogun/streaming_from_dense.cpp +++ b/examples/undocumented/libshogun/streaming_from_dense.cpp @@ -32,12 +32,13 @@ using namespace shogun; void gen_rand_data(SGMatrix feat, SGVector lab) { + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; irandom(0.0, 1.0) + DIST; if (lab.vector) lab[i]=0; @@ -45,7 +46,7 @@ void gen_rand_data(SGMatrix feat, SGVector lab) else { for (int32_t j=0; jrandom(0.0, 1.0) - DIST; if (lab.vector) lab[i]=1; diff --git a/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp b/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp index b91a2f1ec24..cca1ea747fe 100644 --- a/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp +++ b/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp @@ -53,7 +53,7 @@ #include #include -#include +#include #include #include @@ -593,7 +593,7 @@ int32_t CQPBSVMLib::qpbsvm_gauss_seidel(float64_t *x, int32_t verb) { for (int32_t i=0; irandom(0.0, 1.0); for (int32_t t=0; t<200; t++) { @@ -624,7 +624,7 @@ int32_t CQPBSVMLib::qpbsvm_gradient_descent(float64_t *x, int32_t verb) { for (int32_t i=0; irandom(0.0, 1.0); for (int32_t t=0; t<2000; t++) { diff --git a/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp b/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp index 64b5e70fbbc..2f1baffb771 100644 --- a/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp +++ b/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp @@ -12,17 +12,16 @@ #include #ifdef USE_GPL_SHOGUN +#include +#include 
+#include +#include #include -#include #include #include -#include -#include #include -#include -#include -#include -#include +#include +#include using namespace shogun; diff --git a/src/shogun/base/DynArray.h b/src/shogun/base/DynArray.h index 2e363769f30..d5041e5f095 100644 --- a/src/shogun/base/DynArray.h +++ b/src/shogun/base/DynArray.h @@ -12,6 +12,7 @@ #ifndef _DYNARRAY_H_ #define _DYNARRAY_H_ +#include #include #include @@ -447,8 +448,11 @@ template class DynArray /** randomizes the array (not thread safe!) */ void shuffle() { + auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); for (index_t i=0; i<=current_num_elements-1; ++i) - CMath::swap(array[i], array[CMath::random(i, current_num_elements-1)]); + CMath::swap( + array[i], + array[m_rng->random(i, current_num_elements - 1)]); } /** randomizes the array with external random state */ diff --git a/src/shogun/base/SGObject.cpp b/src/shogun/base/SGObject.cpp index 7d950b65c33..7484a432bb5 100644 --- a/src/shogun/base/SGObject.cpp +++ b/src/shogun/base/SGObject.cpp @@ -36,6 +36,7 @@ namespace shogun { + extern uint32_t sg_random_seed; #ifdef HAVE_CXX11 typedef std::unordered_map ParametersMap; #else @@ -494,7 +495,7 @@ void CSGObject::init() m_parameters = new Parameter(); m_model_selection_parameters = new Parameter(); m_gradient_parameters=new Parameter(); - m_rng = std::unique_ptr(new CRandom()); + m_rng = std::unique_ptr(new CRandom(sg_random_seed)); m_generic = PT_NOT_GENERIC; m_load_pre_called = false; m_load_post_called = false; @@ -801,8 +802,3 @@ bool CSGObject::type_erased_has(const BaseTag& _tag) const { return self->has(_tag); } - -void CSGObject::set_seed(int32_t seed) -{ - m_rng->set_seed(seed); -} diff --git a/src/shogun/base/SGObject.h b/src/shogun/base/SGObject.h index ebd013a2b60..9143118f994 100644 --- a/src/shogun/base/SGObject.h +++ b/src/shogun/base/SGObject.h @@ -494,11 +494,6 @@ class CSGObject */ virtual CSGObject* clone(); - /** Set random seed - * @param seed seed for random generator - */ - void set_seed(int32_t seed); - protected: /* Iteratively clones all parameters of the provided instance into this instance. 
* This will fail if the objects have different sets of registered parameters, diff --git a/src/shogun/base/init.cpp b/src/shogun/base/init.cpp index bccbcbb2332..335def0eb98 100644 --- a/src/shogun/base/init.cpp +++ b/src/shogun/base/init.cpp @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -32,12 +31,22 @@ shogun::CMap* sg_mallocs=NULL; #include #endif +#ifdef _WIN32 +#define _CRT_RAND_S +#include +#endif + +#ifdef DEV_RANDOM +#include +#endif + namespace shogun { Parallel* sg_parallel=NULL; SGIO* sg_io=NULL; Version* sg_version=NULL; CMath* sg_math=NULL; + uint32_t sg_random_seed = generate_seed(); std::unique_ptr sg_linalg(nullptr); @@ -223,4 +232,37 @@ namespace shogun } #endif } + + uint32_t generate_seed() + { + uint32_t seed; +#if defined(_WIN32) + rand_s(&seed); +#elif defined(HAVE_ARC4RANDOM) + seed = arc4random(); +#elif defined(DEV_RANDOM) + int fd = open(DEV_RANDOM, O_RDONLY); + ASSERT(fd >= 0); + ssize_t actual_read = + read(fd, reinterpret_cast(&seed), sizeof(seed)); + close(fd); + ASSERT(actual_read == sizeof(seed)); +#else + SG_SWARNING("Not safe seed for the PRNG\n"); + struct timeval tv; + gettimeofday(&tv, NULL); + seed = (uint32_t)(4223517 * getpid() * tv.tv_sec * tv.tv_usec); +#endif + return seed; + } + + void set_global_seed(uint32_t seed) + { + sg_random_seed = seed; + } + + uint32_t get_global_seed() + { + return sg_random_seed; + } } diff --git a/src/shogun/base/init.h b/src/shogun/base/init.h index 8856ae6cad8..982a1494dcf 100644 --- a/src/shogun/base/init.h +++ b/src/shogun/base/init.h @@ -11,6 +11,7 @@ #ifndef __SG_INIT_H__ #define __SG_INIT_H__ +#include #include #include @@ -97,6 +98,18 @@ void set_global_math(CMath* math); */ CMath* get_global_math(); +/** Set global random seed + * @param seed seed for random generator + */ +void set_global_seed(uint32_t seed); + +/** get global random seed + * @return random seed + */ +uint32_t get_global_seed(); + +uint32_t generate_seed(); + #ifndef SWIG // SWIG should skip this part /** get the global linalg library object * diff --git a/src/shogun/classifier/svm/GNPPLib.cpp b/src/shogun/classifier/svm/GNPPLib.cpp index 070a68119d5..da7a8feeb36 100644 --- a/src/shogun/classifier/svm/GNPPLib.cpp +++ b/src/shogun/classifier/svm/GNPPLib.cpp @@ -13,9 +13,9 @@ -------------------------------------------------------------------- */ #include -#include #include -#include +#include +#include #include #include diff --git a/src/shogun/classifier/svm/LibLinear.cpp b/src/shogun/classifier/svm/LibLinear.cpp index 473ed66b47d..e759f3beed2 100644 --- a/src/shogun/classifier/svm/LibLinear.cpp +++ b/src/shogun/classifier/svm/LibLinear.cpp @@ -326,7 +326,7 @@ void CLibLinear::solve_l2r_l1l2_svc( for (i=0; irandom(i, active_size - 1); CMath::swap(index[i], index[j]); } @@ -535,7 +535,7 @@ void CLibLinear::solve_l1r_l2_svc( for(j=0; jrandom(j, active_size - 1); CMath::swap(index[i], index[j]); } @@ -907,7 +907,7 @@ void CLibLinear::solve_l1r_lr( for(j=0; jrandom(j, active_size - 1); CMath::swap(index[i], index[j]); } @@ -1241,7 +1241,7 @@ void CLibLinear::solve_l2r_lr_dual(SGVector& w, const liblinear_probl { for (i=0; irandom(i, l - 1); CMath::swap(index[i], index[j]); } int newton_iter = 0; diff --git a/src/shogun/classifier/vw/VwRegressor.cpp b/src/shogun/classifier/vw/VwRegressor.cpp index 780c89a9681..d978cc367d9 100644 --- a/src/shogun/classifier/vw/VwRegressor.cpp +++ b/src/shogun/classifier/vw/VwRegressor.cpp @@ -76,7 +76,7 @@ void CVwRegressor::init(CVwEnvironment* env_to_use) if (env->random_weights) { 
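
The seeding machinery itself moves out of CMath: init.cpp now holds sg_random_seed, initialized from generate_seed() (rand_s on Windows, arc4random or a DEV_RANDOM read where available, otherwise a warned time-and-pid fallback), init.h exposes set_global_seed()/get_global_seed(), and SGObject.cpp/DynArray.h seed their CRandom members from that value instead of offering CSGObject::set_seed. A minimal sketch of how user code can follow the same pattern; the MySampler class is hypothetical and only mirrors what the hunks above do.

#include <memory>
#include <shogun/base/init.h>
#include <shogun/mathematics/Random.h>

using namespace shogun;

// hypothetical class seeding its member generator from the global seed,
// the way SGObject.cpp now constructs m_rng from sg_random_seed
class MySampler
{
public:
    MySampler() : m_rng(new CRandom(get_global_seed())) {}
    float64_t draw() { return m_rng->std_normal_distrib(); }

private:
    std::unique_ptr<CRandom> m_rng;
};

int main()
{
    init_shogun_with_defaults();
    set_global_seed(12); // fix the seed before constructing objects
    MySampler sampler;
    float64_t v = sampler.draw(); // reproducible across runs with seed 12
    (void)v;
    exit_shogun();
    return 0;
}

Objects constructed before the set_global_seed() call keep whatever entropy-based seed generate_seed() produced, so the call has to precede construction for a run to be repeatable.
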
for (vw_size_t j = 0; j < length/num_threads; j++) - weight_vectors[i][j] = CMath::random(-0.5, 0.5); + weight_vectors[i][j] = m_rng->random(-0.5, 0.5); } if (env->initial_weight != 0.) diff --git a/src/shogun/clustering/GMM.cpp b/src/shogun/clustering/GMM.cpp index f2877377871..4dd03847bf0 100644 --- a/src/shogun/clustering/GMM.cpp +++ b/src/shogun/clustering/GMM.cpp @@ -390,8 +390,12 @@ void CGMM::partial_em(int32_t comp1, int32_t comp2, int32_t comp3, float64_t min for (int32_t i=0; iget_mean().vector[i]=components[0]->get_mean().vector[i]+CMath::randn_double()*noise_mag; - components[0]->get_mean().vector[i]=components[0]->get_mean().vector[i]+CMath::randn_double()*noise_mag; + components[2]->get_mean().vector[i] = + components[0]->get_mean().vector[i] + + m_rng->std_normal_distrib() * noise_mag; + components[0]->get_mean().vector[i] = + components[0]->get_mean().vector[i] + + m_rng->std_normal_distrib() * noise_mag; } coefficients.vector[1]=coefficients.vector[1]+coefficients.vector[2]; @@ -766,7 +770,7 @@ SGVector CGMM::sample() { REQUIRE(m_components.size()>0, "Number of mixture components is %d but " "must be positive\n", m_components.size()); - float64_t rand_num=CMath::random(float64_t(0), float64_t(1)); + float64_t rand_num = m_rng->random(float64_t(0), float64_t(1)); float64_t cum_sum=0; for (int32_t i=0; i #include #include -#include #include +#include #include using namespace Eigen; diff --git a/src/shogun/clustering/KMeansBase.cpp b/src/shogun/clustering/KMeansBase.cpp index e26bd383e69..08a4a9add0d 100644 --- a/src/shogun/clustering/KMeansBase.cpp +++ b/src/shogun/clustering/KMeansBase.cpp @@ -9,13 +9,13 @@ * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society */ +#include #include #include #include -#include #include -#include -#include +#include +#include #include using namespace shogun; @@ -275,7 +275,7 @@ SGMatrix CKMeansBase::kmeanspp() min_dist.zero(); /* First center is chosen at random */ - int32_t mu=CMath::random((int32_t) 0, lhs_size-1); + int32_t mu = m_rng->random((int32_t)0, lhs_size - 1); SGVector mu_first=lhs->get_feature_vector(mu); for(int32_t j=0; j CKMeansBase::kmeanspp() { float64_t temp_sum=0.0; float64_t temp_dist=0.0; - SGVector temp_min_dist=SGVector(lhs_size); - int32_t new_center=0; - float64_t prob=CMath::random(0.0, 1.0); + SGVector temp_min_dist = SGVector(lhs_size); + int32_t new_center = 0; + float64_t prob = m_rng->random(0.0, 1.0); prob=prob*sum; for(int32_t j=0; j -#include #include #include +#include #ifdef _WIN32 #undef far @@ -131,7 +131,7 @@ SGVector CKMeansMiniBatch::mbchoose_rand(int32_t b, int32_t num) { SGVector chosen=SGVector(num); SGVector ret=SGVector(b); - auto rng = std::unique_ptr(new CRandom()); + auto rng = std::unique_ptr(new CRandom(sg_random_seed)); chosen.zero(); int32_t ch=0; while (chstd_normal_distrib(); } } diff --git a/src/shogun/distributions/Gaussian.cpp b/src/shogun/distributions/Gaussian.cpp index d5c36c6e9e4..6558b8ef379 100644 --- a/src/shogun/distributions/Gaussian.cpp +++ b/src/shogun/distributions/Gaussian.cpp @@ -412,7 +412,7 @@ SGVector CGaussian::sample() SGVector random_vec(m_mean.vlen); for (int32_t i = 0; i < m_mean.vlen; i++) - random_vec.vector[i] = CMath::randn_double(); + random_vec.vector[i] = m_rng->std_normal_distrib(); if (m_cov_type == FULL) { diff --git a/src/shogun/distributions/HMM.cpp b/src/shogun/distributions/HMM.cpp index 3bc2dd5c556..b7ff4a4fe12 100644 --- a/src/shogun/distributions/HMM.cpp +++ b/src/shogun/distributions/HMM.cpp @@ -22,7 +22,12 @@ #include 
#include -#define VAL_MACRO log((default_value == 0) ? (CMath::random(MIN_RAND, MAX_RAND)) : default_value) +#define VAL_MACRO \ + [&]() { \ + return log( \ + (default_value == 0) ? (m_rng->random(MIN_RAND, MAX_RAND)) \ + : default_value); \ + } #define ARRAY_SIZE 65336 using namespace shogun; @@ -2452,7 +2457,7 @@ void CHMM::init_model_random() sum=0; for (j=0; jrandom(MIN_RAND, 1.0)); sum+=get_a(i,j); } @@ -2465,7 +2470,7 @@ void CHMM::init_model_random() sum=0; for (i=0; irandom(MIN_RAND, 1.0)); sum+=get_p(i); } @@ -2477,7 +2482,7 @@ void CHMM::init_model_random() sum=0; for (i=0; irandom(MIN_RAND, 1.0)); sum+=get_q(i); } @@ -2491,7 +2496,7 @@ void CHMM::init_model_random() sum=0; for (j=0; jrandom(MIN_RAND, 1.0)); sum+=get_b(i,j); } @@ -2532,7 +2537,8 @@ void CHMM::init_model_defined() //initialize a values that have to be learned float64_t *R=SG_MALLOC(float64_t, N); - for (r=0; rrandom(MIN_RAND, 1.0); i=0; sum=0; k=i; j=model->get_learn_a(i,0); while (model->get_learn_a(i,0)!=-1 || kget_learn_a(i,0); k=i; sum=0; - for (r=0; rrandom(MIN_RAND, 1.0); } } SG_FREE(R); R=NULL ; //initialize b values that have to be learned R=SG_MALLOC(float64_t, M); - for (r=0; rrandom(MIN_RAND, 1.0); i=0; sum=0; k=0 ; j=model->get_learn_b(i,0); while (model->get_learn_b(i,0)!=-1 || kget_learn_b(i,0); k=i; sum=0; - for (r=0; rrandom(MIN_RAND, 1.0); } } SG_FREE(R); R=NULL ; @@ -2628,7 +2637,7 @@ void CHMM::init_model_defined() sum=0; while (model->get_learn_p(i)!=-1) { - set_p(model->get_learn_p(i),CMath::random(MIN_RAND,1.0)) ; + set_p(model->get_learn_p(i), m_rng->random(MIN_RAND, 1.0)); sum+=get_p(model->get_learn_p(i)) ; i++ ; } ; @@ -2644,7 +2653,7 @@ void CHMM::init_model_defined() sum=0; while (model->get_learn_q(i)!=-1) { - set_q(model->get_learn_q(i),CMath::random(MIN_RAND,1.0)) ; + set_q(model->get_learn_q(i), m_rng->random(MIN_RAND, 1.0)); sum+=get_q(model->get_learn_q(i)) ; i++ ; } ; @@ -5074,7 +5083,7 @@ void CHMM::add_states(int32_t num_states, float64_t default_value) // warning pay attention to the ordering of // transition_matrix_a, observation_matrix_b !!! - for (i=0; i CDataGenerator::generate_checkboard_data(int32_t num_classes int32_t dim, int32_t num_points, float64_t overlap) { int32_t points_per_class = num_points / num_classes; + auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); int32_t grid_size = (int32_t ) CMath::ceil(CMath::sqrt((float64_t ) num_classes)); float64_t cell_size = (float64_t ) 1 / grid_size; @@ -54,11 +55,12 @@ SGMatrix CDataGenerator::generate_checkboard_data(int32_t num_classes { do { - points(i, p) = CMath::normal_random(class_dim_centers[i], cell_size*0.5); + points(i, p) = m_rng->normal_random( + class_dim_centers[i], cell_size * 0.5); if ((points(i, p)>(grid_idx[i]+1)*cell_size) || (points(i, p)random(0.0, 1.0) < overlap)) continue; } break; @@ -86,12 +88,13 @@ SGMatrix CDataGenerator::generate_mean_data(index_t m, /* evtl. allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( dim, 2*m, target); + auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); /* fill matrix with normal data */ for (index_t i=0; i<2*m; ++i) { for (index_t j=0; jstd_normal_distrib(); /* mean shift for second half */ if (i>=m) @@ -107,7 +110,7 @@ SGMatrix CDataGenerator::generate_sym_mix_gauss(index_t m, /* evtl. 
allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( 2, m, target); - + auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); /* rotation matrix */ SGMatrix rot=SGMatrix(2,2); rot(0, 0)=CMath::cos(angle); @@ -119,8 +122,10 @@ SGMatrix CDataGenerator::generate_sym_mix_gauss(index_t m, * Gaussians */ for (index_t i=0; istd_normal_distrib() + (m_rng->random(0, 1) ? d : -d); + result(1, i) = + m_rng->std_normal_distrib() + (m_rng->random(0, 1) ? d : -d); } /* rotate result */ diff --git a/src/shogun/features/RandomFourierDotFeatures.cpp b/src/shogun/features/RandomFourierDotFeatures.cpp index 64b47b4df83..4c047e6b6bd 100644 --- a/src/shogun/features/RandomFourierDotFeatures.cpp +++ b/src/shogun/features/RandomFourierDotFeatures.cpp @@ -90,11 +90,11 @@ SGVector CRandomFourierDotFeatures::generate_random_parameter_vector( case GAUSSIAN: for (index_t i=0; inormal_random(0.0, 1); } - vec[vec.vlen-1] = CMath::random(0.0, 2 * CMath::PI); + vec[vec.vlen - 1] = m_rng->random(0.0, 2 * CMath::PI); break; default: diff --git a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp index fd5b5f808a5..367fbd45443 100644 --- a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp +++ b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp @@ -89,12 +89,12 @@ bool CGaussianBlobsDataGenerator::get_next_example() SGVector result=SGVector(2); /* sample latent distribution to compute offsets */ - index_t x_offset=CMath::random(0, m_sqrt_num_blobs-1)*m_distance; - index_t y_offset=CMath::random(0, m_sqrt_num_blobs-1)*m_distance; + index_t x_offset = m_rng->random(0, m_sqrt_num_blobs - 1) * m_distance; + index_t y_offset = m_rng->random(0, m_sqrt_num_blobs - 1) * m_distance; /* sample from std Gaussian */ - float64_t x=CMath::randn_double(); - float64_t y=CMath::randn_double(); + float64_t x = m_rng->std_normal_distrib(); + float64_t y = m_rng->std_normal_distrib(); /* transform through cholesky and add offset */ result[0]=m_cholesky(0, 0)*x+m_cholesky(0, 1)*y+x_offset; diff --git a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp index 1337ea98771..9ff4da905a0 100644 --- a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp +++ b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp @@ -66,7 +66,7 @@ bool CMeanShiftDataGenerator::get_next_example() /* fill with std normal data */ for (index_t i=0; istd_normal_distrib(); /* mean shift in selected dimension */ result[m_dimension_shift]+=m_mean_shift; diff --git a/src/shogun/kernel/PyramidChi2.cpp b/src/shogun/kernel/PyramidChi2.cpp index 7953ffc3bdb..b8fe52fe1e2 100644 --- a/src/shogun/kernel/PyramidChi2.cpp +++ b/src/shogun/kernel/PyramidChi2.cpp @@ -158,7 +158,10 @@ float64_t CPyramidChi2::compute(int32_t idx_a, int32_t idx_b) if (num_randfeats_forwidthcomputation >0) { for(int32_t i=0; i< numind;++i) - featindices[i]=CMath::random(0, ((CDenseFeatures*) lhs)->get_num_vectors()-1); + featindices[i] = m_rng->random( + 0, + ((CDenseFeatures*)lhs)->get_num_vectors() - + 1); } else { diff --git a/src/shogun/lib/SGVector.cpp b/src/shogun/lib/SGVector.cpp index 2621f47d7ff..5ed8177c017 100644 --- a/src/shogun/lib/SGVector.cpp +++ b/src/shogun/lib/SGVector.cpp @@ -614,8 +614,9 @@ void SGVector::vec1_plus_scalar_times_vec2(float32_t* vec1, template void SGVector::random_vector(T* vec, int32_t len, T min_value, T 
max_value) { + auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); for (int32_t i=0; irandom(min_value, max_value); } template <> diff --git a/src/shogun/lib/tapkee/defines/random.hpp b/src/shogun/lib/tapkee/defines/random.hpp index 6d6bc266bc7..66dafd3dfdc 100644 --- a/src/shogun/lib/tapkee/defines/random.hpp +++ b/src/shogun/lib/tapkee/defines/random.hpp @@ -16,7 +16,8 @@ namespace tapkee inline IndexType uniform_random_index() { #ifdef CUSTOM_UNIFORM_RANDOM_INDEX_FUNCTION - return CUSTOM_UNIFORM_RANDOM_INDEX_FUNCTION % std::numeric_limits::max(); + return CUSTOM_UNIFORM_RANDOM_INDEX_FUNCTION() % + std::numeric_limits::max(); #else return std::rand(); #endif @@ -30,7 +31,7 @@ inline IndexType uniform_random_index_bounded(IndexType upper) inline ScalarType uniform_random() { #ifdef CUSTOM_UNIFORM_RANDOM_FUNCTION - return CUSTOM_UNIFORM_RANDOM_FUNCTION; + return CUSTOM_UNIFORM_RANDOM_FUNCTION(); #else return std::rand()/((double)RAND_MAX+1); #endif @@ -39,7 +40,7 @@ inline ScalarType uniform_random() inline ScalarType gaussian_random() { #ifdef CUSTOM_GAUSSIAN_RANDOM_FUNCTION - return CUSTOM_GAUSSIAN_RANDOM_FUNCTION; + return CUSTOM_GAUSSIAN_RANDOM_FUNCTION(); #else ScalarType x, y, radius; do { diff --git a/src/shogun/lib/tapkee/tapkee_shogun.cpp b/src/shogun/lib/tapkee/tapkee_shogun.cpp index a3aa2f677df..a712a8a6432 100644 --- a/src/shogun/lib/tapkee/tapkee_shogun.cpp +++ b/src/shogun/lib/tapkee/tapkee_shogun.cpp @@ -9,10 +9,25 @@ #include - -#define CUSTOM_UNIFORM_RANDOM_INDEX_FUNCTION shogun::CMath::random() -#define CUSTOM_UNIFORM_RANDOM_FUNCTION shogun::CMath::random(static_cast(0),static_cast(1)) -#define CUSTOM_GAUSSIAN_RANDOM_FUNCTION shogun::CMath::normal_random(static_cast(0),static_cast(1)) +#define CUSTOM_UNIFORM_RANDOM_INDEX_FUNCTION \ + []() -> uint64_t { \ + auto rng = std::unique_ptr(new CRandom()); \ + return rng->random_64(); \ + } +#define CUSTOM_UNIFORM_RANDOM_FUNCTION \ + []() { \ + auto rng = std::unique_ptr(new CRandom()); \ + return rng->random( \ + static_cast(0), \ + static_cast(1)); \ + } +#define CUSTOM_GAUSSIAN_RANDOM_FUNCTION \ + []() { \ + auto rng = std::unique_ptr(new CRandom()); \ + return rng->normal_random( \ + static_cast(0), \ + static_cast(1)); \ + } #define TAPKEE_EIGEN_INCLUDE_FILE #ifdef HAVE_ARPACK diff --git a/src/shogun/machine/BaggingMachine.cpp b/src/shogun/machine/BaggingMachine.cpp index 4a9802680bc..840b0f22ded 100644 --- a/src/shogun/machine/BaggingMachine.cpp +++ b/src/shogun/machine/BaggingMachine.cpp @@ -130,9 +130,9 @@ bool CBaggingMachine::train_machine(CFeatures* data) SGMatrix rnd_indicies(m_bag_size, m_num_bags); for (index_t i = 0; i < m_num_bags*m_bag_size; ++i) - rnd_indicies.matrix[i] = CMath::random(0, m_bag_size-1); + rnd_indicies.matrix[i] = m_rng->random(0, m_bag_size - 1); - #pragma omp parallel for +#pragma omp parallel for for (int32_t i = 0; i < m_num_bags; ++i) { CMachine* c=dynamic_cast(m_machine->clone()); diff --git a/src/shogun/mathematics/Math.cpp b/src/shogun/mathematics/Math.cpp index ae6ffb4b7fc..56d94c92256 100644 --- a/src/shogun/mathematics/Math.cpp +++ b/src/shogun/mathematics/Math.cpp @@ -36,8 +36,6 @@ int32_t CMath::LOGACCURACY = 0; // 100000 steps per integer int32_t CMath::LOGRANGE = 0; // range for logtable: log(1+exp(x)) -25 <= x <= 0 -CRandom* CMath::m_rng = new CRandom(12345); - const float64_t CMath::NOT_A_NUMBER = NAN; const float64_t CMath::INFTY = INFINITY; // infinity const float64_t CMath::ALMOST_INFTY = +1e+300; //a large number diff --git a/src/shogun/mathematics/Math.h 
b/src/shogun/mathematics/Math.h index 143506084b5..9d343f84a2e 100644 --- a/src/shogun/mathematics/Math.h +++ b/src/shogun/mathematics/Math.h @@ -20,13 +20,13 @@ #ifndef __MATHEMATICS_H_ #define __MATHEMATICS_H_ -#include - -#include +#include #include -#include +#include #include -#include +#include +#include +#include #ifndef _USE_MATH_DEFINES #define _USE_MATH_DEFINES @@ -988,152 +988,6 @@ class CMath : public CSGObject res*=i ; return res ; } - - /** - * @name Random Functions - */ - //@{ - /** Initiates seed for pseudo random generator - * @param initseed value of seed - */ - static void init_random(uint32_t initseed=0) - { - if (initseed==0) - m_rng->set_seed(CRandom::generate_seed()); - else - m_rng->set_seed(initseed); - } - - /** Returns random number - * @return unsigned 64 bit integer - */ - static inline uint64_t random() - { - uint64_t result = m_rng->random_64(); - return result; - } - - /** Returns random number - * @return unsigned 64 bit integer - */ - static inline uint64_t random(uint64_t min_value, uint64_t max_value) - { - uint64_t result = m_rng->random(min_value, max_value); - return result; - } - - /** Returns random number between minimum and maximum value - * @param min_value minimum value (64 bit integer) - * @param max_value maximum value (64 bit integer) - * @return signed 64 bit integer - */ - static inline int64_t random(int64_t min_value, int64_t max_value) - { - int64_t result = m_rng->random(min_value, max_value); - return result; - } - - /** Returns random number between minimum and maximum value - * @param min_value minimum value (32 bit unsigned integer) - * @param max_value maximum value (32 bit unsigned integer) - * @return unsigned 32 bit integer - */ - static inline uint32_t random(uint32_t min_value, uint32_t max_value) - { - uint32_t result = m_rng->random(min_value, max_value); - return result; - } - - /** Returns random number between minimum and maximum value - * @param min_value minimum value (32 bit signed integer) - * @param max_value maximum value (32 bit signed integer) - * @return signed 32 bit integer - */ - static inline int32_t random(int32_t min_value, int32_t max_value) - { - int32_t result = m_rng->random(min_value, max_value); - return result; - } - - /** Returns random number between minimum and maximum value - * @param min_value minimum value (32 bit float) - * @param max_value maximum value (32 bit float) - * @return 32 bit float - */ - static inline float32_t random(float32_t min_value, float32_t max_value) - { - float32_t result = m_rng->random(min_value, max_value); - return result; - } - - /** Returns random number between minimum and maximum value - * @param min_value minimum value (64 bit float) - * @param max_value maximum value (64 bit float) - * @return 64 bit float - */ - static inline float64_t random(float64_t min_value, float64_t max_value) - { - float64_t result = m_rng->random(min_value, max_value); - return result; - } - - /** Returns random number between minimum and maximum value - * @param min_value minimum value (128 bit float) - * @param max_value maximum value (128 bit float) - * @return 128 bit float - */ - static inline floatmax_t random(floatmax_t min_value, floatmax_t max_value) - { - floatmax_t result = m_rng->random(min_value, max_value); - return result; - } - - /// Returns a Gaussian or Normal random number. - /// Using the polar form of the Box-Muller transform. 
- /// http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form - static inline float32_t normal_random(float32_t mean, float32_t std_dev) - { - // sets up variables & makes sure rand_s.range == (0,1) - float32_t ret; - float32_t rand_u; - float32_t rand_v; - float32_t rand_s; - do - { - rand_u = static_cast(CMath::random(-1.0, 1.0)); - rand_v = static_cast(CMath::random(-1.0, 1.0)); - rand_s = rand_u*rand_u + rand_v*rand_v; - } while ((rand_s == 0) || (rand_s >= 1)); - - // the meat & potatos, and then the mean & standard deviation shifting... - ret = static_cast(rand_u*CMath::sqrt(-2.0*CMath::log(rand_s)/rand_s)); - ret = std_dev*ret + mean; - return ret; - } - - /// Returns a Gaussian or Normal random number. - /// Using the polar form of the Box-Muller transform. - /// http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form - static inline float64_t normal_random(float64_t mean, float64_t std_dev) - { - float64_t result = m_rng->normal_distrib(mean, std_dev); - return result; - } - - /// Convenience method for generating Standard Normal random numbers - /// Float: Mean = 0 and Standard Deviation = 1 - static inline float32_t randn_float() - { - return static_cast(normal_random(0.0, 1.0)); - } - - /// Convenience method for generating Standard Normal random numbers - /// Double: Mean = 0 and Standard Deviation = 1 - static inline float64_t randn_double() - { - float64_t result = m_rng->std_normal_distrib(); - return result; - } //@} /** Implements the greatest common divisor (gcd) via modulo operations. @@ -1162,25 +1016,22 @@ class CMath : public CSGObject return 0 == a ? b : a; } - /** Permute randomly the elements of the vector. If provided, use the - * random object to generate the permutations. - * @param v the vector to permute. - * @param rand random object that might be used to generate the permutations. 
- */ template - static void permute(SGVector v, CRandom* rand=NULL) + static void permute(SGVector v, CRandom* rand = NULL) + { + if (rand) { - if (rand) - { - for (index_t i=0; irandom(i, v.vlen-1)]); - } - else - { - for (index_t i=0; irandom(i, v.vlen - 1)]); } + else + { + auto m_rng = + std::unique_ptr(new CRandom(sg_random_seed)); + for (index_t i = 0; i < v.vlen; ++i) + swap(v[i], v[m_rng->random(i, v.vlen - 1)]); + } + } /** Computes sum of non-zero elements * @param vec vector @@ -2092,8 +1943,6 @@ class CMath : public CSGObject static const float32_t F_MIN_VAL32; static const float64_t F_MIN_VAL64; - static CRandom* m_rng; - protected: /// range for logtable: log(1+exp(x)) -LOGRANGE <= x <= 0 static int32_t LOGRANGE; diff --git a/src/shogun/mathematics/Random.cpp b/src/shogun/mathematics/Random.cpp index 8159137baeb..a449fd824e3 100644 --- a/src/shogun/mathematics/Random.cpp +++ b/src/shogun/mathematics/Random.cpp @@ -12,12 +12,13 @@ #include #endif -#include #include +#include +#include #include #include -#include -#include +#include +#include #ifdef DEV_RANDOM #include @@ -30,7 +31,7 @@ CRandom::CRandom() m_sfmt_64(NULL), m_dsfmt(NULL) { - m_seed = CRandom::generate_seed(); + m_seed = sg_random_seed; init(); } @@ -343,25 +344,35 @@ void CRandom::reinit(uint32_t seed) m_state_lock.unlock(); } -uint32_t CRandom::generate_seed() +float32_t CRandom::normal_random(float32_t mean, float32_t std_dev) { - uint32_t seed; -#if defined(_WIN32) - rand_s(&seed); -#elif defined(HAVE_ARC4RANDOM) - seed = arc4random(); -#elif defined(DEV_RANDOM) - int fd = open(DEV_RANDOM, O_RDONLY); - ASSERT(fd >= 0); - ssize_t actual_read = - read(fd, reinterpret_cast(&seed), sizeof(seed)); - close(fd); - ASSERT(actual_read == sizeof(seed)); -#else - SG_SWARNING("Not safe seed for the PRNG\n"); - struct timeval tv; - gettimeofday(&tv, NULL); - seed=(uint32_t) (4223517*getpid()*tv.tv_sec*tv.tv_usec); -#endif - return seed; + // sets up variables & makes sure rand_s.range == (0,1) + float32_t ret; + float32_t rand_u; + float32_t rand_v; + float32_t rand_s; + do + { + rand_u = static_cast(random(-1.0, 1.0)); + rand_v = static_cast(random(-1.0, 1.0)); + rand_s = rand_u * rand_u + rand_v * rand_v; + } while ((rand_s == 0) || (rand_s >= 1)); + + // the meat & potatos, and then the mean & standard deviation + // shifting... + ret = static_cast( + rand_u * CMath::sqrt(-2.0 * CMath::log(rand_s) / rand_s)); + ret = std_dev * ret + mean; + return ret; +} + +float64_t CRandom::normal_random(float64_t mean, float64_t std_dev) +{ + float64_t result = normal_distrib(mean, std_dev); + return result; +} + +float32_t CRandom::randn_float() +{ + return static_cast(normal_random(0.0, 1.0)); } diff --git a/src/shogun/mathematics/Random.h b/src/shogun/mathematics/Random.h index 444f41e7a2b..c26da99b3f3 100644 --- a/src/shogun/mathematics/Random.h +++ b/src/shogun/mathematics/Random.h @@ -17,7 +17,6 @@ #include #include #include -#include /* opaque pointers */ struct SFMT_T; @@ -25,7 +24,9 @@ struct DSFMT_T; namespace shogun { + extern uint32_t sg_random_seed; class CLock; + class CMath; /** @brief: Pseudo random number geneartor * * It is based on SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom @@ -267,14 +268,37 @@ namespace shogun */ float64_t std_normal_distrib() const; - /** - * Generate a seed for PRNG - * - * @return entropy for PRNG - */ - static uint32_t generate_seed(); - - virtual const char* get_name() const { return "Random"; } + /** + *Returns a Gaussian or Normal random number. 
+ *Using the polar form of the Box-Muller transform. + *http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form + */ + float32_t normal_random(float32_t mean, float32_t std_dev); + + /** + *Returns a Gaussian or Normal random number. + *Using the polar form of the Box-Muller transform. + *http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form + */ + float64_t normal_random(float64_t mean, float64_t std_dev); + + /* + *Convenience method for generating Standard Normal random numbers + *Float: Mean = 0 and Standard Deviation = 1 + */ + float32_t randn_float(); + + /** + * Generate a seed for PRNG + * + * @return entropy for PRNG + */ + static uint32_t generate_seed(); + + virtual const char* get_name() const + { + return "Random"; + } private: /** initialise the object */ diff --git a/src/shogun/mathematics/Statistics.cpp b/src/shogun/mathematics/Statistics.cpp index 439544e6eed..943ac9cb8a9 100644 --- a/src/shogun/mathematics/Statistics.cpp +++ b/src/shogun/mathematics/Statistics.cpp @@ -325,6 +325,7 @@ SGVector CStatistics::sample_indices(int32_t sample_size, int32_t N) int32_t* idxs=SG_MALLOC(int32_t,N); int32_t i, rnd; int32_t* permuted_idxs=SG_MALLOC(int32_t,sample_size); + auto rng = std::unique_ptr(new CRandom(sg_random_seed)); // reservoir sampling for (i=0; i CStatistics::sample_indices(int32_t sample_size, int32_t N) permuted_idxs[i]=idxs[i]; for (i=sample_size; irandom(1, i); if (rnd CStatistics::sample_from_gaussian(SGVector mean, int32_t dim=mean.vlen; Map mu(mean.vector, mean.vlen); Map c(cov.matrix, cov.num_rows, cov.num_cols); + auto rng = std::unique_ptr(new CRandom(sg_random_seed)); // generate samples, z, from N(0, I), DxN SGMatrix S(dim, N); for( int32_t j=0; jstd_normal_distrib(); // the cholesky factorization c=L*U MatrixXd U=c.llt().matrixU(); @@ -773,6 +775,7 @@ SGMatrix CStatistics::sample_from_gaussian(SGVector mean, typedef SparseMatrix MatrixType; const MatrixType &c=EigenSparseUtil::toEigenSparse(cov); + auto rng = std::unique_ptr(new CRandom(sg_random_seed)); SimplicialLLT llt; @@ -780,7 +783,7 @@ SGMatrix CStatistics::sample_from_gaussian(SGVector mean, SGMatrix S(dim, N); for( int32_t j=0; jstd_normal_distrib(); Map s(S.matrix, S.num_rows, S.num_cols); diff --git a/src/shogun/mathematics/ajd/QDiag.cpp b/src/shogun/mathematics/ajd/QDiag.cpp index c4aed34e7db..c8e2df57616 100644 --- a/src/shogun/mathematics/ajd/QDiag.cpp +++ b/src/shogun/mathematics/ajd/QDiag.cpp @@ -16,6 +16,7 @@ SGMatrix CQDiag::diagonalize(SGNDArray C, SGMatrix V; + auto rng = std::unique_ptr(new CRandom(sg_random_seed)); if (V0.num_rows == N && V0.num_cols == N) { V = V0.clone(); @@ -27,7 +28,7 @@ SGMatrix CQDiag::diagonalize(SGNDArray C, SGMatrixstd_normal_distrib(); } } diff --git a/src/shogun/modelselection/ModelSelectionParameters.cpp b/src/shogun/modelselection/ModelSelectionParameters.cpp index f911c3a2f92..cea8febd272 100644 --- a/src/shogun/modelselection/ModelSelectionParameters.cpp +++ b/src/shogun/modelselection/ModelSelectionParameters.cpp @@ -204,7 +204,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( index_t i = 0; if (is_rand) - i = CMath::random(0, m_values_length-1); + i = m_rng->random(0, m_values_length - 1); Parameter* p=new Parameter(); @@ -217,7 +217,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < param_vect->vlen; j++) { if (is_rand) - i = CMath::random(0, m_values_length-1); + i = m_rng->random(0, m_values_length - 1); (*param_vect)[j] = 
((float64_t*)m_values)[i]; } p->add(param_vect, m_node_name); @@ -230,7 +230,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < *m_vector_length; j++) { if (is_rand) - i = CMath::random(0, m_values_length-1); + i = m_rng->random(0, m_values_length - 1); (param_vect)[j] = ((float64_t*)m_values)[i]; } p->add_vector(¶m_vect, m_vector_length, m_node_name); @@ -243,7 +243,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < param_vect->vlen; j++) { if (is_rand) - i = CMath::random(0, m_values_length-1); + i = m_rng->random(0, m_values_length - 1); (*param_vect)[j] = ((int32_t*)m_values)[i]; } p->add(param_vect, m_node_name); @@ -256,7 +256,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < *m_vector_length; j++) { if (is_rand) - i = CMath::random(0, m_values_length-1); + i = m_rng->random(0, m_values_length - 1); (param_vect)[j] = ((int32_t*)m_values)[i]; } p->add_vector(¶m_vect, m_vector_length, m_node_name); diff --git a/src/shogun/multiclass/LaRank.cpp b/src/shogun/multiclass/LaRank.cpp index 51e2e800d99..da895d1f54d 100644 --- a/src/shogun/multiclass/LaRank.cpp +++ b/src/shogun/multiclass/LaRank.cpp @@ -760,7 +760,7 @@ int32_t CLaRank::add (int32_t x_id, int32_t yi) if (w_opt < prop_min) w_opt = prop_min; w_sum = w_pro + w_rep + w_opt; - float64_t r = CMath::random(0.0, w_sum); + float64_t r = m_rng->random(0.0, w_sum); if (r <= w_pro) { break; diff --git a/src/shogun/multiclass/LaRank.h b/src/shogun/multiclass/LaRank.h index 4036e071413..ddfa33cdf93 100644 --- a/src/shogun/multiclass/LaRank.h +++ b/src/shogun/multiclass/LaRank.h @@ -249,46 +249,49 @@ namespace shogun LaRankPattern & sample () { - ASSERT (!empty ()) - while (true) - { - uint32_t r = CMath::random(uint32_t(0), uint32_t(patterns.size ()-1)); - if (patterns[r].exists ()) - return patterns[r]; - } - return patterns[0]; - } - - uint32_t getPatternRank (int32_t x_id) - { - return x_id2rank[x_id]; - } - - bool isPattern (int32_t x_id) - { - return x_id2rank[x_id] != 0; - } - - LaRankPattern & getPattern (int32_t x_id) - { - uint32_t rank = x_id2rank[x_id]; - return patterns[rank]; - } - - uint32_t maxcount () const - { - return patterns.size (); - } - - LaRankPattern & operator [] (uint32_t i) - { - return patterns[i]; - } - - const LaRankPattern & operator [] (uint32_t i) const - { - return patterns[i]; - } + auto m_rng = + std::unique_ptr(new CRandom(sg_random_seed)); + ASSERT(!empty()) + while (true) + { + uint32_t r = m_rng->random( + uint32_t(0), uint32_t(patterns.size() - 1)); + if (patterns[r].exists()) + return patterns[r]; + } + return patterns[0]; + } + + uint32_t getPatternRank(int32_t x_id) + { + return x_id2rank[x_id]; + } + + bool isPattern(int32_t x_id) + { + return x_id2rank[x_id] != 0; + } + + LaRankPattern& getPattern(int32_t x_id) + { + uint32_t rank = x_id2rank[x_id]; + return patterns[rank]; + } + + uint32_t maxcount() const + { + return patterns.size(); + } + + LaRankPattern& operator[](uint32_t i) + { + return patterns[i]; + } + + const LaRankPattern& operator[](uint32_t i) const + { + return patterns[i]; + } private: std_hash_set < uint32_t >freeidx; diff --git a/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp index 15af37bab79..72119ceceaa 100644 --- a/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp @@ -137,40 +137,39 @@ 
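// Illustrative sketch, not part of this patch: where no m_rng member exists
// (SGVector::random_vector, CMath::permute, the LaRank pattern storage above),
// the hunks create a short-lived generator seeded from the global seed instead.
// Assuming <shogun/mathematics/Random.h>, <memory>, and a hypothetical size n:
const int32_t n = 10; // hypothetical container size
auto rng = std::unique_ptr<CRandom>(new CRandom(sg_random_seed));
int32_t pick = rng->random((int32_t)0, n - 1); // uniform index in [0, n-1]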
float64_t CECOCDiscriminantEncoder::sffs_iteration(float64_t MI, vector if (part1.size() <= 1) return MI; - int32_t iclas = CMath::random(0, int32_t(part1.size()-1)); - int32_t clas = part1[iclas]; - - // move clas from part1 to part2 - for (int32_t i=0; i < m_labels->get_num_labels(); ++i) - { - if (((CMulticlassLabels*) m_labels)->get_int_label(i) == clas) - { - idata1.erase(i); - idata2.insert(i); - } - } - - float64_t new_MI = compute_MI(idata1, idata2); - if (new_MI < MI) - { - part2.push_back(clas); - part1.erase(part1.begin() + iclas); - return new_MI; - } - else - { - // revert changes - for (int32_t i=0; i < m_labels->get_num_labels(); ++i) - { - if (((CMulticlassLabels*) m_labels)->get_int_label(i) == clas) - { - idata2.erase(i); - idata1.insert(i); - } - } - return MI; - } - + int32_t iclas = m_rng->random(0, int32_t(part1.size() - 1)); + int32_t clas = part1[iclas]; + + // move clas from part1 to part2 + for (int32_t i = 0; i < m_labels->get_num_labels(); ++i) + { + if (((CMulticlassLabels*)m_labels)->get_int_label(i) == clas) + { + idata1.erase(i); + idata2.insert(i); + } + } + + float64_t new_MI = compute_MI(idata1, idata2); + if (new_MI < MI) + { + part2.push_back(clas); + part1.erase(part1.begin() + iclas); + return new_MI; + } + else + { + // revert changes + for (int32_t i = 0; i < m_labels->get_num_labels(); ++i) + { + if (((CMulticlassLabels*)m_labels)->get_int_label(i) == clas) + { + idata2.erase(i); + idata1.insert(i); + } + } + return MI; + } } float64_t CECOCDiscriminantEncoder::compute_MI(const std::set& idata1, const std::set& idata2) diff --git a/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp index bf6d00a6eb0..cb5ffa51343 100644 --- a/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp @@ -60,59 +60,61 @@ SGMatrix CECOCRandomDenseEncoder::create_codebook(int32_t num_classes) { for (int32_t j=0; j < num_classes; ++j) { - float64_t randval = CMath::random(0.0, 1.0); - if (randval > m_pposone) - codebook(i, j) = -1; - else - codebook(i, j) = +1; - } - } - - bool valid = true; - for (int32_t i=0; i < codelen; ++i) - { - bool p1_occur = false, n1_occur = false; - for (int32_t j=0; j < num_classes; ++j) - if (codebook(i, j) == 1) - p1_occur = true; - else if (codebook(i, j) == -1) - n1_occur = true; - - if (!p1_occur || !n1_occur) - { - valid = false; - break; - } - } - - if (valid) - { - // see if this is a better codebook - // compute the minimum pairwise code distance - int32_t min_dist = std::numeric_limits::max(); - for (int32_t i=0; i < num_classes; ++i) - { - for (int32_t j=i+1; j < num_classes; ++j) - { - int32_t dist = CECOCUtil::hamming_distance(codebook.get_column_vector(i), - codebook.get_column_vector(j), codelen); - if (dist < min_dist) - min_dist = dist; - } - } - - if (min_dist > best_dist) - { - best_dist = min_dist; - std::copy(codebook.matrix, codebook.matrix + codelen*num_classes, - best_codebook.matrix); - } - } - - if (++n_iter >= m_maxiter) - if (best_dist > 0) // already obtained a good codebook - break; - } - - return best_codebook; + float64_t randval = m_rng->random(0.0, 1.0); + if (randval > m_pposone) + codebook(i, j) = -1; + else + codebook(i, j) = +1; + } + } + + bool valid = true; + for (int32_t i = 0; i < codelen; ++i) + { + bool p1_occur = false, n1_occur = false; + for (int32_t j = 0; j < num_classes; ++j) + if (codebook(i, j) == 1) + p1_occur = true; + else if (codebook(i, j) == -1) + n1_occur = true; + + if 
(!p1_occur || !n1_occur) + { + valid = false; + break; + } + } + + if (valid) + { + // see if this is a better codebook + // compute the minimum pairwise code distance + int32_t min_dist = std::numeric_limits::max(); + for (int32_t i = 0; i < num_classes; ++i) + { + for (int32_t j = i + 1; j < num_classes; ++j) + { + int32_t dist = CECOCUtil::hamming_distance( + codebook.get_column_vector(i), + codebook.get_column_vector(j), codelen); + if (dist < min_dist) + min_dist = dist; + } + } + + if (min_dist > best_dist) + { + best_dist = min_dist; + std::copy( + codebook.matrix, codebook.matrix + codelen * num_classes, + best_codebook.matrix); + } + } + + if (++n_iter >= m_maxiter) + if (best_dist > 0) // already obtained a good codebook + break; + } + + return best_codebook; } diff --git a/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp index 48922903738..9af00bda2e3 100644 --- a/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp @@ -70,55 +70,57 @@ SGMatrix CECOCRandomSparseEncoder::create_codebook(int32_t num_classes) for (int32_t j=0; j < num_classes; ++j) random_sel[j] = j; std::random_shuffle(random_sel.begin(), random_sel.end()); - if (CMath::random(0.0, 1.0) > 0.5) - { - codebook(i, random_sel[0]) = +1; - codebook(i, random_sel[1]) = -1; - } - else - { - codebook(i, random_sel[0]) = -1; - codebook(i, random_sel[1]) = +1; - } - - // assign the remaining positions - for (int32_t j=2; j < num_classes; ++j) - { - float64_t randval = CMath::random(0.0, 1.0); - if (randval > m_pzero) - { - if (randval > m_pzero+m_pposone) - codebook(i, random_sel[j]) = -1; - else - codebook(i, random_sel[j]) = +1; - } - } - } - - // see if this is a better codebook - // compute the minimum pairwise code distance - int32_t min_dist = std::numeric_limits::max(); - for (int32_t i=0; i < num_classes; ++i) - { - for (int32_t j=i+1; j < num_classes; ++j) - { - int32_t dist = CECOCUtil::hamming_distance(codebook.get_column_vector(i), - codebook.get_column_vector(j), codelen); - if (dist < min_dist) - min_dist = dist; - } - } - - if (min_dist > best_dist) - { - best_dist = min_dist; - std::copy(codebook.matrix, codebook.matrix + codelen*num_classes, - best_codebook.matrix); - } - - if (++n_iter >= m_maxiter) - break; - } - - return best_codebook; + if (m_rng->random(0.0, 1.0) > 0.5) + { + codebook(i, random_sel[0]) = +1; + codebook(i, random_sel[1]) = -1; + } + else + { + codebook(i, random_sel[0]) = -1; + codebook(i, random_sel[1]) = +1; + } + + // assign the remaining positions + for (int32_t j = 2; j < num_classes; ++j) + { + float64_t randval = m_rng->random(0.0, 1.0); + if (randval > m_pzero) + { + if (randval > m_pzero + m_pposone) + codebook(i, random_sel[j]) = -1; + else + codebook(i, random_sel[j]) = +1; + } + } + } + + // see if this is a better codebook + // compute the minimum pairwise code distance + int32_t min_dist = std::numeric_limits::max(); + for (int32_t i = 0; i < num_classes; ++i) + { + for (int32_t j = i + 1; j < num_classes; ++j) + { + int32_t dist = CECOCUtil::hamming_distance( + codebook.get_column_vector(i), + codebook.get_column_vector(j), codelen); + if (dist < min_dist) + min_dist = dist; + } + } + + if (min_dist > best_dist) + { + best_dist = min_dist; + std::copy( + codebook.matrix, codebook.matrix + codelen * num_classes, + best_codebook.matrix); + } + + if (++n_iter >= m_maxiter) + break; + } + + return best_codebook; } diff --git 
a/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp b/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp index 27ec61ab696..ef2bf6106d5 100644 --- a/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp +++ b/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp @@ -15,7 +15,7 @@ using namespace shogun; bool CRandomConditionalProbabilityTree::which_subtree(bnode_t *node, SGVector ex) { - if (CMath::random(0.0, 1.0) > 0.5) + if (m_rng->random(0.0, 1.0) > 0.5) return true; return false; } diff --git a/src/shogun/neuralnets/DeepBeliefNetwork.cpp b/src/shogun/neuralnets/DeepBeliefNetwork.cpp index 2a0cdcd4762..2a3605a1a63 100644 --- a/src/shogun/neuralnets/DeepBeliefNetwork.cpp +++ b/src/shogun/neuralnets/DeepBeliefNetwork.cpp @@ -92,7 +92,7 @@ void CDeepBeliefNetwork::initialize_neural_network(float64_t sigma) m_params = SGVector(m_num_params); for (int32_t i=0; inormal_random(0.0, sigma); pt_cd_num_steps = SGVector(m_num_layers-1); pt_cd_num_steps.set_const(1); @@ -353,7 +353,7 @@ void CDeepBeliefNetwork::reset_chain() SGMatrix s = m_states[m_num_layers-2]; for (int32_t i=0; i 0.5; + s[i] = m_rng->random(0.0, 1.0) > 0.5; } CNeuralNetwork* CDeepBeliefNetwork::convert_to_neural_network( @@ -435,7 +435,7 @@ void CDeepBeliefNetwork::down_step(int32_t index, SGVector< float64_t > params, { int32_t len = m_layer_sizes->element(index)*m_batch_size; for (int32_t i=0; irandom(0.0, 1.0) < result[i]; } } @@ -465,7 +465,7 @@ void CDeepBeliefNetwork::up_step(int32_t index, SGVector< float64_t > params, if (sample_states && index>0) { for (int32_t i=0; irandom(0.0, 1.0) < result[i]; } } diff --git a/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp b/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp index 5f77349ad98..0dd103eb131 100644 --- a/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp +++ b/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp @@ -139,14 +139,16 @@ void CNeuralConvolutionalLayer::initialize_parameters(SGVector parame { if (m_initialization_mode == NORMAL) { - map_params[i] = CMath::normal_random(0.0, sigma); + map_params[i] = m_rng->normal_random(0.0, sigma); // turn off regularization for the bias, on for the rest of the parameters map_param_regularizable[i] = (i != 0); } else // for the case when m_initialization_mode = RE_NORMAL { - map_params[i] = CMath::normal_random(0.0, - CMath::sqrt(2.0/(m_input_height*m_input_width*m_input_num_channels))); + map_params[i] = m_rng->normal_random( + 0.0, CMath::sqrt( + 2.0 / (m_input_height * m_input_width * + m_input_num_channels))); // initialize b=0 map_param_regularizable[i] = 0; } diff --git a/src/shogun/neuralnets/NeuralInputLayer.cpp b/src/shogun/neuralnets/NeuralInputLayer.cpp index b9191ac7090..70e77d27999 100644 --- a/src/shogun/neuralnets/NeuralInputLayer.cpp +++ b/src/shogun/neuralnets/NeuralInputLayer.cpp @@ -73,7 +73,7 @@ void CNeuralInputLayer::compute_activations(SGMatrix< float64_t > inputs) { int32_t len = m_num_neurons*m_batch_size; for (int32_t k=0; knormal_random(0.0, gaussian_noise); } } diff --git a/src/shogun/neuralnets/NeuralLayer.cpp b/src/shogun/neuralnets/NeuralLayer.cpp index 70a40a0becc..ea84fa0ab73 100644 --- a/src/shogun/neuralnets/NeuralLayer.cpp +++ b/src/shogun/neuralnets/NeuralLayer.cpp @@ -96,7 +96,7 @@ void CNeuralLayer::dropout_activations() int32_t len = m_num_neurons*m_batch_size; for (int32_t i=0; i= dropout_prop; + m_dropout_mask[i] = m_rng->random(0.0, 1.0) >= dropout_prop; m_activations[i] *= m_dropout_mask[i]; } } diff --git 
a/src/shogun/neuralnets/NeuralLinearLayer.cpp b/src/shogun/neuralnets/NeuralLinearLayer.cpp index 9896b783d12..4b3fd070e3e 100644 --- a/src/shogun/neuralnets/NeuralLinearLayer.cpp +++ b/src/shogun/neuralnets/NeuralLinearLayer.cpp @@ -65,7 +65,7 @@ void CNeuralLinearLayer::initialize_parameters(SGVector parameters, for (int32_t i=0; inormal_random(0.0, sigma); // turn regularization off for the biases, on for the weights parameter_regularizable[i] = (i>=m_num_neurons); diff --git a/src/shogun/neuralnets/NeuralNetwork.cpp b/src/shogun/neuralnets/NeuralNetwork.cpp index 8b04e788a44..3ff8ce7d6d5 100644 --- a/src/shogun/neuralnets/NeuralNetwork.cpp +++ b/src/shogun/neuralnets/NeuralNetwork.cpp @@ -561,12 +561,12 @@ float64_t CNeuralNetwork::check_gradients(float64_t approx_epsilon, float64_t s) SGMatrix y(get_num_outputs(),1); for (int32_t i=0; irandom(0.0, 1.0); // the outputs are set up in the form of a probability distribution (in case // that is required by the output layer, i.e softmax) for (int32_t i=0; irandom(0.0, 1.0); float64_t y_sum = SGVector::sum(y.matrix, y.num_rows); for (int32_t i=0; i(m_num_params); for (int32_t i=0; inormal_random(0.0, sigma); } void CRBM::set_batch_size(int32_t batch_size) @@ -266,7 +266,7 @@ void CRBM::reset_chain() { for (int32_t i=0; i 0.5; + visible_state(i, j) = m_rng->random(0.0, 1.0) > 0.5; } float64_t CRBM::free_energy(SGMatrix< float64_t > visible, SGMatrix< float64_t > buffer) @@ -431,8 +431,7 @@ float64_t CRBM::pseudo_likelihood(SGMatrix< float64_t > visible, SGVector indices(m_batch_size); for (int32_t i=0; irandom(0, m_num_visible - 1); float64_t f1 = free_energy(visible, buffer); @@ -520,7 +519,7 @@ void CRBM::sample_hidden(SGMatrix< float64_t > mean, SGMatrix< float64_t > resul { int32_t length = result.num_rows*result.num_cols; for (int32_t i=0; irandom(0.0, 1.0) < mean[i]; } void CRBM::sample_visible(SGMatrix< float64_t > mean, SGMatrix< float64_t > result) @@ -540,7 +539,8 @@ void CRBM::sample_visible(int32_t index, { for (int32_t i=0; ielement(index); i++) for (int32_t j=0; jrandom(0.0, 1.0) < mean(i + offset, j); } if (m_visible_group_types->element(index)==RBMVUT_SOFTMAX) @@ -551,7 +551,7 @@ void CRBM::sample_visible(int32_t index, for (int32_t j=0; jrandom(0.0, 1.0); float64_t sum = 0; for (int32_t i=0; ielement(index); i++) { diff --git a/src/shogun/optimization/liblinear/shogun_liblinear.cpp b/src/shogun/optimization/liblinear/shogun_liblinear.cpp index 1489d84c011..5f24dc7e2d2 100644 --- a/src/shogun/optimization/liblinear/shogun_liblinear.cpp +++ b/src/shogun/optimization/liblinear/shogun_liblinear.cpp @@ -512,13 +512,13 @@ void Solver_MCSVM_CS::solve() } state->inited = true; } - + auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); while(iter < max_iter && !CSignal::cancel_computations()) { double stopping = -CMath::INFTY; for(i=0;irandom(i, active_size - 1); CMath::swap(index[i], index[j]); } for(s=0;srandom((float64_t)0.0, 2 * pi); } for (int32_t i = 0; i < cur_dim_feature_space; ++i) { @@ -241,8 +241,8 @@ bool CRandomFourierGaussPreproc::init_randomcoefficients() { float64_t s = 2; while ((s >= 1) ) { // Marsaglia polar for gaussian - x1 = CMath::random((float64_t) -1.0, (float64_t) 1.0); - x2 = CMath::random((float64_t) -1.0, (float64_t) 1.0); + x1 = m_rng->random((float64_t)-1.0, (float64_t)1.0); + x2 = m_rng->random((float64_t)-1.0, (float64_t)1.0); s=x1*x1+x2*x2; } diff --git a/src/shogun/regression/svr/LibLinearRegression.cpp b/src/shogun/regression/svr/LibLinearRegression.cpp index 47d55579406..f559d4ad657 100644 
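// Illustrative sketch, not part of this patch: the liblinear-style solvers
// (shogun_liblinear.cpp above, LibLinearRegression.cpp and LibLinearMTL.cpp
// below) shuffle their working set through the instance generator. A minimal
// Fisher-Yates style version using the hunk's names (index, active_size,
// m_rng); the in-tree loops may differ in small details:
for (int32_t i = 0; i < active_size; i++)
{
    int32_t j = m_rng->random(i, active_size - 1); // uniform in [i, active_size-1]
    CMath::swap(index[i], index[j]);               // swap the pick to the front
}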
--- a/src/shogun/regression/svr/LibLinearRegression.cpp +++ b/src/shogun/regression/svr/LibLinearRegression.cpp @@ -215,7 +215,7 @@ void CLibLinearRegression::solve_l2r_l1l2_svr(SGVector& w, const libl for(i=0; irandom(i, active_size - 1); CMath::swap(index[i], index[j]); } diff --git a/src/shogun/statistical_testing/QuadraticTimeMMD.cpp b/src/shogun/statistical_testing/QuadraticTimeMMD.cpp index a6a2c8c1ca7..98db8ed1ed2 100644 --- a/src/shogun/statistical_testing/QuadraticTimeMMD.cpp +++ b/src/shogun/statistical_testing/QuadraticTimeMMD.cpp @@ -407,7 +407,7 @@ SGVector CQuadraticTimeMMD::Self::sample_null_spectrum() float64_t null_sample=0; for (index_t j=0; jstd_normal_distrib(); float64_t multiple=CMath::sq(z_j); /* take largest EV, scale by 1/(m+n) on the fly and take abs value*/ diff --git a/src/shogun/statistical_testing/internals/DataFetcher.cpp b/src/shogun/statistical_testing/internals/DataFetcher.cpp index 13e19373e7a..1c1df42d66a 100644 --- a/src/shogun/statistical_testing/internals/DataFetcher.cpp +++ b/src/shogun/statistical_testing/internals/DataFetcher.cpp @@ -119,7 +119,7 @@ void DataFetcher::shuffle_features() } std::iota(shuffle_subset.data(), shuffle_subset.data()+shuffle_subset.size(), 0); CMath::permute(shuffle_subset); -// shuffle_subset.display_vector("shuffle_subset"); + // shuffle_subset.display_vector("shuffle_subset"); SG_SDEBUG("Shuffling %d feature vectors\n", size); m_samples->add_subset(shuffle_subset); diff --git a/src/shogun/statistical_testing/kernelselection/internals/MaxCrossValidation.cpp b/src/shogun/statistical_testing/kernelselection/internals/MaxCrossValidation.cpp index 87eaf01ad1b..a6810a807fa 100644 --- a/src/shogun/statistical_testing/kernelselection/internals/MaxCrossValidation.cpp +++ b/src/shogun/statistical_testing/kernelselection/internals/MaxCrossValidation.cpp @@ -97,14 +97,11 @@ void MaxCrossValidation::compute_measures() auto Ny=estimator->get_num_samples_q(); auto num_null_samples=estimator->get_num_null_samples(); auto stype=estimator->get_statistic_type(); - auto seed = estimator->get_random_seed(); CrossValidationMMD compute(Nx, Ny, num_folds, num_null_samples); compute.m_stype=stype; compute.m_alpha=alpha; compute.m_num_runs=num_runs; compute.m_rejections=rejections; - compute.m_kfold_x->set_seed(seed); - compute.m_kfold_y->set_seed(seed); if (kernel_mgr.same_distance_type()) { CDistance* distance=kernel_mgr.get_distance_instance(); diff --git a/src/shogun/structure/StochasticSOSVM.cpp b/src/shogun/structure/StochasticSOSVM.cpp index e32bca66603..d22f3d4388d 100644 --- a/src/shogun/structure/StochasticSOSVM.cpp +++ b/src/shogun/structure/StochasticSOSVM.cpp @@ -46,13 +46,11 @@ void CStochasticSOSVM::init() SG_ADD(&m_num_iter, "num_iter", "Number of iterations", MS_NOT_AVAILABLE); SG_ADD(&m_do_weighted_averaging, "do_weighted_averaging", "Do weighted averaging", MS_NOT_AVAILABLE); SG_ADD(&m_debug_multiplier, "debug_multiplier", "Debug multiplier", MS_NOT_AVAILABLE); - SG_ADD(&m_rand_seed, "rand_seed", "Random seed", MS_NOT_AVAILABLE); m_lambda = 1.0; m_num_iter = 50; m_do_weighted_averaging = true; m_debug_multiplier = 0; - m_rand_seed = 1; } CStochasticSOSVM::~CStochasticSOSVM() @@ -108,8 +106,6 @@ bool CStochasticSOSVM::train_machine(CFeatures* data) m_debug_multiplier = 100; } - m_rng->set_seed(m_rand_seed); - // Main loop int32_t k = 0; for (int32_t pi = 0; pi < m_num_iter; ++pi) diff --git a/src/shogun/structure/TwoStateModel.cpp b/src/shogun/structure/TwoStateModel.cpp index b2e56b11b87..b8a585e8436 100644 --- 
a/src/shogun/structure/TwoStateModel.cpp +++ b/src/shogun/structure/TwoStateModel.cpp @@ -269,19 +269,23 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, SGVector< int32_t > ll(num_exm*exm_len); ll.zero(); int32_t rnb, rl, rp; - + auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); for ( int32_t i = 0 ; i < num_exm ; ++i) { SGVector< int32_t > lab(exm_len); lab.zero(); - rnb = num_blocks[0] + CMath::ceil((num_blocks[1]-num_blocks[0])* - CMath::random(0.0, 1.0)) - 1; + rnb = num_blocks[0] + + CMath::ceil( + (num_blocks[1] - num_blocks[0]) * m_rng->random(0.0, 1.0)) - + 1; for ( int32_t j = 0 ; j < rnb ; ++j ) { - rl = block_len[0] + CMath::ceil((block_len[1]-block_len[0])* - CMath::random(0.0, 1.0)) - 1; - rp = CMath::ceil((exm_len-rl)*CMath::random(0.0, 1.0)); + rl = block_len[0] + + CMath::ceil( + (block_len[1] - block_len[0]) * m_rng->random(0.0, 1.0)) - + 1; + rp = CMath::ceil((exm_len - rl) * m_rng->random(0.0, 1.0)); for ( int32_t idx = rp-1 ; idx < rp+rl ; ++idx ) { @@ -321,7 +325,8 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, int32_t idx = i*signal.num_cols; for ( int32_t j = 0 ; j < signal.num_cols ; ++j ) - signal[idx++] = lf[j] + noise_std*CMath::normal_random((float64_t)0.0, 1.0); + signal[idx++] = + lf[j] + noise_std * m_rng->normal_random((float64_t)0.0, 1.0); } // Substitute some features by pure noise @@ -329,7 +334,8 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, { int32_t idx = i*signal.num_cols; for ( int32_t j = 0 ; j < signal.num_cols ; ++j ) - signal[idx++] = noise_std*CMath::normal_random((float64_t)0.0, 1.0); + signal[idx++] = + noise_std * m_rng->normal_random((float64_t)0.0, 1.0); } CMatrixFeatures< float64_t >* features = diff --git a/src/shogun/transfer/multitask/LibLinearMTL.cpp b/src/shogun/transfer/multitask/LibLinearMTL.cpp index d354e7920b2..d6379799dce 100644 --- a/src/shogun/transfer/multitask/LibLinearMTL.cpp +++ b/src/shogun/transfer/multitask/LibLinearMTL.cpp @@ -266,7 +266,7 @@ void CLibLinearMTL::solve_l2r_l1l2_svc(const liblinear_problem *prob, double eps for (i=0; irandom(i, active_size - 1); CMath::swap(index[i], index[j]); } diff --git a/tests/unit/base/SGObject_unittest.cc b/tests/unit/base/SGObject_unittest.cc index 6248022fb57..57bbae989e2 100644 --- a/tests/unit/base/SGObject_unittest.cc +++ b/tests/unit/base/SGObject_unittest.cc @@ -37,9 +37,10 @@ TEST(SGObject,equals_same) TEST(SGObject,equals_NULL_parameter) { + auto m_rng = std::unique_ptr(new CRandom()); SGMatrix data(3,10); for (index_t i=0; istd_normal_distrib(); CDenseFeatures* feats=new CDenseFeatures(data); CGaussianKernel* kernel=new CGaussianKernel(); @@ -151,10 +152,11 @@ TEST(SGObject,equals_complex_equal) SGMatrix X(1, n); SGMatrix X_test(1, n); SGVector Y(n); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(0.0, x_range); X_test[i]=(float64_t)i / n*x_range; Y[i]=CMath::sin(X[i]); } diff --git a/tests/unit/base/Serialization_unittest.cc b/tests/unit/base/Serialization_unittest.cc index 9f0a889bb51..40014ae54ec 100644 --- a/tests/unit/base/Serialization_unittest.cc +++ b/tests/unit/base/Serialization_unittest.cc @@ -23,6 +23,7 @@ TEST(Serialization,multiclass_labels) index_t n_class=3; CMulticlassLabels* labels=new CMulticlassLabels(); + auto m_rng = std::unique_ptr(new CRandom()); SGVector lab(n); for (index_t i=0; iallocate_confidences_for(n_class); SGVector conf(n_class); for (index_t i=0; istd_normal_distrib(); for (index_t i=0; 
iset_multiclass_confidences(i, conf); @@ -75,7 +76,7 @@ TEST(Serialization,multiclass_labels) TEST(Serialization, liblinear) { index_t num_samples = 50; - CMath::init_random(13); + set_global_seed(13); SGMatrix data = CDataGenerator::generate_gaussians(num_samples, 2, 2); CDenseFeatures features(data); diff --git a/tests/unit/classifier/svm/LibLinear_unittest.cc b/tests/unit/classifier/svm/LibLinear_unittest.cc index ec1999d306d..93e63d3d80f 100644 --- a/tests/unit/classifier/svm/LibLinear_unittest.cc +++ b/tests/unit/classifier/svm/LibLinear_unittest.cc @@ -25,7 +25,7 @@ void generate_data_l1(CDenseFeatures* &train_feats, CBinaryLabels* &ground_truth) { index_t num_samples = 50; - CMath::init_random(5); + set_global_seed(5); SGMatrix data = CDataGenerator::generate_gaussians(num_samples, 2, 2); CDenseFeatures features(data); @@ -65,7 +65,7 @@ void generate_data_l2(CDenseFeatures* &train_feats, CBinaryLabels* &ground_truth) { index_t num_samples = 50; - CMath::init_random(5); + set_global_seed(5); SGMatrix data = CDataGenerator::generate_gaussians(num_samples, 2, 2); CDenseFeatures features(data); diff --git a/tests/unit/classifier/svm/SVMOcas_unittest.cc b/tests/unit/classifier/svm/SVMOcas_unittest.cc index 25505062d76..cf8c62a47fe 100644 --- a/tests/unit/classifier/svm/SVMOcas_unittest.cc +++ b/tests/unit/classifier/svm/SVMOcas_unittest.cc @@ -18,7 +18,7 @@ extern LinearTestEnvironment* linear_test_env; #ifdef HAVE_LAPACK TEST(SVMOcasTest,train) { - CMath::init_random(5); + set_global_seed(5); std::shared_ptr mockData = linear_test_env->getBinaryLabelData(); diff --git a/tests/unit/converter/Isomap_unittest.cc b/tests/unit/converter/Isomap_unittest.cc index dedb67f3fac..21d387ed4c6 100644 --- a/tests/unit/converter/Isomap_unittest.cc +++ b/tests/unit/converter/Isomap_unittest.cc @@ -200,13 +200,14 @@ void check_similarity_of_sets(const std::set& first_set,const std::set< void fill_matrix_with_test_data(SGMatrix& matrix_to_fill) { index_t num_cols = matrix_to_fill.num_cols, num_rows = matrix_to_fill.num_rows; + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < num_cols; ++i) { for (index_t j = 0; j < num_rows - 1; ++j) { matrix_to_fill(j, i) = i; } - matrix_to_fill(num_rows - 1, i) = CMath::randn_double(); + matrix_to_fill(num_rows - 1, i) = m_rng->std_normal_distrib(); } } diff --git a/tests/unit/distribution/MixtureModel_unittest.cc b/tests/unit/distribution/MixtureModel_unittest.cc index bc07825153b..c0ef987ea64 100644 --- a/tests/unit/distribution/MixtureModel_unittest.cc +++ b/tests/unit/distribution/MixtureModel_unittest.cc @@ -40,12 +40,12 @@ using namespace shogun; TEST(MixtureModel,gaussian_mixture_model) { + auto m_rng = std::unique_ptr(new CRandom(2)); SGMatrix data(1,400); - CMath::init_random(2); for (int32_t i=0;i<100;i++) - data(0,i)=CMath::randn_double(); + data(0, i) = m_rng->std_normal_distrib(); for (int32_t i=100;i<400;i++) - data(0,i)=CMath::randn_double()+10; + data(0, i) = m_rng->std_normal_distrib() + 10; CDenseFeatures* feats=new CDenseFeatures(data); diff --git a/tests/unit/environments/LinearTestEnvironment.h b/tests/unit/environments/LinearTestEnvironment.h index d349034391a..46daacac785 100644 --- a/tests/unit/environments/LinearTestEnvironment.h +++ b/tests/unit/environments/LinearTestEnvironment.h @@ -45,6 +45,7 @@ class LinearTestEnvironment : public ::testing::Environment public: virtual void SetUp() { + set_global_seed(12345); mBinaryLabelData = std::shared_ptr( new GaussianCheckerboard(100, 2, 2)); } diff --git 
a/tests/unit/environments/MultiLabelTestEnvironment.h b/tests/unit/environments/MultiLabelTestEnvironment.h index 12e50da1897..083cbe7ff0e 100644 --- a/tests/unit/environments/MultiLabelTestEnvironment.h +++ b/tests/unit/environments/MultiLabelTestEnvironment.h @@ -45,6 +45,7 @@ class MultiLabelTestEnvironment : public ::testing::Environment public: virtual void SetUp() { + set_global_seed(12345); mMulticlassFixture = std::shared_ptr( new GaussianCheckerboard(100, 3, 3)); } diff --git a/tests/unit/evaluation/CrossValidation_multithread_unittest.cc b/tests/unit/evaluation/CrossValidation_multithread_unittest.cc index 0c9d6020aa7..1f06d5be6d0 100644 --- a/tests/unit/evaluation/CrossValidation_multithread_unittest.cc +++ b/tests/unit/evaluation/CrossValidation_multithread_unittest.cc @@ -48,10 +48,11 @@ using namespace shogun; void generate_data(SGMatrix& mat, SGVector &lab) { int32_t num=lab.size(); - + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib() * 4) + : 100 + (m_rng->std_normal_distrib() * 4); mat(1,i)=i; } diff --git a/tests/unit/evaluation/SplittingStrategy_unittest.cc b/tests/unit/evaluation/SplittingStrategy_unittest.cc index 0c71c4c89ca..ae42124ed10 100644 --- a/tests/unit/evaluation/SplittingStrategy_unittest.cc +++ b/tests/unit/evaluation/SplittingStrategy_unittest.cc @@ -23,19 +23,20 @@ TEST(SplittingStrategy,standard) index_t num_labels; index_t num_subsets; index_t runs=100; + auto m_rng = std::unique_ptr(new CRandom()); while (runs-->0) { fold_sizes=0; - num_labels=CMath::random(10, 150); - num_subsets=CMath::random(1, 5); + num_labels = m_rng->random(10, 150); + num_subsets = m_rng->random(1, 5); index_t desired_size=CMath::round( (float64_t)num_labels/(float64_t)num_subsets); /* build labels */ CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, CMath::random(-10.0, 10.0)); + labels->set_label(i, m_rng->random(-10.0, 10.0)); /* build splitting strategy */ CCrossValidationSplitting* splitting= @@ -89,18 +90,19 @@ TEST(SplittingStrategy,stratified_subsets_disjoint_cover) { index_t num_labels, num_classes, num_subsets, fold_sizes; index_t runs=50; + auto m_rng = std::unique_ptr(new CRandom()); while (runs-->0) { fold_sizes=0; - num_labels=CMath::random(11, 100); - num_classes=CMath::random(2, 10); - num_subsets=CMath::random(1, 10); + num_labels = m_rng->random(11, 100); + num_classes = m_rng->random(2, 10); + num_subsets = m_rng->random(1, 10); /* build labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); for (index_t i=0; iset_label(i, CMath::random()%num_classes); + labels->set_label(i, m_rng->random_64() % num_classes); SGVector classes=labels->get_unique_labels(); @@ -168,17 +170,18 @@ TEST(SplittingStrategy,stratified_subset_label_ratio) { index_t num_labels, num_classes, num_subsets; index_t runs=50; + auto m_rng = std::unique_ptr(new CRandom()); while (runs-->0) { - num_labels=CMath::random(11, 100); - num_classes=CMath::random(2, 10); - num_subsets=CMath::random(1, 10); + num_labels = m_rng->random(11, 100); + num_classes = m_rng->random(2, 10); + num_subsets = m_rng->random(1, 10); /* build labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); for (index_t i=0; iset_label(i, CMath::random()%num_classes); + labels->set_label(i, m_rng->random_64() % num_classes); /*No. 
of labels belonging to one class*/ SGVector class_labels(num_classes); @@ -241,16 +244,17 @@ TEST(SplittingStrategy,LOO) { index_t num_labels, fold_sizes; index_t runs=10; + auto m_rng = std::unique_ptr(new CRandom()); while (runs-->0) { fold_sizes=0; - num_labels=CMath::random(10, 50); + num_labels = m_rng->random(10, 50); /* build labels */ CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, CMath::random(-10.0, 10.0)); + labels->set_label(i, m_rng->random(-10.0, 10.0)); /* build Leave one out splitting strategy */ CLOOCrossValidationSplitting* splitting= diff --git a/tests/unit/features/CombinedFeatures_unittest.cc b/tests/unit/features/CombinedFeatures_unittest.cc index aa49a22f1da..60b557b6163 100644 --- a/tests/unit/features/CombinedFeatures_unittest.cc +++ b/tests/unit/features/CombinedFeatures_unittest.cc @@ -72,12 +72,12 @@ TEST(CombinedFeaturesTest,create_merged_copy) SGMatrix data_1(dim,n_1); for (index_t i=0; i(new CRandom()); + // data_1.display_matrix("data_1"); SGMatrix data_2(dim,n_2); for (index_t i=0; istd_normal_distrib(); // data_1.display_matrix("data_2"); diff --git a/tests/unit/features/DenseFeatures_unittest.cc b/tests/unit/features/DenseFeatures_unittest.cc index 07191e84519..d16d24e263d 100644 --- a/tests/unit/features/DenseFeatures_unittest.cc +++ b/tests/unit/features/DenseFeatures_unittest.cc @@ -45,12 +45,13 @@ TEST(DenseFeaturesTest,create_merged_copy) SGMatrix data_1(dim,n_1); for (index_t i=0; i(new CRandom()); //data_1.display_matrix("data_1"); SGMatrix data_2(dim,n_2); for (index_t i=0; istd_normal_distrib(); //data_2.display_matrix("data_2"); @@ -131,10 +132,11 @@ TEST(DenseFeaturesTest, copy_dimension_subset) data.matrix[i]=i; CDenseFeatures* features=new CDenseFeatures(data); + auto m_rng = std::unique_ptr(new CRandom()); SGVector dims(dim/2); for (index_t i=0; irandom(0, dim - 1); CDenseFeatures* f_reduced=(CDenseFeatures*) features->copy_dimension_subset(dims); @@ -161,16 +163,17 @@ TEST(DenseFeaturesTest, copy_dimension_subset_with_subsets) data.matrix[i]=i; CDenseFeatures* features=new CDenseFeatures(data); + auto m_rng = std::unique_ptr(new CRandom()); SGVector inds(n/2); for (index_t i=0; irandom(0, n - 1); features->add_subset(inds); SGVector dims(dim/2); for (index_t i=0; irandom(0, dim - 1); CDenseFeatures* f_reduced=(CDenseFeatures*) features->copy_dimension_subset(dims); diff --git a/tests/unit/features/HashedDenseFeatures_unittest.cc b/tests/unit/features/HashedDenseFeatures_unittest.cc index 68ca8cd22aa..6397415a144 100644 --- a/tests/unit/features/HashedDenseFeatures_unittest.cc +++ b/tests/unit/features/HashedDenseFeatures_unittest.cc @@ -320,10 +320,11 @@ TEST(HashedDenseFeaturesTest, dense_comparison) int32_t hashing_dim = 300; CHashedDenseFeatures* h_feats = new CHashedDenseFeatures(data, hashing_dim); CDenseFeatures* d_feats = new CDenseFeatures(data); + auto m_rng = std::unique_ptr(new CRandom()); SGVector dense_vec(hashing_dim); for (index_t i=0; irandom(-hashing_dim, hashing_dim); for (index_t i=0; idot(i, h_feats, i), d_feats->dot(i, d_feats, i)); diff --git a/tests/unit/features/HashedDocDotFeatures_unittest.cc b/tests/unit/features/HashedDocDotFeatures_unittest.cc index d86dd081e5a..a9ffd88a3cb 100644 --- a/tests/unit/features/HashedDocDotFeatures_unittest.cc +++ b/tests/unit/features/HashedDocDotFeatures_unittest.cc @@ -77,7 +77,7 @@ TEST(HashedDocDotFeaturesTest, dense_dot_test) const char* doc_1 = "You're never too old to rock and roll, if you're too young to die"; const char* doc_2 = 
"Give me some rope, tie me to dream, give me the hope to run out of steam"; const char* doc_3 = "Thank you Jack Daniels, Old Number Seven, Tennessee Whiskey got me drinking in heaven"; - + auto m_rng = std::unique_ptr(new CRandom()); SGString string_1(65); for (index_t i=0; i<65; i++) string_1.string[i] = doc_1[i]; @@ -112,7 +112,7 @@ TEST(HashedDocDotFeaturesTest, dense_dot_test) SGVector vec(dimension); for (index_t i=0; irandom(-dimension, dimension); for (index_t i=0; i<3; i++) { diff --git a/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc b/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc index 6e79b9b2a68..e74a7ab3662 100644 --- a/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc +++ b/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc @@ -81,7 +81,7 @@ TEST(StreamingHashedDocFeaturesTest, dot_tests) const char* doc_1 = "You're never too old to rock and roll, if you're too young to die"; const char* doc_2 = "Give me some rope, tie me to dream, give me the hope to run out of steam"; const char* doc_3 = "Thank you Jack Daniels, Old Number Seven, Tennessee Whiskey got me drinking in heaven"; - + auto m_rng = std::unique_ptr(new CRandom()); SGString string_1(65); for (index_t i=0; i<65; i++) string_1.string[i] = doc_1[i]; @@ -112,7 +112,7 @@ TEST(StreamingHashedDocFeaturesTest, dot_tests) SGVector dense_vec(32); for (index_t j=0; j<32; j++) - dense_vec[j] = CMath::random(0.0, 1.0); + dense_vec[j] = m_rng->random(0.0, 1.0); index_t i = 0; while (feats->get_next_example()) diff --git a/tests/unit/features/StringFeatures_unittest.cc b/tests/unit/features/StringFeatures_unittest.cc index dd6ab6259ce..4ae344af5f2 100644 --- a/tests/unit/features/StringFeatures_unittest.cc +++ b/tests/unit/features/StringFeatures_unittest.cc @@ -17,18 +17,19 @@ using namespace shogun; SGStringList generateRandomData(index_t num_strings=10, index_t max_string_length=20, index_t min_string_length=10) { SGStringList strings(num_strings, max_string_length); + auto m_rng = std::unique_ptr(new CRandom()); //SG_SPRINT("original string data:\n"); for (index_t i=0; irandom(min_string_length, max_string_length); SGString current(len); //SG_SPRINT("[%i]: \"", i); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); /* attach \0 to print letter */ char* string=SG_MALLOC(char, 2); diff --git a/tests/unit/kernel/CustomKernel_unittest.cc b/tests/unit/kernel/CustomKernel_unittest.cc index 24405caf01c..dad2f6a30ef 100644 --- a/tests/unit/kernel/CustomKernel_unittest.cc +++ b/tests/unit/kernel/CustomKernel_unittest.cc @@ -7,14 +7,15 @@ * Written (W) 2013 Heiko Strathmann */ -#include -#include +#include #include #include #include -#include +#include +#include #include -#include +#include +#include using namespace shogun; using namespace Eigen; diff --git a/tests/unit/kernel/Kernel_unittest.cc b/tests/unit/kernel/Kernel_unittest.cc index e591df1120a..2e0b9e299d3 100644 --- a/tests/unit/kernel/Kernel_unittest.cc +++ b/tests/unit/kernel/Kernel_unittest.cc @@ -41,10 +41,11 @@ static SGMatrix generate_std_norm_matrix(const index_t num_feats, const index_t dim) { SGMatrix data(dim, num_feats); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib(); } return data; } @@ -55,7 +56,7 @@ TEST(Kernel, sum_symmetric_block_no_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data = generate_std_norm_matrix(num_feats, dim); CDenseFeatures* feats=new CDenseFeatures(data); 
@@ -84,7 +85,7 @@ TEST(Kernel, sum_symmetric_block_with_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data = generate_std_norm_matrix(num_feats, dim); CDenseFeatures* feats=new CDenseFeatures(data); @@ -114,7 +115,7 @@ TEST(Kernel, sum_block_with_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data_p = generate_std_norm_matrix(num_feats_p, dim); SGMatrix data_q = generate_std_norm_matrix(num_feats_q, dim); @@ -147,7 +148,7 @@ TEST(Kernel, sum_block_no_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data_p = generate_std_norm_matrix(num_feats_p, dim); SGMatrix data_q = generate_std_norm_matrix(num_feats_q, dim); @@ -179,7 +180,7 @@ TEST(Kernel, row_wise_sum_symmetric_block_no_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data = generate_std_norm_matrix(num_feats, dim); CDenseFeatures* feats=new CDenseFeatures(data); @@ -208,7 +209,7 @@ TEST(Kernel, row_wise_sum_symmetric_block_with_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data = generate_std_norm_matrix(num_feats, dim); CDenseFeatures* feats=new CDenseFeatures(data); @@ -237,7 +238,7 @@ TEST(Kernel, row_wise_sum_squared_sum_symmetric_block_no_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data = generate_std_norm_matrix(num_feats, dim); CDenseFeatures* feats=new CDenseFeatures(data); @@ -272,7 +273,7 @@ TEST(Kernel, row_wise_sum_squared_sum_symmetric_block_with_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data = generate_std_norm_matrix(num_feats, dim); CDenseFeatures* feats=new CDenseFeatures(data); @@ -308,7 +309,7 @@ TEST(Kernel, row_col_wise_sum_block_with_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data_p = generate_std_norm_matrix(num_feats_p, dim); SGMatrix data_q = generate_std_norm_matrix(num_feats_q, dim); @@ -349,7 +350,7 @@ TEST(Kernel, row_col_wise_sum_block_no_diag) const index_t dim=3; // create random data - CMath::init_random(100); + set_global_seed(100); SGMatrix data_p = generate_std_norm_matrix(num_feats_p, dim); SGMatrix data_q = generate_std_norm_matrix(num_feats_q, dim); diff --git a/tests/unit/kernel/SubsequenceStringKernel_unittest.cc b/tests/unit/kernel/SubsequenceStringKernel_unittest.cc index 6fa359c3796..19caffbd768 100644 --- a/tests/unit/kernel/SubsequenceStringKernel_unittest.cc +++ b/tests/unit/kernel/SubsequenceStringKernel_unittest.cc @@ -61,18 +61,19 @@ TEST(SubsequenceStringKernel, psd_random_feat) const index_t min_len=max_len/2; SGStringList list(num_strings, max_len); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(min_len, max_len); SGString str(cur_len); for (index_t l=0; lrandom('A', 'Z')); list.strings[i]=str; } CStringFeatures* s_feats=new CStringFeatures(list, ALPHANUM); - int32_t s_len=CMath::random(1, min_len); - float64_t lambda=CMath::random(0.0, 1.0); + int32_t s_len = m_rng->random(1, min_len); + float64_t lambda = m_rng->random(0.0, 1.0); CSubsequenceStringKernel* kernel=new CSubsequenceStringKernel(s_feats, s_feats, s_len, lambda); SGMatrix kernel_matrix=kernel->get_kernel_matrix(); diff --git a/tests/unit/lib/Memory_unittest.cc b/tests/unit/lib/Memory_unittest.cc 
index 654776878da..f63610daba4 100644 --- a/tests/unit/lib/Memory_unittest.cc +++ b/tests/unit/lib/Memory_unittest.cc @@ -66,8 +66,9 @@ TEST(MemoryTest, sg_memcpy) { const index_t size = 10; auto src = SG_CALLOC(float64_t, size); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib(); auto dest = SG_CALLOC(float64_t, size); diff --git a/tests/unit/lib/SGMatrix_unittest.cc b/tests/unit/lib/SGMatrix_unittest.cc index c8a291cbc80..ae5bb677338 100644 --- a/tests/unit/lib/SGMatrix_unittest.cc +++ b/tests/unit/lib/SGMatrix_unittest.cc @@ -242,14 +242,14 @@ TEST(SGMatrixTest,is_symmetric_float32_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); // create a symmetric matrix for (index_t i=0; irandn_float(); mat(j, i)=mat(i, j); } } @@ -279,14 +279,14 @@ TEST(SGMatrixTest,is_symmetric_float32_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); // create a symmetric matrix for (index_t i=0; irandn_float(); mat(j, i)=mat(i, j); } } @@ -316,11 +316,12 @@ TEST(SGMatrixTest,is_symmetric_float32_true) { const index_t size=2; SGMatrix mat(size, size); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandn_float(); mat(j, i)=mat(i, j); } } @@ -331,14 +332,14 @@ TEST(SGMatrixTest,is_symmetric_float64_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(); mat(j, i)=mat(i, j); } } @@ -368,14 +369,14 @@ TEST(SGMatrixTest,is_symmetric_float64_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(); mat(j, i)=mat(i, j); } } @@ -405,12 +406,12 @@ TEST(SGMatrixTest,is_symmetric_float64_true) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); for (index_t i=0; istd_normal_distrib(); mat(j, i)=mat(i, j); } } @@ -421,14 +422,15 @@ TEST(SGMatrixTest,is_symmetric_complex128_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); mat(j, i)=mat(i, j); } } @@ -466,14 +468,15 @@ TEST(SGMatrixTest,is_symmetric_complex128_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); mat(j, i)=mat(i, j); } } @@ -511,12 +514,13 @@ TEST(SGMatrixTest,is_symmetric_complex128_true) { const index_t size=2; SGMatrix mat(size, size); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); for (index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); mat(j, i)=mat(i, j); } } @@ -560,21 +564,21 @@ TEST(SGMatrixTest, equals) EXPECT_TRUE(mat.equals(mat)); EXPECT_TRUE(mat.equals(copy)); + auto m_rng = std::unique_ptr(new CRandom(100)); mat=SGMatrix(size, size); - CMath::init_random(100); for (uint64_t i=0; irandn_float(); EXPECT_TRUE(mat.equals(mat)); EXPECT_FALSE(mat.equals(copy)); copy=SGMatrix(size, size); 
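The SGMatrix equals() hunk just above regenerates an identical random matrix by rewinding the instance with m_rng->set_seed(100), where the old code re-seeded the global generator via CMath::init_random(100). A short sketch of that replay idiom, assuming the set_seed()/randn_float() methods shown in the hunks above:

#include <shogun/base/init.h>
#include <shogun/mathematics/Random.h>
#include <memory>
#include <cstdio>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    auto m_rng = std::unique_ptr<CRandom>(new CRandom(100));
    float32_t first = m_rng->randn_float();

    // re-seeding the instance rewinds its stream, so the equals() test can
    // rebuild bit-identical values without touching any global RNG state
    m_rng->set_seed(100);
    float32_t replay = m_rng->randn_float();

    printf("%s\n", first == replay ? "identical" : "different");

    exit_shogun();
    return 0;
}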
EXPECT_FALSE(mat.equals(copy)); + m_rng->set_seed(100); - CMath::init_random(100); for (uint64_t i=0; irandn_float(); EXPECT_TRUE(mat.equals(copy)); } @@ -583,8 +587,9 @@ TEST(SGMatrixTest, clone) { const index_t size=10; SGMatrix mat(size, size); + auto m_rng = std::unique_ptr(new CRandom()); for (uint64_t i=0; irandn_float(); SGMatrix copy=mat.clone(); @@ -595,7 +600,8 @@ TEST(SGMatrixTest, set_const) { const index_t size=10; SGMatrix mat(size, size); - const auto value=CMath::randn_double(); + auto m_rng = std::unique_ptr(new CRandom()); + const auto value = m_rng->std_normal_distrib(); mat.set_const(value); for (uint64_t i=0; i mat(size, size); + auto m_rng = std::unique_ptr(new CRandom()); for (uint64_t i=0; irandn_float(); auto max=mat.max_single(); for (uint64_t i=0; i mat(n_rows, n_cols); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < n_rows * n_cols; ++i) - mat[i] = CMath::randn_double(); + mat[i] = m_rng->std_normal_distrib(); auto vec = mat.get_column_vector(col); @@ -636,9 +644,9 @@ TEST(SGMatrixTest, set_column) SGMatrix mat(n_rows, n_cols); SGVector vec(n_rows); - + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < n_rows; ++i) - vec[i] = CMath::randn_double(); + vec[i] = m_rng->std_normal_distrib(); mat.set_column(col, vec); diff --git a/tests/unit/lib/SGVector_unittest.cc b/tests/unit/lib/SGVector_unittest.cc index accba8d019e..936003474e1 100644 --- a/tests/unit/lib/SGVector_unittest.cc +++ b/tests/unit/lib/SGVector_unittest.cc @@ -41,10 +41,10 @@ TEST(SGVectorTest,ctor) TEST(SGVectorTest, ctor_from_matrix) { const index_t n_rows = 5, n_cols = 4; - + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix mat(n_rows, n_cols); for (index_t i = 0; i < mat.size(); ++i) - mat[i] = CMath::randn_double(); + mat[i] = m_rng->std_normal_distrib(); auto vec = SGVector(mat); @@ -78,7 +78,7 @@ TEST(SGVectorTest,setget) TEST(SGVectorTest,add) { - CMath::init_random(17); + set_global_seed(17); SGVector a(10); SGVector b(10); a.random(0.0, 1024.0); @@ -98,7 +98,7 @@ TEST(SGVectorTest,add) TEST(SGVectorTest,norm) { - CMath::init_random(17); + set_global_seed(17); SGVector a(10); a.random(-50.0, 1024.0); @@ -122,7 +122,7 @@ TEST(SGVectorTest,norm) TEST(SGVectorTest,misc) { - CMath::init_random(17); + set_global_seed(17); SGVector a(10); a.random(-1024.0, 1024.0); diff --git a/tests/unit/machine/StochasticGBMachine_unittest.cc b/tests/unit/machine/StochasticGBMachine_unittest.cc index 3dbd5ad1074..86c36e4b388 100644 --- a/tests/unit/machine/StochasticGBMachine_unittest.cc +++ b/tests/unit/machine/StochasticGBMachine_unittest.cc @@ -52,7 +52,7 @@ SGMatrix get_sinusoid_samples(int32_t num_samples, SGVector lab(num_train_samples); SGMatrix data=get_sinusoid_samples(num_train_samples,lab); diff --git a/tests/unit/mathematics/Math_unittest.cc b/tests/unit/mathematics/Math_unittest.cc index 2c226008899..c37d325ff0d 100644 --- a/tests/unit/mathematics/Math_unittest.cc +++ b/tests/unit/mathematics/Math_unittest.cc @@ -388,9 +388,8 @@ TEST(CMath, permute) { SGVector v(4); v.range_fill(0); - CRandom* random = new CRandom(2); - CMath::permute(v, random); - SG_FREE(random); + auto random = std::unique_ptr(new CRandom(2)); + CMath::permute(v, random.get()); EXPECT_EQ(v[0], 2); EXPECT_EQ(v[1], 1); EXPECT_EQ(v[2], 3); @@ -401,9 +400,8 @@ TEST(CMath, permute_with_random) { SGVector v(4); v.range_fill(0); - CRandom* random = new CRandom(2); - CMath::permute(v, random); - SG_FREE(random); + auto random = std::unique_ptr(new CRandom(2)); + CMath::permute(v, 
random.get()); EXPECT_EQ(v[0], 2); EXPECT_EQ(v[1], 1); @@ -413,7 +411,7 @@ TEST(CMath, permute_with_random) TEST(CMath,misc) { - CMath::init_random(17); + set_global_seed(17); SGVector a(10); a.random(-1024.0, 1024.0); @@ -502,7 +500,7 @@ TEST(CMath,is_sorted_2) TEST(CMath, dot) { - CMath::init_random(17); + set_global_seed(17); SGVector a(10); a.random(0.0, 1024.0); float64_t dot_val = 0.0; diff --git a/tests/unit/mathematics/Random_unittest.cc b/tests/unit/mathematics/Random_unittest.cc index b55e3a53c8a..c69b638d6f9 100644 --- a/tests/unit/mathematics/Random_unittest.cc +++ b/tests/unit/mathematics/Random_unittest.cc @@ -117,10 +117,10 @@ TEST(Random, normal_distrib) TEST(Random, random_uint64_1_2) { - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); for (int32_t i=0; i<10000; i++) { - uint64_t r=CMath::random((uint64_t) 1, (uint64_t) 2); + uint64_t r = m_rng->random((uint64_t)1, (uint64_t)2); EXPECT_TRUE(r == 1 || r == 2); } } @@ -304,11 +304,12 @@ TEST(Random, random_uint32_random_range) TEST(Random, random_float64_range) { int rnds[array_len]; + auto m_rng = std::unique_ptr(new CRandom(17)); for (uint32_t i=0; irandom((float64_t)0, (float64_t)array_len); rnds[r]++; } diff --git a/tests/unit/mathematics/ajd/FFDiag_unittest.cc b/tests/unit/mathematics/ajd/FFDiag_unittest.cc index 8090ba3f22b..6b7acd9251b 100644 --- a/tests/unit/mathematics/ajd/FFDiag_unittest.cc +++ b/tests/unit/mathematics/ajd/FFDiag_unittest.cc @@ -26,7 +26,7 @@ TEST(CFFDiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); for (int i = 0; i < C_dims[2]; i++) { @@ -34,8 +34,7 @@ TEST(CFFDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j,j) *= CMath::abs(CMath::random(1,5)); - + tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc b/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc index c02c234d839..76a43c30942 100644 --- a/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc +++ b/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc @@ -26,6 +26,7 @@ TEST(CJADiagOrth, diagonalize) C_dims[1] = 10; C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); + auto m_rng = std::unique_ptr(new CRandom()); for (int i = 0; i < C_dims[2]; i++) { @@ -33,8 +34,7 @@ TEST(CJADiagOrth, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j,j) *= CMath::abs(CMath::random(1,5)); - + tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); } // Building a random orthonormal matrix A diff --git a/tests/unit/mathematics/ajd/JADiag_unittest.cc b/tests/unit/mathematics/ajd/JADiag_unittest.cc index 2e64b11becc..4189d9ff7bb 100644 --- a/tests/unit/mathematics/ajd/JADiag_unittest.cc +++ b/tests/unit/mathematics/ajd/JADiag_unittest.cc @@ -26,7 +26,7 @@ TEST(CJADiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); for (int i = 0; i < C_dims[2]; i++) { @@ -34,8 +34,7 @@ TEST(CJADiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j,j) *= CMath::abs(CMath::random(1,5)); - + tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/JediDiag_unittest.cc b/tests/unit/mathematics/ajd/JediDiag_unittest.cc index a4c139cf37a..a04557e9a86 100644 --- a/tests/unit/mathematics/ajd/JediDiag_unittest.cc +++ 
b/tests/unit/mathematics/ajd/JediDiag_unittest.cc @@ -26,7 +26,7 @@ TEST(CJediDiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); for (int i = 0; i < C_dims[2]; i++) { @@ -34,8 +34,7 @@ TEST(CJediDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j,j) *= CMath::abs(CMath::random(1,5)); - + tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/QDiag_unittest.cc b/tests/unit/mathematics/ajd/QDiag_unittest.cc index 5a0f38e3f8e..0cd3aef57a3 100644 --- a/tests/unit/mathematics/ajd/QDiag_unittest.cc +++ b/tests/unit/mathematics/ajd/QDiag_unittest.cc @@ -26,7 +26,7 @@ TEST(CQDiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); for (int i = 0; i < C_dims[2]; i++) { @@ -34,8 +34,7 @@ TEST(CQDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j,j) *= CMath::abs(CMath::random(1,5)); - + tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/UWedge_unittest.cc b/tests/unit/mathematics/ajd/UWedge_unittest.cc index 820cd0f0cf4..8a11a2ed209 100644 --- a/tests/unit/mathematics/ajd/UWedge_unittest.cc +++ b/tests/unit/mathematics/ajd/UWedge_unittest.cc @@ -26,7 +26,7 @@ TEST(CUWedge, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); for (int i = 0; i < C_dims[2]; i++) { @@ -35,7 +35,7 @@ TEST(CUWedge, diagonalize) for (int j = 0; j < C_dims[0]; j++) { - tmp(j,j) *= CMath::abs(CMath::random(1,5)); + tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); } } diff --git a/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc b/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc index 8af7fecbea4..a9ebbf0099a 100644 --- a/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc +++ b/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc @@ -29,11 +29,12 @@ TEST(LanczosEigenSolver, compute) { const int32_t size=4; SGMatrix m(size, size); - m.set_const(CMath::random(50.0, 100.0)); + auto m_rng = std::unique_ptr(new CRandom()); + m.set_const(m_rng->random(50.0, 100.0)); // Hermintian matrix for (index_t i=0; irandom(100.0, 10000.0); // Creating sparse linear operator to use with Lanczos CSparseFeatures feat(m); diff --git a/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc b/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc index 530305c3a0a..d9773754ca3 100644 --- a/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc +++ b/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc @@ -120,7 +120,7 @@ TEST(LogDetEstimator, sample_ratapp_dense) mat(1,0)=0.5; mat(1,1)=1000.0; - CMath::init_random(1); + set_global_seed(1); float64_t accuracy=1E-5; CDenseMatrixOperator* op=new CDenseMatrixOperator(mat); @@ -139,7 +139,7 @@ TEST(LogDetEstimator, sample_ratapp_dense) SG_REF(op_func); CNormalSampler* trace_sampler=new CNormalSampler(size); - trace_sampler->set_seed(1); + SG_REF(trace_sampler); CLogDetEstimator estimator(trace_sampler, op_func, e); diff --git a/tests/unit/multiclass/BaggingMachine_unittest.cc b/tests/unit/multiclass/BaggingMachine_unittest.cc index 29e5a6999ed..fe5601ff8f9 100644 --- a/tests/unit/multiclass/BaggingMachine_unittest.cc +++ b/tests/unit/multiclass/BaggingMachine_unittest.cc @@ -83,7 +83,7 @@ 
TEST(BaggingMachine, mock_train) TEST(BaggingMachine,classify_CART) { - CMath::init_random(1); + set_global_seed(1); SGMatrix data(4,14); //vector = [Outlook Temperature Humidity Wind] diff --git a/tests/unit/multiclass/KNN_unittest.cc b/tests/unit/multiclass/KNN_unittest.cc index 1be12719e07..f37c2e6a108 100644 --- a/tests/unit/multiclass/KNN_unittest.cc +++ b/tests/unit/multiclass/KNN_unittest.cc @@ -12,7 +12,7 @@ using namespace shogun; void generate_knn_data(SGMatrix& feat, SGVector& lab, int32_t num, int32_t classes, int32_t feats) { - CMath::init_random(1); + set_global_seed(1); feat = CDataGenerator::generate_gaussians(num,classes,feats); for( int i = 0 ; i < classes ; ++i ) for( int j = 0 ; j < num ; ++j ) diff --git a/tests/unit/multiclass/LaRank_unittest.cc b/tests/unit/multiclass/LaRank_unittest.cc index 157ec7b2604..dd27bb8e9f9 100644 --- a/tests/unit/multiclass/LaRank_unittest.cc +++ b/tests/unit/multiclass/LaRank_unittest.cc @@ -18,13 +18,14 @@ TEST(LaRank,train) SGMatrix matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib(); + matrix_test(j, i) = m_rng->std_normal_distrib(); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/tests/unit/multiclass/MulticlassLibLinear_unittest.cc b/tests/unit/multiclass/MulticlassLibLinear_unittest.cc index 56fec676fe0..ee73eb5966a 100644 --- a/tests/unit/multiclass/MulticlassLibLinear_unittest.cc +++ b/tests/unit/multiclass/MulticlassLibLinear_unittest.cc @@ -16,13 +16,14 @@ TEST(MulticlassLibLinearTest,train_and_apply) SGMatrix matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib(); + matrix_test(j, i) = m_rng->std_normal_distrib(); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/tests/unit/multiclass/MulticlassOCAS_unittest.cc b/tests/unit/multiclass/MulticlassOCAS_unittest.cc index 1ed8421fb93..d6bfffc03fc 100644 --- a/tests/unit/multiclass/MulticlassOCAS_unittest.cc +++ b/tests/unit/multiclass/MulticlassOCAS_unittest.cc @@ -13,7 +13,7 @@ extern MultiLabelTestEnvironment* multilabel_test_env; #ifdef HAVE_LAPACK TEST(MulticlassOCASTest,train) { - CMath::init_random(17); + set_global_seed(17); float64_t C = 1.0; std::shared_ptr mockData = multilabel_test_env->getMulticlassFixture(); diff --git a/tests/unit/multiclass/tree/CARTree_unittest.cc b/tests/unit/multiclass/tree/CARTree_unittest.cc index 5e1c0af9b56..b98d86c03f9 100644 --- a/tests/unit/multiclass/tree/CARTree_unittest.cc +++ b/tests/unit/multiclass/tree/CARTree_unittest.cc @@ -546,7 +546,7 @@ TEST(CARTree, form_t1_test) TEST(CARTree,cv_prune_simple) { - CMath::init_random(1); + set_global_seed(1); SGMatrix data(2,20); data(0,0)=2; data(1,0)=2; diff --git a/tests/unit/multiclass/tree/RandomCARTree_unittest.cc b/tests/unit/multiclass/tree/RandomCARTree_unittest.cc index dff490ad801..34ff9732923 100644 --- a/tests/unit/multiclass/tree/RandomCARTree_unittest.cc +++ b/tests/unit/multiclass/tree/RandomCARTree_unittest.cc @@ -51,7 +51,7 @@ using namespace shogun; TEST(RandomCARTree, classify_nominal) { - CMath::init_random(2); + set_global_seed(2); SGMatrix data(4,14); //vector = [Outlook Temperature Humidity Wind] diff --git 
a/tests/unit/multiclass/tree/RandomForest_unittest.cc b/tests/unit/multiclass/tree/RandomForest_unittest.cc index 91d417b9f30..e100bd43c0f 100644 --- a/tests/unit/multiclass/tree/RandomForest_unittest.cc +++ b/tests/unit/multiclass/tree/RandomForest_unittest.cc @@ -142,7 +142,7 @@ void generate_nm_data(SGMatrix& data, SGVector& lab) TEST(RandomForest,classify_nominal_test) { - CMath::init_random(1); + set_global_seed(1); SGMatrix data(4,14); SGVector lab(14); @@ -198,10 +198,10 @@ TEST(RandomForest,classify_nominal_test) EXPECT_EQ(0.0,res_vector[1]); EXPECT_EQ(0.0,res_vector[2]); EXPECT_EQ(1.0,res_vector[3]); - EXPECT_EQ(1.0,res_vector[4]); + EXPECT_EQ(0.0, res_vector[4]); CMulticlassAccuracy* eval=new CMulticlassAccuracy(); - EXPECT_NEAR(0.642857,c->get_oob_error(eval),1e-6); + EXPECT_NEAR(0.571428, c->get_oob_error(eval), 1e-6); SG_UNREF(test_feats); SG_UNREF(result); @@ -211,7 +211,7 @@ TEST(RandomForest,classify_nominal_test) TEST(RandomForest,classify_non_nominal_test) { - CMath::init_random(1); + set_global_seed(1); SGMatrix data(4,14); SGVector lab(14); @@ -267,10 +267,10 @@ TEST(RandomForest,classify_non_nominal_test) EXPECT_EQ(0.0,res_vector[1]); EXPECT_EQ(0.0,res_vector[2]); EXPECT_EQ(1.0,res_vector[3]); - EXPECT_EQ(1.0,res_vector[4]); + EXPECT_EQ(0.0, res_vector[4]); CMulticlassAccuracy* eval=new CMulticlassAccuracy(); - EXPECT_NEAR(0.714285,c->get_oob_error(eval),1e-6); + EXPECT_NEAR(0.571428, c->get_oob_error(eval), 1e-6); SG_UNREF(test_feats); SG_UNREF(result); diff --git a/tests/unit/neuralnets/Autoencoder_unittest.cc b/tests/unit/neuralnets/Autoencoder_unittest.cc index f4ebf089872..af6572edd81 100644 --- a/tests/unit/neuralnets/Autoencoder_unittest.cc +++ b/tests/unit/neuralnets/Autoencoder_unittest.cc @@ -44,7 +44,7 @@ using namespace shogun; TEST(Autoencoder, train) { - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); int32_t num_features = 10; int32_t num_examples = 100; @@ -52,7 +52,7 @@ TEST(Autoencoder, train) SGMatrix data(num_features, num_examples); for (int32_t i=0; irandom(-1.0, 1.0); CAutoencoder ae(num_features, new CNeuralRectifiedLinearLayer(num_hid)); @@ -81,7 +81,7 @@ TEST(Autoencoder, contractive_linear) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(10); CAutoencoder ae(10, new CNeuralLinearLayer(15)); @@ -98,7 +98,7 @@ TEST(Autoencoder, contractive_rectified_linear) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(10); CAutoencoder ae(10, new CNeuralRectifiedLinearLayer(15)); @@ -115,7 +115,7 @@ TEST(Autoencoder, contractive_logistic) { float64_t tolerance = 1e-6; - CMath::init_random(10); + set_global_seed(10); CAutoencoder ae(10, new CNeuralLogisticLayer(15)); ae.initialize_neural_network(); @@ -135,7 +135,7 @@ TEST(Autoencoder, convolutional) float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(10); CAutoencoder ae(w,h,3, new CNeuralConvolutionalLayer(CMAF_IDENTITY, 2, 1,1, 1,1, 1,1), @@ -154,7 +154,7 @@ TEST(Autoencoder, convolutional_with_pooling) float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(10); CAutoencoder ae(w,h,3, new CNeuralConvolutionalLayer(CMAF_IDENTITY, 2, 1,1, 3,2, 1,1), @@ -173,7 +173,7 @@ TEST(Autoencoder, convolutional_with_stride) float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(10); CAutoencoder ae(w,h,3, new CNeuralConvolutionalLayer(CMAF_IDENTITY, 2, 1,1, 1,1, 3,2), @@ -192,7 +192,7 @@ TEST(Autoencoder, convolutional_with_stride_and_pooling) float64_t tolerance = 1e-9; - 
CMath::init_random(10); + set_global_seed(10); CAutoencoder ae(w,h,3, new CNeuralConvolutionalLayer(CMAF_IDENTITY, 2, 1,1, 2,2, 2,2), diff --git a/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc b/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc index 226637a7da8..b689a85c173 100644 --- a/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc +++ b/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc @@ -307,11 +307,11 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) const int32_t map_index = 1; const int32_t num_maps = 3; - CMath::init_random(10); + auto m_rng = std::unique_ptr(new CRandom(10)); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -319,7 +319,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) // two channels SGMatrix x2(2*w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -335,7 +335,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); input1->compute_activations(x1); input2->compute_activations(x2); @@ -399,11 +399,11 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) int32_t w_out = w/stride_x; int32_t h_out = h/stride_y; - CMath::init_random(10); + auto m_rng = std::unique_ptr(new CRandom(10)); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -411,7 +411,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) // two channels SGMatrix x2(2*w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -427,7 +427,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) CConvolutionalFeatureMap map(w,h,rx,ry,stride_x,stride_y,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); input1->compute_activations(x1); input2->compute_activations(x2); @@ -484,11 +484,11 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_logistic) const int32_t ry = 1; const int32_t b = 2; - CMath::init_random(10); + auto m_rng = std::unique_ptr(new CRandom(10)); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -502,7 +502,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_logistic) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,0, CMAF_LOGISTIC); SGVector params(1+(2*rx+1)*(2*ry+1)); for (int32_t i=0; inormal_random(0.0, 0.01); input1->compute_activations(x1); @@ -558,11 +558,11 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_rectified_linear) const int32_t ry = 1; const int32_t b = 2; - CMath::init_random(10); + auto m_rng = std::unique_ptr(new CRandom(10)); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -576,7 +576,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_rectified_linear) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,0, CMAF_RECTIFIED_LINEAR); SGVector params(1+(2*rx+1)*(2*ry+1)); for (int32_t i=0; 
inormal_random(0.0, 0.01); input1->compute_activations(x1); @@ -634,7 +634,7 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) const int32_t map_index = 0; const int32_t num_maps = 1; - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); CNeuralLinearLayer* input1 = new CNeuralLinearLayer (w*h); input1->set_batch_size(b); @@ -644,10 +644,10 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) input2->set_batch_size(b); for (int32_t i=0; iget_num_neurons()*b; i++) - input1->get_activations()[i] = CMath::random(-10.0,10.0); + input1->get_activations()[i] = m_rng->random(-10.0, 10.0); for (int32_t i=0; iget_num_neurons()*b; i++) - input2->get_activations()[i] = CMath::random(-10.0,10.0); + input2->get_activations()[i] = m_rng->random(-10.0, 10.0); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(input1); @@ -660,7 +660,7 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); SGMatrix A(num_maps*w*h,b); A.zero(); diff --git a/tests/unit/neuralnets/DeepAutoencoder_unittest.cc b/tests/unit/neuralnets/DeepAutoencoder_unittest.cc index 31d3bdfa96a..abf15e1d2bf 100644 --- a/tests/unit/neuralnets/DeepAutoencoder_unittest.cc +++ b/tests/unit/neuralnets/DeepAutoencoder_unittest.cc @@ -44,14 +44,14 @@ using namespace shogun; TEST(DeepAutoencoder, pre_train) { - CMath::init_random(10); + auto m_rng = std::unique_ptr(new CRandom(10)); int32_t num_features = 10; int32_t num_examples = 100; SGMatrix data(num_features, num_examples); for (int32_t i=0; irandom(-1.0, 1.0); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(num_features)); @@ -83,7 +83,7 @@ TEST(DeepAutoencoder, pre_train) TEST(DeepAutoencoder, convert_to_neural_network) { - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(10)); @@ -98,7 +98,7 @@ TEST(DeepAutoencoder, convert_to_neural_network) SGMatrix x(10, 3); for (int32_t i=0; irandom(0.0, 1.0); CDenseFeatures f(x); diff --git a/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc b/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc index e8836ec29ec..63cb3ca80de 100644 --- a/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc +++ b/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc @@ -41,7 +41,7 @@ using namespace shogun; TEST(DeepBeliefNetwork, convert_to_neural_network) { - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); CDeepBeliefNetwork dbn(5, RBMVUT_BINARY); dbn.add_hidden_layer(6); @@ -54,7 +54,7 @@ TEST(DeepBeliefNetwork, convert_to_neural_network) SGMatrix x(5, 3); for (int32_t i=0; irandom(0.0, 1.0); CDenseFeatures f(x); diff --git a/tests/unit/neuralnets/NeuralInputLayer_unittest.cc b/tests/unit/neuralnets/NeuralInputLayer_unittest.cc index a4b1a11feb0..e5c9026654f 100644 --- a/tests/unit/neuralnets/NeuralInputLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralInputLayer_unittest.cc @@ -38,10 +38,10 @@ using namespace shogun; TEST(NeuralInputLayer, compute_activations) { - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer layer(5, 4); layer.set_batch_size(x.num_cols); diff --git a/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc 
b/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc index fd1f1128939..5ff137870c0 100644 --- a/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc @@ -48,10 +48,10 @@ TEST(NeuralLeakyRectifiedLinearLayer, compute_activations) CNeuralLeakyRectifiedLinearLayer layer(9); float64_t alpha = 0.02; // initialize some random inputs - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); diff --git a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc index 773ac220fc4..b550858a8e3 100644 --- a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc @@ -48,17 +48,17 @@ TEST(NeuralLinearLayer, compute_activations) CNeuralLinearLayer layer(9); // initialize some random inputs - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x1(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -120,17 +120,17 @@ TEST(NeuralLinearLayer, compute_activations) */ TEST(NeuralLinearLayer, compute_error) { - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x1(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -145,7 +145,7 @@ TEST(NeuralLinearLayer, compute_error) SGMatrix y(9,3); for (int32_t i=0; irandom(0.0, 1.0); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -178,10 +178,10 @@ TEST(NeuralLinearLayer, compute_error) */ TEST(NeuralLinearLayer, compute_local_gradients) { - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x.num_rows); input1->set_batch_size(x.num_cols); @@ -194,7 +194,7 @@ TEST(NeuralLinearLayer, compute_local_gradients) SGMatrix y(9,3); for (int32_t i=0; irandom(0.0, 1.0); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -241,15 +241,16 @@ TEST(NeuralLinearLayer, compute_local_gradients) TEST(NeuralLinearLayer, compute_parameter_gradients_output) { SGMatrix x1(12,3); + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -264,7 +265,7 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_output) SGMatrix y(9,3); for (int32_t i=0; irandom(0.0, 1.0); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -318,15 +319,16 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_output) TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) { SGMatrix x1(12,3); + auto m_rng = 
std::unique_ptr(new CRandom()); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -348,7 +350,7 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) SGMatrix y(9,3); for (int32_t i=0; irandom(0.0, 1.0); // initialize the hidden layer layer_hid->initialize_neural_layer(layers, input_indices_hid); diff --git a/tests/unit/neuralnets/NeuralLogisticLayer_unittest.cc b/tests/unit/neuralnets/NeuralLogisticLayer_unittest.cc index 68847c45b66..af5be5fbbc1 100644 --- a/tests/unit/neuralnets/NeuralLogisticLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLogisticLayer_unittest.cc @@ -48,10 +48,10 @@ TEST(NeuralLogisticLayer, compute_activations) CNeuralLogisticLayer layer(9); // initialize some random inputs - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -109,10 +109,10 @@ TEST(NeuralLogisticLayer, compute_local_gradients) { CNeuralLogisticLayer layer(9); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -132,7 +132,7 @@ TEST(NeuralLogisticLayer, compute_local_gradients) SGMatrix y(layer.get_num_neurons(), x.num_cols); for (int32_t i=0; irandom(0.0, 1.0); // compute the layer's local gradients input->compute_activations(x); diff --git a/tests/unit/neuralnets/NeuralNetwork_unittest.cc b/tests/unit/neuralnets/NeuralNetwork_unittest.cc index b88acdce749..f1080283052 100644 --- a/tests/unit/neuralnets/NeuralNetwork_unittest.cc +++ b/tests/unit/neuralnets/NeuralNetwork_unittest.cc @@ -56,7 +56,7 @@ TEST(NeuralNetwork, backpropagation_linear) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(100); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(5)); @@ -88,7 +88,7 @@ TEST(NeuralNetwork, neural_layers_builder) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(100); CNeuralLayers* layers = new CNeuralLayers(); layers->input(5) @@ -123,7 +123,7 @@ TEST(NeuralNetwork, backpropagation_logistic) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(100); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(5)); @@ -155,7 +155,7 @@ TEST(NeuralNetwork, backpropagation_softmax) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(100); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(5)); @@ -187,7 +187,7 @@ TEST(NeuralNetwork, backpropagation_rectified_linear) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(10); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(5)); @@ -219,7 +219,7 @@ TEST(NeuralNetwork, backpropagation_convolutional) { float64_t tolerance = 1e-9; - CMath::init_random(10); + set_global_seed(10); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(6,4)); @@ -250,7 +250,7 @@ TEST(NeuralNetwork, 
backpropagation_convolutional) /** tests a neural network on the binary XOR problem */ TEST(NeuralNetwork, binary_classification) { - CMath::init_random(100); + set_global_seed(10); SGMatrix inputs_matrix(2,4); SGVector targets_vector(4); @@ -309,7 +309,7 @@ TEST(NeuralNetwork, binary_classification) */ TEST(NeuralNetwork, multiclass_classification) { - CMath::init_random(100); + set_global_seed(100); SGMatrix inputs_matrix(2,4); SGVector targets_vector(4); @@ -368,7 +368,7 @@ TEST(NeuralNetwork, multiclass_classification) /** tests a neural network on a very simple regression problem */ TEST(NeuralNetwork, regression) { - CMath::init_random(100); + set_global_seed(100); int32_t N = 20; SGMatrix inputs_matrix(1,N); @@ -414,7 +414,7 @@ TEST(NeuralNetwork, regression) */ TEST(NeuralNetwork, gradient_descent) { - CMath::init_random(100); + set_global_seed(100); SGMatrix inputs_matrix(2,4); SGVector targets_vector(4); diff --git a/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc index 4b065eb2e7d..9010599daa0 100644 --- a/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc @@ -48,10 +48,10 @@ TEST(NeuralRectifiedLinearLayer, compute_activations) CNeuralRectifiedLinearLayer layer(9); // initialize some random inputs - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -109,15 +109,16 @@ TEST(NeuralRectifiedLinearLayer, compute_activations) TEST(NeuralRectifiedLinearLayer, compute_parameter_gradients_hidden) { SGMatrix x1(12,3); + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -139,7 +140,7 @@ TEST(NeuralRectifiedLinearLayer, compute_parameter_gradients_hidden) SGMatrix y(9,3); for (int32_t i=0; irandom(0.0, 1.0); // initialize the hidden layer layer_hid->initialize_neural_layer(layers, input_indices_hid); diff --git a/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc b/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc index 50b9f286d91..e2d6f081311 100644 --- a/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc @@ -48,10 +48,10 @@ TEST(NeuralSoftmaxLayer, compute_activations) CNeuralSoftmaxLayer layer(9); // initialize some random inputs - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -119,10 +119,10 @@ TEST(NeuralSoftmaxLayer, compute_error) { CNeuralSoftmaxLayer layer(9); - CMath::init_random(100); + auto m_rng = std::unique_ptr(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -142,7 +142,7 @@ TEST(NeuralSoftmaxLayer, compute_error) SGMatrix y(layer.get_num_neurons(), x.num_cols); for (int32_t i=0; irandom(0.0, 1.0); // make sure y is in the form of a probability distribution for (int32_t 
j=0; j(new CRandom(100)); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -203,7 +203,7 @@ TEST(NeuralSoftmaxLayer, compute_local_gradients) SGMatrix y(layer.get_num_neurons(), x.num_cols); for (int32_t i=0; irandom(0.0, 1.0); // make sure y is in the form of a probability distribution for (int32_t j=0; j(new CRandom(100)); int32_t num_visible = 15; int32_t num_hidden = 6; @@ -126,7 +126,7 @@ TEST(RBM, free_energy_gradients) SGMatrix V(num_visible, batch_size); for (int32_t i=0; irandom_64() < 0.7; SGVector gradients(rbm.get_num_parameters()); rbm.free_energy_gradients(V, gradients); @@ -153,11 +153,10 @@ TEST(RBM, free_energy_gradients) TEST(RBM, pseudo_likelihood_binary) { - CMath::init_random(100); - int32_t num_visible = 5; int32_t num_hidden = 6; int32_t batch_size = 1; + set_global_seed(100); CRBM rbm(num_hidden, num_visible, RBMVUT_BINARY); rbm.initialize_neural_network(); diff --git a/tests/unit/preprocessor/Preprocessor_unittest.cc b/tests/unit/preprocessor/Preprocessor_unittest.cc index 1459be0ded1..21f041fddf7 100644 --- a/tests/unit/preprocessor/Preprocessor_unittest.cc +++ b/tests/unit/preprocessor/Preprocessor_unittest.cc @@ -45,8 +45,9 @@ TEST(Preprocessor, dense_apply) const index_t dim=2; const index_t size=4; SGMatrix data(dim, size); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; istd_normal_distrib(); CDenseFeatures* features=new CDenseFeatures(data); CDensePreprocessor* preproc=new CNormOne(); @@ -69,15 +70,16 @@ TEST(Preprocessor, string_apply) const index_t min_string_length=max_string_length/2; SGStringList strings(num_strings, max_string_length); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(min_string_length, max_string_length); SGString current(len); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); strings.strings[i]=current; } diff --git a/tests/unit/preprocessor/RescaleFeatures_unittest.cc b/tests/unit/preprocessor/RescaleFeatures_unittest.cc index 3289df56b0d..99a46cc536f 100644 --- a/tests/unit/preprocessor/RescaleFeatures_unittest.cc +++ b/tests/unit/preprocessor/RescaleFeatures_unittest.cc @@ -18,7 +18,7 @@ TEST(RescaleFeatures, apply_to_feature_matrix) index_t num_vectors = 10; SGVector min(num_features), range(num_features); SGVector v(num_features*num_vectors), ev; - CMath::init_random(12345); + set_global_seed(12345); v.random(-1024, 1024); ev = v.clone(); diff --git a/tests/unit/regression/krrnystrom_unittest.cc b/tests/unit/regression/krrnystrom_unittest.cc index b8770462e72..c80c27d22da 100644 --- a/tests/unit/regression/krrnystrom_unittest.cc +++ b/tests/unit/regression/krrnystrom_unittest.cc @@ -56,10 +56,11 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_all_columns) /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); SGMatrix test_dat(num_features, num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; inormal_random(0, 1.0); train_dat.matrix[i]=i; test_dat.matrix[i]=i; } @@ -113,6 +114,7 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_column_subset) /* training label data */ SGVector lab(num_vectors); + auto m_rng = std::unique_ptr(new CRandom()); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -121,7 +123,7 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_column_subset) { /* labels are linear plus noise */ float64_t point=(float64_t)i*10/num_vectors; - 
lab.vector[i]=point+CMath::normal_random(0, 1.0); + lab.vector[i] = point + m_rng->normal_random(0, 1.0); train_dat.matrix[i]=point; test_dat.matrix[i]=point; } diff --git a/tests/unit/regression/lars_unittest.cc b/tests/unit/regression/lars_unittest.cc index 4ea6cf804de..ee2e6d1e18a 100644 --- a/tests/unit/regression/lars_unittest.cc +++ b/tests/unit/regression/lars_unittest.cc @@ -377,13 +377,14 @@ TEST(LeastAngleRegression, cholesky_insert) SGVector vec(num_vec); vec.random(0.0,1.0); - Map map_vec(vec.vector, vec.size()); + Map map_vec(vec.vector, vec.size()); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(0.0, 1.0); matnew(i,j)=mat(i,j); } } @@ -415,11 +416,12 @@ TEST(LeastAngleRegression, cholesky_insert) TEST(LeastAngleRegression, ols_equivalence) { int32_t n_feat=25, n_vec=100; - SGMatrix data(n_feat, n_vec); + SGMatrix data(n_feat, n_vec); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; irandom(0.0, 1.0); } SGVector lab=SGVector(n_vec); diff --git a/tests/unit/statistical_testing/KernelSelection_unittest.cc b/tests/unit/statistical_testing/KernelSelection_unittest.cc index e217b0d9e35..900f709bcea 100644 --- a/tests/unit/statistical_testing/KernelSelection_unittest.cc +++ b/tests/unit/statistical_testing/KernelSelection_unittest.cc @@ -50,7 +50,7 @@ TEST(KernelSelectionMaxMMD, linear_time_single_kernel_streaming) const float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); @@ -83,7 +83,7 @@ TEST(KernelSelectionMaxMMD, quadratic_time_single_kernel_dense) const float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=some(0, dim, 0); auto gen_q=some(difference, dim, 0); @@ -105,7 +105,7 @@ TEST(KernelSelectionMaxMMD, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.25, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.0625, 1E-10); } #ifdef USE_GPL_SHOGUN @@ -117,7 +117,7 @@ TEST(KernelSelectionMaxMMD, linear_time_weighted_kernel_streaming) const float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); @@ -156,7 +156,7 @@ TEST(KernelSelectionMaxTestPower, linear_time_single_kernel_streaming) const float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); @@ -189,7 +189,7 @@ TEST(KernelSelectionMaxTestPower, quadratic_time_single_kernel) const float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); @@ -210,7 +210,7 @@ TEST(KernelSelectionMaxTestPower, quadratic_time_single_kernel) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.25, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } #ifdef USE_GPL_SHOGUN @@ -222,7 +222,7 @@ TEST(KernelSelectionMaxTestPower, linear_time_weighted_kernel_streaming) const 
float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); @@ -287,7 +287,7 @@ TEST(KernelSelectionMaxCrossValidation, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.25, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.0625, 1E-10); } TEST(KernelSelectionMaxCrossValidation, linear_time_single_kernel_dense) @@ -302,7 +302,7 @@ TEST(KernelSelectionMaxCrossValidation, linear_time_single_kernel_dense) const float64_t train_test_ratio=3; const float64_t alpha=0.05; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=some(0, dim, 0); auto gen_q=some(difference, dim, 0); @@ -335,7 +335,7 @@ TEST(KernelSelectionMedianHeuristic, quadratic_time_single_kernel_dense) const float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); @@ -367,7 +367,7 @@ TEST(KernelSelectionMedianHeuristic, linear_time_single_kernel_dense) const float64_t difference=0.5; const index_t num_kernels=10; - CMath::init_random(12345); + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); diff --git a/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc b/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc index 86e3adf9876..a72172d36dc 100644 --- a/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc +++ b/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc @@ -318,7 +318,7 @@ TEST(LinearTimeMMD, perform_test_gaussian_biased_full) const index_t dim=3; // use fixed seed - CMath::init_random(12345); + set_global_seed(12345); float64_t difference=0.5; // streaming data generator for mean shift distributions @@ -347,7 +347,7 @@ TEST(LinearTimeMMD, perform_test_gaussian_biased_full) // assert against local machine computed result mmd->set_statistic_type(ST_BIASED_FULL); float64_t p_value_gaussian=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_gaussian, 0.0, 1E-10); + EXPECT_NEAR(p_value_gaussian, 0.0, 1E-6); } TEST(LinearTimeMMD, perform_test_gaussian_unbiased_full) @@ -357,7 +357,7 @@ TEST(LinearTimeMMD, perform_test_gaussian_unbiased_full) const index_t dim=3; float64_t difference=0.5; - CMath::init_random(12345); + set_global_seed(12345); // streaming data generator for mean shift distributions auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); @@ -384,7 +384,7 @@ TEST(LinearTimeMMD, perform_test_gaussian_unbiased_full) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_FULL); float64_t p_value_gaussian=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_gaussian, 0.060947882185221292, 1E-6); + EXPECT_NEAR(p_value_gaussian, 0.78999099853119159, 1E-6); } TEST(LinearTimeMMD, perform_test_gaussian_unbiased_incomplete) @@ -393,7 +393,7 @@ TEST(LinearTimeMMD, perform_test_gaussian_unbiased_incomplete) const index_t n=20; const index_t dim=3; - CMath::init_random(12345); + set_global_seed(12345); float64_t difference=0.5; // streaming data generator for mean shift distributions @@ -422,5 +422,5 @@ TEST(LinearTimeMMD, 
perform_test_gaussian_unbiased_incomplete) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_INCOMPLETE); float64_t p_value_gaussian=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_gaussian, 0.40645354706402292, 1E-6); + EXPECT_NEAR(p_value_gaussian, 0.48342157360749094, 1E-6); } diff --git a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc index d0b50e714ec..86305093866 100644 --- a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc +++ b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc @@ -324,7 +324,9 @@ TEST(QuadraticTimeMMD, perform_test_permutation_biased_full) const index_t n=30; const index_t dim=3; - CMath::init_random(12345); + // use fixed seed + set_global_seed(12345); + float64_t difference=0.5; // streaming data generator for mean shift distributions @@ -361,7 +363,9 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full) const index_t n=30; const index_t dim=3; - CMath::init_random(12345); + // use fixed seed + set_global_seed(12345); + float64_t difference=0.5; // streaming data generator for mean shift distributions @@ -398,7 +402,9 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete) const index_t n=20; const index_t dim=3; - CMath::init_random(12345); + // use fixed seed + set_global_seed(12345); + float64_t difference=0.5; // streaming data generator for mean shift distributions @@ -435,7 +441,9 @@ TEST(QuadraticTimeMMD, perform_test_spectrum) const index_t n=30; const index_t dim=3; - CMath::init_random(12345); + // use fixed seed + set_global_seed(12345); + float64_t difference=0.5; // streaming data generator for mean shift distributions @@ -503,11 +511,11 @@ TEST(QuadraticTimeMMD, precomputed_vs_nonprecomputed) mmd->set_num_null_samples(num_null_samples); mmd->set_null_approximation_method(NAM_PERMUTATION); - CMath::init_random(12345); + set_global_seed(12345); SGVector result_1=mmd->sample_null(); mmd->precompute_kernel_matrix(false); - CMath::init_random(12345); + set_global_seed(12345); SGVector result_2=mmd->sample_null(); ASSERT_EQ(result_1.size(), result_2.size()); @@ -522,8 +530,8 @@ TEST(QuadraticTimeMMD, multikernel_compute_statistic) const index_t dim=1; const index_t num_kernels=10; - CMath::init_random(12345); float64_t difference=0.5; + set_global_seed(12345); auto gen_p=some(0, dim, 0); auto gen_q=some(difference, dim, 0); @@ -560,8 +568,8 @@ TEST(QuadraticTimeMMD, multikernel_compute_variance_h1) const index_t dim=1; const index_t num_kernels=10; - CMath::init_random(12345); float64_t difference=0.5; + set_global_seed(12345); auto gen_p=some(0, dim, 0); auto gen_q=some(difference, dim, 0); @@ -598,8 +606,8 @@ TEST(QuadraticTimeMMD, multikernel_compute_test_power) const index_t dim=1; const index_t num_kernels=10; - CMath::init_random(12345); float64_t difference=0.5; + set_global_seed(12345); auto gen_p=some(0, dim, 0); auto gen_q=some(difference, dim, 0); @@ -640,7 +648,6 @@ TEST(QuadraticTimeMMD, multikernel_perform_test) const index_t num_null_samples=200; const index_t cache_size=10; - CMath::init_random(12345); float64_t difference=0.5; auto gen_p=some(0, dim, 0); @@ -656,7 +663,7 @@ TEST(QuadraticTimeMMD, multikernel_perform_test) float64_t tau=pow(2, sigma); mmd->multikernel()->add_kernel(new CGaussianKernel(cache_size, tau)); } - CMath::init_random(12345); + set_global_seed(12345); SGVector rejections_multiple=mmd->multikernel()->perform_test(alpha); 
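In the statistical-testing hunks above, every place that needs a reproducible null distribution now resets the global generator with set_global_seed(12345) immediately before the randomized call, instead of CMath::init_random(12345). A minimal sketch of that reset-and-replay idiom; illustrative only, with SGVector::random standing in for the mmd->sample_null() calls of the tests:

#include <shogun/base/init.h>
#include <shogun/lib/SGVector.h>

using namespace shogun;

int main()
{
    init_shogun_with_defaults();

    // reset the global seed before each randomized run so the two draws are
    // comparable, as the MMD tests above do around sample_null()
    set_global_seed(12345);
    SGVector<float64_t> run_1(5);
    run_1.random(0.0, 1.0);

    set_global_seed(12345);
    SGVector<float64_t> run_2(5);
    run_2.random(0.0, 1.0);

    run_1.display_vector("run_1");
    run_2.display_vector("run_2"); // should match run_1 given the reset seed

    exit_shogun();
    return 0;
}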
mmd->multikernel()->cleanup(); @@ -665,7 +672,7 @@ TEST(QuadraticTimeMMD, multikernel_perform_test) { float64_t tau=pow(2, sigma); mmd->set_kernel(new CGaussianKernel(cache_size, tau)); - CMath::init_random(12345); + set_global_seed(12345); rejections_single[i]=mmd->perform_test(alpha); } diff --git a/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc b/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc index dc7da4c7328..51c1cfdefb1 100644 --- a/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc +++ b/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc @@ -140,12 +140,11 @@ TEST(TwoDistributionTest, compute_distance_streaming) mock_obj->set_num_samples_p(m); mock_obj->set_num_samples_q(n); - CMath::init_random(12345); + set_global_seed(12345); auto euclidean_distance=some(); auto distance=mock_obj->compute_distance(euclidean_distance); auto distance_mat1=distance->get_distance_matrix(); - CMath::init_random(12345); auto feats_p=static_cast*>(gen_p->get_streamed_features(m)); auto feats_q=static_cast*>(gen_q->get_streamed_features(n)); euclidean_distance->init(feats_p, feats_q); @@ -175,12 +174,11 @@ TEST(TwoDistributionTest, compute_joint_distance_streaming) mock_obj->set_num_samples_p(m); mock_obj->set_num_samples_q(n); - CMath::init_random(12345); + set_global_seed(12345); auto euclidean_distance=some(); auto distance=mock_obj->compute_joint_distance(euclidean_distance); auto distance_mat1=distance->get_distance_matrix(); - CMath::init_random(12345); auto feats_p=static_cast*>(gen_p->get_streamed_features(m)); auto feats_q=static_cast*>(gen_q->get_streamed_features(n)); diff --git a/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc b/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc index 2147fb1d937..8427c16659d 100644 --- a/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc +++ b/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc @@ -91,11 +91,7 @@ TEST(CrossValidationMMD, biased_full) cv.m_num_runs=num_runs; cv.m_rejections=SGMatrix(num_runs*num_folds, num_kernels); - // set seed like this is about to make CrossValidationSplitting will have a - // same seed during this test. Not sure if it's a good thing to do. - cv.m_kfold_x->set_seed(12345); - cv.m_kfold_y->set_seed(12345); - CMath::init_random(12345); + set_global_seed(12345); cv(kernel_mgr); kernel_mgr.unset_precomputed_distance(); @@ -105,15 +101,11 @@ TEST(CrossValidationMMD, biased_full) auto kfold_p=some(new CBinaryLabels(dummy_labels_p), num_folds); auto kfold_q=some(new CBinaryLabels(dummy_labels_q), num_folds); - // set the seed for CrossValidationSplitting - kfold_p->set_seed(12345); - kfold_q->set_seed(12345); - auto permutation_mmd=PermutationMMD(); permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - CMath::init_random(12345); + set_global_seed(12345); for (auto k=0; k(num_runs*num_folds, num_kernels); - // set seed like this is about to make CrossValidationSplitting will have a - // same seed during this test. Not sure if it's a good thing to do. 
- cv.m_kfold_x->set_seed(12345); - cv.m_kfold_y->set_seed(12345); - CMath::init_random(12345); - CMath::init_random(12345); cv(kernel_mgr); kernel_mgr.unset_precomputed_distance(); @@ -214,10 +200,7 @@ TEST(CrossValidationMMD, unbiased_full) permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - // set the seed for CrossValidationSplitting - kfold_p->set_seed(12345); - kfold_q->set_seed(12345); - CMath::init_random(12345); + set_global_seed(12345); for (auto k=0; k(num_runs*num_folds, num_kernels); - // set seed like this is about to make CrossValidationSplitting will have a - // same seed during this test. Not sure if it's a good thing to do. - cv.m_kfold_x->set_seed(12345); - cv.m_kfold_y->set_seed(12345); - CMath::init_random(12345); - CMath::init_random(12345); - CMath::init_random(12345); + set_global_seed(12345); + set_global_seed(12345); + set_global_seed(12345); cv(kernel_mgr); kernel_mgr.unset_precomputed_distance(); @@ -319,10 +298,7 @@ TEST(CrossValidationMMD, unbiased_incomplete) permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - // set the seed for CrossValidationSplitting - kfold_p->set_seed(12345); - kfold_q->set_seed(12345); - CMath::init_random(12345); + set_global_seed(12345); for (auto k=0; k result_1=permutation_mmd(kernel_matrix); auto compute_mmd=ComputeMMD(); @@ -101,7 +101,7 @@ TEST(PermutationMMD, biased_full_single_kernel) Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); - CMath::init_random(12345); + set_global_seed(12345); for (auto i=0; i perm(kernel_matrix.num_rows); @@ -115,7 +115,7 @@ TEST(PermutationMMD, biased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - CMath::init_random(12345); + set_global_seed(12345); for (auto i=0; i result_1=permutation_mmd(kernel_matrix); auto compute_mmd=ComputeMMD(); compute_mmd.m_n_x=n; compute_mmd.m_n_y=m; compute_mmd.m_stype=stype; - CMath::init_random(12345); + set_global_seed(12345); Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); @@ -196,7 +196,7 @@ TEST(PermutationMMD, unbiased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - CMath::init_random(12345); + set_global_seed(12345); for (auto i=0; i result_1=permutation_mmd(kernel_matrix); auto compute_mmd=ComputeMMD(); @@ -261,7 +261,7 @@ TEST(PermutationMMD, unbiased_incomplete_single_kernel) Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); - CMath::init_random(12345); + set_global_seed(12345); SGVector result_2(num_null_samples); for (auto i=0; i inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); @@ -333,10 +333,10 @@ TEST(PermutationMMD, precomputed_vs_non_precomputed_single_kernel) permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - CMath::init_random(12345); + set_global_seed(12345); SGVector result_1=permutation_mmd(kernel_matrix); - CMath::init_random(12345); + set_global_seed(12345); SGVector result_2=permutation_mmd(Kernel(kernel)); EXPECT_TRUE(result_1.size()==result_2.size()); @@ -389,7 +389,7 @@ TEST(PermutationMMD, biased_full_multi_kernel) permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - CMath::init_random(12345); + set_global_seed(12345); SGMatrix null_samples=permutation_mmd(kernel_mgr); kernel_mgr.unset_precomputed_distance(); @@ -400,7 +400,7 @@ 
TEST(PermutationMMD, biased_full_multi_kernel) { CKernel* kernel=kernel_mgr.kernel_at(k); kernel->init(merged_feats, merged_feats); - CMath::init_random(12345); + set_global_seed(12345); SGVector curr_null_samples=permutation_mmd(kernel->get_kernel_matrix()); ASSERT_EQ(curr_null_samples.size(), null_samples.num_rows); @@ -455,7 +455,7 @@ TEST(PermutationMMD, unbiased_full_multi_kernel) permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - CMath::init_random(12345); + set_global_seed(12345); SGMatrix null_samples=permutation_mmd(kernel_mgr); kernel_mgr.unset_precomputed_distance(); @@ -466,7 +466,7 @@ TEST(PermutationMMD, unbiased_full_multi_kernel) { CKernel* kernel=kernel_mgr.kernel_at(k); kernel->init(merged_feats, merged_feats); - CMath::init_random(12345); + set_global_seed(12345); SGVector curr_null_samples=permutation_mmd(kernel->get_kernel_matrix()); ASSERT_EQ(curr_null_samples.size(), null_samples.num_rows); @@ -521,7 +521,7 @@ TEST(PermutationMMD, unbiased_incomplete_multi_kernel) permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - CMath::init_random(12345); + set_global_seed(12345); SGMatrix null_samples=permutation_mmd(kernel_mgr); kernel_mgr.unset_precomputed_distance(); @@ -532,7 +532,7 @@ TEST(PermutationMMD, unbiased_incomplete_multi_kernel) { CKernel* kernel=kernel_mgr.kernel_at(k); kernel->init(merged_feats, merged_feats); - CMath::init_random(12345); + set_global_seed(12345); SGVector curr_null_samples=permutation_mmd(kernel->get_kernel_matrix()); ASSERT_EQ(curr_null_samples.size(), null_samples.num_rows); diff --git a/tests/unit/statistical_testing/internals/WithinBlockPermutation_unittest.cc b/tests/unit/statistical_testing/internals/WithinBlockPermutation_unittest.cc index 8d4f1ae0d51..2e94f768e90 100644 --- a/tests/unit/statistical_testing/internals/WithinBlockPermutation_unittest.cc +++ b/tests/unit/statistical_testing/internals/WithinBlockPermutation_unittest.cc @@ -80,7 +80,7 @@ TEST(WithinBlockPermutation, biased_full) // compute using within-block-permutation functor operation compute=shogun::internal::mmd::WithinBlockPermutation(n, m, ST_BIASED_FULL); - CMath::init_random(12345); + set_global_seed(12345); auto result_1=compute(mat); auto mmd=shogun::internal::mmd::ComputeMMD(); @@ -97,7 +97,7 @@ TEST(WithinBlockPermutation, biased_full) SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::init_random(12345); + set_global_seed(12345); CMath::permute(perminds); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); @@ -108,7 +108,7 @@ TEST(WithinBlockPermutation, biased_full) SGVector inds(mat.num_rows); std::iota(inds.vector, inds.vector+inds.vlen, 0); - CMath::init_random(12345); + set_global_seed(12345); CMath::permute(inds); feats->add_subset(inds); kernel->init(feats, feats); @@ -153,7 +153,7 @@ TEST(WithinBlockPermutation, unbiased_full) // compute using within-block-permutation functor operation compute=shogun::internal::mmd::WithinBlockPermutation(n, m, ST_UNBIASED_FULL); - CMath::init_random(12345); + set_global_seed(12345); auto result_1=compute(mat); auto mmd=shogun::internal::mmd::ComputeMMD(); @@ -169,7 +169,7 @@ TEST(WithinBlockPermutation, unbiased_full) perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::init_random(12345); + set_global_seed(12345); CMath::permute(perminds); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix 
permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); @@ -180,7 +180,7 @@ TEST(WithinBlockPermutation, unbiased_full) SGVector inds(mat.num_rows); std::iota(inds.vector, inds.vector+inds.vlen, 0); - CMath::init_random(12345); + set_global_seed(12345); CMath::permute(inds); feats->add_subset(inds); kernel->init(feats, feats); @@ -224,7 +224,7 @@ TEST(WithinBlockPermutation, unbiased_incomplete) // compute using within-block-permutation functor operation compute=shogun::internal::mmd::WithinBlockPermutation(n, n, ST_UNBIASED_INCOMPLETE); - CMath::init_random(12345); + set_global_seed(12345); auto result_1=compute(mat); auto mmd=shogun::internal::mmd::ComputeMMD(); @@ -240,7 +240,7 @@ TEST(WithinBlockPermutation, unbiased_incomplete) perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::init_random(12345); + set_global_seed(12345); CMath::permute(perminds); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); @@ -251,7 +251,7 @@ TEST(WithinBlockPermutation, unbiased_incomplete) SGVector inds(mat.num_rows); std::iota(inds.vector, inds.vector+inds.vlen, 0); - CMath::init_random(12345); + set_global_seed(12345); CMath::permute(inds); feats->add_subset(inds); kernel->init(feats, feats); diff --git a/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc b/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc index faa744330bd..211da9a9ee1 100644 --- a/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc +++ b/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc @@ -20,10 +20,11 @@ TEST(HierarchicalMultilabelModel, get_joint_feature_vector_1) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = CMath::random(-100, 100); + feats[i] = m_rng->random(-100, 100); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -79,10 +80,11 @@ TEST(HierarchicalMultilabelModel, get_joint_feature_vector_2) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = CMath::random(-100, 100); + feats[i] = m_rng->random(-100, 100); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -205,10 +207,11 @@ TEST(HierarchicalMultilabelModel, argmax) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = CMath::random(-100, 100); + feats[i] = m_rng->random(-100, 100); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -241,7 +244,7 @@ TEST(HierarchicalMultilabelModel, argmax) for (index_t i = 0; i < w.vlen; i++) { - w[i] = CMath::random(-1, 1); + w[i] = m_rng->random(-1, 1); } CResultSet * ret_1 = model->argmax(w, 0, true); @@ -316,10 +319,11 @@ TEST(HierarchicalMultilabelModel, argmax_leaf_nodes_mandatory) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = CMath::random(-100, 100); + feats[i] = m_rng->random(-100, 100); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -352,7 +356,7 @@ TEST(HierarchicalMultilabelModel, argmax_leaf_nodes_mandatory) for (index_t i = 0; i < w.vlen; i++) { - w[i] = 
CMath::random(-1, 1); + w[i] = m_rng->random(-1, 1); } CResultSet * ret_1 = model->argmax(w, 0, true); diff --git a/tests/unit/structure/MultilabelCLRModel_unittest.cc b/tests/unit/structure/MultilabelCLRModel_unittest.cc index 71e97666688..312c909e9a1 100644 --- a/tests/unit/structure/MultilabelCLRModel_unittest.cc +++ b/tests/unit/structure/MultilabelCLRModel_unittest.cc @@ -21,10 +21,10 @@ using namespace shogun; TEST(MultilabelCLRModel, get_joint_feature_vector_1) { SGMatrix feats(DIMS, NUM_SAMPLES); - + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = CMath::random(-100, 100); + feats[i] = m_rng->random(-100, 100); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -70,10 +70,10 @@ TEST(MultilabelCLRModel, get_joint_feature_vector_1) TEST(MultilabelCLRModel, get_joint_feature_vector_2) { SGMatrix feats(DIMS, NUM_SAMPLES); - + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = CMath::random(-100, 100); + feats[i] = m_rng->random(-100, 100); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -176,10 +176,10 @@ TEST(MultilabelCLRModel, delta_loss) TEST(MultilabelCLRModel, argmax) { SGMatrix feats(DIMS, NUM_SAMPLES); - + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = CMath::random(-100, 100); + feats[i] = m_rng->random(-100, 100); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -203,7 +203,7 @@ TEST(MultilabelCLRModel, argmax) for (index_t i = 0; i < w.vlen; i++) { - w[i] = CMath::random(-1, 1); + w[i] = m_rng->random(-1, 1); } CResultSet * ret_1 = model->argmax(w, 0, true); diff --git a/tests/unit/structure/PrimalMosekSOSVM_unittest.cc b/tests/unit/structure/PrimalMosekSOSVM_unittest.cc index 678e3ecc07a..f6ec0316e5c 100644 --- a/tests/unit/structure/PrimalMosekSOSVM_unittest.cc +++ b/tests/unit/structure/PrimalMosekSOSVM_unittest.cc @@ -18,7 +18,7 @@ using namespace shogun; TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds) { int32_t num_samples = 10; - CMath::init_random(17); + auto m_rng = std::unique_ptr(new CRandom(17)); // define factor type SGVector card(2); @@ -53,8 +53,8 @@ TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds) // add factors SGVector data1(2); - data1[0] = 2.0 * CMath::random(0.0, 1.0) - 1.0; - data1[1] = 2.0 * CMath::random(0.0, 1.0) - 1.0; + data1[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data1[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; SGVector var_index1(2); var_index1[0] = 0; var_index1[1] = 1; @@ -62,8 +62,8 @@ TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds) fg->add_factor(fac1); SGVector data2(2); - data2[0] = 2.0 * CMath::random(0.0, 1.0) - 1.0; - data2[1] = 2.0 * CMath::random(0.0, 1.0) - 1.0; + data2[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data2[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; SGVector var_index2(2); var_index2[0] = 1; var_index2[1] = 2; diff --git a/tests/unit/transfer/MALSAR_unittest.cc b/tests/unit/transfer/MALSAR_unittest.cc index e7f5e382d50..17ac5c90bb7 100644 --- a/tests/unit/transfer/MALSAR_unittest.cc +++ b/tests/unit/transfer/MALSAR_unittest.cc @@ -20,7 +20,7 @@ typedef std::pair SplittedDataset; SplittedDataset generate_data() { index_t num_samples = 50; - CMath::init_random(5); + set_global_seed(5); SGMatrix data = CDataGenerator::generate_gaussians(num_samples, 2, 2); CDenseFeatures* features = new CDenseFeatures(data); From fc97d5d2f4dc09fdaac7b30be91e96022a83a35d Mon Sep 17 00:00:00 2001 From: Tiramisu 1993 Date: 
Wed, 19 Jul 2017 23:25:35 +0800 Subject: [PATCH 2/9] restore some changed seed --- src/shogun/base/DynArray.h | 2 +- src/shogun/clustering/KMeansMiniBatch.cpp | 2 +- src/shogun/features/DataGenerator.cpp | 6 +++--- src/shogun/lib/SGVector.cpp | 2 +- src/shogun/mathematics/Math.h | 2 +- src/shogun/mathematics/Statistics.cpp | 6 +++--- src/shogun/mathematics/ajd/QDiag.cpp | 2 +- src/shogun/multiclass/LaRank.h | 2 +- src/shogun/optimization/liblinear/shogun_liblinear.cpp | 2 +- src/shogun/structure/TwoStateModel.cpp | 2 +- tests/unit/neuralnets/NeuralNetwork_unittest.cc | 8 ++++---- tests/unit/neuralnets/RBM_unittest.cc | 2 +- 12 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/shogun/base/DynArray.h b/src/shogun/base/DynArray.h index d5041e5f095..bb5a941604c 100644 --- a/src/shogun/base/DynArray.h +++ b/src/shogun/base/DynArray.h @@ -448,7 +448,7 @@ template class DynArray /** randomizes the array (not thread safe!) */ void shuffle() { - auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i=0; i<=current_num_elements-1; ++i) CMath::swap( array[i], diff --git a/src/shogun/clustering/KMeansMiniBatch.cpp b/src/shogun/clustering/KMeansMiniBatch.cpp index 4598e4df467..21447aa8701 100644 --- a/src/shogun/clustering/KMeansMiniBatch.cpp +++ b/src/shogun/clustering/KMeansMiniBatch.cpp @@ -131,7 +131,7 @@ SGVector CKMeansMiniBatch::mbchoose_rand(int32_t b, int32_t num) { SGVector chosen=SGVector(num); SGVector ret=SGVector(b); - auto rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto rng = std::unique_ptr(new CRandom()); chosen.zero(); int32_t ch=0; while (ch CDataGenerator::generate_checkboard_data(int32_t num_classes int32_t dim, int32_t num_points, float64_t overlap) { int32_t points_per_class = num_points / num_classes; - auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto m_rng = std::unique_ptr(new CRandom()); int32_t grid_size = (int32_t ) CMath::ceil(CMath::sqrt((float64_t ) num_classes)); float64_t cell_size = (float64_t ) 1 / grid_size; @@ -88,7 +88,7 @@ SGMatrix CDataGenerator::generate_mean_data(index_t m, /* evtl. allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( dim, 2*m, target); - auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto m_rng = std::unique_ptr(new CRandom()); /* fill matrix with normal data */ for (index_t i=0; i<2*m; ++i) @@ -110,7 +110,7 @@ SGMatrix CDataGenerator::generate_sym_mix_gauss(index_t m, /* evtl. 
allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( 2, m, target); - auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto m_rng = std::unique_ptr(new CRandom()); /* rotation matrix */ SGMatrix rot=SGMatrix(2,2); rot(0, 0)=CMath::cos(angle); diff --git a/src/shogun/lib/SGVector.cpp b/src/shogun/lib/SGVector.cpp index 5ed8177c017..4071c8b75f0 100644 --- a/src/shogun/lib/SGVector.cpp +++ b/src/shogun/lib/SGVector.cpp @@ -614,7 +614,7 @@ void SGVector::vec1_plus_scalar_times_vec2(float32_t* vec1, template void SGVector::random_vector(T* vec, int32_t len, T min_value, T max_value) { - auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto m_rng = std::unique_ptr(new CRandom()); for (int32_t i=0; irandom(min_value, max_value); } diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h index 9d343f84a2e..348b88cc55d 100644 --- a/src/shogun/mathematics/Math.h +++ b/src/shogun/mathematics/Math.h @@ -1027,7 +1027,7 @@ class CMath : public CSGObject else { auto m_rng = - std::unique_ptr(new CRandom(sg_random_seed)); + std::unique_ptr(new CRandom()); for (index_t i = 0; i < v.vlen; ++i) swap(v[i], v[m_rng->random(i, v.vlen - 1)]); } diff --git a/src/shogun/mathematics/Statistics.cpp b/src/shogun/mathematics/Statistics.cpp index 943ac9cb8a9..47e4913a06e 100644 --- a/src/shogun/mathematics/Statistics.cpp +++ b/src/shogun/mathematics/Statistics.cpp @@ -325,7 +325,7 @@ SGVector CStatistics::sample_indices(int32_t sample_size, int32_t N) int32_t* idxs=SG_MALLOC(int32_t,N); int32_t i, rnd; int32_t* permuted_idxs=SG_MALLOC(int32_t,sample_size); - auto rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto rng = std::unique_ptr(new CRandom()); // reservoir sampling for (i=0; i CStatistics::sample_from_gaussian(SGVector mean, int32_t dim=mean.vlen; Map mu(mean.vector, mean.vlen); Map c(cov.matrix, cov.num_rows, cov.num_cols); - auto rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto rng = std::unique_ptr(new CRandom()); // generate samples, z, from N(0, I), DxN SGMatrix S(dim, N); @@ -775,7 +775,7 @@ SGMatrix CStatistics::sample_from_gaussian(SGVector mean, typedef SparseMatrix MatrixType; const MatrixType &c=EigenSparseUtil::toEigenSparse(cov); - auto rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto rng = std::unique_ptr(new CRandom()); SimplicialLLT llt; diff --git a/src/shogun/mathematics/ajd/QDiag.cpp b/src/shogun/mathematics/ajd/QDiag.cpp index c8e2df57616..38ace2d5568 100644 --- a/src/shogun/mathematics/ajd/QDiag.cpp +++ b/src/shogun/mathematics/ajd/QDiag.cpp @@ -16,7 +16,7 @@ SGMatrix CQDiag::diagonalize(SGNDArray C, SGMatrix V; - auto rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto rng = std::unique_ptr(new CRandom()); if (V0.num_rows == N && V0.num_cols == N) { V = V0.clone(); diff --git a/src/shogun/multiclass/LaRank.h b/src/shogun/multiclass/LaRank.h index ddfa33cdf93..b951f5d03f7 100644 --- a/src/shogun/multiclass/LaRank.h +++ b/src/shogun/multiclass/LaRank.h @@ -250,7 +250,7 @@ namespace shogun LaRankPattern & sample () { auto m_rng = - std::unique_ptr(new CRandom(sg_random_seed)); + std::unique_ptr(new CRandom()); ASSERT(!empty()) while (true) { diff --git a/src/shogun/optimization/liblinear/shogun_liblinear.cpp b/src/shogun/optimization/liblinear/shogun_liblinear.cpp index 5f24dc7e2d2..2d8637ffdc9 100644 --- a/src/shogun/optimization/liblinear/shogun_liblinear.cpp +++ b/src/shogun/optimization/liblinear/shogun_liblinear.cpp @@ -512,7 +512,7 @@ void Solver_MCSVM_CS::solve() } state->inited = true; } - auto 
m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto m_rng = std::unique_ptr(new CRandom()); while(iter < max_iter && !CSignal::cancel_computations()) { double stopping = -CMath::INFTY; diff --git a/src/shogun/structure/TwoStateModel.cpp b/src/shogun/structure/TwoStateModel.cpp index b8a585e8436..5ddc43f6ddc 100644 --- a/src/shogun/structure/TwoStateModel.cpp +++ b/src/shogun/structure/TwoStateModel.cpp @@ -269,7 +269,7 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, SGVector< int32_t > ll(num_exm*exm_len); ll.zero(); int32_t rnb, rl, rp; - auto m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + auto m_rng = std::unique_ptr(new CRandom()); for ( int32_t i = 0 ; i < num_exm ; ++i) { SGVector< int32_t > lab(exm_len); diff --git a/tests/unit/neuralnets/NeuralNetwork_unittest.cc b/tests/unit/neuralnets/NeuralNetwork_unittest.cc index f1080283052..2c136c2dd53 100644 --- a/tests/unit/neuralnets/NeuralNetwork_unittest.cc +++ b/tests/unit/neuralnets/NeuralNetwork_unittest.cc @@ -56,7 +56,7 @@ TEST(NeuralNetwork, backpropagation_linear) { float64_t tolerance = 1e-9; - set_global_seed(100); + set_global_seed(10); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(5)); @@ -88,7 +88,7 @@ TEST(NeuralNetwork, neural_layers_builder) { float64_t tolerance = 1e-9; - set_global_seed(100); + set_global_seed(10); CNeuralLayers* layers = new CNeuralLayers(); layers->input(5) @@ -123,7 +123,7 @@ TEST(NeuralNetwork, backpropagation_logistic) { float64_t tolerance = 1e-9; - set_global_seed(100); + set_global_seed(10); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(5)); @@ -155,7 +155,7 @@ TEST(NeuralNetwork, backpropagation_softmax) { float64_t tolerance = 1e-9; - set_global_seed(100); + set_global_seed(10); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(5)); diff --git a/tests/unit/neuralnets/RBM_unittest.cc b/tests/unit/neuralnets/RBM_unittest.cc index 26e5e0b92b7..bc53de63ec5 100644 --- a/tests/unit/neuralnets/RBM_unittest.cc +++ b/tests/unit/neuralnets/RBM_unittest.cc @@ -84,7 +84,7 @@ TEST(RBM, gibbs_sampling) TEST(RBM, free_energy_binary) { - set_global_seed(10); + set_global_seed(100); int32_t num_visible = 5; int32_t num_hidden = 6; From e5a20fe8e67a54a465a1502cd561237791caff0c Mon Sep 17 00:00:00 2001 From: Tiramisu 1993 Date: Wed, 19 Jul 2017 23:26:22 +0800 Subject: [PATCH 3/9] add C-11 random feature --- src/shogun/base/SGObject.cpp | 3 +- src/shogun/base/init.cpp | 29 +----- src/shogun/base/init.h | 184 ++++++++++++++++++--------------- src/shogun/mathematics/Math.h | 3 +- src/shogun/multiclass/LaRank.h | 3 +- 5 files changed, 103 insertions(+), 119 deletions(-) diff --git a/src/shogun/base/SGObject.cpp b/src/shogun/base/SGObject.cpp index 7484a432bb5..f5d10e3224f 100644 --- a/src/shogun/base/SGObject.cpp +++ b/src/shogun/base/SGObject.cpp @@ -36,7 +36,6 @@ namespace shogun { - extern uint32_t sg_random_seed; #ifdef HAVE_CXX11 typedef std::unordered_map ParametersMap; #else @@ -495,7 +494,7 @@ void CSGObject::init() m_parameters = new Parameter(); m_model_selection_parameters = new Parameter(); m_gradient_parameters=new Parameter(); - m_rng = std::unique_ptr(new CRandom(sg_random_seed)); + m_rng = std::unique_ptr(new CRandom()); m_generic = PT_NOT_GENERIC; m_load_pre_called = false; m_load_post_called = false; diff --git a/src/shogun/base/init.cpp b/src/shogun/base/init.cpp index 
335def0eb98..ae64184719a 100644 --- a/src/shogun/base/init.cpp +++ b/src/shogun/base/init.cpp @@ -31,15 +31,6 @@ shogun::CMap* sg_mallocs=NULL; #include #endif -#ifdef _WIN32 -#define _CRT_RAND_S -#include -#endif - -#ifdef DEV_RANDOM -#include -#endif - namespace shogun { Parallel* sg_parallel=NULL; @@ -235,25 +226,7 @@ namespace shogun uint32_t generate_seed() { - uint32_t seed; -#if defined(_WIN32) - rand_s(&seed); -#elif defined(HAVE_ARC4RANDOM) - seed = arc4random(); -#elif defined(DEV_RANDOM) - int fd = open(DEV_RANDOM, O_RDONLY); - ASSERT(fd >= 0); - ssize_t actual_read = - read(fd, reinterpret_cast(&seed), sizeof(seed)); - close(fd); - ASSERT(actual_read == sizeof(seed)); -#else - SG_SWARNING("Not safe seed for the PRNG\n"); - struct timeval tv; - gettimeofday(&tv, NULL); - seed = (uint32_t)(4223517 * getpid() * tv.tv_sec * tv.tv_usec); -#endif - return seed; + return std::random_device()(); } void set_global_seed(uint32_t seed) diff --git a/src/shogun/base/init.h b/src/shogun/base/init.h index 982a1494dcf..1b3c1087624 100644 --- a/src/shogun/base/init.h +++ b/src/shogun/base/init.h @@ -14,6 +14,7 @@ #include #include +#include #include namespace shogun @@ -24,91 +25,104 @@ namespace shogun class Parallel; class CRandom; class SGLinalg; - -/** This function must be called before libshogun is used. Usually shogun does - * not provide any output messages (neither debugging nor error; apart from - * exceptions). This function allows one to specify customized output - * callback functions and a callback function to check for exceptions: - * - * @param print_message function pointer to print a message - * @param print_warning function pointer to print a warning message - * @param print_error function pointer to print an error message (this will be - * printed before shogun throws an exception) - * - * @param cancel_computations function pointer to check for exception - * - */ -void init_shogun(void (*print_message)(FILE* target, const char* str) = NULL, - void (*print_warning)(FILE* target, const char* str) = NULL, - void (*print_error)(FILE* target, const char* str) = NULL, - void (*cancel_computations)(bool &delayed, bool &immediately)=NULL); - -/** init shogun with defaults */ -void init_shogun_with_defaults(); - -/** This function must be called when one stops using libshogun. 
It will - * perform a number of cleanups */ -void exit_shogun(); - -/** set the global io object - * - * @param io io object to use - */ -void set_global_io(SGIO* io); - -/** get the global io object - * - * @return io object - */ -SGIO* get_global_io(); - -/** set the global parallel object - * - * @param parallel parallel object to use - */ -void set_global_parallel(Parallel* parallel); - -/** get the global parallel object - * - * @return parallel object - */ -Parallel* get_global_parallel(); - -/** set the global version object - * - * @param version version object to use - */ -void set_global_version(Version* version); - -/** get the global version object - * - * @return version object - */ -Version* get_global_version(); - -/** set the global math object - * - * @param math math object to use - */ -void set_global_math(CMath* math); - -/** get the global math object - * - * @return math object - */ -CMath* get_global_math(); - -/** Set global random seed - * @param seed seed for random generator - */ -void set_global_seed(uint32_t seed); - -/** get global random seed - * @return random seed - */ -uint32_t get_global_seed(); - -uint32_t generate_seed(); + extern uint32_t sg_random_seed; + + /** This function must be called before libshogun is used. Usually shogun + * does not provide any output messages (neither debugging nor error; apart + * from exceptions). This function allows one to specify customized output + * callback functions and a callback function to check for exceptions: + * + * @param print_message function pointer to print a message + * @param print_warning function pointer to print a warning message + * @param print_error function pointer to print an error message (this will + * be printed before shogun throws an exception) + * + * @param cancel_computations function pointer to check for exception + * + */ + void init_shogun( + void (*print_message)(FILE* target, const char* str) = NULL, + void (*print_warning)(FILE* target, const char* str) = NULL, + void (*print_error)(FILE* target, const char* str) = NULL, + void (*cancel_computations)(bool& delayed, bool& immediately) = NULL); + + /** init shogun with defaults */ + void init_shogun_with_defaults(); + + /** This function must be called when one stops using libshogun. 
It will + * perform a number of cleanups */ + void exit_shogun(); + + /** set the global io object + * + * @param io io object to use + */ + void set_global_io(SGIO* io); + + /** get the global io object + * + * @return io object + */ + SGIO* get_global_io(); + + /** set the global parallel object + * + * @param parallel parallel object to use + */ + void set_global_parallel(Parallel* parallel); + + /** get the global parallel object + * + * @return parallel object + */ + Parallel* get_global_parallel(); + + /** set the global version object + * + * @param version version object to use + */ + void set_global_version(Version* version); + + /** get the global version object + * + * @return version object + */ + Version* get_global_version(); + + /** set the global math object + * + * @param math math object to use + */ + void set_global_math(CMath* math); + + /** get the global math object + * + * @return math object + */ + CMath* get_global_math(); + + /** Set global random seed + * @param seed seed for random generator + */ + void set_global_seed(uint32_t seed); + + /** get global random seed + * @return random seed + */ + uint32_t get_global_seed(); + + /** + * Generate a seed for PRNG + * + * @return entropy for PRNG + */ + uint32_t generate_seed(); + + template + T get_prng() + { + return T(sg_random_seed); + } #ifndef SWIG // SWIG should skip this part /** get the global linalg library object diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h index 348b88cc55d..4e9fb8ddd16 100644 --- a/src/shogun/mathematics/Math.h +++ b/src/shogun/mathematics/Math.h @@ -1026,8 +1026,7 @@ class CMath : public CSGObject } else { - auto m_rng = - std::unique_ptr(new CRandom()); + auto m_rng = std::unique_ptr(new CRandom()); for (index_t i = 0; i < v.vlen; ++i) swap(v[i], v[m_rng->random(i, v.vlen - 1)]); } diff --git a/src/shogun/multiclass/LaRank.h b/src/shogun/multiclass/LaRank.h index b951f5d03f7..905597efb0d 100644 --- a/src/shogun/multiclass/LaRank.h +++ b/src/shogun/multiclass/LaRank.h @@ -249,8 +249,7 @@ namespace shogun LaRankPattern & sample () { - auto m_rng = - std::unique_ptr(new CRandom()); + auto m_rng = std::unique_ptr(new CRandom()); ASSERT(!empty()) while (true) { From a3ebf996e016fb85c1f4356213d3b21a24d5523c Mon Sep 17 00:00:00 2001 From: Tiramisu 1993 Date: Wed, 19 Jul 2017 23:28:17 +0800 Subject: [PATCH 4/9] remove unnecessary cmake config --- src/shogun/CMakeLists.txt | 12 ------------ src/shogun/lib/config.h.in | 4 ---- 2 files changed, 16 deletions(-) diff --git a/src/shogun/CMakeLists.txt b/src/shogun/CMakeLists.txt index dc706b7e724..376b5b064ef 100644 --- a/src/shogun/CMakeLists.txt +++ b/src/shogun/CMakeLists.txt @@ -426,18 +426,6 @@ SHOGUN_DEPENDENCIES( SCOPE PRIVATE CONFIG_FLAG HAVE_ARPREC) -###### checks for random -CHECK_FUNCTION_EXISTS(arc4random HAVE_ARC4RANDOM) -IF(NOT HAVE_ARC4RANDOM) - # assume that /dev/random is non-blocking if /dev/urandom does not exist - if(EXISTS /dev/urandom) - set(DEV_RANDOM "/dev/urandom" CACHE INTERNAL "" FORCE) - elseif( EXISTS /dev/random ) - set(DEV_RANDOM "/dev/random" CACHE INTERNAL "" FORCE) - endif() -ENDIF() - - ################## linker optimisations OPTION(INCREMENTAL_LINKING "Enable incremantal linking") SET(INCREMENTAL_LINKING_DIR ${CMAKE_BINARY_DIR}/linker_cache diff --git a/src/shogun/lib/config.h.in b/src/shogun/lib/config.h.in index b0a17581ec1..f4441b4ccd6 100644 --- a/src/shogun/lib/config.h.in +++ b/src/shogun/lib/config.h.in @@ -134,8 +134,4 @@ /* does the compiler support abi::__cxa_demangle */ 
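/* --- Editorial sketch (not part of the patch): intended usage of the new seeding API
 * declared in src/shogun/base/init.h above. get_prng<T>() simply constructs a PRNG of type T
 * from the global seed (sg_random_seed), and generate_seed() now draws its entropy from
 * std::random_device. The engine type and the function name below are illustrative only.
 *
 *   #include <shogun/base/init.h>
 *   #include <random>
 *
 *   void seeding_sketch()
 *   {
 *       shogun::set_global_seed(12345);                    // fixed seed, reproducible run
 *       auto engine = shogun::get_prng<std::mt19937_64>(); // any C++11 engine, seeded globally
 *       shogun::set_global_seed(shogun::generate_seed());  // or reseed from std::random_device
 *   }
 */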
#cmakedefine HAVE_CXA_DEMANGLE 1 -/* random related defines */ -#cmakedefine HAVE_ARC4RANDOM 1 -#cmakedefine DEV_RANDOM "@DEV_RANDOM@" - #endif /* __SHOGUN_LIB_CONFIG_H__ */ From 80f7376150eb80981d9c47d1eded32edd3c28fdf Mon Sep 17 00:00:00 2001 From: Tiramisu 1993 Date: Wed, 19 Jul 2017 23:29:18 +0800 Subject: [PATCH 5/9] fix unit test of CQuadraticTimeMMD and TwoDistribut --- .../undocumented/libshogun/kernel_custom.cpp | 4 ++-- .../libshogun/kernel_custom_kernel.cpp | 4 ++-- src/shogun/machine/gp/EPInferenceMethod.cpp | 4 ++-- .../internals/mmd/CrossValidationMMD.h | 4 ++-- .../internals/mmd/PermutationMMD.h | 3 ++- src/shogun/structure/TwoStateModel.cpp | 3 ++- tests/unit/kernel/CustomKernel_unittest.cc | 3 ++- .../neuralnets/NeuralLinearLayer_unittest.cc | 2 +- .../KernelSelection_unittest.cc | 2 +- .../QuadraticTimeMMD_unittest.cc | 10 ++++---- .../TwoDistributionTest_unittest.cc | 20 ++++++++++++---- .../internals/CrossValidationMMD_unittest.cc | 2 -- .../internals/PermutationMMD_unittest.cc | 23 +++++++++++-------- 13 files changed, 49 insertions(+), 35 deletions(-) diff --git a/examples/undocumented/libshogun/kernel_custom.cpp b/examples/undocumented/libshogun/kernel_custom.cpp index ce360e10446..2a96e0685e3 100644 --- a/examples/undocumented/libshogun/kernel_custom.cpp +++ b/examples/undocumented/libshogun/kernel_custom.cpp @@ -33,11 +33,11 @@ void test_custom_kernel_subsets() /* create a random permutation */ SGVector subset(m); - + auto prng = std::unique_ptr(new CRandom()); for (index_t run=0; run<100; ++run) { subset.range_fill(); - CMath::permute(subset); + CMath::permute(subset, prng.get()); // subset.display_vector("permutation"); features->add_subset(subset); k->init(features, features); diff --git a/examples/undocumented/libshogun/kernel_custom_kernel.cpp b/examples/undocumented/libshogun/kernel_custom_kernel.cpp index c7f9c8560a5..c2d36c2ace2 100644 --- a/examples/undocumented/libshogun/kernel_custom_kernel.cpp +++ b/examples/undocumented/libshogun/kernel_custom_kernel.cpp @@ -31,11 +31,11 @@ void test_custom_kernel_subsets() /* create a random permutation */ SGVector subset(m); - + auto prng = std::unique_ptr(new CRandom()); for (index_t run=0; run<100; ++run) { subset.range_fill(); - CMath::permute(subset); + CMath::permute(subset, prng.get()); // subset.display_vector("permutation"); features->add_subset(subset); k->init(features, features); diff --git a/src/shogun/machine/gp/EPInferenceMethod.cpp b/src/shogun/machine/gp/EPInferenceMethod.cpp index 5ee6ae0ca9c..e76a1b9ad3f 100644 --- a/src/shogun/machine/gp/EPInferenceMethod.cpp +++ b/src/shogun/machine/gp/EPInferenceMethod.cpp @@ -230,7 +230,7 @@ void CEPInferenceMethod::update() float64_t nlZ_old=CMath::INFTY; uint32_t sweep=0; - + auto prng = std::unique_ptr(new CRandom()); while ((CMath::abs(m_nlZ-nlZ_old)>m_tol && sweep(m_xy_inds.size()); m_inverted_permuted_inds.set_const(-1); - + auto prng = std::unique_ptr(new CRandom()); for (auto n=0; nadd_subset(m_permuted_inds); SGVector inds=m_stack->get_last_subset()->get_subset_idx(); diff --git a/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h b/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h index 2008dea1ca4..ad48ee32f61 100644 --- a/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h +++ b/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h @@ -200,10 +200,11 @@ struct PermutationMMD : ComputeMMD { ASSERT(m_num_null_samples>0); allocate_permutation_inds(); + auto prng = std::unique_ptr(new CRandom()); for (auto n=0; n 
signal(num_features, distort.vlen); distort.range_fill(); + auto prng = std::unique_ptr(new CRandom()); for ( int32_t i = 0 ; i < num_features ; ++i ) { lf = ll; - CMath::permute(distort); + CMath::permute(distort, prng.get()); for ( int32_t j = 0 ; j < d1.vlen ; ++j ) d1[j] = distort[j]; diff --git a/tests/unit/kernel/CustomKernel_unittest.cc b/tests/unit/kernel/CustomKernel_unittest.cc index dad2f6a30ef..545585e3874 100644 --- a/tests/unit/kernel/CustomKernel_unittest.cc +++ b/tests/unit/kernel/CustomKernel_unittest.cc @@ -35,9 +35,10 @@ TEST(CustomKernelTest,add_row_subset) inds.range_fill(); index_t num_runs=10; + auto prng = std::unique_ptr(new CRandom()); for (index_t i=0; iadd_subset(inds); custom->add_row_subset(inds); diff --git a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc index b550858a8e3..a3ca90f227a 100644 --- a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc @@ -168,7 +168,7 @@ TEST(NeuralLinearLayer, compute_error) error_ref += 0.5*CMath::pow(y[i]-A[i],2)/y.num_cols; // compare - EXPECT_NEAR(error_ref, error, 1e-12); + EXPECT_NEAR(error_ref, error, 1e-10); SG_UNREF(layers); } diff --git a/tests/unit/statistical_testing/KernelSelection_unittest.cc b/tests/unit/statistical_testing/KernelSelection_unittest.cc index 900f709bcea..fe27a9fabfb 100644 --- a/tests/unit/statistical_testing/KernelSelection_unittest.cc +++ b/tests/unit/statistical_testing/KernelSelection_unittest.cc @@ -287,7 +287,7 @@ TEST(KernelSelectionMaxCrossValidation, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.0625, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } TEST(KernelSelectionMaxCrossValidation, linear_time_single_kernel_dense) diff --git a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc index 86305093866..4db0131ff92 100644 --- a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc +++ b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc @@ -354,7 +354,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_biased_full) // assert against local machine computed result mmd->set_statistic_type(ST_BIASED_FULL); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.0, 1E-10); + EXPECT_NEAR(p_value, 0.8, 1E-10); } TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full) @@ -393,7 +393,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_FULL); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.0, 1E-10); + EXPECT_NEAR(p_value, 0.8, 1E-10); } TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete) @@ -432,7 +432,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_INCOMPLETE); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.0, 1E-10); + EXPECT_NEAR(p_value, 0.6, 1E-10); } TEST(QuadraticTimeMMD, perform_test_spectrum) @@ -475,7 +475,7 @@ TEST(QuadraticTimeMMD, perform_test_spectrum) // assert against local machine computed result mmd->set_statistic_type(ST_BIASED_FULL); float64_t 
p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10); + EXPECT_NEAR(p_value_spectrum, 0.8, 1E-10); // unbiased case @@ -483,7 +483,7 @@ TEST(QuadraticTimeMMD, perform_test_spectrum) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_FULL); p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10); + EXPECT_NEAR(p_value_spectrum, 0.8, 1E-10); } TEST(QuadraticTimeMMD, precomputed_vs_nonprecomputed) diff --git a/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc b/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc index 51c1cfdefb1..32997ace8d0 100644 --- a/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc +++ b/tests/unit/statistical_testing/TwoDistributionTest_unittest.cc @@ -130,23 +130,28 @@ TEST(TwoDistributionTest, compute_distance_streaming) const index_t n=10; const index_t dim=1; const float64_t difference=0.5; + set_global_seed(12345); auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); + auto gen_p1 = new CMeanShiftDataGenerator(0, dim, 0); + auto gen_q1 = new CMeanShiftDataGenerator(difference, dim, 0); + auto mock_obj=some(); mock_obj->set_p(gen_p); mock_obj->set_q(gen_q); mock_obj->set_num_samples_p(m); mock_obj->set_num_samples_q(n); - set_global_seed(12345); auto euclidean_distance=some(); auto distance=mock_obj->compute_distance(euclidean_distance); auto distance_mat1=distance->get_distance_matrix(); - auto feats_p=static_cast*>(gen_p->get_streamed_features(m)); - auto feats_q=static_cast*>(gen_q->get_streamed_features(n)); + auto feats_p = static_cast*>( + gen_p1->get_streamed_features(m)); + auto feats_q = static_cast*>( + gen_q1->get_streamed_features(n)); euclidean_distance->init(feats_p, feats_q); auto distance_mat2=euclidean_distance->get_distance_matrix(); @@ -168,6 +173,9 @@ TEST(TwoDistributionTest, compute_joint_distance_streaming) auto gen_p=new CMeanShiftDataGenerator(0, dim, 0); auto gen_q=new CMeanShiftDataGenerator(difference, dim, 0); + auto gen_p1 = new CMeanShiftDataGenerator(0, dim, 0); + auto gen_q1 = new CMeanShiftDataGenerator(difference, dim, 0); + auto mock_obj=some(); mock_obj->set_p(gen_p); mock_obj->set_q(gen_q); @@ -179,8 +187,10 @@ TEST(TwoDistributionTest, compute_joint_distance_streaming) auto distance=mock_obj->compute_joint_distance(euclidean_distance); auto distance_mat1=distance->get_distance_matrix(); - auto feats_p=static_cast*>(gen_p->get_streamed_features(m)); - auto feats_q=static_cast*>(gen_q->get_streamed_features(n)); + auto feats_p = static_cast*>( + gen_p1->get_streamed_features(m)); + auto feats_q = static_cast*>( + gen_q1->get_streamed_features(n)); SGMatrix data_p_and_q(dim, m+n); auto data_p=feats_p->get_feature_matrix(); diff --git a/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc b/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc index 8427c16659d..417853b5175 100644 --- a/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc +++ b/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc @@ -282,8 +282,6 @@ TEST(CrossValidationMMD, unbiased_incomplete) cv.m_num_runs=num_runs; cv.m_rejections=SGMatrix(num_runs*num_folds, num_kernels); - set_global_seed(12345); - set_global_seed(12345); set_global_seed(12345); cv(kernel_mgr); kernel_mgr.unset_precomputed_distance(); diff --git 
a/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc b/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc index a8cbb63b4ee..8bad2dbf096 100644 --- a/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc +++ b/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc @@ -102,12 +102,13 @@ TEST(PermutationMMD, biased_full_single_kernel) Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); set_global_seed(12345); + auto prng = std::unique_ptr(new CRandom()); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds); + CMath::permute(perminds, prng.get()); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); @@ -115,11 +116,12 @@ TEST(PermutationMMD, biased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - set_global_seed(12345); + + prng->set_seed(12345); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); @@ -181,13 +183,13 @@ TEST(PermutationMMD, unbiased_full_single_kernel) set_global_seed(12345); Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); - + auto prng = std::unique_ptr(new CRandom()); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds); + CMath::permute(perminds, prng.get()); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); @@ -196,11 +198,11 @@ TEST(PermutationMMD, unbiased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - set_global_seed(12345); + prng->set_seed(12345); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); @@ -262,26 +264,27 @@ TEST(PermutationMMD, unbiased_incomplete_single_kernel) Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); set_global_seed(12345); + auto prng = std::unique_ptr(new CRandom()); SGVector result_2(num_null_samples); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds); + CMath::permute(perminds, prng.get()); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); } - set_global_seed(12345); + prng->set_seed(12345); SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); From b2276776cb52962ae62645d7ab36de42026435a4 Mon Sep 17 00:00:00 2001 From: Tiramisu 1993 Date: Wed, 19 Jul 2017 23:29:50 +0800 Subject: [PATCH 6/9] fix the meta test and some confilcts --- examples/meta/generator/translate.py | 2 +- examples/meta/src/clustering/gmm.sg | 2 +- examples/meta/src/clustering/kmeans.sg | 2 +- examples/meta/src/converter/ica_fast.sg | 2 +- examples/meta/src/converter/ica_ff_sep.sg | 2 +- examples/meta/src/converter/ica_jade.sg | 2 +- 
examples/meta/src/converter/ica_jedi_sep.sg | 2 +- examples/meta/src/converter/ica_sobi.sg | 2 +- .../src/gaussian_processes/gaussian_process_classifier.sg | 2 +- examples/meta/src/meta_api/calls.sg | 4 +++- examples/meta/src/multiclass_classifier/cartree.sg | 2 +- .../src/multiclass_classifier/multiclass_ecoc_random.sg | 2 +- examples/meta/src/multiclass_classifier/random_forest.sg | 2 +- .../meta/src/neural_nets/feedforward_net_classification.sg | 2 +- examples/meta/src/neural_nets/feedforward_net_regression.sg | 2 +- examples/meta/src/regression/random_forest_regression.sg | 2 +- tests/unit/environments/RegressionTestEnvironment.h | 2 +- tests/unit/lib/DynamicArray_unittest.cc | 6 ++++-- tests/unit/statistical_testing/KernelSelection_unittest.cc | 2 +- .../internals/CrossValidationMMD_unittest.cc | 3 +-- 20 files changed, 25 insertions(+), 22 deletions(-) diff --git a/examples/meta/generator/translate.py b/examples/meta/generator/translate.py index d6e73551ad2..bf3b13b5e6c 100644 --- a/examples/meta/generator/translate.py +++ b/examples/meta/generator/translate.py @@ -483,7 +483,7 @@ def translateExpr(self, expr): method = expr[key][0]["Identifier"] argsList = None try: - argsList = expr[key][2] + argsList = expr[key][1] except IndexError: pass translatedArgsList = self.translateArgumentList(argsList) diff --git a/examples/meta/src/clustering/gmm.sg b/examples/meta/src/clustering/gmm.sg index 7d120b211e1..5dcf462ce5a 100644 --- a/examples/meta/src/clustering/gmm.sg +++ b/examples/meta/src/clustering/gmm.sg @@ -1,6 +1,6 @@ CSVFile f_feats_train("../../data/classifier_4class_2d_linear_features_train.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/examples/meta/src/clustering/kmeans.sg b/examples/meta/src/clustering/kmeans.sg index fc452590ef0..6bc5f69e461 100644 --- a/examples/meta/src/clustering/kmeans.sg +++ b/examples/meta/src/clustering/kmeans.sg @@ -1,5 +1,5 @@ CSVFile f_feats_train("../../data/classifier_binary_2d_linear_features_train.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/examples/meta/src/converter/ica_fast.sg b/examples/meta/src/converter/ica_fast.sg index 616f6767275..e053b166608 100644 --- a/examples/meta/src/converter/ica_fast.sg +++ b/examples/meta/src/converter/ica_fast.sg @@ -1,6 +1,6 @@ CSVFile f_feats("../../data/ica_2_sources.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features(f_feats) diff --git a/examples/meta/src/converter/ica_ff_sep.sg b/examples/meta/src/converter/ica_ff_sep.sg index c8d6b47af0d..8732d3520b8 100644 --- a/examples/meta/src/converter/ica_ff_sep.sg +++ b/examples/meta/src/converter/ica_ff_sep.sg @@ -1,6 +1,6 @@ CSVFile f_feats("../../data/ica_2_sources.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features(f_feats) diff --git a/examples/meta/src/converter/ica_jade.sg b/examples/meta/src/converter/ica_jade.sg index 633e0eedf45..7e31c78a06d 100644 --- a/examples/meta/src/converter/ica_jade.sg +++ b/examples/meta/src/converter/ica_jade.sg @@ -1,6 +1,6 @@ CSVFile f_feats("../../data/ica_2_sources.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features(f_feats) diff --git a/examples/meta/src/converter/ica_jedi_sep.sg b/examples/meta/src/converter/ica_jedi_sep.sg index 68501bc9ab5..aea98215566 100644 --- a/examples/meta/src/converter/ica_jedi_sep.sg +++ b/examples/meta/src/converter/ica_jedi_sep.sg 
@@ -1,6 +1,6 @@ CSVFile f_feats("../../data/ica_2_sources.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features(f_feats) diff --git a/examples/meta/src/converter/ica_sobi.sg b/examples/meta/src/converter/ica_sobi.sg index 51ddd07c713..1d9031933df 100644 --- a/examples/meta/src/converter/ica_sobi.sg +++ b/examples/meta/src/converter/ica_sobi.sg @@ -1,6 +1,6 @@ CSVFile f_feats("../../data/ica_2_sources.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features(f_feats) diff --git a/examples/meta/src/gaussian_processes/gaussian_process_classifier.sg b/examples/meta/src/gaussian_processes/gaussian_process_classifier.sg index 65c50803c73..b57c5194976 100644 --- a/examples/meta/src/gaussian_processes/gaussian_process_classifier.sg +++ b/examples/meta/src/gaussian_processes/gaussian_process_classifier.sg @@ -2,7 +2,7 @@ CSVFile f_feats_train("../../data/classifier_4class_2d_linear_features_train.dat CSVFile f_feats_test("../../data/classifier_4class_2d_linear_features_test.dat") CSVFile f_labels_train("../../data/classifier_4class_2d_linear_labels_train.dat") CSVFile f_labels_test("../../data/classifier_4class_2d_linear_labels_test.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/examples/meta/src/meta_api/calls.sg b/examples/meta/src/meta_api/calls.sg index 14b0ef034e6..29fae042f0b 100644 --- a/examples/meta/src/meta_api/calls.sg +++ b/examples/meta/src/meta_api/calls.sg @@ -1,9 +1,11 @@ # static call -Math:init_random(1) +# Math:init_random(1) # global function call get_global_io() +set_global_seed(1) + # member function call GaussianKernel k() k.set_width(1) diff --git a/examples/meta/src/multiclass_classifier/cartree.sg b/examples/meta/src/multiclass_classifier/cartree.sg index 48772e95ec5..7ec91bc39e1 100644 --- a/examples/meta/src/multiclass_classifier/cartree.sg +++ b/examples/meta/src/multiclass_classifier/cartree.sg @@ -2,7 +2,7 @@ CSVFile f_feats_train("../../data/classifier_4class_2d_linear_features_train.dat CSVFile f_feats_test("../../data/classifier_4class_2d_linear_features_test.dat") CSVFile f_labels_train("../../data/classifier_4class_2d_linear_labels_train.dat") CSVFile f_labels_test("../../data/classifier_4class_2d_linear_labels_test.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/examples/meta/src/multiclass_classifier/multiclass_ecoc_random.sg b/examples/meta/src/multiclass_classifier/multiclass_ecoc_random.sg index 8d9f459ae19..1e91ef0b569 100644 --- a/examples/meta/src/multiclass_classifier/multiclass_ecoc_random.sg +++ b/examples/meta/src/multiclass_classifier/multiclass_ecoc_random.sg @@ -1,4 +1,4 @@ -Math:init_random(1) +set_global_seed(1) CSVFile f_feats_train("../../data/classifier_4class_2d_linear_features_train.dat") CSVFile f_feats_test("../../data/classifier_4class_2d_linear_features_test.dat") diff --git a/examples/meta/src/multiclass_classifier/random_forest.sg b/examples/meta/src/multiclass_classifier/random_forest.sg index ceb4a4437aa..92857016ebb 100644 --- a/examples/meta/src/multiclass_classifier/random_forest.sg +++ b/examples/meta/src/multiclass_classifier/random_forest.sg @@ -2,7 +2,7 @@ CSVFile f_feats_train("../../data/classifier_4class_2d_linear_features_train.dat CSVFile f_feats_test("../../data/classifier_4class_2d_linear_features_test.dat") CSVFile f_labels_train("../../data/classifier_4class_2d_linear_labels_train.dat") CSVFile 
f_labels_test("../../data/classifier_4class_2d_linear_labels_test.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/examples/meta/src/neural_nets/feedforward_net_classification.sg b/examples/meta/src/neural_nets/feedforward_net_classification.sg index bef86f66715..e983bb9a898 100644 --- a/examples/meta/src/neural_nets/feedforward_net_classification.sg +++ b/examples/meta/src/neural_nets/feedforward_net_classification.sg @@ -3,7 +3,7 @@ CSVFile f_feats_test("../../data/classifier_binary_2d_nonlinear_features_test.da CSVFile f_labels_train("../../data/classifier_binary_2d_nonlinear_labels_train.dat") CSVFile f_labels_test("../../data/classifier_binary_2d_nonlinear_labels_test.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/examples/meta/src/neural_nets/feedforward_net_regression.sg b/examples/meta/src/neural_nets/feedforward_net_regression.sg index fca6427d76a..05df346cdce 100644 --- a/examples/meta/src/neural_nets/feedforward_net_regression.sg +++ b/examples/meta/src/neural_nets/feedforward_net_regression.sg @@ -3,7 +3,7 @@ CSVFile f_feats_test("../../data/regression_1d_sinc_features_test_with_9d_noise. CSVFile f_labels_train("../../data/regression_1d_sinc_labels_train.dat") CSVFile f_labels_test("../../data/regression_1d_sinc_labels_test.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/examples/meta/src/regression/random_forest_regression.sg b/examples/meta/src/regression/random_forest_regression.sg index 4f8aa2f3a8e..c659d3d9a53 100644 --- a/examples/meta/src/regression/random_forest_regression.sg +++ b/examples/meta/src/regression/random_forest_regression.sg @@ -2,7 +2,7 @@ CSVFile f_feats_train("../../data/regression_1d_linear_features_train.dat") CSVFile f_feats_test("../../data/regression_1d_linear_features_test.dat") CSVFile f_labels_train("../../data/regression_1d_linear_labels_train.dat") CSVFile f_labels_test("../../data/regression_1d_linear_labels_test.dat") -Math:init_random(1) +set_global_seed(1) #![create_features] RealFeatures features_train(f_feats_train) diff --git a/tests/unit/environments/RegressionTestEnvironment.h b/tests/unit/environments/RegressionTestEnvironment.h index abba24cc702..7e2632b8d25 100644 --- a/tests/unit/environments/RegressionTestEnvironment.h +++ b/tests/unit/environments/RegressionTestEnvironment.h @@ -54,7 +54,7 @@ class RegressionTestEnvironment : public ::testing::Environment public: virtual void SetUp() { - sg_rand->set_seed(57); + set_global_seed(57); SGMatrix feat_train_data = CDataGenerator::generate_gaussians(n_train, 1, n_dim); diff --git a/tests/unit/lib/DynamicArray_unittest.cc b/tests/unit/lib/DynamicArray_unittest.cc index 3224688ccf7..a5e0305f020 100644 --- a/tests/unit/lib/DynamicArray_unittest.cc +++ b/tests/unit/lib/DynamicArray_unittest.cc @@ -62,9 +62,10 @@ TYPED_TEST(CDynamicArrayFixture, set_array) this->wrapper_array->reset_array(); EXPECT_EQ(this->wrapper_array->get_num_elements(), 0); TypeParam* array = SG_MALLOC(TypeParam, 5); + auto prng = std::unique_ptr(new CRandom()); for (int32_t i = 0; i < 5; i++) { - array[i] = (TypeParam)CMath::random(1, 10); + array[i] = (TypeParam)prng->random(1, 10); } this->wrapper_array->set_array(array, 5); @@ -79,9 +80,10 @@ TYPED_TEST(CDynamicArrayFixture, set_array) TYPED_TEST(CDynamicArrayFixture, const_set_array) { TypeParam* array = SG_MALLOC(TypeParam, 5); + auto prng = 
std::unique_ptr(new CRandom()); for (int32_t i = 0; i < 5; i++) { - array[i] = (TypeParam)CMath::random(1, 10); + array[i] = (TypeParam)prng->random(1, 10); } const TypeParam* const_array = array; this->wrapper_array->reset_array(); diff --git a/tests/unit/statistical_testing/KernelSelection_unittest.cc b/tests/unit/statistical_testing/KernelSelection_unittest.cc index fe27a9fabfb..c425c849773 100644 --- a/tests/unit/statistical_testing/KernelSelection_unittest.cc +++ b/tests/unit/statistical_testing/KernelSelection_unittest.cc @@ -287,7 +287,7 @@ TEST(KernelSelectionMaxCrossValidation, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.125, 1E-10); } TEST(KernelSelectionMaxCrossValidation, linear_time_single_kernel_dense) diff --git a/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc b/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc index 417853b5175..854c3a86590 100644 --- a/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc +++ b/tests/unit/statistical_testing/internals/CrossValidationMMD_unittest.cc @@ -61,6 +61,7 @@ TEST(CrossValidationMMD, biased_full) const float64_t difference=0.5; const float64_t alpha=0.05; const auto stype=ST_BIASED_FULL; + set_global_seed(12345); auto gen_p=some(0, dim, 0); auto gen_q=some(difference, dim, 0); @@ -91,7 +92,6 @@ TEST(CrossValidationMMD, biased_full) cv.m_num_runs=num_runs; cv.m_rejections=SGMatrix(num_runs*num_folds, num_kernels); - set_global_seed(12345); cv(kernel_mgr); kernel_mgr.unset_precomputed_distance(); @@ -105,7 +105,6 @@ TEST(CrossValidationMMD, biased_full) permutation_mmd.m_stype=stype; permutation_mmd.m_num_null_samples=num_null_samples; - set_global_seed(12345); for (auto k=0; k Date: Thu, 20 Jul 2017 00:03:29 +0800 Subject: [PATCH 7/9] use get_prng and move to C-11 --- benchmarks/hasheddoc_benchmarks.cpp | 5 +- benchmarks/rf_feats_benchmark.cpp | 5 +- benchmarks/rf_feats_kernel_comp.cpp | 7 +- .../libshogun/classifier_larank.cpp | 7 +- .../libshogun/classifier_latent_svm.cpp | 9 +- .../classifier_libsvm_probabilities.cpp | 7 +- ...sifier_mkl_svmlight_modelselection_bug.cpp | 5 +- ...ght_string_features_precomputed_kernel.cpp | 12 +- .../libshogun/clustering_kmeans.cpp | 6 +- .../libshogun/converter_jade_bss.cpp | 7 +- ...uation_cross_validation_classification.cpp | 6 +- ...ion_cross_validation_locked_comparison.cpp | 6 +- ...on_cross_validation_mkl_weight_storage.cpp | 7 +- ...evaluation_cross_validation_regression.cpp | 5 +- .../libshogun/features_subset_labels.cpp | 5 +- .../features_subset_simple_features.cpp | 8 +- .../libshogun/hashed_features_example.cpp | 5 +- .../undocumented/libshogun/kernel_custom.cpp | 4 +- .../libshogun/kernel_custom_kernel.cpp | 4 +- .../libshogun/kernel_machine_train_locked.cpp | 6 +- .../libshogun/library_serialization.cpp | 5 +- ...lection_combined_kernel_sub_parameters.cpp | 5 +- .../modelselection_grid_search_kernel.cpp | 5 +- .../modelselection_grid_search_krr.cpp | 5 +- .../modelselection_grid_search_mkl.cpp | 5 +- ...elselection_grid_search_multiclass_svm.cpp | 5 +- ...delselection_grid_search_string_kernel.cpp | 9 +- .../neuralnets_deep_belief_network.cpp | 10 +- .../libshogun/parameter_iterate_float64.cpp | 8 +- .../libshogun/parameter_iterate_sgobject.cpp | 12 +- .../preprocessor_randomfouriergauss.cpp | 7 +- 
.../libshogun/random_fourier_features.cpp | 7 +- ...gression_gaussian_process_simple_exact.cpp | 5 +- .../libshogun/regression_libsvr.cpp | 7 +- .../serialization_multiclass_labels.cpp | 7 +- .../undocumented/libshogun/so_fg_model.cpp | 11 +- .../undocumented/libshogun/so_multiclass.cpp | 12 +- .../libshogun/so_multiclass_BMRM.cpp | 12 +- .../splitting_LOO_crossvalidation.cpp | 14 +- .../splitting_standard_crossvalidation.cpp | 11 +- .../splitting_stratified_crossvalidation.cpp | 13 +- .../libshogun/streaming_from_dense.cpp | 7 +- src/gpl/shogun/classifier/svm/QPBSVMLib.cpp | 9 +- src/gpl/shogun/classifier/svm/WDSVMOcas.cpp | 1 - src/interfaces/swig/Mathematics.i | 6 - src/shogun/base/DynArray.h | 19 +- src/shogun/base/SGObject.cpp | 1 - src/shogun/base/SGObject.h | 16 +- src/shogun/base/init.h | 3 +- src/shogun/classifier/svm/GNPPLib.cpp | 1 - src/shogun/classifier/svm/LibLinear.cpp | 19 +- src/shogun/classifier/vw/VwRegressor.cpp | 4 +- src/shogun/clustering/GMM.cpp | 17 +- src/shogun/clustering/GMM.h | 3 + src/shogun/clustering/KMeans.cpp | 1 - src/shogun/clustering/KMeansBase.cpp | 14 +- src/shogun/clustering/KMeansMiniBatch.cpp | 7 +- src/shogun/converter/ica/FastICA.cpp | 5 +- src/shogun/distributions/Gaussian.cpp | 5 +- src/shogun/distributions/Gaussian.h | 2 + src/shogun/distributions/HMM.cpp | 32 +- .../classical/GaussianDistribution.cpp | 8 +- .../classical/GaussianDistribution.h | 9 +- .../classical/ProbabilityDistribution.cpp | 6 +- .../classical/ProbabilityDistribution.h | 7 +- .../evaluation/CrossValidationSplitting.cpp | 5 +- .../StratifiedCrossValidationSplitting.cpp | 6 +- src/shogun/features/DataGenerator.cpp | 24 +- .../features/RandomFourierDotFeatures.cpp | 8 +- .../generators/GaussianBlobsDataGenerator.cpp | 12 +- .../generators/GaussianBlobsDataGenerator.h | 2 + .../generators/MeanShiftDataGenerator.cpp | 5 +- .../generators/MeanShiftDataGenerator.h | 2 + src/shogun/kernel/PyramidChi2.cpp | 7 +- src/shogun/lib/DynamicArray.h | 6 +- src/shogun/lib/DynamicObjectArray.h | 6 +- src/shogun/lib/SGVector.cpp | 5 +- src/shogun/lib/SGVector.h | 1 - src/shogun/lib/tapkee/tapkee_shogun.cpp | 18 +- src/shogun/machine/BaggingMachine.cpp | 7 +- src/shogun/machine/gp/EPInferenceMethod.cpp | 4 +- src/shogun/machine/gp/Inference.cpp | 1 - src/shogun/mathematics/Math.h | 30 +- src/shogun/mathematics/Random.cpp | 378 ------------------ src/shogun/mathematics/Random.h | 372 ----------------- src/shogun/mathematics/Statistics.cpp | 15 +- src/shogun/mathematics/ajd/QDiag.cpp | 5 +- .../ratapprox/tracesampler/NormalSampler.cpp | 6 +- .../ratapprox/tracesampler/NormalSampler.h | 2 +- .../ratapprox/tracesampler/ProbingSampler.cpp | 6 +- .../ratapprox/tracesampler/ProbingSampler.h | 2 +- .../ratapprox/tracesampler/TraceSampler.h | 5 +- .../ModelSelectionParameters.cpp | 14 +- src/shogun/multiclass/LaRank.cpp | 4 +- src/shogun/multiclass/LaRank.h | 7 +- .../ecoc/ECOCDiscriminantEncoder.cpp | 4 +- .../ecoc/ECOCRandomDenseEncoder.cpp | 21 +- .../ecoc/ECOCRandomSparseEncoder.cpp | 26 +- .../tree/ConditionalProbabilityTree.h | 3 + .../tree/RandomConditionalProbabilityTree.cpp | 3 +- src/shogun/neuralnets/DeepBeliefNetwork.cpp | 19 +- .../neuralnets/NeuralConvolutionalLayer.cpp | 8 +- src/shogun/neuralnets/NeuralInputLayer.cpp | 4 +- src/shogun/neuralnets/NeuralLayer.cpp | 5 +- src/shogun/neuralnets/NeuralLinearLayer.cpp | 4 +- src/shogun/neuralnets/NeuralNetwork.cpp | 14 +- src/shogun/neuralnets/RBM.cpp | 19 +- src/shogun/neuralnets/RBM.h | 2 + .../liblinear/shogun_liblinear.cpp | 5 +- 
.../RandomFourierGaussPreproc.cpp | 10 +- .../regression/svr/LibLinearRegression.cpp | 5 +- .../statistical_testing/QuadraticTimeMMD.cpp | 4 +- .../internals/mmd/CrossValidationMMD.h | 4 +- .../internals/mmd/PermutationMMD.h | 4 +- .../structure/FactorGraphDataGenerator.cpp | 29 +- src/shogun/structure/StochasticSOSVM.cpp | 4 +- src/shogun/structure/TwoStateModel.cpp | 30 +- .../transfer/multitask/LibLinearMTL.cpp | 4 +- tests/unit/base/SGObject_unittest.cc | 10 +- tests/unit/base/Serialization_unittest.cc | 7 +- .../unit/classifier/svm/LibLinear_unittest.cc | 4 +- tests/unit/converter/Isomap_unittest.cc | 5 +- .../distribution/MixtureModel_unittest.cc | 16 +- tests/unit/ensemble/MajorityVote_unittest.cc | 10 +- .../ensemble/WeightedMajorityVote_unittest.cc | 15 +- .../CrossValidation_multithread_unittest.cc | 6 +- .../evaluation/SplittingStrategy_unittest.cc | 45 ++- .../features/CombinedFeatures_unittest.cc | 5 +- tests/unit/features/DenseFeatures_unittest.cc | 22 +- .../features/HashedDenseFeatures_unittest.cc | 5 +- .../features/HashedDocDotFeatures_unittest.cc | 6 +- .../StreamingDenseFeatures_unittest.cc | 15 +- .../StreamingHashedDocDotFeatures_unittest.cc | 5 +- .../StreamingSparseFeatures_unittest.cc | 26 +- .../unit/features/StringFeatures_unittest.cc | 9 +- tests/unit/io/CSVFile_unittest.cc | 40 +- tests/unit/io/LibSVMFile_unittest.cc | 28 +- tests/unit/io/ProtobufFile_unittest.cc | 55 +-- tests/unit/kernel/CustomKernel_unittest.cc | 5 +- tests/unit/kernel/Kernel_unittest.cc | 5 +- .../SubsequenceStringKernel_unittest.cc | 14 +- tests/unit/lib/DynamicArray_unittest.cc | 10 +- tests/unit/lib/Memory_unittest.cc | 5 +- tests/unit/lib/SGMatrix_unittest.cc | 86 ++-- tests/unit/lib/SGSparseMatrix_unittest.cc | 15 +- tests/unit/lib/SGVector_unittest.cc | 5 +- .../machine/StochasticGBMachine_unittest.cc | 20 +- tests/unit/machine/kerneldensity_unittest.cc | 15 +- tests/unit/mathematics/Math_unittest.cc | 21 +- tests/unit/mathematics/Random_unittest.cc | 358 ----------------- tests/unit/mathematics/ajd/FFDiag_unittest.cc | 5 +- .../mathematics/ajd/JADiagOrth_unittest.cc | 5 +- tests/unit/mathematics/ajd/JADiag_unittest.cc | 6 +- .../unit/mathematics/ajd/JediDiag_unittest.cc | 5 +- tests/unit/mathematics/ajd/QDiag_unittest.cc | 5 +- tests/unit/mathematics/ajd/UWedge_unittest.cc | 5 +- .../ConjugateOrthogonalCGSolver_unittest.cc | 9 +- .../DirectSparseLinearSolver_unittest.cc | 8 +- .../linalg/LanczosEigenSolver_unittest.cc | 14 +- .../linalg/LogDetEstimator_unittest.cc | 28 +- .../linalg/NormalSampler_unittest.cc | 1 + .../linalg/ProbingSampler_unittest.cc | 7 +- .../multiclass/BaggingMachine_unittest.cc | 4 +- tests/unit/multiclass/LaRank_unittest.cc | 7 +- .../MulticlassLibLinear_unittest.cc | 7 +- .../multiclass/tree/RandomCARTree_unittest.cc | 2 +- .../multiclass/tree/RandomForest_unittest.cc | 4 +- tests/unit/neuralnets/Autoencoder_unittest.cc | 5 +- .../ConvolutionalFeatureMap_unittest.cc | 46 ++- .../neuralnets/DeepAutoencoder_unittest.cc | 10 +- .../neuralnets/DeepBeliefNetwork_unittest.cc | 6 +- .../neuralnets/NeuralInputLayer_unittest.cc | 6 +- ...euralLeakyRectifiedLinearLayer_unittest.cc | 6 +- .../neuralnets/NeuralLinearLayer_unittest.cc | 61 +-- .../NeuralLogisticLayer_unittest.cc | 21 +- .../unit/neuralnets/NeuralNetwork_unittest.cc | 2 +- .../NeuralRectifiedLinearLayer_unittest.cc | 19 +- .../neuralnets/NeuralSoftmaxLayer_unittest.cc | 26 +- tests/unit/neuralnets/RBM_unittest.cc | 5 +- .../preprocessor/Preprocessor_unittest.cc | 14 +- 
tests/unit/regression/krrnystrom_unittest.cc | 10 +- tests/unit/regression/lars_unittest.cc | 10 +- .../KernelSelection_unittest.cc | 6 +- .../QuadraticTimeMMD_unittest.cc | 12 +- .../internals/Kernel_unittest.cc | 5 +- .../internals/PermutationMMD_unittest.cc | 21 +- .../HierarchicalMultilabelModel_unittest.cc | 26 +- .../structure/MultilabelCLRModel_unittest.cc | 18 +- .../structure/PrimalMosekSOSVM_unittest.cc | 10 +- 189 files changed, 1149 insertions(+), 1910 deletions(-) delete mode 100644 src/shogun/mathematics/Random.cpp delete mode 100644 src/shogun/mathematics/Random.h delete mode 100644 tests/unit/mathematics/Random_unittest.cc diff --git a/benchmarks/hasheddoc_benchmarks.cpp b/benchmarks/hasheddoc_benchmarks.cpp index 06101765884..54ba7933c0d 100644 --- a/benchmarks/hasheddoc_benchmarks.cpp +++ b/benchmarks/hasheddoc_benchmarks.cpp @@ -26,13 +26,14 @@ int main(int argv, char** argc) int32_t num_strings = 5000; int32_t max_str_length = 10000; SGStringList string_list(num_strings, max_str_length); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist('A', 'Z'); SG_SPRINT("Creating features...\n"); for (index_t i=0; i(max_str_length); for (index_t j=0; jrandom('A', 'Z'); + string_list.strings[i].string[j] = (char)dist(prng); } SG_SPRINT("Features were created.\n"); diff --git a/benchmarks/rf_feats_benchmark.cpp b/benchmarks/rf_feats_benchmark.cpp index 9daf777223d..0947f1d99e1 100644 --- a/benchmarks/rf_feats_benchmark.cpp +++ b/benchmarks/rf_feats_benchmark.cpp @@ -16,7 +16,8 @@ int main(int argv, char** argc) int32_t dims[] = {100, 300, 600}; CTime* timer = new CTime(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t d=0; d<3; d++) { int32_t num_dim = dims[d]; @@ -28,7 +29,7 @@ int main(int argv, char** argc) { for (index_t j=0; jrandom(0, 1) + 0.5; + mat(j, i) = dist(prng) + 0.5; } } diff --git a/benchmarks/rf_feats_kernel_comp.cpp b/benchmarks/rf_feats_kernel_comp.cpp index 52bc49cf336..5126c5a41cd 100644 --- a/benchmarks/rf_feats_kernel_comp.cpp +++ b/benchmarks/rf_feats_kernel_comp.cpp @@ -29,7 +29,8 @@ int main(int argv, char** argc) float64_t lin_C = 0.1; float64_t non_lin_C = 0.1; CPRCEvaluation* evaluator = new CPRCEvaluation(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); CSqrtDiagKernelNormalizer* normalizer = new CSqrtDiagKernelNormalizer(true); SG_REF(normalizer); for (index_t d=0; d<4; d++) @@ -49,12 +50,12 @@ int main(int argv, char** argc) if ((i+j)%2==0) { labs[i] = -1; - mat(j, i) = m_rng->random(0, 1) + 0.5; + mat(j, i) = dist(prng) + 0.5; } else { labs[i] = 1; - mat(j, i) = m_rng->random(0, 1) - 0.5; + mat(j, i) = dist(prng) - 0.5; } } } diff --git a/examples/undocumented/libshogun/classifier_larank.cpp b/examples/undocumented/libshogun/classifier_larank.cpp index 913eb41dfa6..fa900d48c92 100644 --- a/examples/undocumented/libshogun/classifier_larank.cpp +++ b/examples/undocumented/libshogun/classifier_larank.cpp @@ -27,14 +27,15 @@ void test() SGMatrix matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - matrix_test(j, i) = m_rng->std_normal_distrib(); + matrix(j, i) = dist(prng); + matrix_test(j, i) = 
dist(prng); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/examples/undocumented/libshogun/classifier_latent_svm.cpp b/examples/undocumented/libshogun/classifier_latent_svm.cpp index 38e3039fa6a..63407e80d75 100644 --- a/examples/undocumented/libshogun/classifier_latent_svm.cpp +++ b/examples/undocumented/libshogun/classifier_latent_svm.cpp @@ -110,7 +110,7 @@ static void read_dataset(char* fname, CLatentFeatures*& feats, CLatentLabels*& l SG_REF(labels); CBinaryLabels* ys = new CBinaryLabels(num_examples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); feats = new CLatentFeatures(num_examples); SG_REF(feats); @@ -144,10 +144,11 @@ static void read_dataset(char* fname, CLatentFeatures*& feats, CLatentLabels*& l while ((*pchar)!='\n') pchar++; *pchar = '\0'; height = atoi(last_pchar); - + std::uniform_int_distribution dist_w(0, width - 1); + std::uniform_int_distribution dist_h(0, height - 1); /* create latent label */ - int x = m_rng->random(0, width - 1); - int y = m_rng->random(0, height - 1); + int x = dist_w(prng); + int y = dist_h(prng); CBoundingBox* bb = new CBoundingBox(x,y); labels->add_latent_label(bb); diff --git a/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp b/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp index ab96153d89f..4ab6f2a6c7e 100644 --- a/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp +++ b/examples/undocumented/libshogun/classifier_libsvm_probabilities.cpp @@ -10,7 +10,8 @@ using namespace shogun; //generates data points (of different classes) randomly void gen_rand_data(SGMatrix features, SGVector labels, float64_t distance) { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0, 1.0); index_t num_samples = labels.vlen; index_t dimensions = features.num_rows; for (int32_t i = 0; i < num_samples; i++) @@ -19,13 +20,13 @@ void gen_rand_data(SGMatrix features, SGVector labels, flo { labels[i] = -1.0; for (int32_t j = 0; j < dimensions; j++) - features(j, i) = m_rng->random(0.0, 1.0) + distance; + features(j, i) = dist(prng) + distance; } else { labels[i] = 1.0; for (int32_t j = 0; j < dimensions; j++) - features(j, i) = m_rng->random(0.0, 1.0) - distance; + features(j, i) = dist(prng) - distance; } } labels.display_vector("labels"); diff --git a/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp b/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp index 25ce3c9d0e6..30c35c6782d 100644 --- a/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp +++ b/examples/undocumented/libshogun/classifier_mkl_svmlight_modelselection_bug.cpp @@ -68,9 +68,10 @@ void test() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(); diff --git a/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp b/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp index b081caebc76..c9819396b23 100644 --- a/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp +++ 
b/examples/undocumented/libshogun/classifier_svmlight_string_features_precomputed_kernel.cpp @@ -30,24 +30,26 @@ void test_svmlight() float64_t p_x=0.5; // probability for class A float64_t mostly_prob=0.8; CDenseLabels* labels=new CBinaryLabels(num_train+num_test); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_real_distribution dist_real(0.0, 1.0); + std::uniform_int_distribution dist_int(1, max_length); SGStringList data(num_train+num_test, max_length); for (index_t i=0; irandom(1, max_length); + index_t length = dist_int(prng); /* allocate string */ data.strings[i]=SGString(length); /* fill with elements and set label */ - if (p_x < m_rng->random(0.0, 1.0)) + if (p_x < dist_real(prng)) { labels->set_label(i, 1); for (index_t j=0; jrandom(0.0, 1.0) ? '0' : '1'; + char c = mostly_prob < dist_real(prng) ? '0' : '1'; data.strings[i].string[j]=c; } } @@ -56,7 +58,7 @@ void test_svmlight() labels->set_label(i, -1); for (index_t j=0; jrandom(0.0, 1.0) ? '1' : '0'; + char c = mostly_prob < dist_real(prng) ? '1' : '0'; data.strings[i].string[j]=c; } } diff --git a/examples/undocumented/libshogun/clustering_kmeans.cpp b/examples/undocumented/libshogun/clustering_kmeans.cpp index 4cfe9f68deb..77f9a389199 100644 --- a/examples/undocumented/libshogun/clustering_kmeans.cpp +++ b/examples/undocumented/libshogun/clustering_kmeans.cpp @@ -39,7 +39,7 @@ int main(int argc, char **argv) int32_t dim_features=3; int32_t num_vectors_per_cluster=5; float64_t cluster_std_dev=2.0; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* build random cluster centers */ SGMatrix cluster_centers(dim_features, num_clusters); @@ -60,7 +60,9 @@ int main(int argc, char **argv) idx+=j; idx+=k*dim_features; float64_t entry=cluster_centers.matrix[i*dim_features+j]; - data.matrix[idx] = m_rng->normal_random(entry, cluster_std_dev); + std::normal_distribution dist( + entry, cluster_std_dev); + data.matrix[idx] = dist(prng); } } } diff --git a/examples/undocumented/libshogun/converter_jade_bss.cpp b/examples/undocumented/libshogun/converter_jade_bss.cpp index dd3f7fa9ca3..b6161eaecd8 100644 --- a/examples/undocumented/libshogun/converter_jade_bss.cpp +++ b/examples/undocumented/libshogun/converter_jade_bss.cpp @@ -32,7 +32,8 @@ using namespace Eigen; void test() { // Generate sample data - auto m_rng = std::unique_ptr(new CRandom(0)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); int n_samples = 2000; VectorXd time(n_samples, true); time.setLinSpaced(n_samples,0,10); @@ -43,11 +44,11 @@ void test() { // Sin wave S(0,i) = sin(2*time[i]); - S(0, i) += 0.2 * m_rng->std_normal_distrib(); + S(0, i) += 0.2 * dist(prng); // Square wave S(1,i) = sin(3*time[i]) < 0 ? 
-1 : 1; - S(1, i) += 0.2 * m_rng->std_normal_distrib(); + S(1, i) += 0.2 * dist(prng); } // Standardize data diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp index 8f5b61b4a79..c7f18bde4f9 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_classification.cpp @@ -29,7 +29,7 @@ void test_cross_validation() /* data matrix dimensions */ index_t num_vectors=40; index_t num_features=5; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* data means -1, 1 in all components, std deviation of 3 */ SGVector mean_1(num_features); SGVector mean_2(num_features); @@ -47,8 +47,8 @@ void test_cross_validation() for (index_t j=0; jnormal_random(mean, sigma); + std::normal_distribution dist(mean, sigma); + train_dat.matrix[i * num_features + j] = dist(prng); } } diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp index b8c381c522c..f8ddeca44ba 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_locked_comparison.cpp @@ -37,7 +37,8 @@ void test_cross_validation() SGVector::fill_vector(mean_1.vector, mean_1.vlen, -1.0); SGVector::fill_vector(mean_2.vector, mean_2.vlen, 1.0); float64_t sigma=1.5; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); /* fill data matrix around mean */ SGMatrix train_dat(num_features, num_vectors); @@ -46,8 +47,7 @@ void test_cross_validation() for (index_t j=0; jnormal_random(mean, sigma); + train_dat.matrix[i * num_features + j] = dist(prng); } } diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp index 4e3029b7242..26b21e0187b 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_mkl_weight_storage.cpp @@ -28,7 +28,8 @@ void gen_rand_data(SGVector lab, SGMatrix feat, { index_t dims=feat.num_rows; index_t num=lab.vlen; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution uniform_dist(0.0, 1.0); for (int32_t i=0; i lab, SGMatrix feat, lab[i]=-1.0; for (int32_t j=0; jrandom(0.0, 1.0) + dist; + feat(j, i) = uniform_dist(prng) + dist; } else { lab[i]=1.0; for (int32_t j=0; jrandom(0.0, 1.0) - dist; + feat(j, i) = uniform_dist(prng) - dist; } } lab.display_vector("lab"); diff --git a/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp b/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp index 287bf36d6db..1c1ba31403c 100644 --- a/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp +++ b/examples/undocumented/libshogun/evaluation_cross_validation_regression.cpp @@ -33,7 +33,8 @@ void test_cross_validation() /* training label data */ SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -41,7 +42,7 @@ void 
test_cross_validation() for (index_t i=0; inormal_random(0, 1.0); + lab.vector[i] = i + dist(prng); } /* training features */ diff --git a/examples/undocumented/libshogun/features_subset_labels.cpp b/examples/undocumented/libshogun/features_subset_labels.cpp index cf2b29a9992..42428c11a0e 100644 --- a/examples/undocumented/libshogun/features_subset_labels.cpp +++ b/examples/undocumented/libshogun/features_subset_labels.cpp @@ -24,8 +24,9 @@ const int32_t num_classes=3; void test() { - auto m_rng = std::unique_ptr(new CRandom()); - const int32_t num_subset_idx = m_rng->random(1, num_labels); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, num_labels); + const int32_t num_subset_idx = dist(prng); /* create labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); diff --git a/examples/undocumented/libshogun/features_subset_simple_features.cpp b/examples/undocumented/libshogun/features_subset_simple_features.cpp index 459a953af3b..c9d7ad1cd40 100644 --- a/examples/undocumented/libshogun/features_subset_simple_features.cpp +++ b/examples/undocumented/libshogun/features_subset_simple_features.cpp @@ -49,17 +49,19 @@ const int32_t dim_features=6; void test() { - auto m_rng = std::unique_ptr(new CRandom()); - const int32_t num_subset_idx = m_rng->random(1, num_vectors); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, num_vectors); + const int32_t num_subset_idx = dist(prng); /* create feature data matrix */ SGMatrix data(dim_features, num_vectors); /* fill matrix with random data */ + std::uniform_int_distribution dist_s(-5, 5); for (index_t i=0; irandom(-5, 5); + data.matrix[i * dim_features + j] = dist_s(prng); } /* create simple features */ diff --git a/examples/undocumented/libshogun/hashed_features_example.cpp b/examples/undocumented/libshogun/hashed_features_example.cpp index 930c27e931d..237585bc4ca 100644 --- a/examples/undocumented/libshogun/hashed_features_example.cpp +++ b/examples/undocumented/libshogun/hashed_features_example.cpp @@ -12,12 +12,13 @@ int main() int32_t num_vectors = 5; int32_t dim = 20; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-dim, dim); SGMatrix mat(dim, num_vectors); for (index_t v=0; vrandom(-dim, dim); + mat(d, v) = dist(prng); } int32_t hashing_dim = 12; diff --git a/examples/undocumented/libshogun/kernel_custom.cpp b/examples/undocumented/libshogun/kernel_custom.cpp index 2a96e0685e3..14c001b374b 100644 --- a/examples/undocumented/libshogun/kernel_custom.cpp +++ b/examples/undocumented/libshogun/kernel_custom.cpp @@ -33,11 +33,11 @@ void test_custom_kernel_subsets() /* create a random permutation */ SGVector subset(m); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t run=0; run<100; ++run) { subset.range_fill(); - CMath::permute(subset, prng.get()); + CMath::permute(subset, prng); // subset.display_vector("permutation"); features->add_subset(subset); k->init(features, features); diff --git a/examples/undocumented/libshogun/kernel_custom_kernel.cpp b/examples/undocumented/libshogun/kernel_custom_kernel.cpp index c2d36c2ace2..b8800dc8f2d 100644 --- a/examples/undocumented/libshogun/kernel_custom_kernel.cpp +++ b/examples/undocumented/libshogun/kernel_custom_kernel.cpp @@ -31,11 +31,11 @@ void test_custom_kernel_subsets() /* create a random permutation */ SGVector subset(m); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t run=0; run<100; ++run) { subset.range_fill(); - 
CMath::permute(subset, prng.get()); + CMath::permute(subset, prng); // subset.display_vector("permutation"); features->add_subset(subset); k->init(features, features); diff --git a/examples/undocumented/libshogun/kernel_machine_train_locked.cpp b/examples/undocumented/libshogun/kernel_machine_train_locked.cpp index 4c69f4a528e..29b4039307a 100644 --- a/examples/undocumented/libshogun/kernel_machine_train_locked.cpp +++ b/examples/undocumented/libshogun/kernel_machine_train_locked.cpp @@ -37,7 +37,7 @@ void test() SGVector::display_vector(mean_1.vector, mean_1.vlen, "mean 1"); SGVector::display_vector(mean_2.vector, mean_2.vlen, "mean 2"); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* fill data matrix around mean */ SGMatrix train_dat(num_features, num_vectors); @@ -46,8 +46,8 @@ void test() for (index_t j=0; jnormal_random(mean, sigma); + std::normal_distribution dist(mean, sigma); + train_dat.matrix[i * num_features + j] = dist(prng); } } diff --git a/examples/undocumented/libshogun/library_serialization.cpp b/examples/undocumented/libshogun/library_serialization.cpp index d924f6920ba..61abcdf9d60 100644 --- a/examples/undocumented/libshogun/library_serialization.cpp +++ b/examples/undocumented/libshogun/library_serialization.cpp @@ -12,14 +12,15 @@ int main(int argc, char** argv) /* create feature data matrix */ SGMatrix data(3, 20); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 9); /* fill matrix with random data */ for (index_t i=0; i<20*3; ++i) { if (i%2==0) data.matrix[i]=0; else - data.matrix[i] = m_rng->random(1, 9); + data.matrix[i] = dist(prng); } /* create sparse features */ diff --git a/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp b/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp index cd52ecdbf6b..1a53788d49b 100644 --- a/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp +++ b/examples/undocumented/libshogun/modelselection_combined_kernel_sub_parameters.cpp @@ -99,10 +99,11 @@ void modelselection_combined_kernel() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(matrix); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp b/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp index 71d7bfe0572..57ae0e651e3 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_kernel.cpp @@ -103,10 +103,11 @@ int main(int argc, char **argv) /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); CBinaryLabels* labels=new CBinaryLabels(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create num_feautres 2-dimensional vectors */ CDenseFeatures* features=new CDenseFeatures(matrix); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp b/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp index 
e3b30f0a0b9..207255be5cc 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_krr.cpp @@ -77,7 +77,8 @@ void test_cross_validation() /* training label data */ SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -85,7 +86,7 @@ void test_cross_validation() for (index_t i=0; inormal_random(0, 1.0); + lab.vector[i] = i + dist(prng); } /* training features */ diff --git a/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp b/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp index 414d727b737..f04801b2dcb 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_mkl.cpp @@ -64,9 +64,10 @@ void test() /* create some data and labels */ SGMatrix matrix(dim_vectors, num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (int32_t i=0; istd_normal_distrib(); + matrix.matrix[i] = dist(prng); /* create feature object */ CDenseFeatures* features=new CDenseFeatures (); diff --git a/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp b/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp index af4922675b9..c902e49bc95 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_multiclass_svm.cpp @@ -53,13 +53,14 @@ void test() /* create data: some easy multiclass data */ SGMatrix feat=SGMatrix(dim_vectors, num_vectors); SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t j=0; jstd_normal_distrib(); + feat(i, j) = dist(prng); /* make sure classes are (alomst) linearly seperable against each other */ feat(lab[j],j)+=distance; diff --git a/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp b/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp index bbb1558ad4b..e62d239c7ec 100644 --- a/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp +++ b/examples/undocumented/libshogun/modelselection_grid_search_string_kernel.cpp @@ -76,17 +76,20 @@ int main(int argc, char **argv) index_t num_subsets=num_strings/3; SGStringList strings(num_strings, max_string_length); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_len( + min_string_length, max_string_length); + std::uniform_int_distribution dist_asc('A', 'Z'); for (index_t i=0; irandom(min_string_length, max_string_length); + index_t len = dist_len(prng); SGString current(len); SG_SPRINT("string %i: \"", i); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); + current.string[j] = (char)dist_asc(prng); char* string=new char[2]; string[0]=current.string[j]; diff --git a/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp b/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp index 98389862d2c..4163aa41528 100644 --- a/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp +++ b/examples/undocumented/libshogun/neuralnets_deep_belief_network.cpp @@ -45,7 +45,8 @@ int 
main(int, char*[]) init_shogun_with_defaults(); // initialize the random number generator with a fixed seed, for repeatability - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-1.0, 1.0); // Prepare the training data const int num_features = 5; @@ -67,11 +68,14 @@ int main(int, char*[]) } for (int32_t i=0; irandom(-1.0, 1.0); + means[i] = dist(prng); for (int32_t i=0; inormal_random(means[i], 1.0); + { + std::normal_distribution dist_x(means[i], 1.0); + X(i, j) = dist_x(prng); + } CDenseFeatures* features = new CDenseFeatures(X); diff --git a/examples/undocumented/libshogun/parameter_iterate_float64.cpp b/examples/undocumented/libshogun/parameter_iterate_float64.cpp index c3e719c7ff0..139ebbd20be 100644 --- a/examples/undocumented/libshogun/parameter_iterate_float64.cpp +++ b/examples/undocumented/libshogun/parameter_iterate_float64.cpp @@ -32,9 +32,13 @@ int main(int argc, char** argv) /* create some random data */ SGMatrix matrix(n,n); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for(int32_t i=0; irandom((float64_t)-n, (float64_t)n); + { + std::uniform_real_distribution dist( + (float64_t)-n, (float64_t)n); + matrix.matrix[i] = dist(prng); + } SGMatrix::display_matrix(matrix.matrix, n, n); diff --git a/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp b/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp index b68cc1c24a3..5c1051b87ed 100644 --- a/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp +++ b/examples/undocumented/libshogun/parameter_iterate_sgobject.cpp @@ -29,11 +29,15 @@ int main(int argc, char** argv) const int32_t n=7; init_shogun(&print_message); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); /* create some random data and hand it to each kernel */ SGMatrix matrix(n,n); for (int32_t k=0; krandom((float64_t)-n, (float64_t)n); + { + std::uniform_real_distribution dist( + (float64_t)-n, (float64_t)n); + matrix.matrix[k] = dist(prng); + } SG_SPRINT("feature data:\n"); SGMatrix::display_matrix(matrix.matrix, n, n); @@ -44,8 +48,8 @@ int main(int argc, char** argv) CGaussianKernel** kernels=SG_MALLOC(CGaussianKernel*, n); for (int32_t i=0; irandom(0.0, (float64_t)n * n)); + std::uniform_real_distribution dist(0.0, (float64_t)n * n); + kernels[i] = new CGaussianKernel(10, dist(prng)); /* hand data to kernel */ kernels[i]->init(features, features); diff --git a/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp b/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp index 290d7e42ec2..eb1aadde517 100644 --- a/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp +++ b/examples/undocumented/libshogun/preprocessor_randomfouriergauss.cpp @@ -32,7 +32,8 @@ void gen_rand_data(float64_t* & feat, float64_t* & lab,const int32_t num,const i { lab=SG_MALLOC(float64_t, num); feat=SG_MALLOC(float64_t, num*dims); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) + dist; + feat[i * dims + j] = dist(prng) + dist; } else { lab[i]=1.0; for (int32_t j=0; jrandom(0.0, 1.0) - dist; + feat[i * dims + j] = dist(prng) - dist; } } CMath::display_vector(lab,num); diff --git a/examples/undocumented/libshogun/random_fourier_features.cpp b/examples/undocumented/libshogun/random_fourier_features.cpp index 250bef2ea61..deae89635bf 100644 --- a/examples/undocumented/libshogun/random_fourier_features.cpp +++ 
b/examples/undocumented/libshogun/random_fourier_features.cpp @@ -25,7 +25,8 @@ void load_data(int32_t num_dim, int32_t num_vecs, { SGMatrix mat(num_dim, num_vecs); SGVector labs(num_vecs); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t i=0; irandom(0, 1) + 0.5; + mat(j, i) = dist(prng) + 0.5; } else { labs[i] = 1; - mat(j, i) = m_rng->random(0, 1) - 0.5; + mat(j, i) = dist(prng) - 0.5; } } } diff --git a/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp b/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp index ff86686188e..3cc584d76bf 100644 --- a/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp +++ b/examples/undocumented/libshogun/regression_gaussian_process_simple_exact.cpp @@ -31,10 +31,11 @@ void test() SGMatrix X(1, n); SGMatrix X_test(1, n); SGVector Y(n); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, x_range); for (index_t i=0; irandom(0.0, x_range); + X[i] = dist(prng); X_test[i]=(float64_t)i / n*x_range; Y[i]=CMath::sin(X[i]); } diff --git a/examples/undocumented/libshogun/regression_libsvr.cpp b/examples/undocumented/libshogun/regression_libsvr.cpp index a9a829ccf52..5fbd925fcb4 100644 --- a/examples/undocumented/libshogun/regression_libsvr.cpp +++ b/examples/undocumented/libshogun/regression_libsvr.cpp @@ -25,7 +25,8 @@ void test_libsvr() /* create some easy regression data: 1d noisy sine wave */ index_t n=100; float64_t x_range=6; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, x_range); SGMatrix feat_train(1, n); SGMatrix feat_test(1, n); SGVector lab_train(n); @@ -33,8 +34,8 @@ void test_libsvr() for (index_t i=0; irandom(0.0, x_range); - feat_test[i]=(float64_t)i/n*x_range; + feat_train[i] = dist(prng); + feat_test[i] = (float64_t)i / n * x_range; lab_train[i]=CMath::sin(feat_train[i]); lab_test[i]=CMath::sin(feat_test[i]); } diff --git a/examples/undocumented/libshogun/serialization_multiclass_labels.cpp b/examples/undocumented/libshogun/serialization_multiclass_labels.cpp index c2a40340e0a..e86602aec3f 100644 --- a/examples/undocumented/libshogun/serialization_multiclass_labels.cpp +++ b/examples/undocumented/libshogun/serialization_multiclass_labels.cpp @@ -28,11 +28,12 @@ void test() labels->allocate_confidences_for(n_class); SGVector conf(n_class); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + conf[i] = dist(prng); - for (index_t i=0; iset_multiclass_confidences(i, conf); /* create serialized copy */ diff --git a/examples/undocumented/libshogun/so_fg_model.cpp b/examples/undocumented/libshogun/so_fg_model.cpp index 5202a2c136d..36ef4dd2403 100644 --- a/examples/undocumented/libshogun/so_fg_model.cpp +++ b/examples/undocumented/libshogun/so_fg_model.cpp @@ -43,6 +43,8 @@ void test(int32_t num_samples) CFactorGraphLabels* labels = new CFactorGraphLabels(num_samples); SG_REF(labels); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t n = 0; n < num_samples; ++n) { // factor graph @@ -50,11 +52,10 @@ void test(int32_t num_samples) SGVector::fill_vector(vc.vector, vc.vlen, 2); CFactorGraph* fg = new CFactorGraph(vc); - auto m_rng = std::unique_ptr(new CRandom()); // add factors SGVector data1(2); - data1[0] = 2.0 * 
m_rng->random(0.0, 1.0) - 1.0; - data1[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data1[0] = 2.0 * dist(prng) - 1.0; + data1[1] = 2.0 * dist(prng) - 1.0; SGVector var_index1(2); var_index1[0] = 0; var_index1[1] = 1; @@ -62,8 +63,8 @@ void test(int32_t num_samples) fg->add_factor(fac1); SGVector data2(2); - data2[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; - data2[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0; + data2[0] = 2.0 * dist(prng) - 1.0; + data2[1] = 2.0 * dist(prng) - 1.0; SGVector var_index2(2); var_index2[0] = 1; var_index2[1] = 2; diff --git a/examples/undocumented/libshogun/so_multiclass.cpp b/examples/undocumented/libshogun/so_multiclass.cpp index ef4b4878565..561e7579031 100644 --- a/examples/undocumented/libshogun/so_multiclass.cpp +++ b/examples/undocumented/libshogun/so_multiclass.cpp @@ -34,7 +34,9 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) { float64_t means[DIMS]; float64_t stds[DIMS]; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_m(-100, 100); + std::uniform_int_distribution dist_s(1, 5); FILE* pfile = fopen(FNAME, "w"); @@ -42,8 +44,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) { for ( int32_t j = 0 ; j < DIMS ; ++j ) { - means[j] = m_rng->random(-100, 100); - stds[j] = m_rng->random(1, 5); + means[j] = dist_m(prng); + stds[j] = dist_s(prng); } for ( int32_t i = 0 ; i < NUM_SAMPLES ; ++i ) @@ -54,8 +56,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) for ( int32_t j = 0 ; j < DIMS ; ++j ) { - feats[(c * NUM_SAMPLES + i) * DIMS + j] = - m_rng->normal_random(means[j], stds[j]); + std::normal_distribution dist(means[j], stds[j]); + feats[(c * NUM_SAMPLES + i) * DIMS + j] = dist(prng); fprintf(pfile, " %f", feats[(c*NUM_SAMPLES+i)*DIMS + j]); } diff --git a/examples/undocumented/libshogun/so_multiclass_BMRM.cpp b/examples/undocumented/libshogun/so_multiclass_BMRM.cpp index 732ce56a47a..3937360645c 100644 --- a/examples/undocumented/libshogun/so_multiclass_BMRM.cpp +++ b/examples/undocumented/libshogun/so_multiclass_BMRM.cpp @@ -90,14 +90,16 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) FILE* pfile = fopen(FNAME, "w"); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist_m(-100, 100); + std::uniform_int_distribution dist_s(1, 5); for ( int32_t c = 0 ; c < NUM_CLASSES ; ++c ) { for ( int32_t j = 0 ; j < DIMS ; ++j ) { - means[j] = m_rng->random(-100, 100); - stds[j] = m_rng->random(1, 5); + means[j] = dist_m(prng); + stds[j] = dist_s(prng); } for ( int32_t i = 0 ; i < NUM_SAMPLES ; ++i ) @@ -108,8 +110,8 @@ void gen_rand_data(SGVector< float64_t > labs, SGMatrix< float64_t > feats) for ( int32_t j = 0 ; j < DIMS ; ++j ) { - feats[(c * NUM_SAMPLES + i) * DIMS + j] = - m_rng->normal_random(means[j], stds[j]); + std::normal_distribution dist(means[j], stds[j]); + feats[(c * NUM_SAMPLES + i) * DIMS + j] = dist(prng); fprintf(pfile, " %d:%f", j+1, feats[(c*NUM_SAMPLES+i)*DIMS + j]); } diff --git a/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp index f97e5fe6a52..b2db09ff407 100644 --- a/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_LOO_crossvalidation.cpp @@ -19,19 +19,21 @@ int main(int argc, char **argv) index_t num_labels; index_t runs=10; - auto m_rng = std::unique_ptr(new 
CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist_sl(-10.0, 10.0); + std::uniform_int_distribution dist_l(10, 50); while (runs-->0) { - num_labels = m_rng->random(10, 50); + num_labels = dist_l(prng); - //SG_SPRINT("num_labels=%d\n\n", num_labels); + // SG_SPRINT("num_labels=%d\n\n", num_labels); - /* build labels */ - CRegressionLabels* labels=new CRegressionLabels(num_labels); + /* build labels */ + CRegressionLabels* labels = new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_sl(prng)); // SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } diff --git a/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp index 7d0b54a56fb..c17318638da 100644 --- a/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_standard_crossvalidation.cpp @@ -26,12 +26,15 @@ int main(int argc, char **argv) index_t num_labels; index_t num_subsets; index_t runs=100; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist_l(-10.0, 10.0); + std::uniform_int_distribution dist_nl(10, 150); + std::uniform_int_distribution dist_ns(1, 5); while (runs-->0) { - num_labels = m_rng->random(10, 150); - num_subsets = m_rng->random(1, 5); + num_labels = dist_nl(prng); + num_subsets = dist_ns(prng); index_t desired_size=CMath::round( (float64_t)num_labels/(float64_t)num_subsets); @@ -45,7 +48,7 @@ int main(int argc, char **argv) CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_l(prng)); SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } SG_SPRINT("\n"); diff --git a/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp b/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp index b2dc651d968..7c4e1718b33 100644 --- a/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp +++ b/examples/undocumented/libshogun/splitting_stratified_crossvalidation.cpp @@ -25,13 +25,16 @@ int main(int argc, char **argv) index_t num_labels, num_classes, num_subsets; index_t runs=50; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_nl(5, 100); + std::uniform_int_distribution dist_nc(2, 10); + std::uniform_int_distribution dist_ns(1, 10); while (runs-->0) { - num_labels = m_rng->random(5, 100); - num_classes = m_rng->random(2, 10); - num_subsets = m_rng->random(1, 10); + num_labels = dist_nl(prng); + num_classes = dist_nc(prng); + num_subsets = dist_ns(prng); /* this will throw an error */ if (num_labelsset_label(i, m_rng->random_64() % num_classes); + labels->set_label(i, prng() % num_classes); SG_SPRINT("label(%d)=%.18g\n", i, labels->get_label(i)); } SG_SPRINT("\n"); diff --git a/examples/undocumented/libshogun/streaming_from_dense.cpp b/examples/undocumented/libshogun/streaming_from_dense.cpp index 51d3aeb10a4..141ae4be811 100644 --- a/examples/undocumented/libshogun/streaming_from_dense.cpp +++ b/examples/undocumented/libshogun/streaming_from_dense.cpp @@ -32,13 +32,14 @@ using namespace shogun; void gen_rand_data(SGMatrix feat, SGVector lab) { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) + DIST; + feat[i
* DIMS + j] = dist(prng) + DIST; if (lab.vector) lab[i]=0; @@ -46,7 +47,7 @@ void gen_rand_data(SGMatrix feat, SGVector lab) else { for (int32_t j=0; jrandom(0.0, 1.0) - DIST; + feat[i * DIMS + j] = dist(prng) - DIST; if (lab.vector) lab[i]=1; diff --git a/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp b/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp index cca1ea747fe..481f177713c 100644 --- a/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp +++ b/src/gpl/shogun/classifier/svm/QPBSVMLib.cpp @@ -53,7 +53,6 @@ #include #include -#include #include #include @@ -592,8 +591,10 @@ int32_t CQPBSVMLib::qpbsvm_gauss_seidel(float64_t *x, float64_t **ptr_History, int32_t verb) { + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); for (int32_t t=0; t<200; t++) { @@ -623,8 +624,10 @@ int32_t CQPBSVMLib::qpbsvm_gradient_descent(float64_t *x, float64_t **ptr_History, int32_t verb) { + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); for (int32_t t=0; t<2000; t++) { diff --git a/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp b/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp index 2f1baffb771..227c007c060 100644 --- a/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp +++ b/src/gpl/shogun/classifier/svm/WDSVMOcas.cpp @@ -21,7 +21,6 @@ #include #include #include -#include using namespace shogun; diff --git a/src/interfaces/swig/Mathematics.i b/src/interfaces/swig/Mathematics.i index f6df4cc18ce..66c584c599c 100644 --- a/src/interfaces/swig/Mathematics.i +++ b/src/interfaces/swig/Mathematics.i @@ -40,28 +40,23 @@ namespace shogun #ifdef USE_INT32 %rename(dot_int32) CMath::dot(int32_t const *,int32_t const *,int32_t); %rename(pow_int32) CMath::pow(int32_t,int32_t); -%rename(random_int32) CMath::random(int32_t,int32_t); #endif #ifdef USE_UINT32 %rename(dot_uint32) CMath::dot(uint32_t const *,uint32_t const *,int32_t); -%rename(random_uint32) CMath::random(uint32_t,uint32_t); #endif #ifdef USE_INT64 %rename(dot_int64) CMath::dot(int64_t const *,int64_t const *,int32_t); -%rename(random_int64) CMath::random(int64_t,int64_t); #endif #ifdef USE_UINT64 %rename(dot_uint64) CMath::dot(uint64_t const *,uint64_t const *,int32_t); -%rename(random_uint64) CMath::random(uint64_t,uint64_t); #endif #ifdef USE_FLOAT32 %rename(dot_float32) CMath::dot(float32_t const *,float32_t const *,int32_t); %rename(normal_random_float32) CMath::normal_random(float32_t,float32_t); -%rename(random_float32) CMath::random(float32_t,float32_t); %rename(sqrt_float32) CMath::sqrt(float32_t); #endif @@ -70,7 +65,6 @@ namespace shogun %rename(normal_random_float64) CMath::normal_random(float64_t,float64_t); %rename(pow_float64_int32) CMath::pow(float64_t,int32_t); %rename(pow_float64_float64) CMath::pow(float64_t,float64_t); -%rename(random_float64) CMath::random(float64_t,float64_t); %rename(sqrt_float64) CMath::sqrt(float64_t); } #endif diff --git a/src/shogun/base/DynArray.h b/src/shogun/base/DynArray.h index bb5a941604c..1ee70f9cfb4 100644 --- a/src/shogun/base/DynArray.h +++ b/src/shogun/base/DynArray.h @@ -448,18 +448,25 @@ template class DynArray /** randomizes the array (not thread safe!) 
*/ void shuffle() { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t i=0; i<=current_num_elements-1; ++i) - CMath::swap( - array[i], - array[m_rng->random(i, current_num_elements - 1)]); + { + std::uniform_int_distribution dist( + i, current_num_elements - 1); + CMath::swap(array[i], array[dist(prng)]); + } } /** randomizes the array with external random state */ - void shuffle(CRandom * rand) + template + void shuffle(RandomGenerator& prng) { for (index_t i=0; i<=current_num_elements-1; ++i) - CMath::swap(array[i], array[rand->random(i, current_num_elements-1)]); + { + std::uniform_int_distribution dist( + i, current_num_elements - 1); + CMath::swap(array[i], array[dist(prng)]); + } } /** set array with a constant */ diff --git a/src/shogun/base/SGObject.cpp b/src/shogun/base/SGObject.cpp index f5d10e3224f..2dec53ac746 100644 --- a/src/shogun/base/SGObject.cpp +++ b/src/shogun/base/SGObject.cpp @@ -494,7 +494,6 @@ void CSGObject::init() m_parameters = new Parameter(); m_model_selection_parameters = new Parameter(); m_gradient_parameters=new Parameter(); - m_rng = std::unique_ptr(new CRandom()); m_generic = PT_NOT_GENERIC; m_load_pre_called = false; m_load_post_called = false; diff --git a/src/shogun/base/SGObject.h b/src/shogun/base/SGObject.h index 9143118f994..5e59613756e 100644 --- a/src/shogun/base/SGObject.h +++ b/src/shogun/base/SGObject.h @@ -13,15 +13,16 @@ #ifndef __SGOBJECT_H__ #define __SGOBJECT_H__ -#include -#include -#include -#include +#include #include #include #include -#include +#include +#include #include +#include +#include +#include #include @@ -35,7 +36,6 @@ class SGIO; class Parallel; class Parameter; class CSerializableFile; -class CRandom; template class CMap; @@ -570,10 +570,6 @@ class CSGObject /** Hash of parameter values*/ uint32_t m_hash; -protected: - /** random generator */ - std::unique_ptr m_rng; - private: EPrimitiveType m_generic; diff --git a/src/shogun/base/init.h b/src/shogun/base/init.h index 1b3c1087624..c048cea1721 100644 --- a/src/shogun/base/init.h +++ b/src/shogun/base/init.h @@ -23,7 +23,6 @@ namespace shogun class CMath; class Version; class Parallel; - class CRandom; class SGLinalg; extern uint32_t sg_random_seed; @@ -118,7 +117,7 @@ namespace shogun */ uint32_t generate_seed(); - template + template T get_prng() { return T(sg_random_seed); diff --git a/src/shogun/classifier/svm/GNPPLib.cpp b/src/shogun/classifier/svm/GNPPLib.cpp index da7a8feeb36..d9810130797 100644 --- a/src/shogun/classifier/svm/GNPPLib.cpp +++ b/src/shogun/classifier/svm/GNPPLib.cpp @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/src/shogun/classifier/svm/LibLinear.cpp b/src/shogun/classifier/svm/LibLinear.cpp index e759f3beed2..3b9d2a86b19 100644 --- a/src/shogun/classifier/svm/LibLinear.cpp +++ b/src/shogun/classifier/svm/LibLinear.cpp @@ -316,6 +316,7 @@ void CLibLinear::solve_l2r_l1l2_svc( auto pb = progress(range(10)); CTime start_time; + auto prng = get_prng(); while (iter < max_iterations && !CSignal::cancel_computations()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -326,7 +327,9 @@ void CLibLinear::solve_l2r_l1l2_svc( for (i=0; irandom(i, active_size - 1); + std::uniform_int_distribution uniform_int_dist( + i, active_size - 1); + int j = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } @@ -526,6 +529,7 @@ void CLibLinear::solve_l1r_l2_svc( auto pb = progress(range(10)); CTime start_time; + auto prng = get_prng(); while (iter < max_iterations 
&& !CSignal::cancel_computations()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -535,7 +539,9 @@ void CLibLinear::solve_l1r_l2_svc( for(j=0; jrandom(j, active_size - 1); + std::uniform_int_distribution uniform_int_dist( + j, active_size - 1); + int i = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } @@ -898,6 +904,7 @@ void CLibLinear::solve_l1r_lr( auto pb = progress(range(10)); CTime start_time; + auto prng = get_prng(); while (iter < max_iterations && !CSignal::cancel_computations()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -907,7 +914,9 @@ void CLibLinear::solve_l1r_lr( for(j=0; jrandom(j, active_size - 1); + std::uniform_int_distribution uniform_int_dist( + j, active_size - 1); + int i = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } @@ -1237,11 +1246,13 @@ void CLibLinear::solve_l2r_lr_dual(SGVector& w, const liblinear_probl } auto pb = progress(range(10)); + auto prng = get_prng(); while (iter < max_iter) { for (i=0; irandom(i, l - 1); + std::uniform_int_distribution uniform_int_dist(i, l - 1); + int j = uniform_int_dist(prng); CMath::swap(index[i], index[j]); } int newton_iter = 0; diff --git a/src/shogun/classifier/vw/VwRegressor.cpp b/src/shogun/classifier/vw/VwRegressor.cpp index d978cc367d9..4b52af0af7a 100644 --- a/src/shogun/classifier/vw/VwRegressor.cpp +++ b/src/shogun/classifier/vw/VwRegressor.cpp @@ -69,6 +69,8 @@ void CVwRegressor::init(CVwEnvironment* env_to_use) vw_size_t num_threads = 1; weight_vectors = SG_MALLOC(float32_t*, num_threads); + std::uniform_real_distribution uniform_real_dist(-0.5, 0.5); + auto prng = get_prng(); for (vw_size_t i = 0; i < num_threads; i++) { weight_vectors[i] = SG_CALLOC(float32_t, env->stride * length / num_threads); @@ -76,7 +78,7 @@ void CVwRegressor::init(CVwEnvironment* env_to_use) if (env->random_weights) { for (vw_size_t j = 0; j < length/num_threads; j++) - weight_vectors[i][j] = m_rng->random(-0.5, 0.5); + weight_vectors[i][j] = uniform_real_dist(prng); } if (env->initial_weight != 0.) 
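// Illustrative sketch of the substitution these hunks apply (this block is an
// editorial example, not one of the patch hunks, and the helper name below is
// invented for illustration only). The pattern: drop the CRandom member/local,
// obtain a generator via get_prng() from src/shogun/base/init.h (per this patch
// it returns a std::mt19937_64 seeded with the global seed), and draw values
// through a <random> distribution object instead of CRandom::random().
#include <random>
#include <shogun/base/init.h>

double sample_uniform_unit()
{
    auto prng = shogun::get_prng();                        // replaces: new CRandom()
    std::uniform_real_distribution<double> dist(0.0, 1.0); // replaces: m_rng->random(0.0, 1.0)
    return dist(prng);                                     // one draw from U(0, 1)
}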
diff --git a/src/shogun/clustering/GMM.cpp index 4dd03847bf0..900735699bc 100644 --- a/src/shogun/clustering/GMM.cpp +++ b/src/shogun/clustering/GMM.cpp @@ -27,6 +27,7 @@ using namespace std; CGMM::CGMM() : CDistribution(), m_components(), m_coefficients() { + m_rng = get_prng(); register_params(); } @@ -42,7 +43,7 @@ CGMM::CGMM(int32_t n, ECovType cov_type) : CDistribution(), m_components(), m_co SG_REF(m_components[i]); m_components[i]->set_cov_type(cov_type); } - + m_rng = get_prng(); register_params(); } @@ -91,7 +92,7 @@ CGMM::CGMM(vector components, SGVector coefficients, bool m_coefficients[i]=coefficients[i]; } } - + m_rng = get_prng(); register_params(); } @@ -388,14 +389,14 @@ void CGMM::partial_em(int32_t comp1, int32_t comp2, int32_t comp3, float64_t min SGVector::add(components[1]->get_mean().vector, alpha1, components[1]->get_mean().vector, alpha2, components[2]->get_mean().vector, dim_n); + std::normal_distribution normal_dist(0, 1); + auto prng = get_prng(); for (int32_t i=0; iget_mean().vector[i] = - components[0]->get_mean().vector[i] + - m_rng->std_normal_distrib() * noise_mag; + components[0]->get_mean().vector[i] + normal_dist(prng) * noise_mag; components[0]->get_mean().vector[i] = - components[0]->get_mean().vector[i] + - m_rng->std_normal_distrib() * noise_mag; + components[0]->get_mean().vector[i] + normal_dist(prng) * noise_mag; } coefficients.vector[1]=coefficients.vector[1]+coefficients.vector[2]; @@ -770,7 +771,9 @@ SGVector CGMM::sample() { REQUIRE(m_components.size()>0, "Number of mixture components is %d but " "must be positive\n", m_components.size()); - float64_t rand_num = m_rng->random(float64_t(0), float64_t(1)); + + std::uniform_real_distribution uniform_real_dist(0.0, 1.0); + float64_t rand_num = uniform_real_dist(m_rng); float64_t cum_sum=0; for (int32_t i=0; i diff --git a/src/shogun/clustering/GMM.h b/src/shogun/clustering/GMM.h index 9e8d3cd1ba9..ef6e2a19dba 100644 --- a/src/shogun/clustering/GMM.h +++ b/src/shogun/clustering/GMM.h @@ -23,6 +23,7 @@ #include #include +#include #include namespace shogun @@ -247,6 +248,8 @@ class CGMM : public CDistribution std::vector m_components; /** Mixture coefficients */ SGVector m_coefficients; + + std::mt19937_64 m_rng; }; } #endif //HAVE_LAPACK diff --git a/src/shogun/clustering/KMeans.cpp index f8c0c0974c6..b191d2fdd1d 100644 --- a/src/shogun/clustering/KMeans.cpp +++ b/src/shogun/clustering/KMeans.cpp @@ -13,7 +13,6 @@ #include #include #include -#include #include using namespace Eigen; diff --git a/src/shogun/clustering/KMeansBase.cpp b/src/shogun/clustering/KMeansBase.cpp index 08a4a9add0d..92c8a084178 100644 --- a/src/shogun/clustering/KMeansBase.cpp +++ b/src/shogun/clustering/KMeansBase.cpp @@ -15,7 +15,6 @@ #include #include #include -#include #include using namespace shogun; @@ -273,9 +272,11 @@ SGMatrix CKMeansBase::kmeanspp() centers.zero(); SGVector min_dist=SGVector(lhs_size); min_dist.zero(); - + auto prng = get_prng(); + std::uniform_int_distribution uniform_int_dist( (index_t)0, lhs_size - 1); /* First center is chosen at random */ - int32_t mu = m_rng->random((int32_t)0, lhs_size - 1); + int32_t mu = uniform_int_dist(prng); SGVector mu_first=lhs->get_feature_vector(mu); for(int32_t j=0; j CKMeansBase::kmeanspp() #endif //HAVE_LINALG int32_t n_rands=2 + int32_t(CMath::log(k)); + std::uniform_real_distribution dist_prob(0.0, 1.0); /* Choose centers with weighted probability */ for(int32_t i=1; i CKMeansBase::kmeanspp() float64_t temp_dist=0.0; SGVector temp_min_dist = SGVector(lhs_size); int32_t new_center = 0; - float64_t prob = m_rng->random(0.0, 1.0); - prob=prob*sum; - + float64_t prob = dist_prob(prng); + prob = prob * sum; + for(int32_t
j=0; j #include #include -#include #ifdef _WIN32 #undef far @@ -131,12 +130,14 @@ SGVector CKMeansMiniBatch::mbchoose_rand(int32_t b, int32_t num) { SGVector chosen=SGVector(num); SGVector ret=SGVector(b); - auto rng = std::unique_ptr(new CRandom()); + + std::uniform_int_distribution uniform_int_dist(0, num - 1); chosen.zero(); int32_t ch=0; + auto prng = get_prng(); while (chrandom(0, num - 1); + const int32_t n = uniform_int_dist(prng); if (chosen[n]==0) { chosen[n]+=1; diff --git a/src/shogun/converter/ica/FastICA.cpp b/src/shogun/converter/ica/FastICA.cpp index 49e48d98c96..383f7944f91 100644 --- a/src/shogun/converter/ica/FastICA.cpp +++ b/src/shogun/converter/ica/FastICA.cpp @@ -116,15 +116,16 @@ CFeatures* CFastICA::apply(CFeatures* features) WX = EX; } + std::normal_distribution normal_dist(0, 1); // Initial mixing matrix estimate if (m_mixing_matrix.num_rows != m || m_mixing_matrix.num_cols != m) { m_mixing_matrix = SGMatrix(m,m); - + auto prng = get_prng(); for (int i = 0; i < m; i++) { for (int j = 0; j < m; j++) - m_mixing_matrix(i, j) = m_rng->std_normal_distrib(); + m_mixing_matrix(i, j) = normal_dist(prng); } } diff --git a/src/shogun/distributions/Gaussian.cpp b/src/shogun/distributions/Gaussian.cpp index 6558b8ef379..8bb9e638867 100644 --- a/src/shogun/distributions/Gaussian.cpp +++ b/src/shogun/distributions/Gaussian.cpp @@ -48,6 +48,7 @@ CGaussian::CGaussian( void CGaussian::init() { m_constant=CMath::log(2*M_PI)*m_mean.vlen; + m_rng = get_prng(); switch (m_cov_type) { case FULL: @@ -410,9 +411,9 @@ SGVector CGaussian::sample() } SGVector random_vec(m_mean.vlen); - + std::normal_distribution dist(0, 1); for (int32_t i = 0; i < m_mean.vlen; i++) - random_vec.vector[i] = m_rng->std_normal_distrib(); + random_vec.vector[i] = dist(m_rng); if (m_cov_type == FULL) { diff --git a/src/shogun/distributions/Gaussian.h b/src/shogun/distributions/Gaussian.h index 202c7f31cff..b887b0f38b6 100644 --- a/src/shogun/distributions/Gaussian.h +++ b/src/shogun/distributions/Gaussian.h @@ -241,6 +241,8 @@ class CGaussian : public CDistribution SGVector m_mean; /** covariance type */ ECovType m_cov_type; + + std::mt19937_64 m_rng; }; } #endif //_GAUSSIAN_H__ diff --git a/src/shogun/distributions/HMM.cpp b/src/shogun/distributions/HMM.cpp index b7ff4a4fe12..542041f37a0 100644 --- a/src/shogun/distributions/HMM.cpp +++ b/src/shogun/distributions/HMM.cpp @@ -24,9 +24,8 @@ #define VAL_MACRO \ [&]() { \ - return log( \ - (default_value == 0) ? (m_rng->random(MIN_RAND, MAX_RAND)) \ - : default_value); \ + std::uniform_real_distribution dist(MIN_RAND, MAX_RAND); \ + return log((default_value == 0) ? 
(dist(prng)) : default_value); \ } #define ARRAY_SIZE 65336 @@ -2451,13 +2450,15 @@ void CHMM::init_model_random() float64_t sum; int32_t i,j; + auto prng = get_prng(); + std::uniform_real_distribution dist(MIN_RAND, 1.0); //initialize a with random values for (i=0; irandom(MIN_RAND, 1.0)); + set_a(i, j, dist(prng)); sum+=get_a(i,j); } @@ -2470,7 +2471,7 @@ void CHMM::init_model_random() sum=0; for (i=0; irandom(MIN_RAND, 1.0)); + set_p(i, dist(prng)); sum+=get_p(i); } @@ -2482,7 +2483,7 @@ void CHMM::init_model_random() sum=0; for (i=0; irandom(MIN_RAND, 1.0)); + set_q(i, dist(prng)); sum+=get_q(i); } @@ -2496,7 +2497,7 @@ void CHMM::init_model_random() sum=0; for (j=0; jrandom(MIN_RAND, 1.0)); + set_b(i, j, dist(prng)); sum+=get_b(i,j); } @@ -2534,11 +2535,12 @@ void CHMM::init_model_defined() for (j=0; j dist(MIN_RAND, 1.0); //initialize a values that have to be learned float64_t *R=SG_MALLOC(float64_t, N); for (r = 0; r < N; r++) - R[r] = m_rng->random(MIN_RAND, 1.0); + R[r] = dist(prng); i=0; sum=0; k=i; j=model->get_learn_a(i,0); while (model->get_learn_a(i,0)!=-1 || krandom(MIN_RAND, 1.0); + R[r] = dist(prng); } } SG_FREE(R); R=NULL ; @@ -2568,7 +2570,7 @@ void CHMM::init_model_defined() //initialize b values that have to be learned R=SG_MALLOC(float64_t, M); for (r = 0; r < M; r++) - R[r] = m_rng->random(MIN_RAND, 1.0); + R[r] = dist(prng); i=0; sum=0; k=0 ; j=model->get_learn_b(i,0); while (model->get_learn_b(i,0)!=-1 || krandom(MIN_RAND, 1.0); + R[r] = dist(prng); } } SG_FREE(R); R=NULL ; @@ -2637,7 +2639,7 @@ void CHMM::init_model_defined() sum=0; while (model->get_learn_p(i)!=-1) { - set_p(model->get_learn_p(i), m_rng->random(MIN_RAND, 1.0)); + set_p(model->get_learn_p(i), dist(prng)); sum+=get_p(model->get_learn_p(i)) ; i++ ; } ; @@ -2653,7 +2655,7 @@ void CHMM::init_model_defined() sum=0; while (model->get_learn_q(i)!=-1) { - set_q(model->get_learn_q(i), m_rng->random(MIN_RAND, 1.0)); + set_q(model->get_learn_q(i), dist(prng)); sum+=get_q(model->get_learn_q(i)) ; i++ ; } ; @@ -5094,7 +5096,7 @@ void CHMM::add_states(int32_t num_states, float64_t default_value) for (j=0; j CGaussianDistribution::sample(int32_t num_samples, - SGMatrix pre_samples) const +SGMatrix CGaussianDistribution::sample( + int32_t num_samples, SGMatrix pre_samples) { REQUIRE(num_samples>0, "Number of samples (%d) must be positive\n", num_samples); @@ -88,10 +88,11 @@ SGMatrix CGaussianDistribution::sample(int32_t num_samples, } else { + std::normal_distribution normal_dist(0, 1); /* allocate memory and sample from std normal */ samples=SGMatrix(m_dimension, num_samples); for (index_t i=0; istd_normal_distrib(); + samples.matrix[i] = normal_dist(m_rng); } /* map into desired Gaussian covariance */ @@ -167,6 +168,7 @@ SGVector CGaussianDistribution::log_pdf_multiple(SGMatrix void CGaussianDistribution::init() { + m_rng = get_prng(); SG_ADD(&m_mean, "mean", "Mean of the Gaussian.", MS_NOT_AVAILABLE); SG_ADD(&m_L, "L", "Lower factor of covariance matrix, " "depending on the factorization type.", MS_NOT_AVAILABLE); diff --git a/src/shogun/distributions/classical/GaussianDistribution.h b/src/shogun/distributions/classical/GaussianDistribution.h index 2370d56f9b6..5239027b10f 100644 --- a/src/shogun/distributions/classical/GaussianDistribution.h +++ b/src/shogun/distributions/classical/GaussianDistribution.h @@ -33,9 +33,9 @@ #ifndef GAUSSIANDISTRIBUTION_H #define GAUSSIANDISTRIBUTION_H +#include #include - #include #include #include @@ -86,8 +86,9 @@ class CGaussianDistribution: public CProbabilityDistribution 
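// CGaussianDistribution::sample above fills a matrix with standard-normal draws
// from std::normal_distribution and then maps them through the covariance factor
// L (Sigma = L * L^T). A standalone sketch of that sampling scheme for a fixed
// 2x2 lower-triangular factor; get_prng() is stood in by a seeded
// std::mt19937_64 and the tiny matrix is local to the example.
#include <array>
#include <cstdio>
#include <random>

int main()
{
    std::mt19937_64 prng(7); // stand-in for get_prng()
    std::normal_distribution<double> normal_dist(0.0, 1.0);

    const std::array<double, 2> mu = {{1.0, -2.0}};
    // Lower-triangular Cholesky factor of the covariance matrix.
    const double L[2][2] = {{2.0, 0.0}, {0.5, 1.0}};

    for (int n = 0; n < 3; ++n)
    {
        // z ~ N(0, I), then x = mu + L * z  =>  x ~ N(mu, L * L^T)
        const double z0 = normal_dist(prng);
        const double z1 = normal_dist(prng);
        const double x0 = mu[0] + L[0][0] * z0;
        const double x1 = mu[1] + L[1][0] * z0 + L[1][1] * z1;
        std::printf("sample %d: (%f, %f)\n", n, x0, x1);
    }
    return 0;
}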
* will be modified. * @return matrix with samples (column vectors) */ - virtual SGMatrix sample(int32_t num_samples, - SGMatrix pre_samples=SGMatrix()) const; + virtual SGMatrix sample( + int32_t num_samples, + SGMatrix pre_samples = SGMatrix()); /** Computes the log-pdf for all provided samples. That is * @@ -139,6 +140,8 @@ class CGaussianDistribution: public CProbabilityDistribution /** Lower factor of covariance matrix (depends on factorization type). * Covariance (approximation) is given by \f$\Sigma=LL^T\f$ */ SGMatrix m_L; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/distributions/classical/ProbabilityDistribution.cpp b/src/shogun/distributions/classical/ProbabilityDistribution.cpp index 5a5e34146b5..c19fe15210c 100644 --- a/src/shogun/distributions/classical/ProbabilityDistribution.cpp +++ b/src/shogun/distributions/classical/ProbabilityDistribution.cpp @@ -36,14 +36,14 @@ CProbabilityDistribution::~CProbabilityDistribution() } -SGMatrix CProbabilityDistribution::sample(int32_t num_samples, - SGMatrix pre_samples) const +SGMatrix CProbabilityDistribution::sample( + int32_t num_samples, SGMatrix pre_samples) { SG_ERROR("Not implemented in sub-class\n"); return SGMatrix(); } -SGVector CProbabilityDistribution::sample() const +SGVector CProbabilityDistribution::sample() { SGMatrix s=sample(1); SGVector result(m_dimension); diff --git a/src/shogun/distributions/classical/ProbabilityDistribution.h b/src/shogun/distributions/classical/ProbabilityDistribution.h index 367b6097823..881d8b153d6 100644 --- a/src/shogun/distributions/classical/ProbabilityDistribution.h +++ b/src/shogun/distributions/classical/ProbabilityDistribution.h @@ -44,15 +44,16 @@ class CProbabilityDistribution: public CSGObject * CGaussianDistribution. For reproducible results. Ignored by default. * @return matrix with samples (column vectors) */ - virtual SGMatrix sample(int32_t num_samples, - SGMatrix pre_samples=SGMatrix()) const; + virtual SGMatrix sample( + int32_t num_samples, + SGMatrix pre_samples = SGMatrix()); /** Samples from the distribution once. Wrapper method. No pre-sample * passing is possible with this method. 
* * @return vector with single sample */ - virtual SGVector sample() const; + virtual SGVector sample(); /** Computes the log-pdf for all provided samples * diff --git a/src/shogun/evaluation/CrossValidationSplitting.cpp b/src/shogun/evaluation/CrossValidationSplitting.cpp index de2b6964b17..e8c9cea1e18 100644 --- a/src/shogun/evaluation/CrossValidationSplitting.cpp +++ b/src/shogun/evaluation/CrossValidationSplitting.cpp @@ -33,7 +33,8 @@ void CCrossValidationSplitting::build_subsets() /* permute indices */ SGVector indices(m_labels->get_num_labels()); indices.range_fill(); - CMath::permute(indices, m_rng.get()); + auto prng = get_prng(); + CMath::permute(indices, prng); index_t num_subsets=m_subset_indices->get_num_elements(); @@ -58,5 +59,5 @@ void CCrossValidationSplitting::build_subsets() /* finally shuffle to avoid that subsets with low indices have more * elements, which happens if the number of class labels is not equal to * the number of subsets (external random state important for threads) */ - m_subset_indices->shuffle(m_rng.get()); + m_subset_indices->shuffle(prng); } diff --git a/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp b/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp index 206f4829d2f..4616f6ea83d 100644 --- a/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp +++ b/src/shogun/evaluation/StratifiedCrossValidationSplitting.cpp @@ -76,7 +76,7 @@ void CStratifiedCrossValidationSplitting::build_subsets() /* ensure that subsets are empty and set flag to filled */ reset_subsets(); m_is_filled=true; - + auto prng = get_prng(); SGVector unique_labels; if (m_labels->get_label_type() == LT_MULTICLASS) @@ -122,7 +122,7 @@ void CStratifiedCrossValidationSplitting::build_subsets() label_indices.get_element(i); // external random state important for threads - current->shuffle(m_rng.get()); + current->shuffle(prng); SG_UNREF(current); } @@ -150,5 +150,5 @@ void CStratifiedCrossValidationSplitting::build_subsets() /* finally shuffle to avoid that subsets with low indices have more * elements, which happens if the number of class labels is not equal to * the number of subsets (external random state important for threads) */ - m_subset_indices->shuffle(m_rng.get()); + m_subset_indices->shuffle(prng); } diff --git a/src/shogun/features/DataGenerator.cpp b/src/shogun/features/DataGenerator.cpp index 62a93628c10..40a3fc12789 100644 --- a/src/shogun/features/DataGenerator.cpp +++ b/src/shogun/features/DataGenerator.cpp @@ -33,7 +33,8 @@ SGMatrix CDataGenerator::generate_checkboard_data(int32_t num_classes int32_t dim, int32_t num_points, float64_t overlap) { int32_t points_per_class = num_points / num_classes; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution uniform_real_dist(0.0, 1.0); int32_t grid_size = (int32_t ) CMath::ceil(CMath::sqrt((float64_t ) num_classes)); float64_t cell_size = (float64_t ) 1 / grid_size; @@ -55,12 +56,13 @@ SGMatrix CDataGenerator::generate_checkboard_data(int32_t num_classes { do { - points(i, p) = m_rng->normal_random( + std::normal_distribution normal_dist( class_dim_centers[i], cell_size * 0.5); + points(i, p) = normal_dist(prng); if ((points(i, p)>(grid_idx[i]+1)*cell_size) || (points(i, p)random(0.0, 1.0) < overlap)) + if (!(uniform_real_dist(prng) < overlap)) continue; } break; @@ -88,13 +90,14 @@ SGMatrix CDataGenerator::generate_mean_data(index_t m, /* evtl. 
allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( dim, 2*m, target); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); /* fill matrix with normal data */ for (index_t i=0; i<2*m; ++i) { for (index_t j=0; jstd_normal_distrib(); + result(j, i) = normal_dist(prng); /* mean shift for second half */ if (i>=m) @@ -110,7 +113,9 @@ SGMatrix CDataGenerator::generate_sym_mix_gauss(index_t m, /* evtl. allocate space */ SGMatrix result=SGMatrix::get_allocated_matrix( 2, m, target); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); + std::uniform_int_distribution uniform_int_dist(0, 1); /* rotation matrix */ SGMatrix rot=SGMatrix(2,2); rot(0, 0)=CMath::cos(angle); @@ -122,10 +127,8 @@ SGMatrix CDataGenerator::generate_sym_mix_gauss(index_t m, * Gaussians */ for (index_t i=0; istd_normal_distrib() + (m_rng->random(0, 1) ? d : -d); - result(1, i) = - m_rng->std_normal_distrib() + (m_rng->random(0, 1) ? d : -d); + result(0, i) = normal_dist(prng) + (uniform_int_dist(prng) ? d : -d); + result(1, i) = normal_dist(prng) + (uniform_int_dist(prng) ? d : -d); } /* rotate result */ @@ -142,6 +145,7 @@ SGMatrix CDataGenerator::generate_gaussians(index_t m, index_t n, ind SGMatrix::get_allocated_matrix(dim, n*m); float64_t grid_distance = 5.0; + auto prng = get_prng(); for (index_t i = 0; i < n; ++i) { SGVector mean(dim); diff --git a/src/shogun/features/RandomFourierDotFeatures.cpp b/src/shogun/features/RandomFourierDotFeatures.cpp index 4c047e6b6bd..1ce8b4d0fbc 100644 --- a/src/shogun/features/RandomFourierDotFeatures.cpp +++ b/src/shogun/features/RandomFourierDotFeatures.cpp @@ -84,17 +84,21 @@ float64_t CRandomFourierDotFeatures::post_dot(float64_t dot_result, index_t par_ SGVector CRandomFourierDotFeatures::generate_random_parameter_vector() { + std::uniform_real_distribution uniform_real_dist( + 0.0, 2 * CMath::PI); + std::normal_distribution normal_dist(0, 1); SGVector vec(feats->get_dim_feature_space()+1); + auto prng = get_prng(); switch (kernel) { case GAUSSIAN: for (index_t i=0; inormal_random(0.0, 1); + CMath::sqrt(2.0) * uniform_real_dist(prng); } - vec[vec.vlen - 1] = m_rng->random(0.0, 2 * CMath::PI); + vec[vec.vlen - 1] = normal_dist(prng); break; default: diff --git a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp index 367fbd45443..644f3a41f32 100644 --- a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp +++ b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp @@ -73,6 +73,7 @@ void CGaussianBlobsDataGenerator::init() m_stretch=1; m_angle=0; m_cholesky=SGMatrix(2, 2); + m_rng = get_prng(); m_cholesky(0, 0)=1; m_cholesky(0, 1)=0; m_cholesky(1, 0)=0; @@ -88,13 +89,16 @@ bool CGaussianBlobsDataGenerator::get_next_example() /* allocate space */ SGVector result=SGVector(2); + std::uniform_int_distribution uniform_int_dist( + 0, m_sqrt_num_blobs - 1); + std::normal_distribution normal_dist(0, 1); /* sample latent distribution to compute offsets */ - index_t x_offset = m_rng->random(0, m_sqrt_num_blobs - 1) * m_distance; - index_t y_offset = m_rng->random(0, m_sqrt_num_blobs - 1) * m_distance; + index_t x_offset = uniform_int_dist(m_rng) * m_distance; + index_t y_offset = uniform_int_dist(m_rng) * m_distance; /* sample from std Gaussian */ - float64_t x = m_rng->std_normal_distrib(); - float64_t y = 
m_rng->std_normal_distrib(); + float64_t x = normal_dist(m_rng); + float64_t y = normal_dist(m_rng); /* transform through cholesky and add offset */ result[0]=m_cholesky(0, 0)*x+m_cholesky(0, 1)*y+x_offset; diff --git a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h index 83f557aafc7..c505a7b41ae 100644 --- a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h +++ b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.h @@ -86,6 +86,8 @@ class CGaussianBlobsDataGenerator: public CStreamingDenseFeatures /** Cholesky factor of covariance matrix of single Gaussians. Stored to * increase sampling performance */ SGMatrix m_cholesky; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp index 9ff4da905a0..9cf7716ffdc 100644 --- a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp +++ b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp @@ -50,10 +50,10 @@ void CMeanShiftDataGenerator::init() SG_ADD(&m_dimension_shift, "m_dimension_shift", "Dimension of mean shift", MS_NOT_AVAILABLE); + m_rng = get_prng(); m_dimension=0; m_mean_shift=0; m_dimension_shift=0; - unset_generic(); } @@ -64,9 +64,10 @@ bool CMeanShiftDataGenerator::get_next_example() /* allocate space */ SGVector result=SGVector(m_dimension); + std::normal_distribution normal_dist(0, 1); /* fill with std normal data */ for (index_t i=0; istd_normal_distrib(); + result[i] = normal_dist(m_rng); /* mean shift in selected dimension */ result[m_dimension_shift]+=m_mean_shift; diff --git a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h index 07bdb0a0e73..070fbdf2f6e 100644 --- a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h +++ b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.h @@ -83,6 +83,8 @@ class CMeanShiftDataGenerator: public CStreamingDenseFeatures /** Dimension that is shifted */ index_t m_dimension_shift; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/kernel/PyramidChi2.cpp b/src/shogun/kernel/PyramidChi2.cpp index b8fe52fe1e2..a1a84ca2e14 100644 --- a/src/shogun/kernel/PyramidChi2.cpp +++ b/src/shogun/kernel/PyramidChi2.cpp @@ -136,7 +136,7 @@ float64_t CPyramidChi2::compute(int32_t idx_a, int32_t idx_b) int32_t dims=alen/num_cells; - + auto prng = get_prng(); if(width<=0) { if(width_computation_type >0) @@ -158,10 +158,13 @@ float64_t CPyramidChi2::compute(int32_t idx_a, int32_t idx_b) if (num_randfeats_forwidthcomputation >0) { for(int32_t i=0; i< numind;++i) - featindices[i] = m_rng->random( + { + std::uniform_int_distribution dist( 0, ((CDenseFeatures*)lhs)->get_num_vectors() - 1); + featindices[i] = dist(prng); + } } else { diff --git a/src/shogun/lib/DynamicArray.h b/src/shogun/lib/DynamicArray.h index 17eec32be59..2a92cefac66 100644 --- a/src/shogun/lib/DynamicArray.h +++ b/src/shogun/lib/DynamicArray.h @@ -543,7 +543,11 @@ template class CDynamicArray :public CSGObject inline void shuffle() { m_array.shuffle(); } /** shuffles the array with external random state */ - inline void shuffle(CRandom * rand) { m_array.shuffle(rand); } + template + inline void shuffle(RandomGenerator& rand) + { + m_array.shuffle(rand); + } /** display this array */ inline void display_array() diff --git 
a/src/shogun/lib/DynamicObjectArray.h b/src/shogun/lib/DynamicObjectArray.h index 7209c058576..f9a2c37c0a0 100644 --- a/src/shogun/lib/DynamicObjectArray.h +++ b/src/shogun/lib/DynamicObjectArray.h @@ -391,7 +391,11 @@ class CDynamicObjectArray : public CSGObject inline void shuffle() { m_array.shuffle(); } /** shuffles the array with external random state */ - inline void shuffle(CRandom * rand) { m_array.shuffle(rand); } + template + inline void shuffle(RandomGenerator& rand) + { + m_array.shuffle(rand); + } /** @return object name */ virtual const char* get_name() const diff --git a/src/shogun/lib/SGVector.cpp b/src/shogun/lib/SGVector.cpp index 4071c8b75f0..418737605c1 100644 --- a/src/shogun/lib/SGVector.cpp +++ b/src/shogun/lib/SGVector.cpp @@ -614,9 +614,10 @@ void SGVector::vec1_plus_scalar_times_vec2(float32_t* vec1, template void SGVector::random_vector(T* vec, int32_t len, T min_value, T max_value) { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(min_value, max_value); for (int32_t i=0; irandom(min_value, max_value); + vec[i] = dist(prng); } template <> diff --git a/src/shogun/lib/SGVector.h b/src/shogun/lib/SGVector.h index 9ac5322a82a..054af21c522 100644 --- a/src/shogun/lib/SGVector.h +++ b/src/shogun/lib/SGVector.h @@ -36,7 +36,6 @@ namespace shogun template class SGSparseVector; template class SGMatrix; class CFile; - class CRandom; /** @brief shogun vector */ template class SGVector : public SGReferencedData diff --git a/src/shogun/lib/tapkee/tapkee_shogun.cpp b/src/shogun/lib/tapkee/tapkee_shogun.cpp index a712a8a6432..195d73a9f9a 100644 --- a/src/shogun/lib/tapkee/tapkee_shogun.cpp +++ b/src/shogun/lib/tapkee/tapkee_shogun.cpp @@ -11,22 +11,24 @@ #define CUSTOM_UNIFORM_RANDOM_INDEX_FUNCTION \ []() -> uint64_t { \ - auto rng = std::unique_ptr(new CRandom()); \ - return rng->random_64(); \ + auto prng = get_prng(); \ + return prng(); \ } #define CUSTOM_UNIFORM_RANDOM_FUNCTION \ - []() { \ - auto rng = std::unique_ptr(new CRandom()); \ - return rng->random( \ + []() -> float64_t { \ + auto prng = get_prng(); \ + std::uniform_real_distribution dist( \ static_cast(0), \ static_cast(1)); \ + return dist(prng); \ } #define CUSTOM_GAUSSIAN_RANDOM_FUNCTION \ - []() { \ - auto rng = std::unique_ptr(new CRandom()); \ - return rng->normal_random( \ + []() -> float64_t { \ + auto prng = get_prng(); \ + std::normal_distribution dist( \ static_cast(0), \ static_cast(1)); \ + return dist(prng); \ } #define TAPKEE_EIGEN_INCLUDE_FILE diff --git a/src/shogun/machine/BaggingMachine.cpp b/src/shogun/machine/BaggingMachine.cpp index 840b0f22ded..db70d0f1e4c 100644 --- a/src/shogun/machine/BaggingMachine.cpp +++ b/src/shogun/machine/BaggingMachine.cpp @@ -104,6 +104,7 @@ bool CBaggingMachine::train_machine(CFeatures* data) { REQUIRE(m_machine != NULL, "Machine is not set!"); REQUIRE(m_num_bags > 0, "Number of bag is not set!"); + auto prng = get_prng(); if (data) { @@ -127,10 +128,12 @@ bool CBaggingMachine::train_machine(CFeatures* data) SG_UNREF(m_oob_indices); m_oob_indices = new CDynamicObjectArray(); - SGMatrix rnd_indicies(m_bag_size, m_num_bags); for (index_t i = 0; i < m_num_bags*m_bag_size; ++i) - rnd_indicies.matrix[i] = m_rng->random(0, m_bag_size - 1); + { + std::uniform_int_distribution dist(0, m_bag_size - 1); + rnd_indicies.matrix[i] = dist(prng); + } #pragma omp parallel for for (int32_t i = 0; i < m_num_bags; ++i) diff --git a/src/shogun/machine/gp/EPInferenceMethod.cpp b/src/shogun/machine/gp/EPInferenceMethod.cpp 
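// SGVector::random_vector above now feeds the generator from get_prng() into a
// uniform distribution. A standalone sketch of the same fill; the trait-based
// choice between integer and real distributions is an illustration added here
// (std::uniform_int_distribution is only specified for integer types), not code
// taken from this patch, and the seeded std::mt19937_64 stands in for get_prng().
#include <cstdio>
#include <random>
#include <type_traits>
#include <vector>

template <class T, class PRNG>
void random_vector(std::vector<T>& vec, T min_value, T max_value, PRNG& prng)
{
    // Pick an integer or real uniform distribution depending on T.
    using dist_t = typename std::conditional<
        std::is_integral<T>::value,
        std::uniform_int_distribution<T>,
        std::uniform_real_distribution<T>>::type;
    dist_t dist(min_value, max_value);
    for (auto& v : vec)
        v = dist(prng);
}

int main()
{
    std::mt19937_64 prng(3); // stand-in for get_prng()
    std::vector<int> ints(4);
    std::vector<double> reals(4);
    random_vector(ints, 0, 9, prng);
    random_vector(reals, 0.0, 1.0, prng);
    for (int v : ints)
        std::printf("%d ", v);
    for (double v : reals)
        std::printf("%.3f ", v);
    std::printf("\n");
    return 0;
}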
index e76a1b9ad3f..cdd2d3384fc 100644 --- a/src/shogun/machine/gp/EPInferenceMethod.cpp +++ b/src/shogun/machine/gp/EPInferenceMethod.cpp @@ -230,7 +230,7 @@ void CEPInferenceMethod::update() float64_t nlZ_old=CMath::INFTY; uint32_t sweep=0; - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); while ((CMath::abs(m_nlZ-nlZ_old)>m_tol && sweep mean=get_posterior_mean(); - CGaussianDistribution* post_approx=new CGaussianDistribution(mean, cov); SGMatrix samples=post_approx->sample(num_importance_samples); diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h index 4e9fb8ddd16..67dd27c3001 100644 --- a/src/shogun/mathematics/Math.h +++ b/src/shogun/mathematics/Math.h @@ -21,12 +21,13 @@ #define __MATHEMATICS_H_ #include +#include #include +#include #include #include #include #include -#include #ifndef _USE_MATH_DEFINES #define _USE_MATH_DEFINES @@ -1016,19 +1017,28 @@ class CMath : public CSGObject return 0 == a ? b : a; } - template - static void permute(SGVector v, CRandom* rand = NULL) + template < + class T, + class RandomGenerator = std::uniform_int_distribution> + static void permute(SGVector v) { - if (rand) + auto prng = get_prng(); + for (index_t i = 0; i < v.vlen; ++i) { - for (index_t i = 0; i < v.vlen; ++i) - swap(v[i], v[rand->random(i, v.vlen - 1)]); + RandomGenerator dist(i, v.vlen - 1); + swap(v[i], v[dist(prng)]); } - else + } + + template < + class T, class RandomGenerator, + class Distribution = std::uniform_int_distribution> + static void permute(SGVector v, RandomGenerator prng) + { + for (index_t i = 0; i < v.vlen; ++i) { - auto m_rng = std::unique_ptr(new CRandom()); - for (index_t i = 0; i < v.vlen; ++i) - swap(v[i], v[m_rng->random(i, v.vlen - 1)]); + Distribution dist(i, v.vlen - 1); + swap(v[i], v[dist(prng)]); } } diff --git a/src/shogun/mathematics/Random.cpp b/src/shogun/mathematics/Random.cpp deleted file mode 100644 index a449fd824e3..00000000000 --- a/src/shogun/mathematics/Random.cpp +++ /dev/null @@ -1,378 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3 of the License, or - * (at your option) any later version. - * - * Written (W) 2013 Viktor Gal - * Copyright (C) 2013 Viktor Gal - */ -#ifdef _WIN32 -#define _CRT_RAND_S -#include -#endif - -#include -#include -#include -#include -#include -#include -#include - -#ifdef DEV_RANDOM -#include -#endif - -using namespace shogun; - -CRandom::CRandom() - : m_sfmt_32(NULL), - m_sfmt_64(NULL), - m_dsfmt(NULL) -{ - m_seed = sg_random_seed; - init(); -} - -CRandom::CRandom(uint32_t seed) - : m_seed(seed), - m_sfmt_32(NULL), - m_sfmt_64(NULL), - m_dsfmt(NULL) -{ - init(); -} - -CRandom::~CRandom() -{ - SG_FREE(m_x); - SG_FREE(m_y); - SG_FREE(m_xComp); - SG_FREE(m_sfmt_32); - SG_FREE(m_sfmt_64); - SG_FREE(m_dsfmt); -} - -void CRandom::set_seed(uint32_t seed) -{ - reinit(seed); -} - -uint32_t CRandom::get_seed() const -{ - return m_seed; -} - -void CRandom::init() -{ - /** init ziggurat variables */ - m_blockCount = 128; - m_R = 3.442619855899; - m_A = 9.91256303526217e-3; - m_uint32ToU = 1.0 / (float64_t)std::numeric_limits::max(); - - m_x = SG_MALLOC(float64_t, m_blockCount + 1); - m_y = SG_MALLOC(float64_t, m_blockCount); - m_xComp = SG_MALLOC(uint32_t, m_blockCount); - - // Initialise rectangle position data. - // m_x[i] and m_y[i] describe the top-right position ox Box i. 
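// The CMath::permute rewrite above draws the swap index from a
// std::uniform_int_distribution instead of CRandom, with one overload that owns
// its generator and one that uses a caller-supplied generator. A standalone
// sketch of both overloads over std::vector; index_t and get_prng() are stood in
// by std::size_t and a locally seeded std::mt19937_64.
#include <cstddef>
#include <random>
#include <utility>
#include <vector>

template <class T, class PRNG>
void permute(std::vector<T>& v, PRNG& prng)
{
    // Fisher-Yates: position i gets an element drawn uniformly from [i, size - 1].
    for (std::size_t i = 0; i + 1 < v.size(); ++i)
    {
        std::uniform_int_distribution<std::size_t> dist(i, v.size() - 1);
        std::swap(v[i], v[dist(prng)]);
    }
}

template <class T>
void permute(std::vector<T>& v)
{
    std::mt19937_64 prng(std::random_device{}()); // stand-in for get_prng()
    permute(v, prng);
}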
- - // Determine top right position of the base rectangle/box (the rectangle with the Gaussian tale attached). - // We call this Box 0 or B0 for short. - // Note. x[0] also describes the right-hand edge of B1. (See diagram). - m_x[0] = m_R; - m_y[0] = GaussianPdfDenorm(m_R); - - // The next box (B1) has a right hand X edge the same as B0. - // Note. B1's height is the box area divided by its width, hence B1 has a smaller height than B0 because - // B0's total area includes the attached distribution tail. - m_x[1] = m_R; - m_y[1] = m_y[0] + (m_A / m_x[1]); - - // Calc positions of all remaining rectangles. - for(int i=2; i < m_blockCount; i++) - { - m_x[i] = GaussianPdfDenormInv(m_y[i-1]); - m_y[i] = m_y[i-1] + (m_A / m_x[i]); - } - - // For completeness we define the right-hand edge of a notional box 6 as being zero (a box with no area). - m_x[m_blockCount] = 0.0; - - // Useful precomputed values. - m_A_div_y0 = m_A / m_y[0]; - - // Special case for base box. m_xComp[0] stores the area of B0 as a proportion of R - // (recalling that all segments have area A, but that the base segment is the combination of B0 and the distribution tail). - // Thus -m_xComp[0] is the probability that a sample point is within the box part of the segment. - m_xComp[0] = (uint32_t)(((m_R * m_y[0]) / m_A) * (float64_t)std::numeric_limits::max()); - - for(int32_t i=1; i < m_blockCount-1; i++) - { - m_xComp[i] = (uint32_t)((m_x[i+1] / m_x[i]) * (float64_t)std::numeric_limits::max()); - } - m_xComp[m_blockCount-1] = 0; // Shown for completeness. - - // Sanity check. Test that the top edge of the topmost rectangle is at y=1.0. - // Note. We expect there to be a tiny drift away from 1.0 due to the inexactness of floating - // point arithmetic. - ASSERT(CMath::abs(1.0 - m_y[m_blockCount-1]) < 1e-10); - - /** init SFMT and dSFMT */ - m_sfmt_32 = SG_MALLOC(sfmt_t, 1); - m_sfmt_64 = SG_MALLOC(sfmt_t, 1); - m_dsfmt = SG_MALLOC(dsfmt_t, 1); - reinit(m_seed); -} - -uint32_t CRandom::random_32() const -{ - m_state_lock.lock(); - uint32_t v = sfmt_genrand_uint32(m_sfmt_32); - m_state_lock.unlock(); - return v; -} - -uint64_t CRandom::random_64() const -{ - m_state_lock.lock(); - uint64_t v = sfmt_genrand_uint64(m_sfmt_64); - m_state_lock.unlock(); - return v; -} - -void CRandom::fill_array(uint32_t* array, int32_t size) const -{ -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= sfmt_get_min_array_size32(m_sfmt_32)) && (size % 4) == 0) - { - m_state_lock.lock(); - sfmt_fill_array32(m_sfmt_32, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = random_32(); -} - -void CRandom::fill_array(uint64_t* array, int32_t size) const -{ -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= sfmt_get_min_array_size64(m_sfmt_64)) && (size % 2) == 0) - { - m_state_lock.lock(); - sfmt_fill_array64(m_sfmt_64, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = random_64(); -} - -void CRandom::fill_array_oc(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_open_close(m_dsfmt, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_open_close(m_dsfmt); - m_state_lock.unlock(); -} - -void CRandom::fill_array_co(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if 
defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_close_open(m_dsfmt, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_close_open(m_dsfmt); - m_state_lock.unlock(); -} - -void CRandom::fill_array_oo(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_open_open(m_dsfmt, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_open_open(m_dsfmt); - m_state_lock.unlock(); -} - -void CRandom::fill_array_c1o2(float64_t* array, int32_t size) const -{ - m_state_lock.lock(); -#if defined(USE_ALIGNED_MEMORY) || defined(DARWIN) - if ((size >= dsfmt_get_min_array_size()) && (size % 2) == 0) - { - dsfmt_fill_array_close1_open2(m_dsfmt, array, size); - m_state_lock.unlock(); - return; - } -#endif - for (int32_t i=0; i < size; i++) - array[i] = dsfmt_genrand_close1_open2(m_dsfmt); - m_state_lock.unlock(); -} - -float64_t CRandom::random_close() const -{ - m_state_lock.lock(); - float64_t v = sfmt_genrand_real1(m_sfmt_32); - m_state_lock.unlock(); - return v; -} - -float64_t CRandom::random_open() const -{ - m_state_lock.lock(); - float64_t v = dsfmt_genrand_open_open(m_dsfmt); - m_state_lock.unlock(); - return v; -} - -float64_t CRandom::random_half_open() const -{ - m_state_lock.lock(); - float64_t v = dsfmt_genrand_close_open(m_dsfmt); - m_state_lock.unlock(); - return v; -} - -float64_t CRandom::normal_distrib(float64_t mu, float64_t sigma) const -{ - return mu + (std_normal_distrib() * sigma); -} - -float64_t CRandom::std_normal_distrib() const -{ - for (;;) - { - // Select box at random. - uint8_t u = random_32(); - int32_t i = (int32_t)(u & 0x7F); - float64_t sign = ((u & 0x80) == 0) ? -1.0 : 1.0; - - // Generate uniform random value with range [0,0xffffffff]. - uint32_t u2 = random_32(); - - // Special case for the base segment. - if(0 == i) - { - if(u2 < m_xComp[0]) - { - // Generated x is within R0. - return u2 * m_uint32ToU * m_A_div_y0 * sign; - } - // Generated x is in the tail of the distribution. - return sample_tail() * sign; - } - - // All other segments. - if(u2 < m_xComp[i]) - { // Generated x is within the rectangle. - return u2 * m_uint32ToU * m_x[i] * sign; - } - - // Generated x is outside of the rectangle. - // Generate a random y coordinate and test if our (x,y) is within the distribution curve. - // This execution path is relatively slow/expensive (makes a call to Math.Exp()) but relatively rarely executed, - // although more often than the 'tail' path (above). 
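// The ziggurat-based CRandom::std_normal_distrib()/normal_distrib() being deleted
// here are replaced throughout the patch by std::normal_distribution fed from
// get_prng(). A minimal standalone equivalent of normal_distrib(mu, sigma), with
// sigma used as the standard deviation and a seeded std::mt19937_64 standing in
// for get_prng().
#include <cstdio>
#include <random>

int main()
{
    std::mt19937_64 prng(1234); // stand-in for get_prng()
    const double mu = 2.0, sigma = 0.5;
    std::normal_distribution<double> dist(mu, sigma);
    for (int i = 0; i < 5; ++i)
        std::printf("%f\n", dist(prng));
    return 0;
}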
- float64_t x = u2 * m_uint32ToU * m_x[i]; - if(m_y[i-1] + ((m_y[i] - m_y[i-1]) * random_half_open()) < GaussianPdfDenorm(x) ) { - return x * sign; - } - } -} - -float64_t CRandom::sample_tail() const -{ - float64_t x, y; - float64_t m_R_reciprocal = 1.0 / m_R; - do - { - x = -CMath::log(random_half_open()) * m_R_reciprocal; - y = -CMath::log(random_half_open()); - } while(y+y < x*x); - return m_R + x; -} - -float64_t CRandom::GaussianPdfDenorm(float64_t x) const -{ - return CMath::exp(-(x*x * 0.5)); -} - -float64_t CRandom::GaussianPdfDenormInv(float64_t y) const -{ - // Operates over the y range (0,1], which happens to be the y range of the pdf, - // with the exception that it does not include y=0, but we would never call with - // y=0 so it doesn't matter. Remember that a Gaussian effectively has a tail going - // off into x == infinity, hence asking what is x when y=0 is an invalid question - // in the context of this class. - return CMath::sqrt(-2.0 * CMath::log(y)); -} - -void CRandom::reinit(uint32_t seed) -{ - m_state_lock.lock(); - m_seed = seed; - sfmt_init_gen_rand(m_sfmt_32, m_seed); - sfmt_init_gen_rand(m_sfmt_64, m_seed); - dsfmt_init_gen_rand(m_dsfmt, m_seed); - m_state_lock.unlock(); -} - -float32_t CRandom::normal_random(float32_t mean, float32_t std_dev) -{ - // sets up variables & makes sure rand_s.range == (0,1) - float32_t ret; - float32_t rand_u; - float32_t rand_v; - float32_t rand_s; - do - { - rand_u = static_cast(random(-1.0, 1.0)); - rand_v = static_cast(random(-1.0, 1.0)); - rand_s = rand_u * rand_u + rand_v * rand_v; - } while ((rand_s == 0) || (rand_s >= 1)); - - // the meat & potatos, and then the mean & standard deviation - // shifting... - ret = static_cast( - rand_u * CMath::sqrt(-2.0 * CMath::log(rand_s) / rand_s)); - ret = std_dev * ret + mean; - return ret; -} - -float64_t CRandom::normal_random(float64_t mean, float64_t std_dev) -{ - float64_t result = normal_distrib(mean, std_dev); - return result; -} - -float32_t CRandom::randn_float() -{ - return static_cast(normal_random(0.0, 1.0)); -} diff --git a/src/shogun/mathematics/Random.h b/src/shogun/mathematics/Random.h deleted file mode 100644 index c26da99b3f3..00000000000 --- a/src/shogun/mathematics/Random.h +++ /dev/null @@ -1,372 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3 of the License, or - * (at your option) any later version. - * - * Written (W) 2013 Viktor Gal - * Copyright (C) 2013 Viktor Gal - */ - -#ifndef __RANDOM_H__ -#define __RANDOM_H__ - -#include - -#include -#include -#include -#include - -/* opaque pointers */ -struct SFMT_T; -struct DSFMT_T; - -namespace shogun -{ - extern uint32_t sg_random_seed; - class CLock; - class CMath; - /** @brief: Pseudo random number geneartor - * - * It is based on SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom - * number generator. 
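// CRandom owned its seed and SFMT state (set_seed/reinit above); with the
// <random>-based API the engine comes from get_prng(), which this patch adds to
// shogun/base/init.cpp (not shown in this hunk). The helper below is only a
// hypothetical stand-in sketched for illustration -- an engine constructed from
// one explicit seed -- and is not the actual shogun implementation.
#include <cstdint>
#include <random>

// Hypothetical factory; name, signature and default seed are illustrative only.
std::mt19937_64 make_prng(uint64_t seed = 12345u)
{
    return std::mt19937_64(seed);
}

int main()
{
    auto prng = make_prng();
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(prng) < 1.0 ? 0 : 1;
}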
- * - * */ - class CRandom - { - public: - /** default ctor */ - CRandom(); - - /** ctor - * @param seed the seed for the PRNG - */ - CRandom(uint32_t seed); - - /** dtor */ - virtual ~CRandom(); - - /** set seed - * - * @param seed seed for PRNG - */ - void set_seed(uint32_t seed); - - /** get seed - * - * @return seed - */ - uint32_t get_seed() const; - - /** - * Generate an unsigned 32-bit random integer - * - * @return the random 32-bit unsigned integer - */ - uint32_t random_32() const; - - /** - * Generate an unsigned 64-bit random integer - * - * @return the random 64-bit unsigned integer - */ - uint64_t random_64() const; - - /** - * Generate a signed 32-bit random integer - * - * @return the random 32-bit signed integer - */ - inline int32_t random_s32() const - { - return random_32() & ((uint32_t(-1)<<1)>>1); - } - - /** - * Generate a signed 64-bit random integer - * - * @return the random 64-bit signed integer - */ - inline int64_t random_s64() const - { - return random_64() & ((uint64_t(-1)<<1)>>1); - } - - /** generate an unsigned 64bit integer in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline uint64_t random(uint64_t min_value, uint64_t max_value) - { - return min_value + random_64() % (max_value-min_value+1); - } - - /** generate an signed 64bit integer in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline int64_t random(int64_t min_value, int64_t max_value) - { - return min_value + random_s64() % (max_value-min_value+1); - } - - /** generate an unsigned signed 32bit integer in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline uint32_t random(uint32_t min_value, uint32_t max_value) - { - return min_value + random_32() % (max_value-min_value+1); - } - - /** generate an signed 32bit integer in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline int32_t random(int32_t min_value, int32_t max_value) - { - return min_value + random_s32() % (max_value-min_value+1); - } - - /** generate an 32bit floating point number in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline float32_t random(float32_t min_value, float32_t max_value) - { - return min_value + ((max_value-min_value) * static_cast(random_close())); - } - - /** generate an 64bit floating point number in the range - * [min_value, max_value] (closed interval!) - * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline float64_t random(float64_t min_value, float64_t max_value) - { - return min_value + ((max_value-min_value) * random_close()); - } - - /** generate an 96-128bit floating point number (depending on the - * size of floatmax_t) in the range - * [min_value, max_value] (closed interval!) 
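// The closed-interval random(min_value, max_value) overloads removed here map
// onto the standard distributions with slightly different range conventions:
// std::uniform_int_distribution<T>(a, b) draws from the closed interval [a, b],
// while std::uniform_real_distribution<T>(a, b) draws from the half-open
// interval [a, b). A standalone check of both; the seeded std::mt19937_64
// stands in for get_prng().
#include <cassert>
#include <random>

int main()
{
    std::mt19937_64 prng(99); // stand-in for get_prng()
    std::uniform_int_distribution<int> ints(0, 10);         // 10 is a possible draw
    std::uniform_real_distribution<double> reals(0.0, 1.0); // 1.0 is not

    for (int i = 0; i < 1000; ++i)
    {
        const int a = ints(prng);
        const double x = reals(prng);
        assert(a >= 0 && a <= 10);
        assert(x >= 0.0 && x < 1.0);
    }
    return 0;
}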
- * - * @param min_value minimum value - * @param max_value maximum value - * @return random number - */ - inline floatmax_t random(floatmax_t min_value, floatmax_t max_value) - { - return min_value + ((max_value-min_value) * random_close()); - } - - /** - * Fill an array of unsinged 32 bit integer - * - * @param array 32-bit unsigened int array to be filled - * @param size size of the array - */ - void fill_array(uint32_t* array, int32_t size) const; - - /** - * Fill an array of unsinged 64 bit integer - * - * @param array 64-bit unsigened int array to be filled - * @param size size of the array - */ - void fill_array(uint64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the (0,1] interval - * - * @param array - * @param size - */ - void fill_array_oc(float64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the [0,1) interval - * - * @param array - * @param size - */ - void fill_array_co(float64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the (0,1) interval - * - * @param array - * @param size - */ - void fill_array_oo(float64_t* array, int32_t size) const; - - /** - * Fills an array of float64_t with randoms - * from the [1,2) interval - * - * @param array - * @param size - */ - - void fill_array_c1o2(float64_t* array, int32_t size) const; - - /** - * Get random - * @return a float64_t random from [0,1] interval - */ - float64_t random_close() const; - - /** - * Get random - * @return a float64_t random from (0,1) interval - */ - float64_t random_open() const; - - /** - * Get random - * - * @return a float64_t random from [0,1) interval - */ - float64_t random_half_open() const; - - /** - * Sample a normal distrbution. - * Using Ziggurat algorithm - * - * @param mu mean - * @param sigma variance - * @return sample from the desired normal distrib - */ - float64_t normal_distrib(float64_t mu, float64_t sigma) const; - - /** - * Sample a standard normal distribution, - * i.e. mean = 0, var = 1.0 - * - * @return sample from the std normal distrib - */ - float64_t std_normal_distrib() const; - - /** - *Returns a Gaussian or Normal random number. - *Using the polar form of the Box-Muller transform. - *http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form - */ - float32_t normal_random(float32_t mean, float32_t std_dev); - - /** - *Returns a Gaussian or Normal random number. - *Using the polar form of the Box-Muller transform. - *http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform#Polar_form - */ - float64_t normal_random(float64_t mean, float64_t std_dev); - - /* - *Convenience method for generating Standard Normal random numbers - *Float: Mean = 0 and Standard Deviation = 1 - */ - float32_t randn_float(); - - /** - * Generate a seed for PRNG - * - * @return entropy for PRNG - */ - static uint32_t generate_seed(); - - virtual const char* get_name() const - { - return "Random"; - } - - private: - /** initialise the object */ - void init(); - - /** reinit PRNG - * - * @param seed seed for the PRNG - */ - void reinit(uint32_t seed); - - /** - * Sample from the distribution tail (defined as having x >= R). - * - * @return - */ - float64_t sample_tail() const; - - /** - * Gaussian probability density function, denormailised, that is, y = e^-(x^2/2). 
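// The ziggurat machinery declared above (sample_tail, GaussianPdfDenorm, the
// precomputed box tables) existed to produce standard-normal variates; after
// this patch those draws come from std::normal_distribution. A quick standalone
// sanity check that the replacement has the expected first two moments; the
// seeded std::mt19937_64 stands in for get_prng().
#include <cstdio>
#include <random>

int main()
{
    std::mt19937_64 prng(2017); // stand-in for get_prng()
    std::normal_distribution<double> std_normal(0.0, 1.0);

    const int n = 100000;
    double sum = 0.0, sum_sq = 0.0;
    for (int i = 0; i < n; ++i)
    {
        const double x = std_normal(prng);
        sum += x;
        sum_sq += x * x;
    }
    const double mean = sum / n;
    const double var = sum_sq / n - mean * mean;
    std::printf("mean ~ %f (expect 0), variance ~ %f (expect 1)\n", mean, var);
    return 0;
}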
- */ - float64_t GaussianPdfDenorm(float64_t x) const; - - /** - * Inverse function of GaussianPdfDenorm(x) - */ - float64_t GaussianPdfDenormInv(float64_t y) const; - - /** seed */ - uint32_t m_seed; - - /** SFMT struct for 32-bit random */ - SFMT_T* m_sfmt_32; - - /** SFMT struct for 64-bit random */ - SFMT_T* m_sfmt_64; - - /** dSFMT struct */ - DSFMT_T* m_dsfmt; - - /** Number of blocks */ - int32_t m_blockCount; //= 128; - - /** Right hand x coord of the base rectangle, thus also the left hand x coord of the tail */ - float64_t m_R;//= 3.442619855899; - - /** Area of each rectangle (pre-determined/computed for 128 blocks). */ - float64_t m_A;// = 9.91256303526217e-3; - - /** Scale factor for converting a UInt with range [0,0xffffffff] to a double with range [0,1]. */ - float64_t m_uint32ToU;// = 1.0 / (float64_t)UINT32_MAX; - - /** Area A divided by the height of B0 */ - float64_t m_A_div_y0; - - /** top-right position ox rectangle i */ - float64_t* m_x; - float64_t* m_y; - - /** The proprtion of each segment that is entirely within the distribution, expressed as uint where - a value of 0 indicates 0% and uint.MaxValue 100%. Expressing this as an integer allows some floating - points operations to be replaced with integer ones. - */ - uint32_t* m_xComp; - - /** state lock */ - mutable CLock m_state_lock; - }; -} - -#endif /* __RANDOM_H__ */ diff --git a/src/shogun/mathematics/Statistics.cpp b/src/shogun/mathematics/Statistics.cpp index 47e4913a06e..3a4ba844f40 100644 --- a/src/shogun/mathematics/Statistics.cpp +++ b/src/shogun/mathematics/Statistics.cpp @@ -325,7 +325,7 @@ SGVector CStatistics::sample_indices(int32_t sample_size, int32_t N) int32_t* idxs=SG_MALLOC(int32_t,N); int32_t i, rnd; int32_t* permuted_idxs=SG_MALLOC(int32_t,sample_size); - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); // reservoir sampling for (i=0; i CStatistics::sample_indices(int32_t sample_size, int32_t N) permuted_idxs[i]=idxs[i]; for (i=sample_size; irandom(1, i); + std::uniform_int_distribution uniform_int_dist(1, i); + rnd = uniform_int_dist(prng); if (rnd CStatistics::sample_from_gaussian(SGVector mean, int32_t dim=mean.vlen; Map mu(mean.vector, mean.vlen); Map c(cov.matrix, cov.num_rows, cov.num_cols); - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); // generate samples, z, from N(0, I), DxN SGMatrix S(dim, N); for( int32_t j=0; jstd_normal_distrib(); + S(i, j) = normal_dist(prng); // the cholesky factorization c=L*U MatrixXd U=c.llt().matrixU(); @@ -775,7 +777,8 @@ SGMatrix CStatistics::sample_from_gaussian(SGVector mean, typedef SparseMatrix MatrixType; const MatrixType &c=EigenSparseUtil::toEigenSparse(cov); - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution normal_dist(0, 1); SimplicialLLT llt; @@ -783,7 +786,7 @@ SGMatrix CStatistics::sample_from_gaussian(SGVector mean, SGMatrix S(dim, N); for( int32_t j=0; jstd_normal_distrib(); + S(i, j) = normal_dist(prng); Map s(S.matrix, S.num_rows, S.num_cols); diff --git a/src/shogun/mathematics/ajd/QDiag.cpp b/src/shogun/mathematics/ajd/QDiag.cpp index 38ace2d5568..dd88762a108 100644 --- a/src/shogun/mathematics/ajd/QDiag.cpp +++ b/src/shogun/mathematics/ajd/QDiag.cpp @@ -16,7 +16,8 @@ SGMatrix CQDiag::diagonalize(SGNDArray C, SGMatrix V; - auto rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); if (V0.num_rows == N && V0.num_cols == N) { V = V0.clone(); @@ -28,7 
+29,7 @@ SGMatrix CQDiag::diagonalize(SGNDArray C, SGMatrixstd_normal_distrib(); + V(i, j) = dist(prng); } } diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp index bcae35d6912..cd98b3b5f9d 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.cpp @@ -9,7 +9,6 @@ #include #include -#include #include namespace shogun @@ -37,13 +36,14 @@ void CNormalSampler::precompute() m_num_samples=1; } -SGVector CNormalSampler::sample(index_t idx) const +SGVector CNormalSampler::sample(index_t idx) { // ignore idx since it doesnt matter, all samples are independent SGVector s(m_dimension); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + s[i] = dist(m_rng); return s; } diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h index cafb51ed91b..7d17481d50c 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/NormalSampler.h @@ -37,7 +37,7 @@ class CNormalSampler : public CTraceSampler * @param idx the index (this is effectively ignored) * @return the sample vector */ - virtual SGVector sample(index_t idx) const; + virtual SGVector sample(index_t idx); /** precompute method that sets the num_samples of the base */ virtual void precompute(); diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp index 5324d98131d..890092fa878 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.cpp @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -189,7 +188,7 @@ void CProbingSampler::precompute() SG_DEBUG("Leaving\n"); } -SGVector CProbingSampler::sample(index_t idx) const +SGVector CProbingSampler::sample(index_t idx) { REQUIRE(idx CProbingSampler::sample(index_t idx) const SGVector s(m_dimension); s.set_const(0.0); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + float64_t x = dist(m_rng); s[i]=(x>0)-(x<0); } } diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h index 992fcd63247..ef7ac1e6499 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/ProbingSampler.h @@ -89,7 +89,7 @@ class CProbingSampler : public CTraceSampler * @param idx the index * @return the sample vector */ - virtual SGVector sample(index_t idx) const; + virtual SGVector sample(index_t idx); /** precompute method that sets the num_samples of the base */ virtual void precompute(); diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h b/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h index 0fea8bea840..033f6e60704 100644 --- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h +++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h @@ -59,7 +59,7 @@ class CTraceSampler : public CSGObject * @param idx the index which determines which sample to draw * @return the sample vector */ - virtual 
SGVector sample(index_t idx) const = 0; + virtual SGVector sample(index_t idx) = 0; /** * abstract method for initializing the sampler, number of samples etc, @@ -92,12 +92,15 @@ class CTraceSampler : public CSGObject /** the number of samples this sampler will generate, set by implementation */ index_t m_num_samples; + std::mt19937_64 m_rng; + private: /** initialize with default values and register params */ void init() { m_num_samples=0; m_dimension=0; + m_rng = get_prng(); SG_ADD(&m_num_samples, "num_samples", "Number of samples this sampler can generate", MS_NOT_AVAILABLE); diff --git a/src/shogun/modelselection/ModelSelectionParameters.cpp b/src/shogun/modelselection/ModelSelectionParameters.cpp index cea8febd272..309f98ba731 100644 --- a/src/shogun/modelselection/ModelSelectionParameters.cpp +++ b/src/shogun/modelselection/ModelSelectionParameters.cpp @@ -196,15 +196,17 @@ void CModelSelectionParameters::build_values(EMSParamType value_type, void* min, CParameterCombination* CModelSelectionParameters::get_single_combination( bool is_rand) { + auto prng = get_prng(); /* If this is a value node, then randomly pick a value from the built * range */ if (m_values) { - + std::uniform_int_distribution normal_init_dist( + 0, m_values_length - 1); index_t i = 0; if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); Parameter* p=new Parameter(); @@ -217,7 +219,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < param_vect->vlen; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (*param_vect)[j] = ((float64_t*)m_values)[i]; } p->add(param_vect, m_node_name); @@ -230,7 +232,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < *m_vector_length; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (param_vect)[j] = ((float64_t*)m_values)[i]; } p->add_vector(¶m_vect, m_vector_length, m_node_name); @@ -243,7 +245,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < param_vect->vlen; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (*param_vect)[j] = ((int32_t*)m_values)[i]; } p->add(param_vect, m_node_name); @@ -256,7 +258,7 @@ CParameterCombination* CModelSelectionParameters::get_single_combination( for (index_t j = 0; j < *m_vector_length; j++) { if (is_rand) - i = m_rng->random(0, m_values_length - 1); + i = normal_init_dist(prng); (param_vect)[j] = ((int32_t*)m_values)[i]; } p->add_vector(¶m_vect, m_vector_length, m_node_name); diff --git a/src/shogun/multiclass/LaRank.cpp b/src/shogun/multiclass/LaRank.cpp index da895d1f54d..65cc83540e9 100644 --- a/src/shogun/multiclass/LaRank.cpp +++ b/src/shogun/multiclass/LaRank.cpp @@ -747,6 +747,7 @@ int32_t CLaRank::add (int32_t x_id, int32_t yi) n_pro++; w_pro = 0.05 * coeff + (1 - 0.05) * w_pro; + auto prng = get_prng(); // ProcessOld & Optimize until ready for a new processnew // (Adaptative schedule here) for (;;) @@ -760,7 +761,8 @@ int32_t CLaRank::add (int32_t x_id, int32_t yi) if (w_opt < prop_min) w_opt = prop_min; w_sum = w_pro + w_rep + w_opt; - float64_t r = m_rng->random(0.0, w_sum); + std::uniform_real_distribution dist(0.0, w_sum); + float64_t r = dist(prng); if (r <= w_pro) { break; diff --git a/src/shogun/multiclass/LaRank.h b/src/shogun/multiclass/LaRank.h index 905597efb0d..3fd6b21187b 100644 --- 
a/src/shogun/multiclass/LaRank.h +++ b/src/shogun/multiclass/LaRank.h @@ -249,12 +249,13 @@ namespace shogun LaRankPattern & sample () { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist( + uint32_t(0), uint32_t(patterns.size() - 1)); ASSERT(!empty()) while (true) { - uint32_t r = m_rng->random( - uint32_t(0), uint32_t(patterns.size() - 1)); + uint32_t r = dist(prng); if (patterns[r].exists()) return patterns[r]; } diff --git a/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp index 72119ceceaa..c511bec66ca 100644 --- a/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCDiscriminantEncoder.cpp @@ -137,7 +137,9 @@ float64_t CECOCDiscriminantEncoder::sffs_iteration(float64_t MI, vector if (part1.size() <= 1) return MI; - int32_t iclas = m_rng->random(0, int32_t(part1.size() - 1)); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, int32_t(part1.size() - 1)); + int32_t iclas = dist(prng); int32_t clas = part1[iclas]; // move clas from part1 to part2 diff --git a/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp index cb5ffa51343..44998b849ed 100644 --- a/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCRandomDenseEncoder.cpp @@ -52,15 +52,18 @@ SGMatrix CECOCRandomDenseEncoder::create_codebook(int32_t num_classes) SGMatrix codebook(codelen, num_classes); int32_t n_iter = 0; - while (true) - { - // fill codebook - codebook.zero(); - for (int32_t i=0; i < codelen; ++i) - { - for (int32_t j=0; j < num_classes; ++j) - { - float64_t randval = m_rng->random(0.0, 1.0); + + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); + while (true) + { + // fill codebook + codebook.zero(); + for (int32_t i = 0; i < codelen; ++i) + { + for (int32_t j = 0; j < num_classes; ++j) + { + float64_t randval = dist(prng); if (randval > m_pposone) codebook(i, j) = -1; else diff --git a/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp b/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp index 9af00bda2e3..6ce812a2645 100644 --- a/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp +++ b/src/shogun/multiclass/ecoc/ECOCRandomSparseEncoder.cpp @@ -60,17 +60,19 @@ SGMatrix CECOCRandomSparseEncoder::create_codebook(int32_t num_classes) std::vector random_sel(num_classes); int32_t n_iter = 0; - while (true) - { - // fill codebook - codebook.zero(); - for (int32_t i=0; i < codelen; ++i) - { - // randomly select two positions - for (int32_t j=0; j < num_classes; ++j) - random_sel[j] = j; - std::random_shuffle(random_sel.begin(), random_sel.end()); - if (m_rng->random(0.0, 1.0) > 0.5) + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); + while (true) + { + // fill codebook + codebook.zero(); + for (int32_t i = 0; i < codelen; ++i) + { + // randomly select two positions + for (int32_t j = 0; j < num_classes; ++j) + random_sel[j] = j; + std::random_shuffle(random_sel.begin(), random_sel.end()); + if (dist(prng) > 0.5) { codebook(i, random_sel[0]) = +1; codebook(i, random_sel[1]) = -1; @@ -84,7 +86,7 @@ SGMatrix CECOCRandomSparseEncoder::create_codebook(int32_t num_classes) // assign the remaining positions for (int32_t j = 2; j < num_classes; ++j) { - float64_t randval = m_rng->random(0.0, 1.0); + float64_t randval = dist(prng); if (randval > m_pzero) { if (randval > m_pzero + m_pposone) diff --git 
a/src/shogun/multiclass/tree/ConditionalProbabilityTree.h b/src/shogun/multiclass/tree/ConditionalProbabilityTree.h index 52cae1b25e6..43e16fadf74 100644 --- a/src/shogun/multiclass/tree/ConditionalProbabilityTree.h +++ b/src/shogun/multiclass/tree/ConditionalProbabilityTree.h @@ -12,6 +12,7 @@ #define CONDITIONALPROBABILITYTREE_H__ #include +#include #include @@ -37,6 +38,7 @@ class CConditionalProbabilityTree: public CTreeMachine(); } /** destructor */ @@ -137,6 +139,7 @@ class CConditionalProbabilityTree: public CTreeMachine m_leaves; ///< class => leaf mapping CStreamingDenseFeatures *m_feats; ///< online features + std::mt19937_64 m_rng; }; } /* shogun */ diff --git a/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp b/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp index ef2bf6106d5..fce81338d41 100644 --- a/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp +++ b/src/shogun/multiclass/tree/RandomConditionalProbabilityTree.cpp @@ -15,7 +15,8 @@ using namespace shogun; bool CRandomConditionalProbabilityTree::which_subtree(bnode_t *node, SGVector ex) { - if (m_rng->random(0.0, 1.0) > 0.5) + std::uniform_real_distribution dist(0.0, 1.0); + if (dist(m_rng) > 0.5) return true; return false; } diff --git a/src/shogun/neuralnets/DeepBeliefNetwork.cpp b/src/shogun/neuralnets/DeepBeliefNetwork.cpp index 2a3605a1a63..8bb527e9376 100644 --- a/src/shogun/neuralnets/DeepBeliefNetwork.cpp +++ b/src/shogun/neuralnets/DeepBeliefNetwork.cpp @@ -76,7 +76,7 @@ void CDeepBeliefNetwork::initialize_neural_network(float64_t sigma) { m_bias_index_offsets = SGVector(m_num_layers); m_weights_index_offsets = SGVector(m_num_layers-1); - + auto prng = get_prng(); m_num_params = 0; for (int32_t i=0; i dist(0, sigma); m_params = SGVector(m_num_params); for (int32_t i=0; inormal_random(0.0, sigma); + m_params[i] = dist(prng); pt_cd_num_steps = SGVector(m_num_layers-1); pt_cd_num_steps.set_const(1); @@ -350,10 +351,12 @@ CDenseFeatures* CDeepBeliefNetwork::sample( void CDeepBeliefNetwork::reset_chain() { + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); SGMatrix s = m_states[m_num_layers-2]; for (int32_t i=0; irandom(0.0, 1.0) > 0.5; + s[i] = dist(prng) > 0.5; } CNeuralNetwork* CDeepBeliefNetwork::convert_to_neural_network( @@ -394,7 +397,7 @@ void CDeepBeliefNetwork::down_step(int32_t index, SGVector< float64_t > params, { typedef Eigen::Map EMatrix; typedef Eigen::Map EVector; - + auto prng = get_prng(); EMatrix In(input.matrix, input.num_rows, input.num_cols); EMatrix Out(result.matrix, result.num_rows, result.num_cols); EVector B(get_biases(index,params).vector, m_layer_sizes->element(index)); @@ -433,9 +436,10 @@ void CDeepBeliefNetwork::down_step(int32_t index, SGVector< float64_t > params, if (sample_states && index>0) { + std::uniform_real_distribution dist(0.0, 1.0); int32_t len = m_layer_sizes->element(index)*m_batch_size; for (int32_t i=0; irandom(0.0, 1.0) < result[i]; + result[i] = dist(prng) < result[i]; } } @@ -444,7 +448,7 @@ void CDeepBeliefNetwork::up_step(int32_t index, SGVector< float64_t > params, { typedef Eigen::Map EMatrix; typedef Eigen::Map EVector; - + auto prng = get_prng(); EMatrix In(input.matrix, input.num_rows, input.num_cols); EMatrix Out(result.matrix, result.num_rows, result.num_cols); EVector C(get_biases(index, params).vector, m_layer_sizes->element(index)); @@ -464,8 +468,9 @@ void CDeepBeliefNetwork::up_step(int32_t index, SGVector< float64_t > params, if (sample_states && index>0) { + 
std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) < result[i]; + result[i] = dist(prng) < result[i]; } } diff --git a/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp b/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp index 0dd103eb131..779f2b24da5 100644 --- a/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp +++ b/src/shogun/neuralnets/NeuralConvolutionalLayer.cpp @@ -128,7 +128,7 @@ void CNeuralConvolutionalLayer::initialize_parameters(SGVector parame { int32_t num_parameters_per_map = 1 + m_input_num_channels*(2*m_radius_x+1)*(2*m_radius_y+1); - + auto prng = get_prng(); for (int32_t m=0; m parame { if (m_initialization_mode == NORMAL) { - map_params[i] = m_rng->normal_random(0.0, sigma); + std::normal_distribution dist(0, sigma); + map_params[i] = dist(prng); // turn off regularization for the bias, on for the rest of the parameters map_param_regularizable[i] = (i != 0); } else // for the case when m_initialization_mode = RE_NORMAL { - map_params[i] = m_rng->normal_random( + std::normal_distribution dist( 0.0, CMath::sqrt( 2.0 / (m_input_height * m_input_width * m_input_num_channels))); + map_params[i] = dist(prng); // initialize b=0 map_param_regularizable[i] = 0; } diff --git a/src/shogun/neuralnets/NeuralInputLayer.cpp b/src/shogun/neuralnets/NeuralInputLayer.cpp index 70e77d27999..644e8c5381e 100644 --- a/src/shogun/neuralnets/NeuralInputLayer.cpp +++ b/src/shogun/neuralnets/NeuralInputLayer.cpp @@ -58,6 +58,7 @@ CNeuralInputLayer::CNeuralInputLayer(int32_t width, int32_t height, void CNeuralInputLayer::compute_activations(SGMatrix< float64_t > inputs) { + auto prng = get_prng(); if (m_start_index == 0) { sg_memcpy(m_activations.matrix, inputs.matrix, @@ -71,9 +72,10 @@ void CNeuralInputLayer::compute_activations(SGMatrix< float64_t > inputs) } if (gaussian_noise > 0) { + std::normal_distribution dist(0.0, gaussian_noise); int32_t len = m_num_neurons*m_batch_size; for (int32_t k=0; knormal_random(0.0, gaussian_noise); + m_activations[k] += dist(prng); } } diff --git a/src/shogun/neuralnets/NeuralLayer.cpp b/src/shogun/neuralnets/NeuralLayer.cpp index ea84fa0ab73..690312dfaf1 100644 --- a/src/shogun/neuralnets/NeuralLayer.cpp +++ b/src/shogun/neuralnets/NeuralLayer.cpp @@ -90,13 +90,14 @@ void CNeuralLayer::set_batch_size(int32_t batch_size) void CNeuralLayer::dropout_activations() { if (dropout_prop==0.0) return; - + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); if (is_training) { int32_t len = m_num_neurons*m_batch_size; for (int32_t i=0; irandom(0.0, 1.0) >= dropout_prop; + m_dropout_mask[i] = dist(prng) >= dropout_prop; m_activations[i] *= m_dropout_mask[i]; } } diff --git a/src/shogun/neuralnets/NeuralLinearLayer.cpp b/src/shogun/neuralnets/NeuralLinearLayer.cpp index 4b3fd070e3e..4d41ffaab0c 100644 --- a/src/shogun/neuralnets/NeuralLinearLayer.cpp +++ b/src/shogun/neuralnets/NeuralLinearLayer.cpp @@ -62,10 +62,12 @@ void CNeuralLinearLayer::initialize_parameters(SGVector parameters, SGVector parameter_regularizable, float64_t sigma) { + auto prng = get_prng(); + std::normal_distribution dist(0, sigma); for (int32_t i=0; inormal_random(0.0, sigma); + parameters[i] = dist(prng); // turn regularization off for the biases, on for the weights parameter_regularizable[i] = (i>=m_num_neurons); diff --git a/src/shogun/neuralnets/NeuralNetwork.cpp b/src/shogun/neuralnets/NeuralNetwork.cpp index 3ff8ce7d6d5..0cd9ea8f00b 100644 --- a/src/shogun/neuralnets/NeuralNetwork.cpp +++ b/src/shogun/neuralnets/NeuralNetwork.cpp @@ 
-31,12 +31,13 @@ * Written (W) 2014 Khaled Nasr */ -#include -#include -#include +#include #include #include +#include #include +#include +#include using namespace shogun; @@ -559,14 +560,15 @@ float64_t CNeuralNetwork::check_gradients(float64_t approx_epsilon, float64_t s) // some random inputs and ouputs SGMatrix x(m_num_inputs,1); SGMatrix y(get_num_outputs(),1); - + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); // the outputs are set up in the form of a probability distribution (in case // that is required by the output layer, i.e softmax) for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist(prng); float64_t y_sum = SGVector::sum(y.matrix, y.num_rows); for (int32_t i=0; i(m_num_params); + std::normal_distribution dist(0, sigma); for (int32_t i=0; inormal_random(0.0, sigma); + m_params[i] = dist(m_rng); } void CRBM::set_batch_size(int32_t batch_size) @@ -264,9 +265,10 @@ CDenseFeatures< float64_t >* CRBM::sample_group_with_evidence(int32_t V, void CRBM::reset_chain() { + std::uniform_real_distribution dist(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0) > 0.5; + visible_state(i, j) = dist(m_rng) > 0.5; } float64_t CRBM::free_energy(SGMatrix< float64_t > visible, SGMatrix< float64_t > buffer) @@ -429,9 +431,10 @@ float64_t CRBM::pseudo_likelihood(SGMatrix< float64_t > visible, if (buffer.num_rows==0) buffer = SGMatrix(m_num_hidden, m_batch_size); + std::uniform_int_distribution dist(0, m_num_visible - 1); SGVector indices(m_batch_size); for (int32_t i=0; irandom(0, m_num_visible - 1); + indices[i] = dist(m_rng); float64_t f1 = free_energy(visible, buffer); @@ -517,9 +520,10 @@ void CRBM::mean_visible(SGMatrix< float64_t > hidden, SGMatrix< float64_t > resu void CRBM::sample_hidden(SGMatrix< float64_t > mean, SGMatrix< float64_t > result) { + std::uniform_real_distribution dist(0.0, 1.0); int32_t length = result.num_rows*result.num_cols; for (int32_t i=0; irandom(0.0, 1.0) < mean[i]; + result[i] = dist(m_rng) < mean[i]; } void CRBM::sample_visible(SGMatrix< float64_t > mean, SGMatrix< float64_t > result) @@ -535,12 +539,12 @@ void CRBM::sample_visible(int32_t index, { int32_t offset = m_visible_state_offsets->element(index); + std::uniform_real_distribution dist(0.0, 1.0); if (m_visible_group_types->element(index)==RBMVUT_BINARY) { for (int32_t i=0; ielement(index); i++) for (int32_t j=0; jrandom(0.0, 1.0) < mean(i + offset, j); + result(i + offset, j) = dist(m_rng) < mean(i + offset, j); } if (m_visible_group_types->element(index)==RBMVUT_SOFTMAX) @@ -551,7 +555,7 @@ void CRBM::sample_visible(int32_t index, for (int32_t j=0; jrandom(0.0, 1.0); + int32_t r = dist(m_rng); float64_t sum = 0; for (int32_t i=0; ielement(index); i++) { @@ -619,6 +623,7 @@ void CRBM::init() m_visible_state_offsets = new CDynamicArray(); m_num_params = 0; m_batch_size = 0; + m_rng = get_prng(); SG_ADD(&cd_num_steps, "cd_num_steps", "Number of CD Steps", MS_NOT_AVAILABLE); SG_ADD(&cd_persistent, "cd_persistent", "Whether to use PCD", MS_NOT_AVAILABLE); diff --git a/src/shogun/neuralnets/RBM.h b/src/shogun/neuralnets/RBM.h index 9b7d1116970..521df370723 100644 --- a/src/shogun/neuralnets/RBM.h +++ b/src/shogun/neuralnets/RBM.h @@ -459,6 +459,8 @@ friend class CDeepBeliefNetwork; /** Parameters */ SGVector m_params; + + std::mt19937_64 m_rng; }; } diff --git a/src/shogun/optimization/liblinear/shogun_liblinear.cpp b/src/shogun/optimization/liblinear/shogun_liblinear.cpp index 2d8637ffdc9..106feef3813 100644 --- 
a/src/shogun/optimization/liblinear/shogun_liblinear.cpp +++ b/src/shogun/optimization/liblinear/shogun_liblinear.cpp @@ -512,13 +512,14 @@ void Solver_MCSVM_CS::solve() } state->inited = true; } - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); while(iter < max_iter && !CSignal::cancel_computations()) { double stopping = -CMath::INFTY; for(i=0;irandom(i, active_size - 1); + std::uniform_int_distribution dist(i, active_size - 1); + int j = dist(prng); CMath::swap(index[i], index[j]); } for(s=0;s dist_p((float64_t)0.0, 2 * pi); + std::uniform_real_distribution dist_q( + (float64_t)-1.0, (float64_t)1.0); for (int32_t i = 0; i < cur_dim_feature_space; ++i) { - randomcoeff_additive[i] = m_rng->random((float64_t)0.0, 2 * pi); + randomcoeff_additive[i] = dist_p(prng); } for (int32_t i = 0; i < cur_dim_feature_space; ++i) { @@ -241,8 +245,8 @@ bool CRandomFourierGaussPreproc::init_randomcoefficients() { float64_t s = 2; while ((s >= 1) ) { // Marsaglia polar for gaussian - x1 = m_rng->random((float64_t)-1.0, (float64_t)1.0); - x2 = m_rng->random((float64_t)-1.0, (float64_t)1.0); + x1 = dist_q(prng); + x2 = dist_q(prng); s=x1*x1+x2*x2; } diff --git a/src/shogun/regression/svr/LibLinearRegression.cpp b/src/shogun/regression/svr/LibLinearRegression.cpp index f559d4ad657..9fdf82e0eee 100644 --- a/src/shogun/regression/svr/LibLinearRegression.cpp +++ b/src/shogun/regression/svr/LibLinearRegression.cpp @@ -208,14 +208,17 @@ void CLibLinearRegression::solve_l2r_l1l2_svr(SGVector& w, const libl } auto pb = progress(range(10)); + while(iter < max_iter) { Gmax_new = 0; Gnorm1_new = 0; + auto prng = get_prng(); for(i=0; irandom(i, active_size - 1); + std::uniform_int_distribution dist(i, active_size - 1); + int j = dist(prng); CMath::swap(index[i], index[j]); } diff --git a/src/shogun/statistical_testing/QuadraticTimeMMD.cpp b/src/shogun/statistical_testing/QuadraticTimeMMD.cpp index 98db8ed1ed2..f675e06b1ac 100644 --- a/src/shogun/statistical_testing/QuadraticTimeMMD.cpp +++ b/src/shogun/statistical_testing/QuadraticTimeMMD.cpp @@ -401,13 +401,15 @@ SGVector CQuadraticTimeMMD::Self::sample_null_spectrum() SGVector null_samples(owner.get_num_null_samples()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); /* finally, sample from null distribution */ for (auto i=0; istd_normal_distrib(); + float64_t z_j = dist(prng); float64_t multiple=CMath::sq(z_j); /* take largest EV, scale by 1/(m+n) on the fly and take abs value*/ diff --git a/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h b/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h index 1e9c7e051aa..3686f2f0391 100644 --- a/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h +++ b/src/shogun/statistical_testing/internals/mmd/CrossValidationMMD.h @@ -114,11 +114,11 @@ struct CrossValidationMMD : PermutationMMD m_permuted_inds=SGVector(m_xy_inds.size()); m_inverted_permuted_inds.set_const(-1); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto n=0; nadd_subset(m_permuted_inds); SGVector inds=m_stack->get_last_subset()->get_subset_idx(); diff --git a/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h b/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h index ad48ee32f61..9a147da6941 100644 --- a/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h +++ b/src/shogun/statistical_testing/internals/mmd/PermutationMMD.h @@ -200,11 +200,11 @@ struct PermutationMMD : ComputeMMD { ASSERT(m_num_null_samples>0); 
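// Editor's sketch (not part of the patch): the recurring migration pattern in these hunks,
// assuming get_prng() -- the helper this change set appears to add in base/init -- returns a
// std::mt19937_64-style engine. std::uniform_int_distribution draws from the closed interval
// [lo, hi], which matches how the former CRandom::random(lo, hi) call sites are used; the
// function names below are illustrative only.
#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

std::vector<int32_t> fill_uniform_ints(
    std::mt19937_64& prng, int32_t lo, int32_t hi, std::size_t n)
{
    // replaces: for (...) v[i] = m_rng->random(lo, hi);
    std::uniform_int_distribution<int32_t> dist(lo, hi); // both ends included
    std::vector<int32_t> v(n);
    for (auto& x : v)
        x = dist(prng);
    return v;
}

double draw_unit_interval(std::mt19937_64& prng)
{
    // replaces: m_rng->random(0.0, 1.0);
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(prng);
}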
allocate_permutation_inds(); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto n=0; n &assignment_expect, float64_t &min_energy_expect, int32_t N) { - m_rng->set_seed(17); - + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); // ftype SGVector card(2); card[0] = 2; @@ -133,8 +133,8 @@ CFactorGraph* CFactorGraphDataGenerator::random_chain_graph(SGVector &assig for (int32_t x = 0; x < N; ++x) { SGVector data(2); - data[0] = m_rng->random(0.0, 1.0); - data[1] = m_rng->random(0.0, 1.0); + data[0] = dist(prng); + data[1] = dist(prng); SGVector var_index(1); var_index[0] = y * N + x; @@ -150,10 +150,10 @@ CFactorGraph* CFactorGraphDataGenerator::random_chain_graph(SGVector &assig if (x > 0) { SGVector data(4); - float64_t A = m_rng->random(0.0, 1.0); // E(0,0)->A - float64_t C = m_rng->random(0.0, 1.0); // E(1,0)->C - float64_t B = m_rng->random(0.0, 1.0); // E(0,1)->B - float64_t D = m_rng->random(0.0, 1.0); // E(1,1)->D + float64_t A = dist(prng); // E(0,0)->A + float64_t C = dist(prng); // E(1,0)->C + float64_t B = dist(prng); // E(0,1)->B + float64_t D = dist(prng); // E(1,1)->D // Add truncation to ensure submodularity truncate_energy(A, B, C, D); @@ -173,10 +173,10 @@ CFactorGraph* CFactorGraphDataGenerator::random_chain_graph(SGVector &assig if (x == 0 && y > 0) { SGVector data(4); - float64_t A = m_rng->random(0.0, 1.0); // E(0,0)->A - float64_t C = m_rng->random(0.0, 1.0); // E(1,0)->C - float64_t B = m_rng->random(0.0, 1.0); // E(0,1)->B - float64_t D = m_rng->random(0.0, 1.0); // E(1,1)->D + float64_t A = dist(prng); // E(0,0)->A + float64_t C = dist(prng); // E(1,0)->C + float64_t B = dist(prng); // E(0,1)->B + float64_t D = dist(prng); // E(1,1)->D // Add truncation to ensure submodularity truncate_energy(A, B, C, D); @@ -343,7 +343,7 @@ void CFactorGraphDataGenerator::generate_data(int32_t len_label, int32_t len_fea feats = SGMatrix(len_feat, size_data); labels = SGMatrix(len_label, size_data); - + auto prng = get_prng(); for (int32_t k = 0; k < size_data; k++) { // generate a label vector @@ -355,7 +355,7 @@ void CFactorGraphDataGenerator::generate_data(int32_t len_label, int32_t len_fea // generate feature vector SGVector random_indices(len_feat); random_indices.range_fill(); - CMath::permute(random_indices, m_rng.get()); + CMath::permute(random_indices, prng); SGVector v_feat(len_feat); v_feat.zero(); @@ -494,7 +494,6 @@ float64_t CFactorGraphDataGenerator::test_sosvm(EMAPInferType infer_type) SGMatrix feats_train; // Generate random data - m_rng->set_seed(10); // fix the random seed generate_data(4, 12, 8, feats_train, labels_train); int32_t num_sample_train = labels_train.num_cols; diff --git a/src/shogun/structure/StochasticSOSVM.cpp b/src/shogun/structure/StochasticSOSVM.cpp index d22f3d4388d..7388868f56d 100644 --- a/src/shogun/structure/StochasticSOSVM.cpp +++ b/src/shogun/structure/StochasticSOSVM.cpp @@ -108,12 +108,14 @@ bool CStochasticSOSVM::train_machine(CFeatures* data) // Main loop int32_t k = 0; + auto prng = get_prng(); for (int32_t pi = 0; pi < m_num_iter; ++pi) { for (int32_t si = 0; si < N; ++si) { + std::uniform_int_distribution dist(0, N - 1); // 1) Picking random example - int32_t i = m_rng->random(0, N - 1); + int32_t i = dist(prng); // 2) solve the loss-augmented inference for point i CResultSet* result = m_model->argmax(m_w, i); diff --git a/src/shogun/structure/TwoStateModel.cpp b/src/shogun/structure/TwoStateModel.cpp index 893662d0ac6..60c0e032ae4 100644 --- 
a/src/shogun/structure/TwoStateModel.cpp +++ b/src/shogun/structure/TwoStateModel.cpp @@ -269,23 +269,20 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, SGVector< int32_t > ll(num_exm*exm_len); ll.zero(); int32_t rnb, rl, rp; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for ( int32_t i = 0 ; i < num_exm ; ++i) { SGVector< int32_t > lab(exm_len); lab.zero(); rnb = num_blocks[0] + - CMath::ceil( - (num_blocks[1] - num_blocks[0]) * m_rng->random(0.0, 1.0)) - - 1; + CMath::ceil((num_blocks[1] - num_blocks[0]) * dist(prng)) - 1; for ( int32_t j = 0 ; j < rnb ; ++j ) { rl = block_len[0] + - CMath::ceil( - (block_len[1] - block_len[0]) * m_rng->random(0.0, 1.0)) - - 1; - rp = CMath::ceil((exm_len - rl) * m_rng->random(0.0, 1.0)); + CMath::ceil((block_len[1] - block_len[0]) * dist(prng)) - 1; + rp = CMath::ceil((exm_len - rl) * dist(prng)); for ( int32_t idx = rp-1 ; idx < rp+rl ; ++idx ) { @@ -309,11 +306,10 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, SGMatrix< float64_t > signal(num_features, distort.vlen); distort.range_fill(); - auto prng = std::unique_ptr(new CRandom()); for ( int32_t i = 0 ; i < num_features ; ++i ) { lf = ll; - CMath::permute(distort, prng.get()); + CMath::permute(distort, prng); for ( int32_t j = 0 ; j < d1.vlen ; ++j ) d1[j] = distort[j]; @@ -326,8 +322,11 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, int32_t idx = i*signal.num_cols; for ( int32_t j = 0 ; j < signal.num_cols ; ++j ) - signal[idx++] = - lf[j] + noise_std * m_rng->normal_random((float64_t)0.0, 1.0); + { + std::normal_distribution dist_signal( + (float64_t)0.0, 1.0); + signal[idx++] = lf[j] + noise_std * dist_signal(prng); + } } // Substitute some features by pure noise @@ -335,8 +334,11 @@ CHMSVMModel* CTwoStateModel::simulate_data(int32_t num_exm, int32_t exm_len, { int32_t idx = i*signal.num_cols; for ( int32_t j = 0 ; j < signal.num_cols ; ++j ) - signal[idx++] = - noise_std * m_rng->normal_random((float64_t)0.0, 1.0); + { + std::normal_distribution dist_signal( + (float64_t)0.0, 1.0); + signal[idx++] = lf[j] + noise_std * dist_signal(prng); + } } CMatrixFeatures< float64_t >* features = diff --git a/src/shogun/transfer/multitask/LibLinearMTL.cpp b/src/shogun/transfer/multitask/LibLinearMTL.cpp index d6379799dce..079e9fd30b3 100644 --- a/src/shogun/transfer/multitask/LibLinearMTL.cpp +++ b/src/shogun/transfer/multitask/LibLinearMTL.cpp @@ -256,6 +256,7 @@ void CLibLinearMTL::solve_l2r_l1l2_svc(const liblinear_problem *prob, double eps auto pb = progress(range(10)); CTime start_time; + auto prng = get_prng(); while (iter < max_iterations && !CSignal::cancel_computations()) { if (m_max_train_time > 0 && start_time.cur_time_diff() > m_max_train_time) @@ -266,7 +267,8 @@ void CLibLinearMTL::solve_l2r_l1l2_svc(const liblinear_problem *prob, double eps for (i=0; irandom(i, active_size - 1); + std::uniform_int_distribution dist(i, active_size - 1); + int j = dist(prng); CMath::swap(index[i], index[j]); } diff --git a/tests/unit/base/SGObject_unittest.cc b/tests/unit/base/SGObject_unittest.cc index 57bbae989e2..e511b2906e6 100644 --- a/tests/unit/base/SGObject_unittest.cc +++ b/tests/unit/base/SGObject_unittest.cc @@ -37,10 +37,11 @@ TEST(SGObject,equals_same) TEST(SGObject,equals_NULL_parameter) { - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix data(3,10); for 
(index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* feats=new CDenseFeatures(data); CGaussianKernel* kernel=new CGaussianKernel(); @@ -152,11 +153,12 @@ TEST(SGObject,equals_complex_equal) SGMatrix X(1, n); SGMatrix X_test(1, n); SGVector Y(n); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t i=0; irandom(0.0, x_range); + std::uniform_real_distribution dist(0.0, x_range); + X[i] = dist(prng); X_test[i]=(float64_t)i / n*x_range; Y[i]=CMath::sin(X[i]); } diff --git a/tests/unit/base/Serialization_unittest.cc b/tests/unit/base/Serialization_unittest.cc index 40014ae54ec..5ddfa2ef758 100644 --- a/tests/unit/base/Serialization_unittest.cc +++ b/tests/unit/base/Serialization_unittest.cc @@ -23,7 +23,8 @@ TEST(Serialization,multiclass_labels) index_t n_class=3; CMulticlassLabels* labels=new CMulticlassLabels(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGVector lab(n); for (index_t i=0; iallocate_confidences_for(n_class); SGVector conf(n_class); for (index_t i=0; istd_normal_distrib(); + conf[i] = dist(prng); - for (index_t i=0; iset_multiclass_confidences(i, conf); /* create serialized copy */ diff --git a/tests/unit/classifier/svm/LibLinear_unittest.cc b/tests/unit/classifier/svm/LibLinear_unittest.cc index 93e63d3d80f..a3632bf8be9 100644 --- a/tests/unit/classifier/svm/LibLinear_unittest.cc +++ b/tests/unit/classifier/svm/LibLinear_unittest.cc @@ -1192,8 +1192,8 @@ TEST(LibLinear,simple_set_train_L1R_L2LOSS_SVC_BIAS) liblin_accuracy = eval->evaluate(pred, ground_truth); for(int i=0;iget_w()[i], t_w[i], 1e-5); - EXPECT_NEAR(ll->get_bias(), t_w[2], 1e-5); + EXPECT_NEAR(ll->get_w()[i], t_w[i], 1e-4); + EXPECT_NEAR(ll->get_bias(), t_w[2], 1e-4); EXPECT_NEAR(liblin_accuracy, 1.0, 1e-5); diff --git a/tests/unit/converter/Isomap_unittest.cc b/tests/unit/converter/Isomap_unittest.cc index 21d387ed4c6..cfe91b0a7e3 100644 --- a/tests/unit/converter/Isomap_unittest.cc +++ b/tests/unit/converter/Isomap_unittest.cc @@ -200,14 +200,15 @@ void check_similarity_of_sets(const std::set& first_set,const std::set< void fill_matrix_with_test_data(SGMatrix& matrix_to_fill) { index_t num_cols = matrix_to_fill.num_cols, num_rows = matrix_to_fill.num_rows; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i = 0; i < num_cols; ++i) { for (index_t j = 0; j < num_rows - 1; ++j) { matrix_to_fill(j, i) = i; } - matrix_to_fill(num_rows - 1, i) = m_rng->std_normal_distrib(); + matrix_to_fill(num_rows - 1, i) = dist(prng); } } diff --git a/tests/unit/distribution/MixtureModel_unittest.cc b/tests/unit/distribution/MixtureModel_unittest.cc index c0ef987ea64..f3f90b0d265 100644 --- a/tests/unit/distribution/MixtureModel_unittest.cc +++ b/tests/unit/distribution/MixtureModel_unittest.cc @@ -40,12 +40,14 @@ using namespace shogun; TEST(MixtureModel,gaussian_mixture_model) { - auto m_rng = std::unique_ptr(new CRandom(2)); + set_global_seed(2); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix data(1,400); for (int32_t i=0;i<100;i++) - data(0, i) = m_rng->std_normal_distrib(); + data(0, i) = dist(prng); for (int32_t i=100;i<400;i++) - data(0, i) = m_rng->std_normal_distrib() + 10; + data(0, i) = dist(prng) + 10; CDenseFeatures* feats=new CDenseFeatures(data); @@ -78,8 +80,8 @@ TEST(MixtureModel,gaussian_mixture_model) SGMatrix cov=outg->get_cov(); float64_t eps=1e-8; - 
EXPECT_NEAR(m[0],9.863760378,eps); - EXPECT_NEAR(cov(0,0),0.956568199,eps); + EXPECT_NEAR(m[0], 10.0139574310753, eps); + EXPECT_NEAR(cov(0, 0), 0.88920007801, eps); SG_UNREF(outg); SG_UNREF(distr); @@ -89,8 +91,8 @@ TEST(MixtureModel,gaussian_mixture_model) m=outg->get_mean(); cov=outg->get_cov(); - EXPECT_NEAR(m[0],-0.208122793,eps); - EXPECT_NEAR(cov(0,0),1.095106568,eps); + EXPECT_NEAR(m[0], -0.170370848432, eps); + EXPECT_NEAR(cov(0, 0), 1.15629910281, eps); SG_UNREF(outg); SG_UNREF(distr); diff --git a/tests/unit/ensemble/MajorityVote_unittest.cc b/tests/unit/ensemble/MajorityVote_unittest.cc index 4999fcc998c..b34a3614159 100644 --- a/tests/unit/ensemble/MajorityVote_unittest.cc +++ b/tests/unit/ensemble/MajorityVote_unittest.cc @@ -45,10 +45,11 @@ TEST(MajorityVote, binary_combine_vector) expected.zero(); v.zero(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t i = 0; i < num_classifiers; ++i) { - int32_t r = m_rng->random(0, 1); + int32_t r = dist(prng); v[i] = (r == 0) ? -1 : r; if (max < ++expected[r]) @@ -73,12 +74,13 @@ TEST(MajorityVote, multiclass_combine_vector) v.zero(); hist.zero(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 2); int64_t max_label = -1; int64_t max = -1; for (index_t i = 0; i < num_classifiers; ++i) { - v[i] = m_rng->random(0, 2); + v[i] = dist(prng); if (max < ++hist[index_t(v[i])]) { max = hist[index_t(v[i])]; diff --git a/tests/unit/ensemble/WeightedMajorityVote_unittest.cc b/tests/unit/ensemble/WeightedMajorityVote_unittest.cc index bcdfcfd8c61..7d8196d3ee1 100644 --- a/tests/unit/ensemble/WeightedMajorityVote_unittest.cc +++ b/tests/unit/ensemble/WeightedMajorityVote_unittest.cc @@ -11,7 +11,8 @@ void generate_random_ensemble_matrix(SGMatrix& em, const SGVector& w) { int32_t num_classes = 3; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, num_classes - 1); for (index_t i = 0; i < em.num_rows; ++i) { SGVector hist(num_classes); @@ -19,7 +20,7 @@ void generate_random_ensemble_matrix(SGMatrix& em, float64_t max = CMath::ALMOST_NEG_INFTY; for (index_t j = 0; j < em.num_cols; ++j) { - int32_t r = m_rng->random(0, num_classes - 1); + int32_t r = dist(prng); em(i,j) = r; hist[r] += w[j]; // if there's a tie mark it the first element will be the winner @@ -70,10 +71,11 @@ TEST(WeightedMajorityVote, binary_combine_vector) expected.zero(); v.zero(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 1); for (index_t i = 0; i < num_classifiers; ++i) { - int32_t r = m_rng->random(0, 1); + int32_t r = dist(prng); v[i] = (r == 0) ? 
-1 : r; expected[r] += weights[i]; @@ -96,7 +98,8 @@ TEST(WeightedMajorityVote, multiclass_combine_vector) SGVector weights(num_classifiers); weights.random(0.5, 2.0); CWeightedMajorityVote* mv = new CWeightedMajorityVote(weights); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, 2); SGVector v(num_classifiers); SGVector hist(3); @@ -107,7 +110,7 @@ TEST(WeightedMajorityVote, multiclass_combine_vector) float64_t max = -1; for (index_t i = 0; i < num_classifiers; ++i) { - v[i] = m_rng->random(0, 2); + v[i] = dist(prng); hist[index_t(v[i])] += weights[i]; if (max < hist[index_t(v[i])]) { diff --git a/tests/unit/evaluation/CrossValidation_multithread_unittest.cc b/tests/unit/evaluation/CrossValidation_multithread_unittest.cc index 1f06d5be6d0..f350b34a673 100644 --- a/tests/unit/evaluation/CrossValidation_multithread_unittest.cc +++ b/tests/unit/evaluation/CrossValidation_multithread_unittest.cc @@ -48,11 +48,11 @@ using namespace shogun; void generate_data(SGMatrix& mat, SGVector &lab) { int32_t num=lab.size(); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib() * 4) - : 100 + (m_rng->std_normal_distrib() * 4); + mat(0, i) = i < num / 2 ? 0 + (dist(prng) * 4) : 100 + (dist(prng) * 4); mat(1,i)=i; } diff --git a/tests/unit/evaluation/SplittingStrategy_unittest.cc b/tests/unit/evaluation/SplittingStrategy_unittest.cc index ae42124ed10..14ad5bfd38d 100644 --- a/tests/unit/evaluation/SplittingStrategy_unittest.cc +++ b/tests/unit/evaluation/SplittingStrategy_unittest.cc @@ -23,20 +23,23 @@ TEST(SplittingStrategy,standard) index_t num_labels; index_t num_subsets; index_t runs=100; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_nl(10, 150); + std::uniform_int_distribution dist_nc(1, 5); + std::uniform_real_distribution dist_sl(-10.0, 10.0); while (runs-->0) { fold_sizes=0; - num_labels = m_rng->random(10, 150); - num_subsets = m_rng->random(1, 5); + num_labels = dist_nl(prng); + num_subsets = dist_nc(prng); index_t desired_size=CMath::round( (float64_t)num_labels/(float64_t)num_subsets); /* build labels */ CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_sl(prng)); /* build splitting strategy */ CCrossValidationSplitting* splitting= @@ -90,19 +93,22 @@ TEST(SplittingStrategy,stratified_subsets_disjoint_cover) { index_t num_labels, num_classes, num_subsets, fold_sizes; index_t runs=50; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_nl(11, 100); + std::uniform_int_distribution dist_nc(2, 10); + std::uniform_int_distribution dist_ns(1, 10); while (runs-->0) { fold_sizes=0; - num_labels = m_rng->random(11, 100); - num_classes = m_rng->random(2, 10); - num_subsets = m_rng->random(1, 10); + num_labels = dist_nl(prng); + num_classes = dist_nc(prng); + num_subsets = dist_ns(prng); /* build labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random_64() % num_classes); + labels->set_label(i, prng() % num_classes); SGVector classes=labels->get_unique_labels(); @@ -170,18 +176,21 @@ TEST(SplittingStrategy,stratified_subset_label_ratio) { index_t num_labels, num_classes, num_subsets; index_t runs=50; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = 
get_prng(); + std::uniform_int_distribution dist_nl(11, 100); + std::uniform_int_distribution dist_nc(2, 10); + std::uniform_int_distribution dist_ns(1, 10); while (runs-->0) { - num_labels = m_rng->random(11, 100); - num_classes = m_rng->random(2, 10); - num_subsets = m_rng->random(1, 10); + num_labels = dist_nl(prng); + num_classes = dist_nc(prng); + num_subsets = dist_ns(prng); /* build labels */ CMulticlassLabels* labels=new CMulticlassLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random_64() % num_classes); + labels->set_label(i, prng() % num_classes); /*No. of labels belonging to one class*/ SGVector class_labels(num_classes); @@ -244,17 +253,19 @@ TEST(SplittingStrategy,LOO) { index_t num_labels, fold_sizes; index_t runs=10; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist_nl(10, 50); + std::uniform_int_distribution dist_sl(-10.0, 10.0); while (runs-->0) { fold_sizes=0; - num_labels = m_rng->random(10, 50); + num_labels = dist_nl(prng); /* build labels */ CRegressionLabels* labels=new CRegressionLabels(num_labels); for (index_t i=0; iset_label(i, m_rng->random(-10.0, 10.0)); + labels->set_label(i, dist_sl(prng)); /* build Leave one out splitting strategy */ CLOOCrossValidationSplitting* splitting= diff --git a/tests/unit/features/CombinedFeatures_unittest.cc b/tests/unit/features/CombinedFeatures_unittest.cc index 60b557b6163..34b193f3d6e 100644 --- a/tests/unit/features/CombinedFeatures_unittest.cc +++ b/tests/unit/features/CombinedFeatures_unittest.cc @@ -72,12 +72,13 @@ TEST(CombinedFeaturesTest,create_merged_copy) SGMatrix data_1(dim,n_1); for (index_t i=0; i(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // data_1.display_matrix("data_1"); SGMatrix data_2(dim,n_2); for (index_t i=0; istd_normal_distrib(); + data_2.matrix[i] = dist(prng); // data_1.display_matrix("data_2"); diff --git a/tests/unit/features/DenseFeatures_unittest.cc b/tests/unit/features/DenseFeatures_unittest.cc index d16d24e263d..7f5c4f33982 100644 --- a/tests/unit/features/DenseFeatures_unittest.cc +++ b/tests/unit/features/DenseFeatures_unittest.cc @@ -45,13 +45,14 @@ TEST(DenseFeaturesTest,create_merged_copy) SGMatrix data_1(dim,n_1); for (index_t i=0; i(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); //data_1.display_matrix("data_1"); SGMatrix data_2(dim,n_2); for (index_t i=0; istd_normal_distrib(); + data_2.matrix[i] = dist(prng); //data_2.display_matrix("data_2"); @@ -132,11 +133,12 @@ TEST(DenseFeaturesTest, copy_dimension_subset) data.matrix[i]=i; CDenseFeatures* features=new CDenseFeatures(data); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(0, dim - 1); SGVector dims(dim/2); for (index_t i=0; irandom(0, dim - 1); + dims[i] = dist(prng); CDenseFeatures* f_reduced=(CDenseFeatures*) features->copy_dimension_subset(dims); @@ -163,17 +165,23 @@ TEST(DenseFeaturesTest, copy_dimension_subset_with_subsets) data.matrix[i]=i; CDenseFeatures* features=new CDenseFeatures(data); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGVector inds(n/2); for (index_t i=0; irandom(0, n - 1); + { + std::uniform_int_distribution dist_inds(0, n - 1); + inds[i] = dist_inds(prng); + } features->add_subset(inds); SGVector dims(dim/2); for (index_t i=0; irandom(0, dim - 1); + { + std::uniform_int_distribution dist_dim(0, dim - 1); + dims[i] = dist_dim(prng); + } CDenseFeatures* 
f_reduced=(CDenseFeatures*) features->copy_dimension_subset(dims); diff --git a/tests/unit/features/HashedDenseFeatures_unittest.cc b/tests/unit/features/HashedDenseFeatures_unittest.cc index 6397415a144..b581b652db8 100644 --- a/tests/unit/features/HashedDenseFeatures_unittest.cc +++ b/tests/unit/features/HashedDenseFeatures_unittest.cc @@ -320,11 +320,12 @@ TEST(HashedDenseFeaturesTest, dense_comparison) int32_t hashing_dim = 300; CHashedDenseFeatures* h_feats = new CHashedDenseFeatures(data, hashing_dim); CDenseFeatures* d_feats = new CDenseFeatures(data); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-hashing_dim, hashing_dim); SGVector dense_vec(hashing_dim); for (index_t i=0; irandom(-hashing_dim, hashing_dim); + dense_vec[i] = dist(prng); for (index_t i=0; idot(i, h_feats, i), d_feats->dot(i, d_feats, i)); diff --git a/tests/unit/features/HashedDocDotFeatures_unittest.cc b/tests/unit/features/HashedDocDotFeatures_unittest.cc index a9ffd88a3cb..961a7ddea57 100644 --- a/tests/unit/features/HashedDocDotFeatures_unittest.cc +++ b/tests/unit/features/HashedDocDotFeatures_unittest.cc @@ -77,7 +77,7 @@ TEST(HashedDocDotFeaturesTest, dense_dot_test) const char* doc_1 = "You're never too old to rock and roll, if you're too young to die"; const char* doc_2 = "Give me some rope, tie me to dream, give me the hope to run out of steam"; const char* doc_3 = "Thank you Jack Daniels, Old Number Seven, Tennessee Whiskey got me drinking in heaven"; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGString string_1(65); for (index_t i=0; i<65; i++) string_1.string[i] = doc_1[i]; @@ -109,10 +109,10 @@ TEST(HashedDocDotFeaturesTest, dense_dot_test) CHashedDocConverter* converter = new CHashedDocConverter(tokenizer, hash_bits, false); CSparseFeatures* converted_docs = (CSparseFeatures* ) converter->apply(doc_collection); - + std::uniform_int_distribution dist(-dimension, dimension); SGVector vec(dimension); for (index_t i=0; irandom(-dimension, dimension); + vec[i] = dist(prng); for (index_t i=0; i<3; i++) { diff --git a/tests/unit/features/StreamingDenseFeatures_unittest.cc b/tests/unit/features/StreamingDenseFeatures_unittest.cc index 6a64b09b7b0..a0eabdd340b 100644 --- a/tests/unit/features/StreamingDenseFeatures_unittest.cc +++ b/tests/unit/features/StreamingDenseFeatures_unittest.cc @@ -25,10 +25,11 @@ TEST(StreamingDenseFeaturesTest, example_reading_from_file) index_t dim=2; char fname[] = "StreamingDenseFeatures_reading.XXXXXX"; generate_temp_filename(fname); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix data(dim,n); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* orig_feats=new CDenseFeatures(data); CCSVFile* saved_features = new CCSVFile(fname, 'w'); @@ -68,10 +69,11 @@ TEST(StreamingDenseFeaturesTest, example_reading_from_features) { index_t n=20; index_t dim=2; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix data(dim,n); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* orig_feats=new CDenseFeatures(data); CStreamingDenseFeatures* feats = new CStreamingDenseFeatures(orig_feats); @@ -100,10 +102,11 @@ TEST(StreamingDenseFeaturesTest, reset_stream) { index_t n=20; index_t dim=2; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); 
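// Editor's sketch (not part of the patch): the reproducibility idiom the updated tests rely
// on -- two engines seeded identically produce identical draws. In the shogun tests this is
// done with set_global_seed(seed) followed by get_prng() (helpers introduced by this change
// set, whose exact behaviour is assumed here); plain std::mt19937_64 engines are used below
// so the snippet stands alone.
#include <cassert>
#include <random>

void reproducible_draws_sketch()
{
    const unsigned seed = 100; // any fixed seed
    std::mt19937_64 prng(seed);
    std::mt19937_64 prng_copy(seed);
    std::normal_distribution<double> dist_a(0.0, 1.0);
    std::normal_distribution<double> dist_b(0.0, 1.0);
    for (int i = 0; i < 1000; ++i)
        assert(dist_a(prng) == dist_b(prng_copy)); // identical sequences
}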
SGMatrix data(dim,n); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* orig_feats=new CDenseFeatures(data); CStreamingDenseFeatures* feats=new CStreamingDenseFeatures(orig_feats); diff --git a/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc b/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc index e74a7ab3662..e8ca3629089 100644 --- a/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc +++ b/tests/unit/features/StreamingHashedDocDotFeatures_unittest.cc @@ -81,7 +81,7 @@ TEST(StreamingHashedDocFeaturesTest, dot_tests) const char* doc_1 = "You're never too old to rock and roll, if you're too young to die"; const char* doc_2 = "Give me some rope, tie me to dream, give me the hope to run out of steam"; const char* doc_3 = "Thank you Jack Daniels, Old Number Seven, Tennessee Whiskey got me drinking in heaven"; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGString string_1(65); for (index_t i=0; i<65; i++) string_1.string[i] = doc_1[i]; @@ -111,8 +111,9 @@ TEST(StreamingHashedDocFeaturesTest, dot_tests) feats->start_parser(); SGVector dense_vec(32); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t j=0; j<32; j++) - dense_vec[j] = m_rng->random(0.0, 1.0); + dense_vec[j] = dist(prng); index_t i = 0; while (feats->get_next_example()) diff --git a/tests/unit/features/StreamingSparseFeatures_unittest.cc b/tests/unit/features/StreamingSparseFeatures_unittest.cc index 34da51f628d..eec8d528ecf 100644 --- a/tests/unit/features/StreamingSparseFeatures_unittest.cc +++ b/tests/unit/features/StreamingSparseFeatures_unittest.cc @@ -27,7 +27,10 @@ TEST(StreamingSparseFeaturesTest, parse_file) int32_t max_num_entries=20; int32_t max_label_value=1; float64_t max_entry_value=1; - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_p( + -max_label_value, max_label_value); + std::uniform_real_distribution dist_q(0, max_num_entries); int32_t num_vec=10; int32_t num_feat=0; @@ -36,22 +39,21 @@ TEST(StreamingSparseFeaturesTest, parse_file) float64_t* labels=SG_MALLOC(float64_t, num_vec); for (int32_t i=0; i(rand->random(0, max_num_entries)); - labels[i]=(float64_t) rand->random(-max_label_value, max_label_value); - for (int32_t j=0; jnum_feat) - num_feat=feat_index; + data[i] = SGSparseVector(dist_q(prng)); + labels[i] = (float64_t)dist_p(prng); + for (int32_t j = 0; j < data[i].num_feat_entries; j++) + { + int32_t feat_index = (j + 1) * 2; + if (feat_index > num_feat) + num_feat = feat_index; - data[i].features[j].feat_index=feat_index-1; - data[i].features[j].entry=rand->random(0., max_entry_value); - } + data[i].features[j].feat_index = feat_index - 1; + data[i].features[j].entry = dist_q(prng); + } } CLibSVMFile* fout = new CLibSVMFile(fname, 'w', NULL); fout->set_sparse_matrix(data, num_feat, num_vec, labels); SG_UNREF(fout); - SG_FREE(rand); CStreamingAsciiFile *file = new CStreamingAsciiFile(fname); CStreamingSparseFeatures *stream_features = diff --git a/tests/unit/features/StringFeatures_unittest.cc b/tests/unit/features/StringFeatures_unittest.cc index 4ae344af5f2..8bf042e95e1 100644 --- a/tests/unit/features/StringFeatures_unittest.cc +++ b/tests/unit/features/StringFeatures_unittest.cc @@ -17,19 +17,22 @@ using namespace shogun; SGStringList generateRandomData(index_t num_strings=10, index_t max_string_length=20, index_t min_string_length=10) { SGStringList strings(num_strings, max_string_length); - auto m_rng = std::unique_ptr(new CRandom()); + auto 
prng = get_prng(); + std::uniform_int_distribution dist_len( + min_string_length, max_string_length); + std::uniform_int_distribution dist_str('A', 'Z'); //SG_SPRINT("original string data:\n"); for (index_t i=0; irandom(min_string_length, max_string_length); + index_t len = dist_len(prng); SGString current(len); //SG_SPRINT("[%i]: \"", i); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); + current.string[j] = (char)dist_str(prng); /* attach \0 to print letter */ char* string=SG_MALLOC(char, 2); diff --git a/tests/unit/io/CSVFile_unittest.cc b/tests/unit/io/CSVFile_unittest.cc index 43721520ef3..2caf5091d0b 100644 --- a/tests/unit/io/CSVFile_unittest.cc +++ b/tests/unit/io/CSVFile_unittest.cc @@ -1,8 +1,8 @@ +#include #include -#include -#include #include -#include +#include +#include #include #include @@ -13,12 +13,13 @@ using namespace shogun; TEST(CSVFileTest, vector_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); int32_t len=512*512; + std::uniform_int_distribution dist(0, len); SGVector data(len); for (int32_t i=0; irandom(0, len); + data[i] = (int32_t)dist(prng); CCSVFile* fin; CCSVFile* fout; @@ -39,18 +40,18 @@ TEST(CSVFileTest, vector_int32) EXPECT_EQ(data_from_file[i], data[i]); } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_vector_int32_output.txt"); } TEST(CSVFileTest, vector_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t len=128*128; SGVector data(len); for (int32_t i=0; irandom(0., 1.); + data[i] = (float64_t)dist(prng); CCSVFile* fin; CCSVFile* fout; @@ -71,21 +72,21 @@ TEST(CSVFileTest, vector_float64) EXPECT_NEAR(data_from_file[i], data[i], 1E-14); } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_vector_float64_output.txt"); } TEST(CSVFileTest, matrix_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); - int32_t num_rows=512; - int32_t num_cols=512; - SGMatrix data(num_rows, num_cols); - for (int32_t i=0; i data(num_rows, num_cols); + std::uniform_int_distribution dist(0, num_rows); + for (index_t i = 0; i < num_rows; i++) { - for (int32_t j=0; jrandom(0, num_rows); + for (index_t j = 0; j < num_cols; j++) + data(i, j) = (index_t)dist(prng); } CCSVFile* fin; @@ -110,13 +111,13 @@ TEST(CSVFileTest, matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_matrix_int32_output.txt"); } TEST(CSVFileTest, matrix_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t num_rows=128; int32_t num_cols=128; @@ -124,7 +125,7 @@ TEST(CSVFileTest, matrix_float64) for (int32_t i=0; irandom(0., 1.); + data(i, j) = (float64_t)dist(prng); } CCSVFile* fin; @@ -149,7 +150,6 @@ TEST(CSVFileTest, matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); unlink("CSVFileTest_matrix_float64_output.txt"); } diff --git a/tests/unit/io/LibSVMFile_unittest.cc b/tests/unit/io/LibSVMFile_unittest.cc index fddd21b24ce..82d4e539e2c 100644 --- a/tests/unit/io/LibSVMFile_unittest.cc +++ b/tests/unit/io/LibSVMFile_unittest.cc @@ -1,6 +1,6 @@ +#include #include #include -#include #include @@ -13,7 +13,11 @@ TEST(LibSVMFileTest, sparse_matrix_int32) int32_t max_num_entries = 512; int32_t max_label_value = 1; int32_t max_entry_value = 1024; - CRandom * rand = new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_d(0, max_num_entries); + std::uniform_int_distribution dist_l( + -max_label_value, max_label_value); + std::uniform_int_distribution dist_e(0, 
max_entry_value); int32_t num_vec = 10; int32_t num_feat = 0; @@ -32,11 +36,11 @@ TEST(LibSVMFileTest, sparse_matrix_int32) for (int32_t i = 0; i < num_vec; i++) { - data[i] = SGSparseVector(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_d(prng)); if (i > 2) { labels[i] = SGVector(1); - labels[i][0] = rand->random(-max_label_value, max_label_value); + labels[i][0] = dist_l(prng); } for (int32_t j = 0; j < data[i].num_feat_entries; j++) { @@ -47,7 +51,7 @@ TEST(LibSVMFileTest, sparse_matrix_int32) } data[i].features[j].feat_index = feat_index - 1; - data[i].features[j].entry = rand->random(0, max_entry_value); + data[i].features[j].entry = dist_e(prng); } } @@ -85,7 +89,6 @@ TEST(LibSVMFileTest, sparse_matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(labels); SG_FREE(data_from_file); @@ -98,7 +101,11 @@ TEST(LibSVMFileTest, sparse_matrix_float64) { int32_t max_num_entries = 512; int32_t max_label_value = 1; - CRandom * rand = new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_d(0, max_num_entries); + std::uniform_int_distribution dist_l( + -max_label_value, max_label_value); + std::uniform_real_distribution dist_e(0.0, 1.0); int32_t num_vec = 1024; int32_t num_feat = 0; @@ -117,11 +124,11 @@ TEST(LibSVMFileTest, sparse_matrix_float64) for (int32_t i = 0; i < num_vec; i++) { - data[i] = SGSparseVector(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_d(prng)); if (i > 2) { labels[i] = SGVector(1); - labels[i][0] = rand->random(-max_label_value, max_label_value); + labels[i][0] = dist_l(prng); } for (int32_t j = 0; j < data[i].num_feat_entries; j++) @@ -133,7 +140,7 @@ TEST(LibSVMFileTest, sparse_matrix_float64) } data[i].features[j].feat_index = feat_index - 1; - data[i].features[j].entry = rand->random(0., 1.); + data[i].features[j].entry = dist_e(prng); } } @@ -171,7 +178,6 @@ TEST(LibSVMFileTest, sparse_matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(labels); SG_FREE(data_from_file); diff --git a/tests/unit/io/ProtobufFile_unittest.cc b/tests/unit/io/ProtobufFile_unittest.cc index 6f720683191..d582699e48d 100644 --- a/tests/unit/io/ProtobufFile_unittest.cc +++ b/tests/unit/io/ProtobufFile_unittest.cc @@ -1,8 +1,8 @@ -#include +#include #include #include #include -#include +#include #include @@ -18,12 +18,13 @@ using namespace shogun; TEST(ProtobufFileTest, vector_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); int32_t len=1024*1024; + std::uniform_int_distribution dist(0, len); SGVector data(len); for (int32_t i=0; irandom(0, len); + data[i] = (int32_t)dist(prng); CProtobufFile* fin; CProtobufFile* fout; @@ -42,18 +43,18 @@ TEST(ProtobufFileTest, vector_int32) EXPECT_EQ(data_from_file[i], data[i]); } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_vector_int32_output.txt"); } TEST(ProtobufFileTest, vector_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t len=1024*1024; SGVector data(len); for (int32_t i=0; irandom(0, 1); + data[i] = (float64_t)dist(prng); CProtobufFile* fin; CProtobufFile* fout; @@ -72,21 +73,21 @@ TEST(ProtobufFileTest, vector_float64) EXPECT_NEAR(data_from_file[i], data[i], 1E-14); } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_vector_float64_output.txt"); } TEST(ProtobufFileTest, matrix_int32) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); int32_t num_rows=1024; int32_t num_cols=512; + std::uniform_int_distribution dist(0, num_rows); 
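// Editor's sketch (not part of the patch): the element-by-element fill loops in these test
// hunks can also be written with std::generate_n, which works directly on the raw buffers
// that SGVector/SGMatrix expose via their .vector/.matrix members. Shown on a plain buffer
// so the snippet depends on nothing beyond the standard library.
#include <algorithm>
#include <random>
#include <vector>

void fill_with_uniform(std::vector<double>& buf, std::mt19937_64& prng)
{
    std::uniform_real_distribution<double> dist(0.0, 1.0);
    // same effect as: for (i...) buf[i] = dist(prng);
    std::generate_n(buf.data(), buf.size(), [&]() { return dist(prng); });
}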
SGMatrix data(num_rows, num_cols); for (int32_t i=0; irandom(0, num_rows); + data(i, j) = (int32_t)dist(prng); } CProtobufFile* fin; @@ -109,13 +110,13 @@ TEST(ProtobufFileTest, matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_matrix_int32_output.txt"); } TEST(ProtobufFileTest, matrix_float64) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); int32_t num_rows=1024; int32_t num_cols=512; @@ -123,7 +124,7 @@ TEST(ProtobufFileTest, matrix_float64) for (int32_t i=0; irandom(0, 1); + data(i, j) = (float64_t)dist(prng); } CProtobufFile* fin; @@ -146,7 +147,6 @@ TEST(ProtobufFileTest, matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); unlink("ProtobufFileTest_matrix_float64_output.txt"); } @@ -154,7 +154,9 @@ TEST(ProtobufFileTest, sparse_matrix_int32) { int32_t max_num_entries=512; int32_t max_entry_value=1024; - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(0, max_num_entries); + std::uniform_int_distribution dist_q(0, max_entry_value); int32_t num_vec=1024; int32_t num_feat=0; @@ -162,7 +164,7 @@ TEST(ProtobufFileTest, sparse_matrix_int32) SGSparseVector* data=SG_MALLOC(SGSparseVector, num_vec); for (int32_t i=0; i(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_p(prng)); for (int32_t j=0; jrandom(0, max_entry_value); + data[i].features[j].entry = dist_q(prng); } } @@ -200,7 +202,6 @@ TEST(ProtobufFileTest, sparse_matrix_int32) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(data_from_file); @@ -211,7 +212,9 @@ TEST(ProtobufFileTest, sparse_matrix_int32) TEST(ProtobufFileTest, sparse_matrix_float64) { int32_t max_num_entries=512; - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(0, max_num_entries); + std::uniform_real_distribution dist_q(0.0, 1.0); int32_t num_vec=1024; int32_t num_feat=0; @@ -219,7 +222,7 @@ TEST(ProtobufFileTest, sparse_matrix_float64) SGSparseVector* data=SG_MALLOC(SGSparseVector, num_vec); for (int32_t i=0; i(rand->random(0, max_num_entries)); + data[i] = SGSparseVector(dist_p(prng)); for (int32_t j=0; jrandom(0., 1.); + data[i].features[j].entry = dist_q(prng); } } @@ -257,7 +260,6 @@ TEST(ProtobufFileTest, sparse_matrix_float64) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(data); SG_FREE(data_from_file); @@ -267,16 +269,18 @@ TEST(ProtobufFileTest, sparse_matrix_float64) TEST(ProtobufFileTest, DISABLED_string_list_char) { - CRandom* rand=new CRandom(); + auto prng = get_prng(); + std::uniform_real_distribution dist_q(0, 255); int32_t num_str=1024; int32_t max_string_len=1024; + std::uniform_int_distribution dist_p(0, max_string_len); SGString* strings=SG_MALLOC(SGString, num_str); for (int32_t i=0; i((int32_t) rand->random(1, max_string_len)); + strings[i] = SGString((int32_t)dist_p(prng)); for (int32_t j=0; jrandom(0, 255); + strings[i].string[j] = (char)dist_q(prng); } CProtobufFile* fin; @@ -300,7 +304,6 @@ TEST(ProtobufFileTest, DISABLED_string_list_char) } SG_UNREF(fin); - SG_FREE(rand); SG_FREE(strings); SG_FREE(data_from_file); diff --git a/tests/unit/kernel/CustomKernel_unittest.cc b/tests/unit/kernel/CustomKernel_unittest.cc index 545585e3874..5f39be1c338 100644 --- a/tests/unit/kernel/CustomKernel_unittest.cc +++ b/tests/unit/kernel/CustomKernel_unittest.cc @@ -14,7 +14,6 @@ #include #include #include -#include #include using namespace shogun; @@ -35,10 +34,10 @@ TEST(CustomKernelTest,add_row_subset) inds.range_fill(); index_t num_runs=10; - auto prng = 
std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (index_t i=0; iadd_subset(inds); custom->add_row_subset(inds); diff --git a/tests/unit/kernel/Kernel_unittest.cc b/tests/unit/kernel/Kernel_unittest.cc index 2e0b9e299d3..8e16ae6a4aa 100644 --- a/tests/unit/kernel/Kernel_unittest.cc +++ b/tests/unit/kernel/Kernel_unittest.cc @@ -41,11 +41,12 @@ static SGMatrix generate_std_norm_matrix(const index_t num_feats, const index_t dim) { SGMatrix data(dim, num_feats); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + data(j, i) = dist(prng); } return data; } diff --git a/tests/unit/kernel/SubsequenceStringKernel_unittest.cc b/tests/unit/kernel/SubsequenceStringKernel_unittest.cc index 19caffbd768..c25f1e1b0ee 100644 --- a/tests/unit/kernel/SubsequenceStringKernel_unittest.cc +++ b/tests/unit/kernel/SubsequenceStringKernel_unittest.cc @@ -61,19 +61,23 @@ TEST(SubsequenceStringKernel, psd_random_feat) const index_t min_len=max_len/2; SGStringList list(num_strings, max_len); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_cl(min_len, max_len); + std::uniform_int_distribution dist_str('A', 'Z'); + std::uniform_int_distribution dist_sl(1, min_len); + std::uniform_real_distribution dist_ld(0.0, 1.0); for (index_t i=0; irandom(min_len, max_len); + index_t cur_len = dist_cl(prng); SGString str(cur_len); for (index_t l=0; lrandom('A', 'Z')); + str.string[l] = char(dist_str(prng)); list.strings[i]=str; } CStringFeatures* s_feats=new CStringFeatures(list, ALPHANUM); - int32_t s_len = m_rng->random(1, min_len); - float64_t lambda = m_rng->random(0.0, 1.0); + int32_t s_len = dist_sl(prng); + float64_t lambda = dist_ld(prng); CSubsequenceStringKernel* kernel=new CSubsequenceStringKernel(s_feats, s_feats, s_len, lambda); SGMatrix kernel_matrix=kernel->get_kernel_matrix(); diff --git a/tests/unit/lib/DynamicArray_unittest.cc b/tests/unit/lib/DynamicArray_unittest.cc index a5e0305f020..cc40052e001 100644 --- a/tests/unit/lib/DynamicArray_unittest.cc +++ b/tests/unit/lib/DynamicArray_unittest.cc @@ -62,10 +62,11 @@ TYPED_TEST(CDynamicArrayFixture, set_array) this->wrapper_array->reset_array(); EXPECT_EQ(this->wrapper_array->get_num_elements(), 0); TypeParam* array = SG_MALLOC(TypeParam, 5); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 10); for (int32_t i = 0; i < 5; i++) { - array[i] = (TypeParam)prng->random(1, 10); + array[i] = (TypeParam)dist(prng); } this->wrapper_array->set_array(array, 5); @@ -80,10 +81,11 @@ TYPED_TEST(CDynamicArrayFixture, set_array) TYPED_TEST(CDynamicArrayFixture, const_set_array) { TypeParam* array = SG_MALLOC(TypeParam, 5); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 10); for (int32_t i = 0; i < 5; i++) { - array[i] = (TypeParam)prng->random(1, 10); + array[i] = (TypeParam)dist(prng); } const TypeParam* const_array = array; this->wrapper_array->reset_array(); diff --git a/tests/unit/lib/Memory_unittest.cc b/tests/unit/lib/Memory_unittest.cc index f63610daba4..b90ad15dfbf 100644 --- a/tests/unit/lib/Memory_unittest.cc +++ b/tests/unit/lib/Memory_unittest.cc @@ -66,9 +66,10 @@ TEST(MemoryTest, sg_memcpy) { const index_t size = 10; auto src = SG_CALLOC(float64_t, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); 
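// Editor's sketch (not part of the patch): CMath::permute(v, prng) in the hunks above
// shuffles a vector with the supplied engine; the standard-library equivalent is
// std::shuffle, which also supersedes the std::random_shuffle call still present in
// ECOCRandomSparseEncoder (std::random_shuffle is deprecated in C++14 and removed in
// C++17). Shown on a std::vector for self-containment.
#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

void shuffle_indices(std::vector<int32_t>& inds, std::mt19937_64& prng)
{
    // Fisher-Yates shuffle driven by the shared engine
    std::shuffle(inds.begin(), inds.end(), prng);
}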
for (index_t i=0; istd_normal_distrib(); + src[i] = dist(prng); auto dest = SG_CALLOC(float64_t, size); diff --git a/tests/unit/lib/SGMatrix_unittest.cc b/tests/unit/lib/SGMatrix_unittest.cc index ae5bb677338..813b5e16fd2 100644 --- a/tests/unit/lib/SGMatrix_unittest.cc +++ b/tests/unit/lib/SGMatrix_unittest.cc @@ -242,14 +242,15 @@ TEST(SGMatrixTest,is_symmetric_float32_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; irandn_float(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -279,14 +280,15 @@ TEST(SGMatrixTest,is_symmetric_float32_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; irandn_float(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -316,12 +318,13 @@ TEST(SGMatrixTest,is_symmetric_float32_true) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; irandn_float(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -332,15 +335,16 @@ TEST(SGMatrixTest,is_symmetric_float64_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(); - mat(j, i)=mat(i, j); + mat(i, j) = dist(prng); + mat(j, i) = mat(i, j); } } @@ -369,15 +373,16 @@ TEST(SGMatrixTest,is_symmetric_float64_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(); - mat(j, i)=mat(i, j); + mat(i, j) = dist(prng); + mat(j, i) = mat(i, j); } } @@ -406,12 +411,13 @@ TEST(SGMatrixTest,is_symmetric_float64_true) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + mat(i, j) = dist(prng); mat(j, i)=mat(i, j); } } @@ -422,15 +428,15 @@ TEST(SGMatrixTest,is_symmetric_complex128_false_old_plus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); + mat(i, j) = complex128_t(dist(prng), dist(prng)); mat(j, i)=mat(i, j); } } @@ -468,15 +474,15 @@ TEST(SGMatrixTest,is_symmetric_complex128_false_old_minus_eps) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // create a symmetric matrix for (index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); + mat(i, j) = complex128_t(dist(prng), dist(prng)); mat(j, i)=mat(i, j); } } @@ -514,13 +520,13 @@ TEST(SGMatrixTest,is_symmetric_complex128_true) { const index_t size=2; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for 
(index_t i=0; istd_normal_distrib(), m_rng->std_normal_distrib()); + mat(i, j) = complex128_t(dist(prng), dist(prng)); mat(j, i)=mat(i, j); } } @@ -564,21 +570,22 @@ TEST(SGMatrixTest, equals) EXPECT_TRUE(mat.equals(mat)); EXPECT_TRUE(mat.equals(copy)); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); mat=SGMatrix(size, size); for (uint64_t i=0; irandn_float(); + mat.matrix[i] = dist(prng); EXPECT_TRUE(mat.equals(mat)); EXPECT_FALSE(mat.equals(copy)); copy=SGMatrix(size, size); EXPECT_FALSE(mat.equals(copy)); - m_rng->set_seed(100); + auto prng_copy = get_prng(); for (uint64_t i=0; irandn_float(); + copy.matrix[i] = dist(prng_copy); EXPECT_TRUE(mat.equals(copy)); } @@ -587,9 +594,10 @@ TEST(SGMatrixTest, clone) { const index_t size=10; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (uint64_t i=0; irandn_float(); + mat.matrix[i] = dist(prng); SGMatrix copy=mat.clone(); @@ -600,8 +608,9 @@ TEST(SGMatrixTest, set_const) { const index_t size=10; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); - const auto value = m_rng->std_normal_distrib(); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); + const auto value = dist(prng); mat.set_const(value); for (uint64_t i=0; i mat(size, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (uint64_t i=0; irandn_float(); + mat.matrix[i] = dist(prng); auto max=mat.max_single(); for (uint64_t i=0; i mat(n_rows, n_cols); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i = 0; i < n_rows * n_cols; ++i) - mat[i] = m_rng->std_normal_distrib(); + mat[i] = dist(prng); auto vec = mat.get_column_vector(col); @@ -644,9 +655,10 @@ TEST(SGMatrixTest, set_column) SGMatrix mat(n_rows, n_cols); SGVector vec(n_rows); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i = 0; i < n_rows; ++i) - vec[i] = m_rng->std_normal_distrib(); + vec[i] = dist(prng); mat.set_column(col, vec); diff --git a/tests/unit/lib/SGSparseMatrix_unittest.cc b/tests/unit/lib/SGSparseMatrix_unittest.cc index 444de9d5cd5..2859b14bb4d 100644 --- a/tests/unit/lib/SGSparseMatrix_unittest.cc +++ b/tests/unit/lib/SGSparseMatrix_unittest.cc @@ -10,13 +10,13 @@ #include -#include -#include -#include -#include +#include #include #include -#include +#include +#include +#include +#include using namespace shogun; @@ -27,11 +27,12 @@ using namespace Eigen; template void GenerateMatrix(float64_t sparseLevel, int32_t m, int32_t n, int32_t randSeed, MatrixType* matrix) { - CRandom randGenerator(randSeed); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; i(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); SGMatrix mat(n_rows, n_cols); for (index_t i = 0; i < mat.size(); ++i) - mat[i] = m_rng->std_normal_distrib(); + mat[i] = dist(prng); auto vec = SGVector(mat); diff --git a/tests/unit/machine/StochasticGBMachine_unittest.cc b/tests/unit/machine/StochasticGBMachine_unittest.cc index 86c36e4b388..8df4f5375fd 100644 --- a/tests/unit/machine/StochasticGBMachine_unittest.cc +++ b/tests/unit/machine/StochasticGBMachine_unittest.cc @@ -101,16 +101,16 @@ TEST(StochasticGBMachine,sinusoid_curve_fitting) SGVector 
ret=ret_labels->get_labels(); float64_t epsilon=1e-8; - EXPECT_NEAR(ret[0],-0.943157980,epsilon); - EXPECT_NEAR(ret[1],0.769725470,epsilon); - EXPECT_NEAR(ret[2],-0.065691733,epsilon); - EXPECT_NEAR(ret[3],0.251266829,epsilon); - EXPECT_NEAR(ret[4],-0.577155330,epsilon); - EXPECT_NEAR(ret[5],0.113875818,epsilon); - EXPECT_NEAR(ret[6],0.427405429,epsilon); - EXPECT_NEAR(ret[7],-0.098310066,epsilon); - EXPECT_NEAR(ret[8],-0.416565932,epsilon); - EXPECT_NEAR(ret[9],0.542023083,epsilon); + EXPECT_NEAR(ret[0], -0.91580992928965543, epsilon); + EXPECT_NEAR(ret[1], 0.83302568373135366, epsilon); + EXPECT_NEAR(ret[2], 0.42519621523857321, epsilon); + EXPECT_NEAR(ret[3], -0.54396234032218127, epsilon); + EXPECT_NEAR(ret[4], -0.54396234032218127, epsilon); + EXPECT_NEAR(ret[5], 0.64891735887560409, epsilon); + EXPECT_NEAR(ret[6], 0.8330256837313536, epsilon); + EXPECT_NEAR(ret[7], -0.76318443378750656, epsilon); + EXPECT_NEAR(ret[8], -0.52743316035159316, epsilon); + EXPECT_NEAR(ret[9], 0.13643452136869369, epsilon); SG_UNREF(train_feats); SG_UNREF(test_feats); diff --git a/tests/unit/machine/kerneldensity_unittest.cc b/tests/unit/machine/kerneldensity_unittest.cc index a7fdbbb9f30..c5bae890969 100644 --- a/tests/unit/machine/kerneldensity_unittest.cc +++ b/tests/unit/machine/kerneldensity_unittest.cc @@ -169,14 +169,23 @@ TEST(KernelDensity,dual_tree) TEST(KernelDensity,dual_tree_single_tree_equivalence) { - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); SGMatrix data(5,100); - m_rng->fill_array_oo(data.matrix, 500); + for (index_t i = 0; i < 5; ++i) + for (index_t j = 0; j < 100; ++j) + { + data(i, j) = dist(prng); + } CDenseFeatures* feats=new CDenseFeatures(data); SGMatrix test(5,20); - m_rng->fill_array_oo(test.matrix, 100); + for (index_t i = 0; i < 5; ++i) + for (index_t j = 0; j < 20; ++j) + { + test(i, j) = dist(prng); + } CDenseFeatures* testfeats=new CDenseFeatures(test); diff --git a/tests/unit/mathematics/Math_unittest.cc b/tests/unit/mathematics/Math_unittest.cc index c37d325ff0d..25f31b4db3d 100644 --- a/tests/unit/mathematics/Math_unittest.cc +++ b/tests/unit/mathematics/Math_unittest.cc @@ -388,25 +388,26 @@ TEST(CMath, permute) { SGVector v(4); v.range_fill(0); - auto random = std::unique_ptr(new CRandom(2)); - CMath::permute(v, random.get()); - EXPECT_EQ(v[0], 2); - EXPECT_EQ(v[1], 1); + set_global_seed(2); + CMath::permute(v); + EXPECT_EQ(v[0], 0); + EXPECT_EQ(v[1], 2); EXPECT_EQ(v[2], 3); - EXPECT_EQ(v[3], 0); + EXPECT_EQ(v[3], 1); } TEST(CMath, permute_with_random) { SGVector v(4); v.range_fill(0); - auto random = std::unique_ptr(new CRandom(2)); - CMath::permute(v, random.get()); + set_global_seed(2); + auto prng = get_prng(); + CMath::permute(v, prng); - EXPECT_EQ(v[0], 2); - EXPECT_EQ(v[1], 1); + EXPECT_EQ(v[0], 0); + EXPECT_EQ(v[1], 2); EXPECT_EQ(v[2], 3); - EXPECT_EQ(v[3], 0); + EXPECT_EQ(v[3], 1); } TEST(CMath,misc) diff --git a/tests/unit/mathematics/Random_unittest.cc b/tests/unit/mathematics/Random_unittest.cc deleted file mode 100644 index c69b638d6f9..00000000000 --- a/tests/unit/mathematics/Random_unittest.cc +++ /dev/null @@ -1,358 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace shogun; - -const uint32_t n_runs=1200000; -const uint32_t array_len=23; - -/** - * NOTE: these unit tests were generated with MEXP=19937 - * with other exponents it is expected to fail!
- */ - -TEST(Random, uint32_t) -{ - CRandom* prng = new CRandom(12345); - uint32_t r = prng->random_32(); - SG_FREE(prng); - EXPECT_EQ(1811630862U, r); -} - -TEST(Random, uint64_t) -{ - CRandom* prng = new CRandom(12345); - uint64_t r = prng->random_64(); - SG_FREE(prng); - EXPECT_EQ(18328733385137801998U, r); -} - -TEST(Random, fill_array_uint32) -{ - CRandom* prng = new CRandom(12345); - uint32_t t = 2228230814U; - SGVector rv(2*SFMT_N32+1); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N32]); -} - -#ifdef HAVE_SSE2 -TEST(Random, fill_array_uint32_simd) -{ - CRandom* prng = new CRandom(12345); - uint32_t t = 2228230814U; - SGVector rv(2*SFMT_N32); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N32]); -} -#endif - -TEST(Random, fill_array_uint64) -{ - CRandom* prng = new CRandom(12345); - uint64_t t = 9564086722318310046U; - SGVector rv(2*SFMT_N64+1); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N64]); -} - -#ifdef HAVE_SSE2 -TEST(Random, fill_array_uint64_simd) -{ - CRandom* prng = new CRandom(12345); - uint64_t t = 9564086722318310046U; - SGVector rv(2*SFMT_N64); - prng->fill_array(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_EQ(t, rv[SFMT_N64]); -} -#endif - -TEST(Random, fill_array_oc) -{ - CRandom* prng = new CRandom(12345); - float64_t t = 0.25551924513287405; - SGVector rv(2*dsfmt_get_min_array_size()+1); - prng->fill_array_oc(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_DOUBLE_EQ(t, rv[dsfmt_get_min_array_size()]); -} - -#ifdef HAVE_SSE2 -TEST(Random, fill_array_oc_simd) -{ - CRandom* prng = new CRandom(12345); - float64_t t = 0.25551924513287405; - SGVector rv(2*dsfmt_get_min_array_size()); - prng->fill_array_oc(rv.vector, rv.vlen); - SG_FREE(prng); - - EXPECT_DOUBLE_EQ(t, rv[dsfmt_get_min_array_size()]); -} -#endif - -TEST(Random, normal_distrib) -{ - CRandom* prng = new CRandom(12345); - float64_t t = 75.567130769021162; - float64_t r = prng->normal_distrib(100.0, 10.0); - SG_FREE(prng); - - EXPECT_DOUBLE_EQ(t, r); -} - -TEST(Random, random_uint64_1_2) -{ - auto m_rng = std::unique_ptr(new CRandom(17)); - for (int32_t i=0; i<10000; i++) - { - uint64_t r = m_rng->random((uint64_t)1, (uint64_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } -} - -TEST(Random, random_uint64_0_10) -{ - CRandom* prng = new CRandom(17); - int rnds[10] = {0,0,0,0,0,0}; - for (int32_t i=0; i<10000; i++) - { - uint64_t r = prng->random((uint64_t)0, (uint64_t)9); - rnds[r]++; - } - - for (int32_t i=0; i<10; i++) { - EXPECT_TRUE(rnds[i]>0); - } - SG_FREE(prng); -} - -TEST(Random, random_int64_1_2) -{ - CRandom* prng = new CRandom(17); - for (int32_t i=0; i<10000; i++) - { - int64_t r = prng->random((int64_t)1, (int64_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } -} - -TEST(Random, random_int64_0_10) -{ - CRandom* prng = new CRandom(17); - int rnds[10] = {0,0,0,0,0,0}; - for (int32_t i=0; i<10000; i++) - { - int64_t r = prng->random((int64_t)0, (int64_t)9); - rnds[r]++; - } - - for (int32_t i=0; i<10; i++) { - EXPECT_TRUE(rnds[i]>0); - } - SG_FREE(prng); -} - -TEST(Random, random_uint32_1_2) -{ - CRandom* prng = new CRandom(17); - for (int32_t i=0; i<10000; i++) - { - uint32_t r = prng->random((uint32_t)1, (uint32_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } - SG_FREE(prng); -} - -TEST(Random, random_uint32_0_10) -{ - CRandom* prng = new CRandom(17); - int rnds[10] = {0,0,0,0,0,0}; - for (int32_t i=0; i<10000; i++) - { - uint32_t r = prng->random((uint32_t)0, (uint32_t)9); - rnds[r]++; - } - - for 
(int32_t i=0; i<10; i++) { - EXPECT_TRUE(rnds[i]>0); - } - SG_FREE(prng); -} - -TEST(Random, random_int32_1_2) -{ - CRandom* prng = new CRandom(17); - for (int32_t i=0; i<10000; i++) - { - int32_t r = prng->random((int32_t)1, (int32_t)2); - EXPECT_TRUE(r == 1 || r == 2); - } - SG_FREE(prng); -} - -TEST(Random, random_int64_range) -{ - CRandom* prng = new CRandom(17); - int rnds[array_len]; - for (uint32_t i=0; irandom((int64_t)0, (int64_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; irandom((uint64_t)0, (uint64_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; irandom((int32_t)0, (int32_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; irandom((uint32_t)0, (uint32_t)array_len - 1); - rnds[r]++; - } - - for (uint32_t i=0; iset_seed(17); - int rnds[array_len]; - for (uint32_t i=0; irandom_32() % array_len; - rnds[r]++; - } - - for (uint32_t i=0; i(new CRandom(17)); - for (uint32_t i=0; irandom((float64_t)0, (float64_t)array_len); - rnds[r]++; - } - - for (uint32_t i=0; irandom((float64_t)0, (float64_t)1.0); - min=CMath::min(min, r); - max=CMath::max(max, r); - } - EXPECT_GE(max, 0.99999); - EXPECT_LE(min, 0.00001); - SG_FREE(prng); -} - -TEST(Random, random_std_normal_quantiles) -{ - CRandom* prng = new CRandom(); - - int64_t m=10000000; - SGVector counts(10); - counts.zero(); - - for (int64_t i=0; istd_normal_distrib(), 1); - index_t idx=(int32_t)(quantile*counts.vlen); - counts[idx]++; - } - - SG_FREE(prng); - - for (index_t i=0; i C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CFFDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc b/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc index 76a43c30942..1f0a04290fb 100644 --- a/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc +++ b/tests/unit/mathematics/ajd/JADiagOrth_unittest.cc @@ -26,7 +26,8 @@ TEST(CJADiagOrth, diagonalize) C_dims[1] = 10; C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CJADiagOrth, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Building a random orthonormal matrix A diff --git a/tests/unit/mathematics/ajd/JADiag_unittest.cc b/tests/unit/mathematics/ajd/JADiag_unittest.cc index 4189d9ff7bb..80463795522 100644 --- a/tests/unit/mathematics/ajd/JADiag_unittest.cc +++ b/tests/unit/mathematics/ajd/JADiag_unittest.cc @@ -25,8 +25,8 @@ TEST(CJADiag, diagonalize) C_dims[1] = 10; C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +34,7 @@ TEST(CJADiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/JediDiag_unittest.cc b/tests/unit/mathematics/ajd/JediDiag_unittest.cc index a04557e9a86..4562a38badc 100644 --- 
a/tests/unit/mathematics/ajd/JediDiag_unittest.cc +++ b/tests/unit/mathematics/ajd/JediDiag_unittest.cc @@ -26,7 +26,8 @@ TEST(CJediDiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CJediDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/QDiag_unittest.cc b/tests/unit/mathematics/ajd/QDiag_unittest.cc index 0cd3aef57a3..3bc3cb91ddc 100644 --- a/tests/unit/mathematics/ajd/QDiag_unittest.cc +++ b/tests/unit/mathematics/ajd/QDiag_unittest.cc @@ -26,7 +26,8 @@ TEST(CQDiag, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -34,7 +35,7 @@ TEST(CQDiag, diagonalize) tmp.setIdentity(); for (int j = 0; j < C_dims[0]; j++) - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } // Mixing and demixing matrices diff --git a/tests/unit/mathematics/ajd/UWedge_unittest.cc b/tests/unit/mathematics/ajd/UWedge_unittest.cc index 8a11a2ed209..e6e52c961a8 100644 --- a/tests/unit/mathematics/ajd/UWedge_unittest.cc +++ b/tests/unit/mathematics/ajd/UWedge_unittest.cc @@ -26,7 +26,8 @@ TEST(CUWedge, diagonalize) C_dims[2] = 30; SGNDArray< float64_t > C(C_dims, 3); - auto m_rng = std::unique_ptr(new CRandom(17)); + auto prng = get_prng(); + std::uniform_int_distribution dist(1, 5); for (int i = 0; i < C_dims[2]; i++) { @@ -35,7 +36,7 @@ TEST(CUWedge, diagonalize) for (int j = 0; j < C_dims[0]; j++) { - tmp(j, j) *= CMath::abs(m_rng->random(1, 5)); + tmp(j, j) *= CMath::abs(dist(prng)); } } diff --git a/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc b/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc index 044f2e50fe8..24d12279d86 100644 --- a/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc +++ b/tests/unit/mathematics/linalg/ConjugateOrthogonalCGSolver_unittest.cc @@ -28,11 +28,12 @@ TEST(ConjugateOrthogonalCGSolver, solve) // diagonal non-Hermintian matrix with random complex entries SGVector diag(size); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - float64_t imag = m_rng->std_normal_distrib(); + float64_t real = dist(prng); + float64_t imag = dist(prng); diag[i]=complex128_t(real, imag); } A->set_diagonal(diag); @@ -40,7 +41,7 @@ TEST(ConjugateOrthogonalCGSolver, solve) // vector b of the system SGVector b(size); for (index_t i=0; istd_normal_distrib(); + b[i] = dist(prng); // Solve with COCG CConjugateOrthogonalCGSolver* cocg_linear_solver diff --git a/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc b/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc index f47095ec146..b7c53970114 100644 --- a/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc +++ b/tests/unit/mathematics/linalg/DirectSparseLinearSolver_unittest.cc @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -28,11 +27,10 @@ TEST(DirectSparseLinearSolver, solve) CSparseMatrixOperator* A=new 
CSparseMatrixOperator(sm); SGVector diag(size); float64_t difficulty=5; - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib()), difficulty) + - 0.0001; + diag[i] = CMath::pow(CMath::abs(dist(prng)), difficulty) + 0.0001; A->set_diagonal(diag); CDirectSparseLinearSolver* linear_solver=new CDirectSparseLinearSolver(); diff --git a/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc b/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc index a9ebbf0099a..41b427168f1 100644 --- a/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc +++ b/tests/unit/mathematics/linalg/LanczosEigenSolver_unittest.cc @@ -29,12 +29,14 @@ TEST(LanczosEigenSolver, compute) { const int32_t size=4; SGMatrix m(size, size); - auto m_rng = std::unique_ptr(new CRandom()); - m.set_const(m_rng->random(50.0, 100.0)); + auto prng = get_prng(); + std::uniform_real_distribution dist(50.0, 100.0); + std::uniform_real_distribution dist_t(100.0, 10000.0); + m.set_const(dist(prng)); // Hermintian matrix for (index_t i=0; irandom(100.0, 10000.0); + m(i, i) = dist_t(prng); // Creating sparse linear operator to use with Lanczos CSparseFeatures feat(m); @@ -81,15 +83,15 @@ TEST(LanczosEigenSolver, compute_big_diag_matrix) SGSparseMatrix sm(size, size); CSparseMatrixOperator* op=new CSparseMatrixOperator(sm); SG_REF(op); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // set its diagonal SGVector diag(size); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } op->set_diagonal(diag); diff --git a/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc b/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc index d9773754ca3..d9499a75b3f 100644 --- a/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc +++ b/tests/unit/mathematics/linalg/LogDetEstimator_unittest.cc @@ -11,7 +11,6 @@ #include #include -#include #include #include #include @@ -40,7 +39,6 @@ TEST(LogDetEstimator, sample) { CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); - const index_t size=2; SGMatrix mat(size, size); mat(0,0)=2.0; @@ -165,16 +163,18 @@ TEST(LogDetEstimator, sample_ratapp_dense) #ifdef HAVE_LAPACK TEST(LogDetEstimator, sample_ratapp_probing_sampler) { + set_global_seed(1); CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); const index_t size=16; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); mat.set_const(0.0); for (index_t i=0; istd_normal_distrib()) * 1000; + float64_t value = CMath::abs(dist(prng)) * 1000; mat(i,i)=value<1.0?10.0:value; } @@ -253,16 +253,18 @@ TEST(LogDetEstimator, sample_ratapp_probing_sampler) TEST(LogDetEstimator, sample_ratapp_probing_sampler_cgm) { + set_global_seed(1); CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); const index_t size=16; SGMatrix mat(size, size); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); mat.set_const(0.0); for (index_t i=0; istd_normal_distrib()) * 1000; + float64_t value = CMath::abs(dist(prng)) * 1000; mat(i,i)=value<1.0?10.0:value; } @@ -337,6 +339,7 @@ TEST(LogDetEstimator, sample_ratapp_probing_sampler_cgm) TEST(LogDetEstimator, sample_ratapp_big_diag_matrix) { + set_global_seed(1); 
CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); @@ -350,14 +353,14 @@ TEST(LogDetEstimator, sample_ratapp_big_diag_matrix) CSparseMatrixOperator* op=new CSparseMatrixOperator(sm); SG_REF(op); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // set its diagonal SGVector diag(size); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } op->set_diagonal(diag); @@ -398,6 +401,7 @@ TEST(LogDetEstimator, sample_ratapp_big_diag_matrix) TEST(LogDetEstimator, sample_ratapp_big_matrix) { + set_global_seed(1); CSerialComputationEngine* e=new CSerialComputationEngine; SG_REF(e); @@ -411,12 +415,12 @@ TEST(LogDetEstimator, sample_ratapp_big_matrix) // set its diagonal SGVector diag(size); - auto m_rng = std::unique_ptr(new CRandom(1)); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } // set its subdiagonal float64_t entry=min_eigenvalue/2; diff --git a/tests/unit/mathematics/linalg/NormalSampler_unittest.cc b/tests/unit/mathematics/linalg/NormalSampler_unittest.cc index 4d69334ff42..bbc4eeba825 100644 --- a/tests/unit/mathematics/linalg/NormalSampler_unittest.cc +++ b/tests/unit/mathematics/linalg/NormalSampler_unittest.cc @@ -20,6 +20,7 @@ using namespace Eigen; TEST(NormalSampler, sample) { + set_global_seed(1); const index_t dimension=2; const index_t num_samples=5000; SGMatrix samples(num_samples, dimension); diff --git a/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc b/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc index 35712ce2576..a916618fec9 100644 --- a/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc +++ b/tests/unit/mathematics/linalg/ProbingSampler_unittest.cc @@ -85,19 +85,20 @@ TEST(ProbingSampler, probing_samples_big_diag_matrix) float64_t difficulty=3; float64_t min_eigenvalue=0.0001; + set_global_seed(1); // create a sparse matrix const index_t size=10000; SGSparseMatrix sm(size, size); CSparseMatrixOperator* op=new CSparseMatrixOperator(sm); SG_REF(op); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); // set its diagonal SGVector diag(size); for (index_t i=0; istd_normal_distrib()), difficulty) + - min_eigenvalue; + CMath::pow(CMath::abs(dist(prng)), difficulty) + min_eigenvalue; } op->set_diagonal(diag); diff --git a/tests/unit/multiclass/BaggingMachine_unittest.cc b/tests/unit/multiclass/BaggingMachine_unittest.cc index fe5601ff8f9..f08ffafe7e9 100644 --- a/tests/unit/multiclass/BaggingMachine_unittest.cc +++ b/tests/unit/multiclass/BaggingMachine_unittest.cc @@ -228,10 +228,10 @@ TEST(BaggingMachine,classify_CART) EXPECT_EQ(0.0,res_vector[1]); EXPECT_EQ(0.0,res_vector[2]); EXPECT_EQ(1.0,res_vector[3]); - EXPECT_EQ(1.0,res_vector[4]); + EXPECT_EQ(0.0, res_vector[4]); CMulticlassAccuracy* eval=new CMulticlassAccuracy(); - EXPECT_NEAR(0.642857,c->get_oob_error(eval),1e-6); + EXPECT_NEAR(0.5714285, c->get_oob_error(eval), 1e-6); SG_UNREF(test_feats); SG_UNREF(result); diff --git a/tests/unit/multiclass/LaRank_unittest.cc b/tests/unit/multiclass/LaRank_unittest.cc index dd27bb8e9f9..79f267b0dba 100644 --- a/tests/unit/multiclass/LaRank_unittest.cc +++ b/tests/unit/multiclass/LaRank_unittest.cc @@ -18,14 +18,15 @@ TEST(LaRank,train) SGMatrix matrix_test(num_class, 
num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - matrix_test(j, i) = m_rng->std_normal_distrib(); + matrix(j, i) = dist(prng); + matrix_test(j, i) = dist(prng); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/tests/unit/multiclass/MulticlassLibLinear_unittest.cc b/tests/unit/multiclass/MulticlassLibLinear_unittest.cc index ee73eb5966a..54d33d51508 100644 --- a/tests/unit/multiclass/MulticlassLibLinear_unittest.cc +++ b/tests/unit/multiclass/MulticlassLibLinear_unittest.cc @@ -16,14 +16,15 @@ TEST(MulticlassLibLinearTest,train_and_apply) SGMatrix matrix_test(num_class, num_vec); CMulticlassLabels* labels=new CMulticlassLabels(num_vec); CMulticlassLabels* labels_test=new CMulticlassLabels(num_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); - matrix_test(j, i) = m_rng->std_normal_distrib(); + matrix(j, i) = dist(prng); + matrix_test(j, i) = dist(prng); labels->set_label(i, label); labels_test->set_label(i, label); } diff --git a/tests/unit/multiclass/tree/RandomCARTree_unittest.cc b/tests/unit/multiclass/tree/RandomCARTree_unittest.cc index 34ff9732923..2099c77ebc6 100644 --- a/tests/unit/multiclass/tree/RandomCARTree_unittest.cc +++ b/tests/unit/multiclass/tree/RandomCARTree_unittest.cc @@ -191,7 +191,7 @@ TEST(RandomCARTree, classify_nominal) EXPECT_EQ(0.0,res_vector[1]); EXPECT_EQ(0.0,res_vector[2]); EXPECT_EQ(1.0,res_vector[3]); - EXPECT_EQ(0.0,res_vector[4]); + EXPECT_EQ(1.0, res_vector[4]); SG_UNREF(test_feats); SG_UNREF(result); diff --git a/tests/unit/multiclass/tree/RandomForest_unittest.cc b/tests/unit/multiclass/tree/RandomForest_unittest.cc index e100bd43c0f..8b04a4d0316 100644 --- a/tests/unit/multiclass/tree/RandomForest_unittest.cc +++ b/tests/unit/multiclass/tree/RandomForest_unittest.cc @@ -201,7 +201,7 @@ TEST(RandomForest,classify_nominal_test) EXPECT_EQ(0.0, res_vector[4]); CMulticlassAccuracy* eval=new CMulticlassAccuracy(); - EXPECT_NEAR(0.571428, c->get_oob_error(eval), 1e-6); + EXPECT_NEAR(0.78571428, c->get_oob_error(eval), 1e-6); SG_UNREF(test_feats); SG_UNREF(result); @@ -270,7 +270,7 @@ TEST(RandomForest,classify_non_nominal_test) EXPECT_EQ(0.0, res_vector[4]); CMulticlassAccuracy* eval=new CMulticlassAccuracy(); - EXPECT_NEAR(0.571428, c->get_oob_error(eval), 1e-6); + EXPECT_NEAR(0.78571428, c->get_oob_error(eval), 1e-6); SG_UNREF(test_feats); SG_UNREF(result); diff --git a/tests/unit/neuralnets/Autoencoder_unittest.cc b/tests/unit/neuralnets/Autoencoder_unittest.cc index af6572edd81..265e102dc48 100644 --- a/tests/unit/neuralnets/Autoencoder_unittest.cc +++ b/tests/unit/neuralnets/Autoencoder_unittest.cc @@ -44,7 +44,8 @@ using namespace shogun; TEST(Autoencoder, train) { - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-1.0, 1.0); int32_t num_features = 10; int32_t num_examples = 100; @@ -52,7 +53,7 @@ TEST(Autoencoder, train) SGMatrix data(num_features, num_examples); for (int32_t i=0; irandom(-1.0, 1.0); + data[i] = dist(prng); CAutoencoder ae(num_features, new CNeuralRectifiedLinearLayer(num_hid)); diff --git a/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc 
b/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc index b689a85c173..ef4cb83a7ed 100644 --- a/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc +++ b/tests/unit/neuralnets/ConvolutionalFeatureMap_unittest.cc @@ -307,11 +307,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) const int32_t map_index = 1; const int32_t num_maps = 3; - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -319,7 +321,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) // two channels SGMatrix x2(2*w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist_uniform(prng); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -335,7 +337,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); input2->compute_activations(x2); @@ -399,11 +401,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) int32_t w_out = w/stride_x; int32_t h_out = h/stride_y; - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -411,7 +415,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) // two channels SGMatrix x2(2*w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist_uniform(prng); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -427,7 +431,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_with_stride) CConvolutionalFeatureMap map(w,h,rx,ry,stride_x,stride_y,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); input2->compute_activations(x2); @@ -484,11 +488,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_logistic) const int32_t ry = 1; const int32_t b = 2; - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -502,7 +508,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_logistic) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,0, CMAF_LOGISTIC); SGVector params(1+(2*rx+1)*(2*ry+1)); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); @@ -558,11 +564,13 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_rectified_linear) const int32_t ry = 1; const int32_t b = 2; - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + 
std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); SGMatrix x1(w*h,b); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist_uniform(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); @@ -576,7 +584,7 @@ TEST(ConvolutionalFeatureMap, compute_parameter_gradients_rectified_linear) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,0, CMAF_RECTIFIED_LINEAR); SGVector params(1+(2*rx+1)*(2*ry+1)); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); input1->compute_activations(x1); @@ -634,7 +642,9 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) const int32_t map_index = 0; const int32_t num_maps = 1; - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist_uniform(-10.0, 10.0); + std::normal_distribution dist_normal(0.0, 0.01); CNeuralLinearLayer* input1 = new CNeuralLinearLayer (w*h); input1->set_batch_size(b); @@ -644,10 +654,10 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) input2->set_batch_size(b); for (int32_t i=0; iget_num_neurons()*b; i++) - input1->get_activations()[i] = m_rng->random(-10.0, 10.0); + input1->get_activations()[i] = dist_uniform(prng); for (int32_t i=0; iget_num_neurons()*b; i++) - input2->get_activations()[i] = m_rng->random(-10.0, 10.0); + input2->get_activations()[i] = dist_uniform(prng); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(input1); @@ -660,7 +670,7 @@ TEST(ConvolutionalFeatureMap, compute_input_gradients) CConvolutionalFeatureMap map(w,h,rx,ry,1,1,map_index); SGVector params(1+(2*rx+1)*(2*ry+1)*3); for (int32_t i=0; inormal_random(0.0, 0.01); + params[i] = dist_normal(prng); SGMatrix A(num_maps*w*h,b); A.zero(); diff --git a/tests/unit/neuralnets/DeepAutoencoder_unittest.cc b/tests/unit/neuralnets/DeepAutoencoder_unittest.cc index abf15e1d2bf..2a30ac4b132 100644 --- a/tests/unit/neuralnets/DeepAutoencoder_unittest.cc +++ b/tests/unit/neuralnets/DeepAutoencoder_unittest.cc @@ -44,14 +44,15 @@ using namespace shogun; TEST(DeepAutoencoder, pre_train) { - auto m_rng = std::unique_ptr(new CRandom(10)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-1.0, 1.0); int32_t num_features = 10; int32_t num_examples = 100; SGMatrix data(num_features, num_examples); for (int32_t i=0; irandom(-1.0, 1.0); + data[i] = dist(prng); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(num_features)); @@ -83,7 +84,8 @@ TEST(DeepAutoencoder, pre_train) TEST(DeepAutoencoder, convert_to_neural_network) { - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); CDynamicObjectArray* layers = new CDynamicObjectArray(); layers->append_element(new CNeuralInputLayer(10)); @@ -98,7 +100,7 @@ TEST(DeepAutoencoder, convert_to_neural_network) SGMatrix x(10, 3); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); CDenseFeatures f(x); diff --git a/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc b/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc index 63cb3ca80de..c3573ea6fd6 100644 --- a/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc +++ b/tests/unit/neuralnets/DeepBeliefNetwork_unittest.cc @@ -41,7 +41,9 @@ using namespace shogun; TEST(DeepBeliefNetwork, convert_to_neural_network) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + 
std::uniform_real_distribution dist(0.0, 1.0); CDeepBeliefNetwork dbn(5, RBMVUT_BINARY); dbn.add_hidden_layer(6); @@ -54,7 +56,7 @@ TEST(DeepBeliefNetwork, convert_to_neural_network) SGMatrix x(5, 3); for (int32_t i=0; irandom(0.0, 1.0); + x[i] = dist(prng); CDenseFeatures f(x); diff --git a/tests/unit/neuralnets/NeuralInputLayer_unittest.cc b/tests/unit/neuralnets/NeuralInputLayer_unittest.cc index e5c9026654f..2d58676cdd5 100644 --- a/tests/unit/neuralnets/NeuralInputLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralInputLayer_unittest.cc @@ -38,10 +38,12 @@ using namespace shogun; TEST(NeuralInputLayer, compute_activations) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_int_distribution dist(-10, 10); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer layer(5, 4); layer.set_batch_size(x.num_cols); diff --git a/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc index 5ff137870c0..53487014d47 100644 --- a/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLeakyRectifiedLinearLayer_unittest.cc @@ -45,13 +45,15 @@ using namespace shogun; */ TEST(NeuralLeakyRectifiedLinearLayer, compute_activations) { + set_global_seed(100); CNeuralLeakyRectifiedLinearLayer layer(9); float64_t alpha = 0.02; // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_int_distribution dist(-10, 10); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); diff --git a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc index a3ca90f227a..29093929859 100644 --- a/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralLinearLayer_unittest.cc @@ -46,21 +46,22 @@ using namespace shogun; TEST(NeuralLinearLayer, compute_activations) { CNeuralLinearLayer layer(9); - + set_global_seed(100); // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x1(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist(prng); - CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); + CNeuralInputLayer* input1 = new CNeuralInputLayer(x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); - CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); + CNeuralInputLayer* input2 = new CNeuralInputLayer(x2.num_rows); input2->set_batch_size(x2.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -120,19 +121,21 @@ TEST(NeuralLinearLayer, compute_activations) */ TEST(NeuralLinearLayer, compute_error) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x1(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist(prng); - CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); + CNeuralInputLayer* input1 = new CNeuralInputLayer(x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); - CNeuralInputLayer* input2 = 
new CNeuralInputLayer (x2.num_rows); + CNeuralInputLayer* input2 = new CNeuralInputLayer(x2.num_rows); input2->set_batch_size(x2.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -144,8 +147,9 @@ TEST(NeuralLinearLayer, compute_error) input_indices[1] = 1; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -178,10 +182,12 @@ TEST(NeuralLinearLayer, compute_error) */ TEST(NeuralLinearLayer, compute_local_gradients) { - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x.num_rows); input1->set_batch_size(x.num_cols); @@ -193,8 +199,9 @@ TEST(NeuralLinearLayer, compute_local_gradients) input_indices[0] = 0; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -240,17 +247,18 @@ TEST(NeuralLinearLayer, compute_local_gradients) */ TEST(NeuralLinearLayer, compute_parameter_gradients_output) { + set_global_seed(100); SGMatrix x1(12,3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); for (int32_t i=0; irandom(-10.0, 10.0); - + x1[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -264,8 +272,9 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_output) input_indices[1] = 1; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the layer CNeuralLinearLayer layer(y.num_rows); @@ -319,16 +328,17 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_output) TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) { SGMatrix x1(12,3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); input2->set_batch_size(x2.num_cols); @@ -349,8 +359,9 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) input_indices_out[0] = 2; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist(prng); // initialize the hidden layer layer_hid->initialize_neural_layer(layers, input_indices_hid); @@ -407,7 +418,7 @@ TEST(NeuralLinearLayer, compute_parameter_gradients_hidden) // compare for (int32_t i=0; i(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); - CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); + CNeuralInputLayer* input = new CNeuralInputLayer(x.num_rows); input->set_batch_size(x.num_cols); 
CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -108,13 +109,14 @@ TEST(NeuralLogisticLayer, compute_activations) TEST(NeuralLogisticLayer, compute_local_gradients) { CNeuralLogisticLayer layer(9); - - auto m_rng = std::unique_ptr(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); - CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); + CNeuralInputLayer* input = new CNeuralInputLayer(x.num_rows); input->set_batch_size(x.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -131,8 +133,9 @@ TEST(NeuralLogisticLayer, compute_local_gradients) layer.set_batch_size(x.num_cols); SGMatrix y(layer.get_num_neurons(), x.num_cols); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // compute the layer's local gradients input->compute_activations(x); diff --git a/tests/unit/neuralnets/NeuralNetwork_unittest.cc b/tests/unit/neuralnets/NeuralNetwork_unittest.cc index 2c136c2dd53..fe54d83a2ce 100644 --- a/tests/unit/neuralnets/NeuralNetwork_unittest.cc +++ b/tests/unit/neuralnets/NeuralNetwork_unittest.cc @@ -250,7 +250,7 @@ TEST(NeuralNetwork, backpropagation_convolutional) /** tests a neural network on the binary XOR problem */ TEST(NeuralNetwork, binary_classification) { - set_global_seed(10); + set_global_seed(100); SGMatrix inputs_matrix(2,4); SGVector targets_vector(4); diff --git a/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc b/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc index 9010599daa0..e54fdab9f5c 100644 --- a/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralRectifiedLinearLayer_unittest.cc @@ -45,13 +45,15 @@ using namespace shogun; */ TEST(NeuralRectifiedLinearLayer, compute_activations) { + set_global_seed(100); CNeuralRectifiedLinearLayer layer(9); // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -108,19 +110,21 @@ TEST(NeuralRectifiedLinearLayer, compute_activations) */ TEST(NeuralRectifiedLinearLayer, compute_parameter_gradients_hidden) { + set_global_seed(100); SGMatrix x1(12,3); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); for (int32_t i=0; irandom(-10.0, 10.0); + x1[i] = dist(prng); CNeuralInputLayer* input1 = new CNeuralInputLayer (x1.num_rows); input1->set_batch_size(x1.num_cols); SGMatrix x2(7,3); for (int32_t i=0; irandom(-10.0, 10.0); + x2[i] = dist(prng); - CNeuralInputLayer* input2 = new CNeuralInputLayer (x2.num_rows); + CNeuralInputLayer* input2 = new CNeuralInputLayer(x2.num_rows); input2->set_batch_size(x2.num_cols); // initialize hidden the layer @@ -139,8 +143,9 @@ TEST(NeuralRectifiedLinearLayer, compute_parameter_gradients_hidden) input_indices_out[0] = 2; SGMatrix y(9,3); + std::uniform_real_distribution dist_s(0.0, 1.0); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // initialize the hidden layer layer_hid->initialize_neural_layer(layers, input_indices_hid); diff --git a/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc 
b/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc index e2d6f081311..de1e6c98136 100644 --- a/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc +++ b/tests/unit/neuralnets/NeuralSoftmaxLayer_unittest.cc @@ -45,13 +45,15 @@ using namespace shogun; */ TEST(NeuralSoftmaxLayer, compute_activations) { + set_global_seed(100); CNeuralSoftmaxLayer layer(9); // initialize some random inputs - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -117,12 +119,14 @@ TEST(NeuralSoftmaxLayer, compute_activations) */ TEST(NeuralSoftmaxLayer, compute_error) { + set_global_seed(100); CNeuralSoftmaxLayer layer(9); - auto m_rng = std::unique_ptr(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); input->set_batch_size(x.num_cols); @@ -140,9 +144,10 @@ TEST(NeuralSoftmaxLayer, compute_error) layer.initialize_parameters(params, param_regularizable, 1.0); layer.set_batch_size(x.num_cols); + std::uniform_real_distribution dist_s(0.0, 1.0); SGMatrix y(layer.get_num_neurons(), x.num_cols); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // make sure y is in the form of a probability distribution for (int32_t j=0; j(new CRandom(100)); + auto prng = get_prng(); + std::uniform_real_distribution dist(-10.0, 10.0); SGMatrix x(12,3); for (int32_t i=0; irandom(-10.0, 10.0); + x[i] = dist(prng); - CNeuralInputLayer* input = new CNeuralInputLayer (x.num_rows); + CNeuralInputLayer* input = new CNeuralInputLayer(x.num_rows); input->set_batch_size(x.num_cols); CDynamicObjectArray* layers = new CDynamicObjectArray(); @@ -201,9 +208,10 @@ TEST(NeuralSoftmaxLayer, compute_local_gradients) layer.initialize_parameters(params, param_regularizable, 1.0); layer.set_batch_size(x.num_cols); + std::uniform_real_distribution dist_s(0.0, 1.0); SGMatrix y(layer.get_num_neurons(), x.num_cols); for (int32_t i=0; irandom(0.0, 1.0); + y[i] = dist_s(prng); // make sure y is in the form of a probability distribution for (int32_t j=0; j(new CRandom(100)); + set_global_seed(100); + auto prng = get_prng(); int32_t num_visible = 15; int32_t num_hidden = 6; @@ -126,7 +127,7 @@ TEST(RBM, free_energy_gradients) SGMatrix V(num_visible, batch_size); for (int32_t i=0; irandom_64() < 0.7; + V[i] = prng() < 0.7; SGVector gradients(rbm.get_num_parameters()); rbm.free_energy_gradients(V, gradients); diff --git a/tests/unit/preprocessor/Preprocessor_unittest.cc b/tests/unit/preprocessor/Preprocessor_unittest.cc index 21f041fddf7..c7327258b18 100644 --- a/tests/unit/preprocessor/Preprocessor_unittest.cc +++ b/tests/unit/preprocessor/Preprocessor_unittest.cc @@ -45,9 +45,10 @@ TEST(Preprocessor, dense_apply) const index_t dim=2; const index_t size=4; SGMatrix data(dim, size); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::normal_distribution dist(0, 1); for (index_t i=0; istd_normal_distrib(); + data.matrix[i] = dist(prng); CDenseFeatures* features=new CDenseFeatures(data); CDensePreprocessor* preproc=new CNormOne(); @@ -70,16 +71,19 @@ TEST(Preprocessor, string_apply) const index_t min_string_length=max_string_length/2; SGStringList strings(num_strings, max_string_length); - 
auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_len( + min_string_length, max_string_length); + std::uniform_int_distribution dist_asc('A', 'Z'); for (index_t i=0; irandom(min_string_length, max_string_length); + index_t len = dist_len(prng); SGString current(len); /* fill with random uppercase letters (ASCII) */ for (index_t j=0; jrandom('A', 'Z'); + current.string[j] = dist_asc(prng); strings.strings[i]=current; } diff --git a/tests/unit/regression/krrnystrom_unittest.cc b/tests/unit/regression/krrnystrom_unittest.cc index c80c27d22da..3ea5d402afe 100644 --- a/tests/unit/regression/krrnystrom_unittest.cc +++ b/tests/unit/regression/krrnystrom_unittest.cc @@ -56,11 +56,12 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_all_columns) /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); SGMatrix test_dat(num_features, num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; inormal_random(0, 1.0); + lab.vector[i] = i + dist(prng); train_dat.matrix[i]=i; test_dat.matrix[i]=i; } @@ -114,7 +115,8 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_column_subset) /* training label data */ SGVector lab(num_vectors); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); /* fill data matrix and labels */ SGMatrix train_dat(num_features, num_vectors); @@ -123,7 +125,7 @@ TEST(KRRNystrom, apply_and_compare_to_KRR_with_column_subset) { /* labels are linear plus noise */ float64_t point=(float64_t)i*10/num_vectors; - lab.vector[i] = point + m_rng->normal_random(0, 1.0); + lab.vector[i] = point + dist(prng); train_dat.matrix[i]=point; test_dat.matrix[i]=point; } diff --git a/tests/unit/regression/lars_unittest.cc b/tests/unit/regression/lars_unittest.cc index ee2e6d1e18a..a953028fe58 100644 --- a/tests/unit/regression/lars_unittest.cc +++ b/tests/unit/regression/lars_unittest.cc @@ -378,13 +378,14 @@ TEST(LeastAngleRegression, cholesky_insert) SGVector vec(num_vec); vec.random(0.0,1.0); Map map_vec(vec.vector, vec.size()); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; irandom(0.0, 1.0); + mat(i, j) = dist(prng); matnew(i,j)=mat(i,j); } } @@ -417,11 +418,12 @@ TEST(LeastAngleRegression, ols_equivalence) { int32_t n_feat=25, n_vec=100; SGMatrix data(n_feat, n_vec); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); for (index_t i=0; irandom(0.0, 1.0); + data(i, j) = dist(prng); } SGVector lab=SGVector(n_vec); diff --git a/tests/unit/statistical_testing/KernelSelection_unittest.cc b/tests/unit/statistical_testing/KernelSelection_unittest.cc index c425c849773..7a1be008983 100644 --- a/tests/unit/statistical_testing/KernelSelection_unittest.cc +++ b/tests/unit/statistical_testing/KernelSelection_unittest.cc @@ -105,7 +105,7 @@ TEST(KernelSelectionMaxMMD, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 0.0625, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } #ifdef USE_GPL_SHOGUN @@ -287,7 +287,7 @@ TEST(KernelSelectionMaxCrossValidation, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - 
EXPECT_NEAR(selected_kernel->get_width(), 0.125, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } TEST(KernelSelectionMaxCrossValidation, linear_time_single_kernel_dense) @@ -356,7 +356,7 @@ TEST(KernelSelectionMedianHeuristic, quadratic_time_single_kernel_dense) mmd->set_train_test_mode(false); auto selected_kernel=static_cast(mmd->get_kernel()); - EXPECT_NEAR(selected_kernel->get_width(), 1.0, 1E-10); + EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10); } TEST(KernelSelectionMedianHeuristic, linear_time_single_kernel_dense) diff --git a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc index 4db0131ff92..99f2e44dbcc 100644 --- a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc +++ b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc @@ -354,7 +354,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_biased_full) // assert against local machine computed result mmd->set_statistic_type(ST_BIASED_FULL); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.8, 1E-10); + EXPECT_NEAR(p_value, 0.0, 1E-10); } TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full) @@ -393,7 +393,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_FULL); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.8, 1E-10); + EXPECT_NEAR(p_value, 0.0, 1E-10); } TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete) @@ -432,7 +432,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_INCOMPLETE); float64_t p_value=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value, 0.6, 1E-10); + EXPECT_NEAR(p_value, 0.0, 1E-10); } TEST(QuadraticTimeMMD, perform_test_spectrum) @@ -475,7 +475,7 @@ TEST(QuadraticTimeMMD, perform_test_spectrum) // assert against local machine computed result mmd->set_statistic_type(ST_BIASED_FULL); float64_t p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_spectrum, 0.8, 1E-10); + EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10); // unbiased case @@ -483,7 +483,7 @@ TEST(QuadraticTimeMMD, perform_test_spectrum) // assert against local machine computed result mmd->set_statistic_type(ST_UNBIASED_FULL); p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic()); - EXPECT_NEAR(p_value_spectrum, 0.8, 1E-10); + EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10); } TEST(QuadraticTimeMMD, precomputed_vs_nonprecomputed) @@ -635,7 +635,7 @@ TEST(QuadraticTimeMMD, multikernel_compute_test_power) ASSERT_EQ(test_power_multiple.size(), test_power_single.size()); for (auto i=0; i(new CRandom()); + auto prng = get_prng(); + std::uniform_real_distribution dist(0.0, 1.0); SGMatrix data(dim, num_vec); for (auto i=0; irandom(0.0, 0.1); + data.matrix[i] = dist(prng); auto feats=some >(data); auto kernel=some(10, 2*sigma*sigma); diff --git a/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc b/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc index 8bad2dbf096..9bd3a572fbb 100644 --- a/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc +++ b/tests/unit/statistical_testing/internals/PermutationMMD_unittest.cc @@ -102,13 +102,13 @@ TEST(PermutationMMD, biased_full_single_kernel) Map map(kernel_matrix.matrix, 
kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); set_global_seed(12345); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds, prng.get()); + CMath::permute(perminds, prng); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); @@ -117,11 +117,10 @@ TEST(PermutationMMD, biased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - prng->set_seed(12345); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); @@ -183,13 +182,13 @@ TEST(PermutationMMD, unbiased_full_single_kernel) set_global_seed(12345); Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); SGVector result_2(num_null_samples); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds, prng.get()); + CMath::permute(perminds, prng); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); @@ -198,11 +197,10 @@ TEST(PermutationMMD, unbiased_full_single_kernel) SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); - prng->set_seed(12345); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); @@ -264,27 +262,26 @@ TEST(PermutationMMD, unbiased_incomplete_single_kernel) Map map(kernel_matrix.matrix, kernel_matrix.num_rows, kernel_matrix.num_cols); set_global_seed(12345); - auto prng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); SGVector result_2(num_null_samples); for (auto i=0; i perm(kernel_matrix.num_rows); perm.setIdentity(); SGVector perminds(perm.indices().data(), perm.indices().size(), false); - CMath::permute(perminds, prng.get()); + CMath::permute(perminds, prng); MatrixXf permuted = perm.transpose()*map*perm; SGMatrix permuted_km(permuted.data(), permuted.rows(), permuted.cols(), false); result_2[i]=compute_mmd(permuted_km); } - prng->set_seed(12345); SGVector inds(kernel_matrix.num_rows); SGVector result_3(num_null_samples); for (auto i=0; iadd_subset(inds); kernel->init(feats, feats); kernel_matrix=kernel->get_kernel_matrix(); diff --git a/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc b/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc index 211da9a9ee1..73643686da0 100644 --- a/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc +++ b/tests/unit/structure/HierarchicalMultilabelModel_unittest.cc @@ -20,11 +20,12 @@ TEST(HierarchicalMultilabelModel, get_joint_feature_vector_1) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -80,11 +81,12 @@ TEST(HierarchicalMultilabelModel, get_joint_feature_vector_2) int32_t num_samples = 2; SGMatrix feats(dim_features, 
num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -207,11 +209,12 @@ TEST(HierarchicalMultilabelModel, argmax) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist_p(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -242,9 +245,10 @@ TEST(HierarchicalMultilabelModel, argmax) SGVector w(model->get_dim()); + std::uniform_int_distribution dist_q(-1, 1); for (index_t i = 0; i < w.vlen; i++) { - w[i] = m_rng->random(-1, 1); + w[i] = dist_q(prng); } CResultSet * ret_1 = model->argmax(w, 0, true); @@ -319,11 +323,12 @@ TEST(HierarchicalMultilabelModel, argmax_leaf_nodes_mandatory) int32_t num_samples = 2; SGMatrix feats(dim_features, num_samples); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(-100, 100); for (index_t i = 0; i < dim_features * num_samples; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist_p(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -354,9 +359,10 @@ TEST(HierarchicalMultilabelModel, argmax_leaf_nodes_mandatory) SGVector w(model->get_dim()); + std::uniform_int_distribution dist_q(-1, 1); for (index_t i = 0; i < w.vlen; i++) { - w[i] = m_rng->random(-1, 1); + w[i] = dist_q(prng); } CResultSet * ret_1 = model->argmax(w, 0, true); diff --git a/tests/unit/structure/MultilabelCLRModel_unittest.cc b/tests/unit/structure/MultilabelCLRModel_unittest.cc index 312c909e9a1..b2d8e32039d 100644 --- a/tests/unit/structure/MultilabelCLRModel_unittest.cc +++ b/tests/unit/structure/MultilabelCLRModel_unittest.cc @@ -21,10 +21,11 @@ using namespace shogun; TEST(MultilabelCLRModel, get_joint_feature_vector_1) { SGMatrix feats(DIMS, NUM_SAMPLES); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -70,10 +71,11 @@ TEST(MultilabelCLRModel, get_joint_feature_vector_1) TEST(MultilabelCLRModel, get_joint_feature_vector_2) { SGMatrix feats(DIMS, NUM_SAMPLES); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist(-100, 100); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -176,10 +178,11 @@ TEST(MultilabelCLRModel, delta_loss) TEST(MultilabelCLRModel, argmax) { SGMatrix feats(DIMS, NUM_SAMPLES); - auto m_rng = std::unique_ptr(new CRandom()); + auto prng = get_prng(); + std::uniform_int_distribution dist_p(-100, 100); for (index_t i = 0; i < DIMS * NUM_SAMPLES; i++) { - feats[i] = m_rng->random(-100, 100); + feats[i] = dist_p(prng); } CSparseFeatures * features = new CSparseFeatures(feats); @@ -201,9 +204,10 @@ TEST(MultilabelCLRModel, argmax) SGVector w(model->get_dim()); + std::uniform_int_distribution dist_q(-1, 1); for (index_t i = 0; i < 
 {
- w[i] = m_rng->random(-1, 1);
+ w[i] = dist_q(prng);
 }
 CResultSet * ret_1 = model->argmax(w, 0, true);
diff --git a/tests/unit/structure/PrimalMosekSOSVM_unittest.cc b/tests/unit/structure/PrimalMosekSOSVM_unittest.cc
index f6ec0316e5c..aada3866bb8 100644
--- a/tests/unit/structure/PrimalMosekSOSVM_unittest.cc
+++ b/tests/unit/structure/PrimalMosekSOSVM_unittest.cc
@@ -18,7 +18,9 @@ using namespace shogun;
 TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds)
 {
 int32_t num_samples = 10;
- auto m_rng = std::unique_ptr(new CRandom(17));
+ set_global_seed(17);
+ auto prng = get_prng();
+ std::uniform_real_distribution dist(0.0, 1.0);

 // define factor type
 SGVector card(2);
@@ -53,8 +55,8 @@ TEST(PrimalMosekSOSVM, mosek_init_sosvm_w_bounds)
 // add factors
 SGVector data1(2);
- data1[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0;
- data1[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0;
+ data1[0] = 2.0 * dist(prng) - 1.0;
+ data1[1] = 2.0 * dist(prng) - 1.0;
 SGVector var_index1(2);
 var_index1[0] = 0;
 var_index1[1] = 1;
@@ -62,8 +64,8 @@
 fg->add_factor(fac1);
 SGVector data2(2);
- data2[0] = 2.0 * m_rng->random(0.0, 1.0) - 1.0;
- data2[1] = 2.0 * m_rng->random(0.0, 1.0) - 1.0;
+ data2[0] = 2.0 * dist(prng) - 1.0;
+ data2[1] = 2.0 * dist(prng) - 1.0;
 SGVector var_index2(2);
 var_index2[0] = 1;
 var_index2[1] = 2;

From 817eeeb3a40e3358b6cf7b52c44ca7fedf8311c2 Mon Sep 17 00:00:00 2001
From: MikeLing
Date: Fri, 21 Jul 2017 13:37:16 +0800
Subject: [PATCH 8/9] fix the broken unit tests

---
 .../machine/StochasticGBMachine_unittest.cc   | 20 +++++++++----------
 tests/unit/mathematics/Math_unittest.cc       | 16 +++++++--------
 .../multiclass/BaggingMachine_unittest.cc     |  2 +-
 .../multiclass/tree/RandomForest_unittest.cc  |  4 ++--
 tests/unit/neuralnets/RBM_unittest.cc         |  2 +-
 .../KernelSelection_unittest.cc               |  8 ++++----
 .../LinearTimeMMD_unittest.cc                 |  6 +++---
 .../QuadraticTimeMMD_unittest.cc              |  8 ++++----
 8 files changed, 33 insertions(+), 33 deletions(-)

diff --git a/tests/unit/machine/StochasticGBMachine_unittest.cc b/tests/unit/machine/StochasticGBMachine_unittest.cc
index 8df4f5375fd..6a77b6fc243 100644
--- a/tests/unit/machine/StochasticGBMachine_unittest.cc
+++ b/tests/unit/machine/StochasticGBMachine_unittest.cc
@@ -101,16 +101,16 @@ TEST(StochasticGBMachine,sinusoid_curve_fitting)
 SGVector ret=ret_labels->get_labels();
 float64_t epsilon=1e-8;
- EXPECT_NEAR(ret[0], -0.91580992928965543, epsilon);
- EXPECT_NEAR(ret[1], 0.83302568373135366, epsilon);
- EXPECT_NEAR(ret[2], 0.42519621523857321, epsilon);
- EXPECT_NEAR(ret[3], -0.54396234032218127, epsilon);
- EXPECT_NEAR(ret[4], -0.54396234032218127, epsilon);
- EXPECT_NEAR(ret[5], 0.64891735887560409, epsilon);
- EXPECT_NEAR(ret[6], 0.8330256837313536, epsilon);
- EXPECT_NEAR(ret[7], -0.76318443378750656, epsilon);
- EXPECT_NEAR(ret[8], -0.52743316035159316, epsilon);
- EXPECT_NEAR(ret[9], 0.13643452136869369, epsilon);
+ EXPECT_NEAR(ret[0], -0.91750791302398116, epsilon);
+ EXPECT_NEAR(ret[1], 0.84042006382699208, epsilon);
+ EXPECT_NEAR(ret[2], 0.41908702227342665, epsilon);
+ EXPECT_NEAR(ret[3], -0.51059999661859867, epsilon);
+ EXPECT_NEAR(ret[4], -0.51059999661859867, epsilon);
+ EXPECT_NEAR(ret[5], 0.67456542071826497, epsilon);
+ EXPECT_NEAR(ret[6], 0.84042006382699208, epsilon);
+ EXPECT_NEAR(ret[7], -0.73520642975448036, epsilon);
+ EXPECT_NEAR(ret[8], -0.55571022774324808, epsilon);
+ EXPECT_NEAR(ret[9], 0.14677577998145744, epsilon);

 SG_UNREF(train_feats);
 SG_UNREF(test_feats);
diff --git a/tests/unit/mathematics/Math_unittest.cc b/tests/unit/mathematics/Math_unittest.cc
index 25f31b4db3d..d79785ef396 100644
--- a/tests/unit/mathematics/Math_unittest.cc
+++ b/tests/unit/mathematics/Math_unittest.cc
@@ -390,10 +390,10 @@ TEST(CMath, permute)
 v.range_fill(0);
 set_global_seed(2);
 CMath::permute(v);
- EXPECT_EQ(v[0], 0);
- EXPECT_EQ(v[1], 2);
- EXPECT_EQ(v[2], 3);
- EXPECT_EQ(v[3], 1);
+ EXPECT_EQ(v[0], 3);
+ EXPECT_EQ(v[1], 0);
+ EXPECT_EQ(v[2], 1);
+ EXPECT_EQ(v[3], 2);
 }

 TEST(CMath, permute_with_random)
@@ -404,10 +404,10 @@
 auto prng = get_prng();
 CMath::permute(v, prng);
- EXPECT_EQ(v[0], 0);
- EXPECT_EQ(v[1], 2);
- EXPECT_EQ(v[2], 3);
- EXPECT_EQ(v[3], 1);
+ EXPECT_EQ(v[0], 3);
+ EXPECT_EQ(v[1], 0);
+ EXPECT_EQ(v[2], 1);
+ EXPECT_EQ(v[3], 2);
 }

 TEST(CMath,misc)
diff --git a/tests/unit/multiclass/BaggingMachine_unittest.cc b/tests/unit/multiclass/BaggingMachine_unittest.cc
index f08ffafe7e9..504e8851c91 100644
--- a/tests/unit/multiclass/BaggingMachine_unittest.cc
+++ b/tests/unit/multiclass/BaggingMachine_unittest.cc
@@ -231,7 +231,7 @@ TEST(BaggingMachine,classify_CART)
 EXPECT_EQ(0.0, res_vector[4]);

 CMulticlassAccuracy* eval=new CMulticlassAccuracy();
- EXPECT_NEAR(0.5714285, c->get_oob_error(eval), 1e-6);
+ EXPECT_NEAR(0.5, c->get_oob_error(eval), 1e-6);

 SG_UNREF(test_feats);
 SG_UNREF(result);
diff --git a/tests/unit/multiclass/tree/RandomForest_unittest.cc b/tests/unit/multiclass/tree/RandomForest_unittest.cc
index 8b04a4d0316..18f4387d7c6 100644
--- a/tests/unit/multiclass/tree/RandomForest_unittest.cc
+++ b/tests/unit/multiclass/tree/RandomForest_unittest.cc
@@ -201,7 +201,7 @@ TEST(RandomForest,classify_nominal_test)
 EXPECT_EQ(0.0, res_vector[4]);

 CMulticlassAccuracy* eval=new CMulticlassAccuracy();
- EXPECT_NEAR(0.78571428, c->get_oob_error(eval), 1e-6);
+ EXPECT_NEAR(0.4285714, c->get_oob_error(eval), 1e-6);

 SG_UNREF(test_feats);
 SG_UNREF(result);
@@ -270,7 +270,7 @@ TEST(RandomForest,classify_non_nominal_test)
 EXPECT_EQ(0.0, res_vector[4]);

 CMulticlassAccuracy* eval=new CMulticlassAccuracy();
- EXPECT_NEAR(0.78571428, c->get_oob_error(eval), 1e-6);
+ EXPECT_NEAR(0.4285714, c->get_oob_error(eval), 1e-6);

 SG_UNREF(test_feats);
 SG_UNREF(result);
diff --git a/tests/unit/neuralnets/RBM_unittest.cc b/tests/unit/neuralnets/RBM_unittest.cc
index 3701b761004..9e6e6694fda 100644
--- a/tests/unit/neuralnets/RBM_unittest.cc
+++ b/tests/unit/neuralnets/RBM_unittest.cc
@@ -180,5 +180,5 @@ TEST(RBM, pseudo_likelihood_binary)
 pl += rbm.pseudo_likelihood(V)/10000;

 // generated using scikit-learn
- EXPECT_NEAR(-3.3698, pl, 0.02);
+ EXPECT_NEAR(-3.33648, pl, 0.02);
 }
diff --git a/tests/unit/statistical_testing/KernelSelection_unittest.cc b/tests/unit/statistical_testing/KernelSelection_unittest.cc
index 7a1be008983..36672827891 100644
--- a/tests/unit/statistical_testing/KernelSelection_unittest.cc
+++ b/tests/unit/statistical_testing/KernelSelection_unittest.cc
@@ -72,7 +72,7 @@ TEST(KernelSelectionMaxMMD, linear_time_single_kernel_streaming)
 mmd->set_train_test_mode(false);
 auto selected_kernel=static_cast(mmd->get_kernel());
- EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10);
+ EXPECT_NEAR(selected_kernel->get_width(), 0.0625, 1E-10);
 }

 TEST(KernelSelectionMaxMMD, quadratic_time_single_kernel_dense)
@@ -210,7 +210,7 @@ TEST(KernelSelectionMaxTestPower, quadratic_time_single_kernel)
 mmd->set_train_test_mode(false);
 auto selected_kernel=static_cast(mmd->get_kernel());
- EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10);
+ EXPECT_NEAR(selected_kernel->get_width(), 0.125, 1E-10);
 }

 #ifdef USE_GPL_SHOGUN
@@ -356,7 +356,7 @@ TEST(KernelSelectionMedianHeuristic, quadratic_time_single_kernel_dense)
 mmd->set_train_test_mode(false);
 auto selected_kernel=static_cast(mmd->get_kernel());
- EXPECT_NEAR(selected_kernel->get_width(), 0.03125, 1E-10);
+ EXPECT_NEAR(selected_kernel->get_width(), 2, 1E-10);
 }

 TEST(KernelSelectionMedianHeuristic, linear_time_single_kernel_dense)
@@ -388,5 +388,5 @@
 mmd->set_train_test_mode(false);
 auto selected_kernel=static_cast(mmd->get_kernel());
- EXPECT_NEAR(selected_kernel->get_width(), 1.0, 1E-10);
+ EXPECT_NEAR(selected_kernel->get_width(), 2.0, 1E-10);
 }
diff --git a/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc b/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc
index a72172d36dc..8f75ac53c20 100644
--- a/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc
+++ b/tests/unit/statistical_testing/LinearTimeMMD_unittest.cc
@@ -347,7 +347,7 @@ TEST(LinearTimeMMD, perform_test_gaussian_biased_full)
 // assert against local machine computed result
 mmd->set_statistic_type(ST_BIASED_FULL);
 float64_t p_value_gaussian=mmd->compute_p_value(mmd->compute_statistic());
- EXPECT_NEAR(p_value_gaussian, 0.0, 1E-6);
+ EXPECT_NEAR(p_value_gaussian, 0.0, 1E-5);
 }

 TEST(LinearTimeMMD, perform_test_gaussian_unbiased_full)
@@ -384,7 +384,7 @@
 // assert against local machine computed result
 mmd->set_statistic_type(ST_UNBIASED_FULL);
 float64_t p_value_gaussian=mmd->compute_p_value(mmd->compute_statistic());
- EXPECT_NEAR(p_value_gaussian, 0.78999099853119159, 1E-6);
+ EXPECT_NEAR(p_value_gaussian, 0.79220410498576843, 1E-6);
 }

 TEST(LinearTimeMMD, perform_test_gaussian_unbiased_incomplete)
@@ -422,5 +422,5 @@
 // assert against local machine computed result
 mmd->set_statistic_type(ST_UNBIASED_INCOMPLETE);
 float64_t p_value_gaussian=mmd->compute_p_value(mmd->compute_statistic());
- EXPECT_NEAR(p_value_gaussian, 0.48342157360749094, 1E-6);
+ EXPECT_NEAR(p_value_gaussian, 0.4783288089290294, 1E-6);
 }
diff --git a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc
index 99f2e44dbcc..f2e251172e1 100644
--- a/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc
+++ b/tests/unit/statistical_testing/QuadraticTimeMMD_unittest.cc
@@ -354,7 +354,7 @@ TEST(QuadraticTimeMMD, perform_test_permutation_biased_full)
 // assert against local machine computed result
 mmd->set_statistic_type(ST_BIASED_FULL);
 float64_t p_value=mmd->compute_p_value(mmd->compute_statistic());
- EXPECT_NEAR(p_value, 0.0, 1E-10);
+ EXPECT_NEAR(p_value, 1.0, 1E-10);
 }

 TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_full)
@@ -393,7 +393,7 @@
 // assert against local machine computed result
 mmd->set_statistic_type(ST_UNBIASED_FULL);
 float64_t p_value=mmd->compute_p_value(mmd->compute_statistic());
- EXPECT_NEAR(p_value, 0.0, 1E-10);
+ EXPECT_NEAR(p_value, 1.0, 1E-10);
 }

 TEST(QuadraticTimeMMD, perform_test_permutation_unbiased_incomplete)
@@ -475,7 +475,7 @@ TEST(QuadraticTimeMMD, perform_test_spectrum)
 // assert against local machine computed result
 mmd->set_statistic_type(ST_BIASED_FULL);
 float64_t p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic());
- EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10);
+ EXPECT_NEAR(p_value_spectrum, 0.3, 1E-10);
 // unbiased case
@@ -483,7 +483,7 @@
 // assert against local machine computed result
 mmd->set_statistic_type(ST_UNBIASED_FULL);
 p_value_spectrum=mmd->compute_p_value(mmd->compute_statistic());
- EXPECT_NEAR(p_value_spectrum, 0.0, 1E-10);
+ EXPECT_NEAR(p_value_spectrum, 0.3, 1E-10);
 }

 TEST(QuadraticTimeMMD, precomputed_vs_nonprecomputed)

From 5af0493da417d0d8e0fcfcb13f0a70dbcd960b54 Mon Sep 17 00:00:00 2001
From: MikeLing
Date: Tue, 1 Aug 2017 15:15:23 +0800
Subject: [PATCH 9/9] add explicit random engine

---
 .../classical/GaussianDistribution.cpp        |  2 +-
 .../generators/GaussianBlobsDataGenerator.cpp |  2 +-
 .../generators/MeanShiftDataGenerator.cpp     |  2 +-
 src/shogun/mathematics/Math.h                 | 25 -------------------
 .../ratapprox/tracesampler/TraceSampler.h     |  2 +-
 src/shogun/neuralnets/RBM.cpp                 |  2 +-
 6 files changed, 5 insertions(+), 30 deletions(-)

diff --git a/src/shogun/distributions/classical/GaussianDistribution.cpp b/src/shogun/distributions/classical/GaussianDistribution.cpp
index 3a83f83c8a9..945d7be086a 100644
--- a/src/shogun/distributions/classical/GaussianDistribution.cpp
+++ b/src/shogun/distributions/classical/GaussianDistribution.cpp
@@ -168,7 +168,7 @@ SGVector CGaussianDistribution::log_pdf_multiple(SGMatrix
 void CGaussianDistribution::init()
 {
- m_rng = get_prng();
+ m_rng = get_prng();
 SG_ADD(&m_mean, "mean", "Mean of the Gaussian.", MS_NOT_AVAILABLE);
 SG_ADD(&m_L, "L", "Lower factor of covariance matrix, "
 "depending on the factorization type.", MS_NOT_AVAILABLE);
diff --git a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp
index 644f3a41f32..10132a68ebf 100644
--- a/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp
+++ b/src/shogun/features/streaming/generators/GaussianBlobsDataGenerator.cpp
@@ -73,7 +73,7 @@ void CGaussianBlobsDataGenerator::init()
 m_stretch=1;
 m_angle=0;
 m_cholesky=SGMatrix(2, 2);
- m_rng = get_prng();
+ m_rng = get_prng();
 m_cholesky(0, 0)=1;
 m_cholesky(0, 1)=0;
 m_cholesky(1, 0)=0;
diff --git a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp
index 9cf7716ffdc..536c239c94f 100644
--- a/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp
+++ b/src/shogun/features/streaming/generators/MeanShiftDataGenerator.cpp
@@ -50,7 +50,7 @@ void CMeanShiftDataGenerator::init()
 SG_ADD(&m_dimension_shift, "m_dimension_shift", "Dimension of mean shift",
 MS_NOT_AVAILABLE);

- m_rng = get_prng();
+ m_rng = get_prng();
 m_dimension=0;
 m_mean_shift=0;
 m_dimension_shift=0;
diff --git a/src/shogun/mathematics/Math.h b/src/shogun/mathematics/Math.h
index 67dd27c3001..5928144cead 100644
--- a/src/shogun/mathematics/Math.h
+++ b/src/shogun/mathematics/Math.h
@@ -1017,31 +1017,6 @@ class CMath : public CSGObject
 return 0 == a ? b : a;
 }

- template < class T, class RandomGenerator = std::uniform_int_distribution>
- static void permute(SGVector v)
- {
- auto prng = get_prng();
- for (index_t i = 0; i < v.vlen; ++i)
- {
- RandomGenerator dist(i, v.vlen - 1);
- swap(v[i], v[dist(prng)]);
- }
- }
-
- template <
- class T, class RandomGenerator,
- class Distribution = std::uniform_int_distribution>
- static void permute(SGVector v, RandomGenerator prng)
- {
- for (index_t i = 0; i < v.vlen; ++i)
- {
- Distribution dist(i, v.vlen - 1);
- swap(v[i], v[dist(prng)]);
- }
- }
-
 /** Computes sum of non-zero elements
 * @param vec vector
 * @param len length
diff --git a/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h b/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h
index 033f6e60704..9888c83aee9 100644
--- a/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h
+++ b/src/shogun/mathematics/linalg/ratapprox/tracesampler/TraceSampler.h
@@ -100,7 +100,7 @@ class CTraceSampler : public CSGObject
 {
 m_num_samples=0;
 m_dimension=0;
- m_rng = get_prng();
+ m_rng = get_prng();

 SG_ADD(&m_num_samples, "num_samples",
 "Number of samples this sampler can generate", MS_NOT_AVAILABLE);
diff --git a/src/shogun/neuralnets/RBM.cpp b/src/shogun/neuralnets/RBM.cpp
index 4da1c2b728a..25e1c686795 100644
--- a/src/shogun/neuralnets/RBM.cpp
+++ b/src/shogun/neuralnets/RBM.cpp
@@ -623,7 +623,7 @@ void CRBM::init()
 m_visible_state_offsets = new CDynamicArray();
 m_num_params = 0;
 m_batch_size = 0;
- m_rng = get_prng();
+ m_rng = get_prng();

 SG_ADD(&cd_num_steps, "cd_num_steps", "Number of CD Steps", MS_NOT_AVAILABLE);
 SG_ADD(&cd_persistent, "cd_persistent", "Whether to use PCD", MS_NOT_AVAILABLE);
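
A note for reviewers on the pattern these patches converge on: every former CRandom call site now builds a standard <random> distribution next to the code that consumes it and feeds it an engine obtained from get_prng(), with set_global_seed() controlling reproducibility and CMath::permute() taking the engine explicitly. The sketch below is a minimal, self-contained approximation of that flow. The helpers set_global_seed, get_prng and permute are re-implemented here with plain standard-library stand-ins purely for illustration (the real ones live in shogun/base/init.h and shogun/mathematics/Math.h), and the assumption that get_prng() hands back a std::mt19937_64-style engine seeded from the global seed is mine, not something the diff states.

    // Standalone sketch of the RNG pattern used throughout this series.
    // set_global_seed/get_prng/permute are stand-ins for the Shogun helpers,
    // re-implemented here only so the example compiles on its own.
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <random>
    #include <vector>

    namespace sketch
    {
        static uint64_t global_seed = 0;

        void set_global_seed(uint64_t seed) { global_seed = seed; }

        // One fresh engine per call, deterministic given the global seed.
        std::mt19937_64 get_prng() { return std::mt19937_64(global_seed); }

        // Fisher-Yates shuffle written against an explicit engine, mirroring
        // the CMath::permute(v, prng) overload kept by the patch.
        template <class T, class PRNG>
        void permute(std::vector<T>& v, PRNG& prng)
        {
            for (std::size_t i = 0; i + 1 < v.size(); ++i)
            {
                std::uniform_int_distribution<std::size_t> dist(i, v.size() - 1);
                std::swap(v[i], v[dist(prng)]);
            }
        }
    }

    int main()
    {
        sketch::set_global_seed(12345);
        auto prng = sketch::get_prng();

        // Distributions are created where the values are needed and then fed
        // the engine; this replaces the old CRandom::random(lo, hi) calls.
        std::uniform_int_distribution<int> dist(-100, 100);
        std::vector<int> feats(6);
        for (auto& x : feats)
            x = dist(prng);

        std::vector<int> inds = {0, 1, 2, 3};
        sketch::permute(inds, prng);

        for (int x : feats) std::cout << x << ' ';
        std::cout << '\n';
        for (int x : inds) std::cout << x << ' ';
        std::cout << '\n';
        return 0;
    }

Passing the engine by reference keeps sampling deterministic under a single seed while letting each call site choose its own distribution, which is consistent with patch 8 only refreshing expected values in the affected tests rather than restructuring them.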