Commit

fix precision issue #126
zeyiwen committed Feb 18, 2019
1 parent a824a1a commit 7b6b743
Showing 8 changed files with 33 additions and 3 deletions.
4 changes: 4 additions & 0 deletions CMakeLists.txt
@@ -19,6 +19,10 @@ endif()
set(USE_CUDA ON CACHE BOOL "Compile with CUDA")
set(USE_EIGEN OFF CACHE BOOL "Compile with Eigen")
set(BUILD_TESTS OFF CACHE BOOL "Build Tests")
set(USE_DOUBLE OFF CACHE BOOL "Use double as kernel_type")
if(USE_DOUBLE)
message("Use double as kernel_type")
endif()
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif ()
3 changes: 3 additions & 0 deletions docs/faq.md
@@ -3,6 +3,9 @@ Frequently Asked Questions (FAQs)
This page is dedicated to summarizing some frequently asked questions about ThunderSVM.

## FAQs of users
* **Why does ThunderSVM have large errors? My data set is not normalized.**
To reduce the errors, you need to pass ``-DUSE_DOUBLE=ON`` to cmake (e.g., ``cmake -DUSE_DOUBLE=ON ..``). The reason is that ThunderSVM stores kernel values as ``float`` for better efficiency. For problems that require high precision, you need to tell ThunderSVM to use ``double`` (see the short sketch after this file's diff).

* **How can I use the source code?**
Please refer to the [How To](how-to.md) page.

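Below is a minimal standalone sketch (not part of this commit) of the precision point made in the FAQ entry above: a kernel value computed from unnormalized features loses its low-order digits when stored as ``float`` (about 7 significant decimal digits), while ``double`` keeps them. The numbers are illustrative only.

```cpp
#include <cstdio>

int main() {
    double x = 123456.789;                 // an unnormalized feature value
    double kd = x * x;                     // kernel value kept in double
    float  kf = static_cast<float>(kd);    // the same value stored as float
    std::printf("double: %.3f\n", kd);
    std::printf("float : %.3f\n", static_cast<double>(kf));  // low-order digits differ
    return 0;
}
```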
1 change: 1 addition & 0 deletions include/thundersvm/config.h.in
@@ -1,3 +1,4 @@
#cmakedefine DATASET_DIR "@DATASET_DIR@"
#cmakedefine USE_CUDA
#cmakedefine USE_EIGEN
#cmakedefine USE_DOUBLE
5 changes: 5 additions & 0 deletions include/thundersvm/thundersvm.h
@@ -14,5 +14,10 @@
using std::string;
using std::vector;
typedef double float_type;

#ifdef USE_DOUBLE
typedef double kernel_type;
#else
typedef float kernel_type;
#endif
#endif //THUNDERSVM_THUNDERSVM_H
8 changes: 8 additions & 0 deletions src/thundersvm/cmdparser.cpp
@@ -184,6 +184,9 @@ void CMDParser::parse_command_line(int argc, char **argv) {
case 'u':
gpu_id = atoi(argv[i]);
break;
case 'o':
n_cores = atoi(argv[i]);
break;
case 'm':
param_cmd.max_mem_size = static_cast<size_t>(max(atoi(argv[i]), 0)) << 20;//MB to Byte
break;
@@ -192,6 +195,11 @@ void CMDParser::parse_command_line(int argc, char **argv) {
HelpInfo_svmpredict();
}
}
if (n_cores > 0) {
omp_set_num_threads(n_cores);
} else if (n_cores != -1) {
LOG(ERROR) << "the number of cpu cores must be positive or -1";
}
if (i >= argc - 2)
HelpInfo_svmpredict();
svmpredict_input_file = argv[i];
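The new ``-o`` option above stores a core count in ``n_cores`` and, when positive, hands it to OpenMP; ``-1`` keeps the OpenMP default. A minimal standalone sketch of that pattern (function and variable names here are illustrative, not the project's):

```cpp
#include <omp.h>
#include <cstdlib>
#include <iostream>

// Apply a parsed core-count option: positive values set the OpenMP thread
// count, -1 leaves the default, anything else is rejected.
void apply_core_option(int n_cores) {
    if (n_cores > 0) {
        omp_set_num_threads(n_cores);
    } else if (n_cores != -1) {
        std::cerr << "the number of cpu cores must be positive or -1\n";
    }
}

int main(int argc, char **argv) {
    int n_cores = (argc > 1) ? std::atoi(argv[1]) : -1;
    apply_core_option(n_cores);
    std::cout << "OpenMP max threads: " << omp_get_max_threads() << "\n";
    return 0;
}
```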
4 changes: 2 additions & 2 deletions src/thundersvm/kernel/kernelmatrix_kernel.cpp
@@ -149,7 +149,7 @@ namespace svm_kernel {
void dns_csr_mul(int m, int n, int k, const SyncArray<kernel_type> &dense_mat, const SyncArray<kernel_type> &csr_val,
const SyncArray<int> &csr_row_ptr, const SyncArray<int> &csr_col_ind, int nnz,
SyncArray<kernel_type> &result) {
Eigen::Map<const Eigen::MatrixXf> denseMat(dense_mat.host_data(), k, n);
Eigen::Map<const Eigen::Matrix<kernel_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>> denseMat(dense_mat.host_data(), k, n);
Eigen::Map<const Eigen::SparseMatrix<kernel_type, Eigen::RowMajor>> sparseMat(m, k, nnz, csr_row_ptr.host_data(),
csr_col_ind.host_data(),
csr_val.host_data());
@@ -177,7 +177,7 @@ namespace svm_kernel {

void dns_dns_mul(int m, int n, int k, const SyncArray<kernel_type> &dense_mat,
const SyncArray<kernel_type> &origin_dense, SyncArray<kernel_type> &result){
Eigen::Map<const Eigen::MatrixXf> denseMat(dense_mat.host_data(), k, n);
Eigen::Map<const Eigen::Matrix<kernel_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>> denseMat(dense_mat.host_data(), k, n);
Eigen::Map<const Eigen::Matrix<kernel_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
originDenseMat(origin_dense.host_data(), m, k);
Eigen::Matrix<kernel_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> retMat = originDenseMat * denseMat;
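Both hunks above replace the hard-coded single-precision ``Eigen::MatrixXf`` map with a map parameterized on ``kernel_type``, so the same code compiles whether ``kernel_type`` is ``float`` or ``double``. A minimal standalone sketch of that mapping pattern (``kernel_type`` fixed to ``double`` here purely for illustration):

```cpp
#include <Eigen/Dense>

typedef double kernel_type;  // in ThunderSVM this comes from thundersvm.h
typedef Eigen::Matrix<kernel_type, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> ColMat;

// Wrap a raw k-by-n buffer without copying; with Eigen::MatrixXf this map
// would only be valid when kernel_type is float.
ColMat square_all(const kernel_type *dense_data, int k, int n) {
    Eigen::Map<const ColMat> denseMat(dense_data, k, n);
    return denseMat.array().square().matrix();
}
```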
9 changes: 9 additions & 0 deletions src/thundersvm/kernel/kernelmatrix_kernel.cu
@@ -144,10 +144,19 @@ namespace svm_kernel {
}
kernel_type one(1);
kernel_type zero(0);
#ifdef USE_DOUBLE
cusparseDcsrmm2(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE,
m, n, k, nnz, &one, descr, csr_val.device_data(), csr_row_ptr.device_data(),
csr_col_ind.device_data(),
dense_mat.device_data(), n, &zero, result.device_data(), m);
#else//kernel type is float
cusparseScsrmm2(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE,
m, n, k, nnz, &one, descr, csr_val.device_data(), csr_row_ptr.device_data(),
csr_col_ind.device_data(),
dense_mat.device_data(), n, &zero, result.device_data(), m);
#endif


//cusparseScsrmm return row-major matrix, so no transpose is needed
}
}
2 changes: 1 addition & 1 deletion src/thundersvm/model/svc.cpp
@@ -239,7 +239,7 @@ vector<float_type> SVC::predict_label(const SyncArray<float_type> &dec_values, i
}
}
int maxVoteClass = 0;
for (int i = 0; i < n_classes; ++i) {
for (int i = 1; i < n_classes; ++i) {
if (votes[i] > votes[maxVoteClass])
maxVoteClass = i;
}
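The one-line change above starts the vote scan at ``i = 1``: since ``maxVoteClass`` is initialized to 0, comparing ``votes[0]`` against itself is redundant, and the result is unchanged. A minimal standalone sketch of that arg-max pattern (not the project's code):

```cpp
#include <vector>

// Return the index of the class with the most votes; ties keep the lowest index.
int max_vote_class(const std::vector<int> &votes) {
    int maxVoteClass = 0;
    for (int i = 1; i < static_cast<int>(votes.size()); ++i) {
        if (votes[i] > votes[maxVoteClass])
            maxVoteClass = i;
    }
    return maxVoteClass;
}
```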
