Merge pull request #114 from julianofoleiss/skgpuid
Added gpu_id parameter to the Scikit interface.
QinbinLi committed Dec 19, 2018
2 parents f549fe9 + 0bd4d9d commit 50642ca
Showing 3 changed files with 28 additions and 11 deletions.
3 changes: 3 additions & 0 deletions python/README.md
@@ -122,6 +122,9 @@ The usage of thundersvm scikit interface is similar to sklearn.svm.
*max_mem_size*: int, optional (default=-1)\
set the maximum memory size (MB) that thundersvm uses, or -1 for no limit.

*gpu_id*: int, optional (default=0)\
set which gpu to use for training.

*decision_function_shape*: ‘ovo’, default=’ovo’, not supported yet for 'ovr'\
only for classifier. Whether to return a one-vs-rest (‘ovr’) decision function of shape (n_samples, n_classes) as all other classifiers, or the original one-vs-one (‘ovo’) decision function of libsvm which has shape (n_samples, n_classes * (n_classes - 1) / 2).
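As a quick illustration of the new parameter from the scikit side, here is a minimal usage sketch. It assumes the wrapper module changed in this commit is importable as thundersvmScikit, that a second GPU is visible, and follows the sklearn-style fit/predict API this README describes; the data is synthetic and only for demonstration.

```python
import numpy as np
from thundersvmScikit import SVC

# Synthetic two-class problem, purely for illustration.
X = np.random.rand(200, 10).astype(np.float32)
y = (X[:, 0] > 0.5).astype(int)

# gpu_id selects the CUDA device used for training; the default is 0.
clf = SVC(kernel='rbf', C=1.0, gpu_id=1)
clf.fit(X, y)
print(clf.predict(X[:5]))
```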

25 changes: 14 additions & 11 deletions python/thundersvmScikit.py
@@ -52,7 +52,7 @@ def __init__(self, kernel, degree,
gamma, coef0, C, nu, epsilon,
tol, probability, class_weight,
shrinking, cache_size, verbose,
max_iter, n_jobs, max_mem_size, random_state):
max_iter, n_jobs, max_mem_size, random_state,gpu_id):
self.kernel = kernel
self.degree = degree
self.gamma = gamma
@@ -72,6 +72,7 @@ def __init__(self, kernel, degree,
self.max_mem_size = max_mem_size
thundersvm.model_new.restype = c_void_p
self.model = thundersvm.model_new(SVM_TYPE.index(self._impl))
self.gpu_id = gpu_id
if self.max_mem_size != -1:
thundersvm.set_memory_size(c_void_p(self.model), self.max_mem_size)
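The constructor hunk above follows a common ctypes pattern: declare the return type of model_new so the opaque handle is not truncated, keep the handle, and pass it back on later calls such as set_memory_size. A stripped-down sketch of that pattern follows; the library path is an assumption (the real module locates the shared library itself), and the literal 0 stands in for SVM_TYPE.index(self._impl).

```python
from ctypes import CDLL, c_void_p

# Assumed path to the built shared library; the real wrapper resolves it at import time.
thundersvm = CDLL("./libthundersvm.so")

# Without an explicit restype, ctypes would treat the returned pointer as a C int
# and could truncate it on 64-bit systems.
thundersvm.model_new.restype = c_void_p

model = thundersvm.model_new(0)                     # 0 stands in for SVM_TYPE.index(self._impl)
thundersvm.set_memory_size(c_void_p(model), 8192)   # cap working memory at 8192 MB
```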

@@ -171,6 +172,7 @@ def _dense_fit(self, X, y, solver_type, kernel):
c_float(self.C), c_float(self.nu), c_float(self.epsilon), c_float(self.tol),
self.probability, weight_size, weight_label, weight,
self.verbose, self.max_iter, self.n_jobs, self.max_mem_size,
self.gpu_id,
n_features, n_classes, self._train_succeed, c_void_p(self.model))
self.n_features = n_features[0]
self.n_classes = n_classes[0]
@@ -213,6 +215,7 @@ def _sparse_fit(self, X, y, solver_type, kernel):
c_float(self.C), c_float(self.nu), c_float(self.epsilon), c_float(self.tol),
self.probability, weight_size, weight_label, weight,
self.verbose, self.max_iter, self.n_jobs, self.max_mem_size,
self.gpu_id,
n_features, n_classes, self._train_succeed, c_void_p(self.model))
self.n_features = n_features[0]
self.n_classes = n_classes[0]
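In both fit paths, gpu_id is inserted positionally right after max_mem_size and before the int* out-parameters, matching the updated C prototypes shown further down in thundersvm-scikit.cpp. The natural ctypes idiom for such int* out-parameters is a one-element array that the C side writes through, sketched below; the train function name in the comment is a placeholder, not the library's actual symbol.

```python
from ctypes import c_int

# One-element int arrays double as C out-parameters.
n_features = (c_int * 1)()
n_classes = (c_int * 1)()
train_succeed = (c_int * 1)()

# Hypothetical call shape only -- the real symbol name and the earlier
# arguments are omitted here:
# thundersvm.some_train_scikit(..., max_mem_size, gpu_id,
#                              n_features, n_classes, train_succeed, model)

# After a successful call the C side has filled these in, and the wrapper
# reads them back with [0], as in n_features[0] above.
print(n_features[0], n_classes[0], train_succeed[0])
```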
@@ -394,15 +397,15 @@ def __init__(self, kernel = 'rbf', degree = 3,
gamma = 'auto', coef0 = 0.0, C = 1.0,
tol = 0.001, probability = False, class_weight = None,
shrinking = False, cache_size = None, verbose = False,
max_iter = -1, n_jobs = -1, max_mem_size = -1, random_state = None, decison_function_shape = 'ovo'):
max_iter = -1, n_jobs = -1, max_mem_size = -1, random_state = None, decison_function_shape = 'ovo', gpu_id=0):
self.decison_function_shape = decison_function_shape
super(SVC, self).__init__(
kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, C=C, nu=0., epsilon=0.,
tol=tol, probability=probability,
class_weight=class_weight, shrinking = shrinking,
cache_size = cache_size, verbose = verbose,
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = random_state)
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = random_state, gpu_id=gpu_id)



@@ -411,28 +414,28 @@ class NuSVC(SvmModel, ClassifierMixin):
def __init__(self, kernel = 'rbf', degree = 3, gamma = 'auto',
coef0 = 0.0, nu = 0.5, tol = 0.001,
probability = False, shrinking = False, cache_size = None, verbose = False,
max_iter = -1, n_jobs = -1, max_mem_size = -1, random_state = None, decison_function_shape = 'ovo'):
max_iter = -1, n_jobs = -1, max_mem_size = -1, random_state = None, decison_function_shape = 'ovo', gpu_id=0):
self.decison_function_shape = decison_function_shape
super(NuSVC, self).__init__(
kernel = kernel, degree = degree, gamma = gamma,
coef0 = coef0, C = 0., nu = nu, epsilon= 0.,
tol = tol, probability = probability, class_weight = None,
shrinking = shrinking, cache_size = cache_size, verbose = verbose,
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = random_state
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = random_state, gpu_id=gpu_id
)

class OneClassSVM(SvmModel):
_impl = 'one_class'
def __init__(self, kernel = 'rbf', degree = 3, gamma = 'auto',
coef0 = 0.0, nu = 0.5, tol = 0.001,
shrinking = False, cache_size = None, verbose = False,
max_iter = -1, n_jobs = -1, max_mem_size = -1, random_state = None):
max_iter = -1, n_jobs = -1, max_mem_size = -1, random_state = None, gpu_id=0):
super(OneClassSVM, self).__init__(
kernel = kernel, degree = degree, gamma = gamma,
coef0 = coef0, C = 0., nu = nu, epsilon = 0.,
tol = tol, probability= False, class_weight = None,
shrinking = shrinking, cache_size = cache_size, verbose = verbose,
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = random_state
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = random_state, gpu_id=gpu_id
)

def fit(self, X, y=None):
@@ -444,25 +447,25 @@ def __init__(self, kernel = 'rbf', degree = 3, gamma = 'auto',
coef0 = 0.0, C = 1.0, epsilon = 0.1,
tol = 0.001, probability = False,
shrinking = False, cache_size = None, verbose = False,
max_iter = -1, n_jobs = -1,max_mem_size = -1):
max_iter = -1, n_jobs = -1,max_mem_size = -1, gpu_id=0):
super(SVR, self).__init__(
kernel = kernel, degree = degree, gamma = gamma,
coef0 = coef0, C = C, nu = 0., epsilon = epsilon,
tol = tol, probability = probability, class_weight = None,
shrinking = shrinking, cache_size = cache_size, verbose = verbose,
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = None
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = None, gpu_id=gpu_id
)

class NuSVR(SvmModel, RegressorMixin):
_impl = 'nu_svr'
def __init__(self, kernel = 'rbf', degree = 3, gamma = 'auto',
coef0 = 0.0, nu = 0.5, C = 1.0, tol = 0.001, probability = False,
shrinking = False, cache_size = None, verbose = False,
max_iter = -1, n_jobs = -1, max_mem_size = -1):
max_iter = -1, n_jobs = -1, max_mem_size = -1, gpu_id=0):
super(NuSVR, self).__init__(
kernel = kernel, degree = degree, gamma = gamma,
coef0 = coef0, nu = nu, C = C, epsilon = 0.,
tol = tol, probability = probability, class_weight = None,
shrinking = shrinking, cache_size = cache_size, verbose = verbose,
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = None
max_iter = max_iter, n_jobs = n_jobs, max_mem_size = max_mem_size, random_state = None, gpu_id=gpu_id
)
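Since every estimator above now forwards gpu_id to the shared SvmModel base class, individual models can be pinned to different devices. A hedged example, assuming two visible GPUs and synthetic data:

```python
import numpy as np
from thundersvmScikit import SVR, OneClassSVM

X = np.random.rand(300, 8).astype(np.float32)
y = X.sum(axis=1)

reg = SVR(kernel='rbf', C=10.0, epsilon=0.01, gpu_id=0)  # train on the first GPU
reg.fit(X, y)

detector = OneClassSVM(nu=0.1, gpu_id=1)                 # train on the second GPU
detector.fit(X)
```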
11 changes: 11 additions & 0 deletions src/thundersvm/thundersvm-scikit.cpp
@@ -43,7 +43,12 @@ extern "C" {
float cost, float nu, float epsilon, float tol, int probability,
int weight_size, int* weight_label, float* weight,
int verbose, int max_iter, int n_cores, int max_mem_size,
int gpu_id,
int* n_features, int* n_classes, int* succeed, SvmModel* model){
#ifdef USE_CUDA
CUDA_CHECK(cudaSetDevice(gpu_id));
#endif

succeed[0] = 1;
if(verbose)
el::Loggers::reconfigureAllLoggers(el::ConfigurationType::Enabled, "true");
@@ -145,7 +150,13 @@ extern "C" {
float cost, float nu, float epsilon, float tol, int probability,
int weight_size, int* weight_label, float* weight,
int verbose, int max_iter, int n_cores, int max_mem_size,
int gpu_id,
int* n_features, int* n_classes, int* succeed, SvmModel* model){

#ifdef USE_CUDA
CUDA_CHECK(cudaSetDevice(gpu_id));
#endif

succeed[0] = 1;
if(verbose)
el::Loggers::reconfigureAllLoggers(el::ConfigurationType::Enabled, "true");
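Both C entry points now call cudaSetDevice(gpu_id) before training when the library is built with USE_CUDA, so gpu_id indexes the devices visible to the process. A hedged note on how that interacts with CUDA_VISIBLE_DEVICES (standard CUDA behavior, not something this commit adds): restricting visibility remaps the indices, provided the variable is set before the library initializes CUDA.

```python
import os

# Expose only physical GPUs 2 and 3 to this process; must happen before
# CUDA is initialized by the library.
os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"

from thundersvmScikit import SVC

clf = SVC(gpu_id=0)   # device 0 of the visible set, i.e. physical GPU 2
```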