@@ -130,35 +130,38 @@ def fit(self, X, y):
         csr_data = (c_float * (self.n_sv * self.n_features))()
         data_size = (c_int * 1)()
         thundersvm.get_sv(csr_row, csr_col, csr_data, data_size, c_void_p(self.model))
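+        # Copy the CSR buffers filled by get_sv into numpy arrays and build
+        # support_vectors_ (densified when the training data was dense).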
+        self.row = np.array([csr_row[index] for index in range(0, self.n_sv + 1)])
+        self.col = np.array([csr_col[index] for index in range(0, data_size[0])])
+        self.data = np.array([csr_data[index] for index in range(0, data_size[0])])
+
+        self.support_vectors_ = sp.csr_matrix((self.data, self.col, self.row))
+        if self._sparse == False:
+            self.support_vectors_ = self.support_vectors_.toarray(order='C')
+
         dual_coef = (c_float * ((self.n_classes - 1) * self.n_sv))()
         thundersvm.get_coef(dual_coef, self.n_classes, self.n_sv, c_void_p(self.model))
-
+
         self.dual_coef_ = np.array([dual_coef[index] for index in range(0, (self.n_classes - 1) * self.n_sv)]).astype(float)
         self.dual_coef_ = np.reshape(self.dual_coef_, (self.n_classes - 1, self.n_sv))
 
         rho_size = int(self.n_classes * (self.n_classes - 1) / 2)
         self.n_binary_model = rho_size
         rho = (c_float * rho_size)()
         thundersvm.get_rho(rho, rho_size, c_void_p(self.model))
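+        # One intercept (rho) per binary one-vs-one model.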
+        self.intercept_ = np.array([rho[index] for index in range(0, rho_size)]).astype(float)
 
         if self.kernel == 'linear':
             coef = (c_float * (self.n_binary_model * self.n_features))()
             thundersvm.get_linear_coef(coef, self.n_binary_model, self.n_features, c_void_p(self.model))
             self.coef_ = np.array([coef[index] for index in range(0, self.n_binary_model * self.n_features)]).astype(float)
             self.coef_ = np.reshape(self.coef_, (self.n_binary_model, self.n_features))
 
-        self.intercept_ = np.array([rho[index] for index in range(0, rho_size)]).astype(float)
-
-        self.row = np.array([csr_row[index] for index in range(0, self.n_sv + 1)])
-        self.col = np.array([csr_col[index] for index in range(0, data_size[0])])
-        self.data = np.array([csr_data[index] for index in range(0, data_size[0])])
-
-        self.support_vectors_ = sp.csr_matrix((self.data, self.col, self.row))
-        if self._sparse == False:
-            self.support_vectors_ = self.support_vectors_.toarray(order='C')
+
+
+
         n_support_ = (c_int * self.n_classes)()
         thundersvm.get_support_classes(n_support_, self.n_classes, c_void_p(self.model))
-
+
         self.n_support_ = np.array([n_support_[index] for index in range(0, self.n_classes)]).astype(int)
 
         self.shape_fit_ = X.shape
@@ -234,13 +237,13 @@ def _sparse_fit(self, X, y, solver_type, kernel):
         n_classes = (c_int * 1)()
         self._train_succeed = (c_int * 1)()
         thundersvm.sparse_model_scikit(
-            X.shape[0], data, indptr, indices, label, solver_type,
-            kernel_type, self.degree, c_float(self._gamma), c_float(self.coef0),
-            c_float(self.C), c_float(self.nu), c_float(self.epsilon), c_float(self.tol),
-            self.probability, weight_size, weight_label, weight,
-            self.verbose, self.max_iter, self.n_jobs, self.max_mem_size,
-            self.gpu_id,
-            n_features, n_classes, self._train_succeed, c_void_p(self.model))
+            X.shape[0], data, indptr, indices, label, solver_type,
+            kernel_type, self.degree, c_float(self._gamma), c_float(self.coef0),
+            c_float(self.C), c_float(self.nu), c_float(self.epsilon), c_float(self.tol),
+            self.probability, weight_size, weight_label, weight,
+            self.verbose, self.max_iter, self.n_jobs, self.max_mem_size,
+            self.gpu_id,
+            n_features, n_classes, self._train_succeed, c_void_p(self.model))
         self.n_features = n_features[0]
         self.n_classes = n_classes[0]
 
@@ -323,7 +326,7 @@ def _dense_predict(self, X):
             samples, features, data,
             c_void_p(self.model),
             self.predict_label_ptr, self.verbose)
-
+
         self.predict_label = np.array([self.predict_label_ptr[index] for index in range(0, X.shape[0])])
         return self.predict_label
 
@@ -341,7 +344,7 @@ def _sparse_predict(self, X):
             self.predict_label_ptr, self.verbose)
 
         predict_label = [self.predict_label_ptr[index] for index in range(0, X.shape[0])]
-
+
         self.predict_label = np.asarray(predict_label)
         return self.predict_label
 
@@ -412,6 +415,48 @@ def load_from_file(self, path):
         kernel = (c_char * 20)()
         thundersvm.init_model_param(kernel, degree, gamma,
                                     coef0, probability, c_void_p(self.model))
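+        # Recover the scikit-learn style attributes (classes, support vectors,
+        # dual coefficients, intercepts) from the loaded native model.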
+        n_classes = (c_int * 1)()
+        thundersvm.get_n_classes(c_void_p(self.model), n_classes)
+        self.n_classes = n_classes[0]
+        n_support_ = (c_int * self.n_classes)()
+        thundersvm.get_support_classes(n_support_, self.n_classes, c_void_p(self.model))
+        self.n_support_ = np.array([n_support_[index] for index in range(0, self.n_classes)]).astype(int)
+
+        self.n_sv = thundersvm.n_sv(c_void_p(self.model))
+
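+        # The feature count is recovered from the largest feature index among the stored support vectors.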
+        n_feature = (c_int * 1)()
+        thundersvm.get_sv_max_index(c_void_p(self.model), n_feature)
+        self.n_features = n_feature[0]
+
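+        # Fetch the support vectors in CSR form (row pointers, column indices, values).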
+        csr_row = (c_int * (self.n_sv + 1))()
+        csr_col = (c_int * (self.n_sv * self.n_features))()
+        csr_data = (c_float * (self.n_sv * self.n_features))()
+        data_size = (c_int * 1)()
+        thundersvm.get_sv(csr_row, csr_col, csr_data, data_size, c_void_p(self.model))
+        self.row = np.array([csr_row[index] for index in range(0, self.n_sv + 1)])
+        self.col = np.array([csr_col[index] for index in range(0, data_size[0])])
+        self.data = np.array([csr_data[index] for index in range(0, data_size[0])])
+        self.support_vectors_ = sp.csr_matrix((self.data, self.col, self.row))
+        # if self._sparse == False:
+        #     self.support_vectors_ = self.support_vectors_.toarray(order='C')
+
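+        # dual_coef_ has shape (n_classes - 1, n_sv), matching sklearn's layout.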
+        dual_coef = (c_float * ((self.n_classes - 1) * self.n_sv))()
+        thundersvm.get_coef(dual_coef, self.n_classes, self.n_sv, c_void_p(self.model))
+        self.dual_coef_ = np.array([dual_coef[index] for index in range(0, (self.n_classes - 1) * self.n_sv)]).astype(float)
+        self.dual_coef_ = np.reshape(self.dual_coef_, (self.n_classes - 1, self.n_sv))
+
+        rho_size = int(self.n_classes * (self.n_classes - 1) / 2)
+        self.n_binary_model = rho_size
+        rho = (c_float * rho_size)()
+        thundersvm.get_rho(rho, rho_size, c_void_p(self.model))
+        self.intercept_ = np.array([rho[index] for index in range(0, rho_size)]).astype(float)
+
+        # if self.kernel == 'linear':
+        #     coef = (c_float * (self.n_binary_model * self.n_sv))()
+        #     thundersvm.get_linear_coef(coef, self.n_binary_model, self.n_features, c_void_p(self.model))
+        #     self.coef_ = np.array([coef[index] for index in range(0, self.n_binary_model * self.n_features)]).astype(float)
+        #     self.coef_ = np.reshape(self.coef_, (self.n_binary_model, self.n_features))
+
         self.kernel = kernel.value
         self.degree = degree[0]
         if gamma[0] != 0.0:
@@ -421,20 +466,20 @@ def load_from_file(self, path):
 
 
 class SVC(SvmModel, ClassifierMixin):
-    _impl = 'c_svc'
-    def __init__(self, kernel='rbf', degree=3,
-                 gamma='auto', coef0=0.0, C=1.0,
-                 tol=0.001, probability=False, class_weight=None,
-                 shrinking=False, cache_size=None, verbose=False,
-                 max_iter=-1, n_jobs=-1, max_mem_size=-1, random_state=None, decision_function_shape='ovo', gpu_id=0):
-        self.decision_function_shape = decision_function_shape
-        super(SVC, self).__init__(
-            kernel=kernel, degree=degree, gamma=gamma,
-            coef0=coef0, C=C, nu=0., epsilon=0.,
-            tol=tol, probability=probability,
-            class_weight=class_weight, shrinking=shrinking,
-            cache_size=cache_size, verbose=verbose,
-            max_iter=max_iter, n_jobs=n_jobs, max_mem_size=max_mem_size, random_state=random_state, gpu_id=gpu_id)
+    _impl = 'c_svc'
+    def __init__(self, kernel='rbf', degree=3,
+                 gamma='auto', coef0=0.0, C=1.0,
+                 tol=0.001, probability=False, class_weight=None,
+                 shrinking=False, cache_size=None, verbose=False,
+                 max_iter=-1, n_jobs=-1, max_mem_size=-1, random_state=None, decision_function_shape='ovo', gpu_id=0):
+        self.decision_function_shape = decision_function_shape
+        super(SVC, self).__init__(
+            kernel=kernel, degree=degree, gamma=gamma,
+            coef0=coef0, C=C, nu=0., epsilon=0.,
+            tol=tol, probability=probability,
+            class_weight=class_weight, shrinking=shrinking,
+            cache_size=cache_size, verbose=verbose,
+            max_iter=max_iter, n_jobs=n_jobs, max_mem_size=max_mem_size, random_state=random_state, gpu_id=gpu_id)
 
 
 
@@ -488,13 +533,13 @@ def __init__(self, kernel = 'rbf', degree = 3, gamma = 'auto',
 class NuSVR(SvmModel, RegressorMixin):
     _impl = 'nu_svr'
     def __init__(self, kernel='rbf', degree=3, gamma='auto',
-                 coef0=0.0, nu=0.5, C=1.0, tol=0.001, probability=False,
-                 shrinking=False, cache_size=None, verbose=False,
-                 max_iter=-1, n_jobs=-1, max_mem_size=-1, gpu_id=0):
+                 coef0=0.0, nu=0.5, C=1.0, tol=0.001, probability=False,
+                 shrinking=False, cache_size=None, verbose=False,
+                 max_iter=-1, n_jobs=-1, max_mem_size=-1, gpu_id=0):
         super(NuSVR, self).__init__(
             kernel=kernel, degree=degree, gamma=gamma,
             coef0=coef0, nu=nu, C=C, epsilon=0.,
             tol=tol, probability=probability, class_weight=None,
             shrinking=shrinking, cache_size=cache_size, verbose=verbose,
             max_iter=max_iter, n_jobs=n_jobs, max_mem_size=max_mem_size, random_state=None, gpu_id=gpu_id
-        )
+        )