@@ -110,8 +110,7 @@ struct calc_grad_with_block_op<T, base_device::DEVICE_CPU>
 template <typename FPTYPE>
 struct dot_real_op<FPTYPE, base_device::DEVICE_CPU>
 {
-    FPTYPE operator()(const base_device::DEVICE_CPU* d,
-                      const int& dim,
+    FPTYPE operator()(const int& dim,
                       const FPTYPE* psi_L,
                       const FPTYPE* psi_R,
                       const bool reduce)
@@ -129,8 +128,7 @@ struct dot_real_op<FPTYPE, base_device::DEVICE_CPU>
 template <typename FPTYPE>
 struct dot_real_op<std::complex<FPTYPE>, base_device::DEVICE_CPU>
 {
-    FPTYPE operator()(const base_device::DEVICE_CPU* d,
-                      const int& dim,
+    FPTYPE operator()(const int& dim,
                       const std::complex<FPTYPE>* psi_L,
                       const std::complex<FPTYPE>* psi_R,
                       const bool reduce)
@@ -153,7 +151,7 @@ template <typename T>
 struct vector_div_constant_op<T, base_device::DEVICE_CPU>
 {
     using Real = typename GetTypeReal<T>::type;
-    void operator()(const base_device::DEVICE_CPU* d, const int dim, T* result, const T* vector, const Real constant)
+    void operator()(const int dim, T* result, const T* vector, const Real constant)
     {
 #ifdef _OPENMP
 #pragma omp parallel for schedule(static, 4096 / sizeof(Real))
@@ -169,7 +167,7 @@ template <typename T>
 struct vector_mul_vector_op<T, base_device::DEVICE_CPU>
 {
     using Real = typename GetTypeReal<T>::type;
-    void operator()(const base_device::DEVICE_CPU* d, const int& dim, T* result, const T* vector1, const Real* vector2)
+    void operator()(const int& dim, T* result, const T* vector1, const Real* vector2)
     {
 #ifdef _OPENMP
 #pragma omp parallel for schedule(static, 4096 / sizeof(Real))
@@ -185,7 +183,7 @@ template <typename T>
 struct vector_div_vector_op<T, base_device::DEVICE_CPU>
 {
     using Real = typename GetTypeReal<T>::type;
-    void operator()(const base_device::DEVICE_CPU* d, const int& dim, T* result, const T* vector1, const Real* vector2)
+    void operator()(const int& dim, T* result, const T* vector1, const Real* vector2)
     {
 #ifdef _OPENMP
 #pragma omp parallel for schedule(static, 4096 / sizeof(Real))
@@ -201,8 +199,7 @@ template <typename T>
 struct constantvector_addORsub_constantVector_op<T, base_device::DEVICE_CPU>
 {
     using Real = typename GetTypeReal<T>::type;
-    void operator()(const base_device::DEVICE_CPU* d,
-                    const int& dim,
+    void operator()(const int& dim,
                     T* result,
                     const T* vector1,
                     const Real constant1,
@@ -222,8 +219,7 @@ struct constantvector_addORsub_constantVector_op<T, base_device::DEVICE_CPU>
 template <typename FPTYPE>
 struct scal_op<FPTYPE, base_device::DEVICE_CPU>
 {
-    void operator()(const base_device::DEVICE_CPU* /* ctx */,
-                    const int& N,
+    void operator()(const int& N,
                     const std::complex<FPTYPE>* alpha,
                     std::complex<FPTYPE>* X,
                     const int& incx)
@@ -235,8 +231,7 @@ struct scal_op<FPTYPE, base_device::DEVICE_CPU>
 template <typename T>
 struct gemv_op<T, base_device::DEVICE_CPU>
 {
-    void operator()(const base_device::DEVICE_CPU* d,
-                    const char& trans,
+    void operator()(const char& trans,
                     const int& m,
                     const int& n,
                     const T* alpha,
@@ -255,8 +250,7 @@ struct gemv_op<T, base_device::DEVICE_CPU>
 template <typename T>
 struct axpy_op<T, base_device::DEVICE_CPU>
 {
-    void operator()(const base_device::DEVICE_CPU* /* ctx */,
-                    const int& dim,
+    void operator()(const int& dim,
                     const T* alpha,
                     const T* X,
                     const int& incX,
@@ -270,8 +264,7 @@ struct axpy_op<T, base_device::DEVICE_CPU>
 template <typename T>
 struct gemm_op<T, base_device::DEVICE_CPU>
 {
-    void operator()(const base_device::DEVICE_CPU* /* ctx */,
-                    const char& transa,
+    void operator()(const char& transa,
                     const char& transb,
                     const int& m,
                     const int& n,
@@ -293,8 +286,7 @@ struct gemm_op<T, base_device::DEVICE_CPU>
 template <typename T>
 struct gemm_op_mt<T, base_device::DEVICE_CPU>
 {
-    void operator()(const base_device::DEVICE_CPU* /* ctx */,
-                    const char& transa,
+    void operator()(const char& transa,
                     const char& transb,
                     const int& m,
                     const int& n,
@@ -316,8 +308,7 @@ struct gemm_op_mt<T, base_device::DEVICE_CPU>
 template <typename T>
 struct matrixTranspose_op<T, base_device::DEVICE_CPU>
 {
-    void operator()(const base_device::DEVICE_CPU* d,
-                    const int& row,
+    void operator()(const int& row,
                     const int& col,
                     const T* input_matrix,
                     T* output_matrix)
@@ -348,7 +339,7 @@ struct matrixTranspose_op<T, base_device::DEVICE_CPU>
 template <typename T>
 struct matrixSetToAnother<T, base_device::DEVICE_CPU>
 {
-    void operator()(const base_device::DEVICE_CPU* d, const int& n, const T* A, const int& LDA, T* B, const int& LDB)
+    void operator()(const int& n, const T* A, const int& LDA, T* B, const int& LDB)
     {
 #ifdef _OPENMP
 #pragma omp parallel for collapse(2) schedule(static, 8192 / sizeof(T))
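
Taken together, these hunks remove the device-context pointer (unused in the CPU path, as the existing /* ctx */ comments suggest) from each CPU specialization, so call sites now pass the dimension or transpose flag as the first argument. Below is a minimal, self-contained sketch of the resulting calling pattern; it is not the ABACUS implementation itself: the struct name is simplified, the device-tag template parameter and the reduce/MPI handling of the real dot_real_op are omitted, and the numbers are illustrative only.

#include <cstdio>
#include <vector>

// Simplified stand-in for the CPU specialization of dot_real_op after this
// change: the device-context argument is gone and `dim` comes first.
// (Assumption: illustrative sketch, not the real templated functor.)
template <typename FPTYPE>
struct dot_real_op_cpu
{
    FPTYPE operator()(const int& dim, const FPTYPE* psi_L, const FPTYPE* psi_R) const
    {
        FPTYPE result = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+ : result)
#endif
        for (int i = 0; i < dim; ++i)
        {
            result += psi_L[i] * psi_R[i];
        }
        return result;
    }
};

int main()
{
    const std::vector<double> a(4, 1.0), b(4, 2.0);
    // Call site after the refactor: no device pointer, just the size and data.
    const double dot = dot_real_op_cpu<double>()(4, a.data(), b.data());
    std::printf("dot = %f\n", dot); // expected: 8.000000
    return 0;
}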