@@ -63,10 +63,14 @@ void Sampler::setSystemDefinition(shared_ptr<SystemDefinition> sysdef)
6363
6464void Sampler::run_on_data (py::function py_exec, const access_location::Enum location, const access_mode::Enum mode)
6565{
66+ #ifdef ENABLE_CUDA
6667 if (location == access_location::device and not m_exec_conf->isCUDAEnabled ())
6768 throw runtime_error (" Invalid request for device memory in non-cuda run." );
6869
6970 const bool on_device = location == access_location::device;
71+ #else
72+ const bool on_device = false ;
73+ #endif // ENABLE_CUDA
7074
7175 const ArrayHandle<Scalar4> pos (m_pdata->getPositions (), location, mode);
7276 auto pos_bridge = wrap<Scalar4, Scalar>(pos.data , on_device, 4 );
@@ -96,7 +100,11 @@ void Sampler::update(unsigned int timestep)
96100
97101 // Accessing the handles here holds them valid until the block of this function.
98102 // This keeps them valid for the python function call
103+ #ifdef ENABLE_CUDA
99104 auto location = m_exec_conf->isCUDAEnabled () ? access_location::device : access_location::host;
105+ #else
106+ auto location = access_location::host;
107+ #endif // ENABLE_CUDA
100108
101109 // const ArrayHandle<Scalar4> pos(m_pdata->getPositions(), location, access_mode::read);
102110 // auto pos_tensor = wrap<Scalar4, Scalar>(pos.data, 4 );
@@ -124,7 +132,11 @@ DLDataBridge Sampler::wrap(TV* ptr,
124132 assert ((size2 >= 1 )); // assert is a macro so the extra parentheses are required here
125133
126134 const unsigned int particle_number = this ->m_pdata ->getN ();
135+ #ifdef ENABLE_CUDA
127136 const int gpu_id = on_device ? m_exec_conf->getGPUIds ()[0 ] : m_exec_conf->getRank ();
137+ #else
138+ const int gpu_id = m_exec_conf->getRank ();
139+ #endif // ENABLE_CUDA
128140
129141 DLDataBridge bridge;
130142 bridge.tensor .manager_ctx = NULL ;
0 commit comments