Skip to content

Commit aaed5cf

Browse files
committed
revert real into float for swig API
1 parent 7ff8e76 commit aaed5cf

File tree

4 files changed

+56
-56
lines changed

4 files changed

+56
-56
lines changed

paddle/api/Matrix.cpp

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -44,15 +44,15 @@ Matrix* Matrix::createZero(size_t height, size_t width, bool useGpu) {
4444
return m;
4545
}
4646

47-
Matrix* Matrix::createDense(const std::vector<real>& data, size_t height,
47+
Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
4848
size_t width, bool useGpu) {
4949
auto m = new Matrix();
5050
m->m->mat = paddle::Matrix::create(height, width, useGpu);
5151
m->m->mat->copyFrom(data.data(), data.size());
5252
return m;
5353
}
5454

55-
Matrix* Matrix::createCpuDenseFromNumpy(real* data, int dim1, int dim2,
55+
Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2,
5656
bool copy) {
5757
auto m = new Matrix();
5858
if (copy) {
@@ -64,7 +64,7 @@ Matrix* Matrix::createCpuDenseFromNumpy(real* data, int dim1, int dim2,
6464
return m;
6565
}
6666

67-
Matrix* Matrix::createGpuDenseFromNumpy(real* data, int dim1, int dim2) {
67+
Matrix* Matrix::createGpuDenseFromNumpy(float* data, int dim1, int dim2) {
6868
auto m = new Matrix();
6969
m->m->mat = paddle::Matrix::create(dim1, dim2, false, true);
7070
m->m->mat->copyFrom(data, dim1 * dim2);
@@ -86,15 +86,15 @@ size_t Matrix::getHeight() const { return m->mat->getHeight(); }
8686

8787
size_t Matrix::getWidth() const { return m->mat->getWidth(); }
8888

89-
real Matrix::get(size_t x, size_t y) const throw(RangeError) {
89+
float Matrix::get(size_t x, size_t y) const throw(RangeError) {
9090
if (x > this->getWidth() || y > this->getHeight()) {
9191
RangeError e;
9292
throw e;
9393
}
9494
return m->mat->getElement(x, y);
9595
}
9696

97-
void Matrix::set(size_t x, size_t y, real val) throw(RangeError,
97+
void Matrix::set(size_t x, size_t y, float val) throw(RangeError,
9898
UnsupportError) {
9999
if (x > this->getWidth() || y > this->getHeight()) {
100100
RangeError e;
@@ -193,10 +193,10 @@ FloatArray Matrix::getData() const {
193193
auto rawMat = m->mat.get();
194194
if (dynamic_cast<paddle::GpuMemoryHandle*>(rawMat->getMemoryHandle().get())) {
195195
// is gpu. then copy data
196-
real* data = rawMat->getData();
196+
float* data = rawMat->getData();
197197
size_t len = rawMat->getElementCnt();
198-
real* cpuData = new real[len];
199-
hl_memcpy_device2host(cpuData, data, len * sizeof(real));
198+
float* cpuData = new float[len];
199+
hl_memcpy_device2host(cpuData, data, len * sizeof(float));
200200
FloatArray ret_val(cpuData, len);
201201
ret_val.needFree = true;
202202
return ret_val;
@@ -208,7 +208,7 @@ FloatArray Matrix::getData() const {
208208

209209
void Matrix::sparseCopyFrom(
210210
const std::vector<int>& rows, const std::vector<int>& cols,
211-
const std::vector<real>& vals) throw(UnsupportError) {
211+
const std::vector<float>& vals) throw(UnsupportError) {
212212
auto cpuSparseMat =
213213
std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
214214
if (cpuSparseMat != nullptr) {
@@ -217,7 +217,7 @@ void Matrix::sparseCopyFrom(
217217
// <<" ValSize = "<<vals.size();
218218
cpuSparseMat->copyFrom(const_cast<std::vector<int>&>(rows),
219219
const_cast<std::vector<int>&>(cols),
220-
const_cast<std::vector<real>&>(vals));
220+
const_cast<std::vector<float>&>(vals));
221221
} else {
222222
UnsupportError e;
223223
throw e;
@@ -226,7 +226,7 @@ void Matrix::sparseCopyFrom(
226226

227227
void* Matrix::getSharedPtr() const { return &m->mat; }
228228

229-
void Matrix::toNumpyMatInplace(real** view_data, int* dim1,
229+
void Matrix::toNumpyMatInplace(float** view_data, int* dim1,
230230
int* dim2) throw(UnsupportError) {
231231
auto cpuMat = std::dynamic_pointer_cast<paddle::CpuMatrix>(m->mat);
232232
if (cpuMat) {
@@ -237,34 +237,34 @@ void Matrix::toNumpyMatInplace(real** view_data, int* dim1,
237237
throw UnsupportError();
238238
}
239239
}
240-
void Matrix::copyToNumpyMat(real** view_m_data, int* dim1,
240+
void Matrix::copyToNumpyMat(float** view_m_data, int* dim1,
241241
int* dim2) throw(UnsupportError) {
242-
static_assert(sizeof(paddle::real) == sizeof(real),
242+
static_assert(sizeof(float) == sizeof(float),
243243
"Currently PaddleAPI only support for single "
244244
"precision version of paddle.");
245245
if (this->isSparse()) {
246246
throw UnsupportError();
247247
} else {
248248
*dim1 = m->mat->getHeight();
249249
*dim2 = m->mat->getWidth();
250-
*view_m_data = new real[(*dim1) * (*dim2)];
250+
*view_m_data = new float[(*dim1) * (*dim2)];
251251
if (auto cpuMat = dynamic_cast<paddle::CpuMatrix*>(m->mat.get())) {
252252
auto src = cpuMat->getData();
253253
auto dest = *view_m_data;
254-
std::memcpy(dest, src, sizeof(paddle::real) * (*dim1) * (*dim2));
254+
std::memcpy(dest, src, sizeof(float) * (*dim1) * (*dim2));
255255
} else if (auto gpuMat = dynamic_cast<paddle::GpuMatrix*>(m->mat.get())) {
256256
auto src = gpuMat->getData();
257257
auto dest = *view_m_data;
258258
hl_memcpy_device2host(dest, src,
259-
sizeof(paddle::real) * (*dim1) * (*dim2));
259+
sizeof(float) * (*dim1) * (*dim2));
260260
} else {
261261
LOG(WARNING) << "Unexpected Situation";
262262
throw UnsupportError();
263263
}
264264
}
265265
}
266266

267-
void Matrix::copyFromNumpyMat(real* data, int dim1,
267+
void Matrix::copyFromNumpyMat(float* data, int dim1,
268268
int dim2) throw(UnsupportError, RangeError) {
269269
if (isSparse()) {
270270
throw UnsupportError();

paddle/api/PaddleAPI.h

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -56,10 +56,10 @@ class UnsupportError {};
5656

5757
/// This type will map to python's list of float.
5858
struct FloatArray {
59-
const real* buf;
59+
const float* buf;
6060
const size_t length;
6161
bool needFree; // true if the buf is dynamic alloced.
62-
FloatArray(const real* b, const size_t l);
62+
FloatArray(const float* b, const size_t l);
6363
};
6464

6565
/// This type will map to python's list of int
@@ -72,11 +72,11 @@ struct IntArray {
7272

7373
/// This type will map to python's list of (int, float)
7474
struct IntWithFloatArray {
75-
const real* valBuf;
75+
const float* valBuf;
7676
const int* idxBuf;
7777
const size_t length;
7878
bool needFree;
79-
IntWithFloatArray(const real* v, const int* i, size_t l, bool f = false);
79+
IntWithFloatArray(const float* v, const int* i, size_t l, bool f = false);
8080
};
8181

8282
enum SparseValueType { SPARSE_NON_VALUE = 0, SPARSE_VALUE = 1 };
@@ -122,7 +122,7 @@ class Matrix {
122122
* @param data list of float should be passed in python.
123123
* @note the value will be copy into a new matrix.
124124
*/
125-
static Matrix* createDense(const std::vector<real>& data, size_t height,
125+
static Matrix* createDense(const std::vector<float>& data, size_t height,
126126
size_t width, bool useGpu = false);
127127

128128
/**
@@ -134,11 +134,11 @@ class Matrix {
134134
* @param copy true if copy into a new matrix, false will create
135135
* matrix inplace.
136136
*/
137-
static Matrix* createCpuDenseFromNumpy(real* data, int dim1, int dim2,
137+
static Matrix* createCpuDenseFromNumpy(float* data, int dim1, int dim2,
138138
bool copy = false);
139139

140140
/// Create Gpu Dense Matrix from numpy matrix, dtype=float32
141-
static Matrix* createGpuDenseFromNumpy(real* data, int dim1, int dim2);
141+
static Matrix* createGpuDenseFromNumpy(float* data, int dim1, int dim2);
142142

143143
/**
144144
* Cast to numpy matrix.
@@ -154,15 +154,15 @@ class Matrix {
154154
* numpy_mat = m.toNumpyMat()
155155
* @endcode
156156
*/
157-
void toNumpyMatInplace(real** view_data, int* dim1,
157+
void toNumpyMatInplace(float** view_data, int* dim1,
158158
int* dim2) throw(UnsupportError);
159159

160160
/// Copy To numpy mat.
161-
void copyToNumpyMat(real** view_m_data, int* dim1,
161+
void copyToNumpyMat(float** view_m_data, int* dim1,
162162
int* dim2) throw(UnsupportError);
163163

164164
/// Copy From Numpy Mat
165-
void copyFromNumpyMat(real* data, int dim1, int dim2) throw(UnsupportError,
165+
void copyFromNumpyMat(float* data, int dim1, int dim2) throw(UnsupportError,
166166
RangeError);
167167

168168
/// return true if this matrix is sparse.
@@ -181,9 +181,9 @@ class Matrix {
181181

182182
size_t getWidth() const;
183183

184-
real get(size_t x, size_t y) const throw(RangeError);
184+
float get(size_t x, size_t y) const throw(RangeError);
185185

186-
void set(size_t x, size_t y, real val) throw(RangeError, UnsupportError);
186+
void set(size_t x, size_t y, float val) throw(RangeError, UnsupportError);
187187

188188
/// return type is list of float
189189
FloatArray getData() const;
@@ -195,8 +195,8 @@ class Matrix {
195195
*/
196196
void sparseCopyFrom(const std::vector<int>& rows,
197197
const std::vector<int>& cols,
198-
const std::vector<real>& values =
199-
std::vector<real>()) throw(UnsupportError);
198+
const std::vector<float>& values =
199+
std::vector<float>()) throw(UnsupportError);
200200

201201
bool isGpu() const;
202202

@@ -228,33 +228,33 @@ class Vector {
228228
*
229229
* It will create a new vector, and copy data into it.
230230
*/
231-
static Vector* create(const std::vector<real>& data, bool useGpu = false);
231+
static Vector* create(const std::vector<float>& data, bool useGpu = false);
232232

233233
/**
234234
* Create Cpu Vector from numpy array, which dtype=float32
235235
*
236236
* If copy is false, it will create vector inplace.
237237
*/
238-
static Vector* createCpuVectorFromNumpy(real* data, int dim,
238+
static Vector* createCpuVectorFromNumpy(float* data, int dim,
239239
bool copy = false);
240240

241241
/// Create Gpu Vector from numpy array, which dtype=float32
242-
static Vector* createGpuVectorFromNumpy(real* data, int dim);
242+
static Vector* createGpuVectorFromNumpy(float* data, int dim);
243243

244244
/// Cast to numpy array inplace.
245-
void toNumpyArrayInplace(real** view_data, int* dim1) throw(UnsupportError);
245+
void toNumpyArrayInplace(float** view_data, int* dim1) throw(UnsupportError);
246246

247247
/// Copy to numpy array.
248-
void copyToNumpyArray(real** view_m_data, int* dim1);
248+
void copyToNumpyArray(float** view_m_data, int* dim1);
249249

250250
/// Copy from numpy array.
251-
void copyFromNumpyArray(real* data, int dim);
251+
void copyFromNumpyArray(float* data, int dim);
252252

253253
/// __getitem__ in python
254-
real get(const size_t idx) const throw(RangeError, UnsupportError);
254+
float get(const size_t idx) const throw(RangeError, UnsupportError);
255255

256256
/// __setitem__ in python
257-
void set(const size_t idx, real val) throw(RangeError, UnsupportError);
257+
void set(const size_t idx, float val) throw(RangeError, UnsupportError);
258258

259259
/// Return is GPU vector or not.
260260
bool isGpu() const;

paddle/api/Util.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,13 +31,13 @@ void initPaddle(int argc, char** argv) {
3131
feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);
3232
}
3333

34-
FloatArray::FloatArray(const real* b, const size_t l)
34+
FloatArray::FloatArray(const float* b, const size_t l)
3535
: buf(b), length(l), needFree(false) {}
3636

3737
IntArray::IntArray(const int* b, const size_t l, bool f)
3838
: buf(b), length(l), needFree(f) {}
3939

40-
IntWithFloatArray::IntWithFloatArray(const real* v, const int* i, size_t l,
40+
IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
4141
bool f)
4242
: valBuf(v), idxBuf(i), length(l), needFree(f) {}
4343

paddle/api/Vector.cpp

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ struct VectorPrivate {
140140
paddle::VectorPtr vec;
141141

142142
void safeAccessData(const size_t idx,
143-
const std::function<void(real&)>& func) const
143+
const std::function<void(float&)>& func) const
144144
throw(RangeError, UnsupportError) {
145145
auto cpuVec = std::dynamic_pointer_cast<const paddle::CpuVector>(vec);
146146
if (cpuVec != nullptr) {
@@ -170,7 +170,7 @@ Vector* Vector::createZero(size_t sz, bool useGpu) {
170170
return retVec;
171171
}
172172

173-
Vector* Vector::create(const std::vector<real>& data, bool useGpu) {
173+
Vector* Vector::create(const std::vector<float>& data, bool useGpu) {
174174
auto retVec = new Vector();
175175
retVec->m->vec = paddle::Vector::create(data.size(), useGpu);
176176
retVec->m->vec->copyFrom(data.data(), data.size());
@@ -188,7 +188,7 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
188188
}
189189
}
190190

191-
Vector* Vector::createCpuVectorFromNumpy(real* data, int dim, bool copy) {
191+
Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
192192
CHECK_GT(dim, 0);
193193
auto retVec = new Vector();
194194
if (copy) {
@@ -200,15 +200,15 @@ Vector* Vector::createCpuVectorFromNumpy(real* data, int dim, bool copy) {
200200
return retVec;
201201
}
202202

203-
Vector* Vector::createGpuVectorFromNumpy(real* data, int dim) {
203+
Vector* Vector::createGpuVectorFromNumpy(float* data, int dim) {
204204
CHECK_GT(dim, 0);
205205
auto retVec = new Vector();
206206
retVec->m->vec = paddle::Vector::create((size_t)dim, true);
207207
retVec->m->vec->copyFrom(data, (size_t)dim);
208208
return retVec;
209209
}
210210

211-
void Vector::toNumpyArrayInplace(real** view_data,
211+
void Vector::toNumpyArrayInplace(float** view_data,
212212
int* dim1) throw(UnsupportError) {
213213
auto v = std::dynamic_pointer_cast<paddle::CpuVector>(m->vec);
214214
if (v != nullptr) {
@@ -219,20 +219,20 @@ void Vector::toNumpyArrayInplace(real** view_data,
219219
}
220220
}
221221

222-
void Vector::copyToNumpyArray(real** view_m_data, int* dim1) {
222+
void Vector::copyToNumpyArray(float** view_m_data, int* dim1) {
223223
*dim1 = m->vec->getSize();
224-
*view_m_data = new real[*dim1];
224+
*view_m_data = new float[*dim1];
225225
if (auto cpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
226-
std::memcpy(*view_m_data, cpuVec->getData(), sizeof(real) * (*dim1));
226+
std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1));
227227
} else if (auto gpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
228228
hl_memcpy_device2host(*view_m_data, gpuVec->getData(),
229-
sizeof(real) * (*dim1));
229+
sizeof(float) * (*dim1));
230230
} else {
231231
LOG(INFO) << "Unexpected situation";
232232
}
233233
}
234234

235-
void Vector::copyFromNumpyArray(real* data, int dim) {
235+
void Vector::copyFromNumpyArray(float* data, int dim) {
236236
m->vec->resize(dim);
237237
m->vec->copyFrom(data, dim);
238238
}
@@ -241,15 +241,15 @@ bool Vector::isGpu() const {
241241
return std::dynamic_pointer_cast<paddle::GpuVector>(m->vec) != nullptr;
242242
}
243243

244-
real Vector::get(const size_t idx) const throw(RangeError, UnsupportError) {
245-
real r;
246-
m->safeAccessData(idx, [&](real& o) { r = o; });
244+
float Vector::get(const size_t idx) const throw(RangeError, UnsupportError) {
245+
float r;
246+
m->safeAccessData(idx, [&](float& o) { r = o; });
247247
return r;
248248
}
249249

250-
void Vector::set(const size_t idx, real val) throw(RangeError,
250+
void Vector::set(const size_t idx, float val) throw(RangeError,
251251
UnsupportError) {
252-
m->safeAccessData(idx, [&](real& o) { o = val; });
252+
m->safeAccessData(idx, [&](float& o) { o = val; });
253253
}
254254

255255
size_t Vector::getSize() const { return m->vec->getSize(); }

0 commit comments

Comments (0)