Skip to content

Commit 0882314

Browse files
authored
Merge pull request #11232 from reyoung/feature/extract_tensor
Extract method from tensor_impl.h to tensor.cc
2 parents d02b318 + b6c8701 commit 0882314

File tree

3 files changed

+113
-124
lines changed

3 files changed

+113
-124
lines changed

paddle/fluid/framework/tensor.cc

Lines changed: 98 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,5 +15,102 @@ limitations under the License. */
1515
#include "paddle/fluid/framework/tensor.h"
1616

1717
namespace paddle {
18-
namespace framework {}
18+
namespace framework {
19+
extern size_t SizeOfType(std::type_index type);
20+
// Validate that this tensor owns a buffer large enough for its declared
// shape and element type. Aborts via PADDLE_ENFORCE* on violation.
// Requires a prior successful mutable_data() call (holder_ non-null).
void Tensor::check_memory_size() const {
  PADDLE_ENFORCE_NOT_NULL(
      holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
  // Required bytes (elements * element size) must fit in the bytes
  // actually held past this tensor's offset (memory_size()).
  PADDLE_ENFORCE_LE(
      numel() * SizeOfType(type()), memory_size(),
      "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
      "first to re-allocate memory.\n"
      "or maybe the required data-type mismatches the data already stored.");
}
29+
30+
size_t Tensor::memory_size() const {
31+
return holder_ == nullptr ? 0UL : holder_->size() - offset_;
32+
}
33+
34+
// Return a pointer to a writable memory block on `place` large enough
// for numel() elements of `type`. The existing holder is reused when it
// is on the same place and large enough; otherwise a new block is
// allocated and offset_ is reset to 0.
void* Tensor::mutable_data(platform::Place place, std::type_index type) {
  if (holder_ != nullptr) {
    holder_->set_type(type);
  }
  PADDLE_ENFORCE_GE(numel(), 0,
                    "When calling this method, the Tensor's numel must be "
                    "equal or larger than zero. "
                    "Please check Tensor::Resize has been called first.");
  // Total bytes required for the current shape at this element type.
  int64_t size = numel() * SizeOfType(type);
  /* some versions of boost::variant don't have operator!= */
  if (holder_ == nullptr || !(holder_->place() == place) ||
      holder_->size() < size + offset_) {
    if (platform::is_cpu_place(place)) {
      holder_.reset(new PlaceholderImpl<platform::CPUPlace>(
          boost::get<platform::CPUPlace>(place), size, type));
    } else if (platform::is_gpu_place(place) ||
               platform::is_cuda_pinned_place(place)) {
// NOTE: the closing brace of the else-if above appears once in EACH
// preprocessor branch below, so both CPU-only and CUDA builds stay
// brace-balanced. Edit with care.
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW(
          "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
    }
#else
      if (platform::is_gpu_place(place)) {
        holder_.reset(new PlaceholderImpl<platform::CUDAPlace>(
            boost::get<platform::CUDAPlace>(place), size, type));
      } else if (platform::is_cuda_pinned_place(place)) {
        holder_.reset(new PlaceholderImpl<platform::CUDAPinnedPlace>(
            boost::get<platform::CUDAPinnedPlace>(place), size, type));
      }
    }
#endif
    // Fresh allocation: data starts at the beginning of the new block.
    offset_ = 0;
  }
  // Pointer arithmetic through uintptr_t to apply the byte offset.
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
}
70+
71+
// Overload that reuses the element type already recorded in the holder.
// Requires that memory has been allocated at least once before.
void* Tensor::mutable_data(platform::Place place) {
  PADDLE_ENFORCE(this->holder_ != nullptr,
                 "Cannot invoke mutable data if current hold nothing.");
  const std::type_index dtype = holder_->type();
  return mutable_data(place, dtype);
}
76+
77+
// Make this tensor share src's underlying buffer (shallow assignment):
// afterwards both tensors reference the same holder. No data is copied.
// src must already hold valid memory (checked).
Tensor& Tensor::ShareDataWith(const Tensor& src) {
  src.check_memory_size();
  *this = src;
  return *this;
}
82+
83+
// Return a sub-tensor covering rows [begin_idx, end_idx) along
// dimension 0. No data is copied: the result shares this tensor's
// holder and records a byte offset to the first sliced row.
// begin_idx is inclusive, end_idx exclusive; both are row indices.
Tensor Tensor::Slice(int begin_idx, int end_idx) const {
  check_memory_size();
  // Fix: the check is >= 0, so the message must say "or equal to"
  // (the old text "greater than 0" contradicted PADDLE_ENFORCE_GE).
  PADDLE_ENFORCE_GE(begin_idx, 0,
                    "The start row index must be greater than or equal to 0.");
  PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound.");
  PADDLE_ENFORCE_LT(
      begin_idx, end_idx,
      "The start row index must be lesser than the end row index.");

  if (dims_[0] == 1) {
    // Single-row tensor: the only valid slice is the whole tensor.
    return *this;
  } else {
    size_t base = numel() / dims_[0];  // elements per row
    Tensor dst;
    dst.holder_ = holder_;  // share storage, do not copy
    dst.set_layout(layout_);
    DDim dst_dims = dims_;
    dst_dims[0] = end_idx - begin_idx;
    dst.Resize(dst_dims);
    // Advance the byte offset to the first sliced row.
    dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
    return dst;
  }
}
106+
107+
// Set the tensor's logical dimensions. Does NOT allocate or free
// memory; call mutable_data afterwards to (re)allocate storage.
Tensor& Tensor::Resize(const DDim& dims) {
  dims_ = dims;
  return *this;
}
111+
112+
// Logical dimensions of the tensor, as last set by Resize().
const DDim& Tensor::dims() const { return dims_; }
113+
114+
// Total number of elements: the product of all entries of dims_.
int64_t Tensor::numel() const { return product(dims_); }
115+
} // namespace framework
19116
} // namespace paddle

paddle/fluid/framework/tensor.h

Lines changed: 15 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -54,26 +54,24 @@ class Tensor {
5454

5555
/*! Return a pointer to mutable memory block. */
5656
template <typename T>
57-
inline T* data();
57+
T* data();
5858

5959
/*! Return a pointer to constant memory block. */
6060
template <typename T>
61-
inline const T* data() const;
61+
const T* data() const;
6262

63-
inline bool IsInitialized() const;
64-
65-
inline void switch_place(platform::Place new_place);
63+
bool IsInitialized() const;
6664

6765
/**
6866
* @brief Return a pointer to mutable memory block.
6967
* @note If not exist, then allocation.
7068
*/
7169
template <typename T>
72-
inline T* mutable_data(platform::Place place);
70+
T* mutable_data(platform::Place place);
7371

74-
inline void* mutable_data(platform::Place place, std::type_index type);
72+
void* mutable_data(platform::Place place, std::type_index type);
7573

76-
inline void* mutable_data(platform::Place place);
74+
void* mutable_data(platform::Place place);
7775

7876
/**
7977
* @brief Return a pointer to mutable memory block.
@@ -84,19 +82,19 @@ class Tensor {
8482
* @note If not exist, then allocation.
8583
*/
8684
template <typename T>
87-
inline T* mutable_data(DDim dims, platform::Place place);
85+
T* mutable_data(DDim dims, platform::Place place);
8886

8987
/*! Return the dimensions of the memory block. */
90-
inline const DDim& dims() const;
88+
const DDim& dims() const;
9189

9290
/*! Return the numel of the memory block. */
93-
inline int64_t numel() const;
91+
int64_t numel() const;
9492

9593
/*! Resize the dimensions of the memory block. */
96-
inline Tensor& Resize(const DDim& dims);
94+
Tensor& Resize(const DDim& dims);
9795

9896
/*! The internal of two tensors share the same memory block. */
99-
inline Tensor& ShareDataWith(const Tensor& src);
97+
Tensor& ShareDataWith(const Tensor& src);
10098

10199
/**
102100
* @brief Return a sub-tensor of the given tensor.
@@ -106,7 +104,7 @@ class Tensor {
106104
* @param[in] end_idx The index of the end row(exclusive) to slice.
107105
* The index number begins from 0.
108106
*/
109-
inline Tensor Slice(int begin_idx, int end_idx) const;
107+
Tensor Slice(int begin_idx, int end_idx) const;
110108

111109
platform::Place place() const {
112110
PADDLE_ENFORCE_NOT_NULL(
@@ -123,11 +121,11 @@ class Tensor {
123121
// memory size returns the holding memory size in byte.
124122
size_t memory_size() const;
125123

126-
inline void check_memory_size() const;
124+
void check_memory_size() const;
127125

128-
inline DataLayout layout() const { return layout_; }
126+
DataLayout layout() const { return layout_; }
129127

130-
inline void set_layout(const DataLayout layout) { layout_ = layout; }
128+
void set_layout(const DataLayout layout) { layout_ = layout; }
131129

132130
private:
133131
/**
@@ -210,15 +208,6 @@ class Tensor {
210208
size_t offset_;
211209
};
212210

213-
inline void Tensor::switch_place(platform::Place new_place) {
214-
if (holder_->place() == new_place) {
215-
return;
216-
}
217-
218-
// TODO(tonyyang-svail): do memcpy here.
219-
PADDLE_THROW("Not Implemented");
220-
}
221-
222211
} // namespace framework
223212
} // namespace paddle
224213

paddle/fluid/framework/tensor_impl.h

Lines changed: 0 additions & 97 deletions
Original file line numberDiff line numberDiff line change
@@ -20,21 +20,6 @@ limitations under the License. */
2020

2121
namespace paddle {
2222
namespace framework {
23-
extern size_t SizeOfType(std::type_index type);
24-
inline void Tensor::check_memory_size() const {
25-
PADDLE_ENFORCE_NOT_NULL(
26-
holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
27-
PADDLE_ENFORCE_LE(
28-
numel() * SizeOfType(type()), memory_size(),
29-
"Tensor's dims_ is out of bound. Call Tensor::mutable_data "
30-
"first to re-allocate memory.\n"
31-
"or maybe the required data-type mismatches the data already stored.");
32-
}
33-
34-
inline size_t Tensor::memory_size() const {
35-
return holder_ == nullptr ? 0UL : holder_->size() - offset_;
36-
}
37-
3823
template <typename T>
3924
inline const T* Tensor::data() const {
4025
check_memory_size();
@@ -73,88 +58,6 @@ inline T* Tensor::mutable_data(platform::Place place) {
7358
return reinterpret_cast<T*>(mutable_data(place, typeid(T)));
7459
}
7560

76-
inline void* Tensor::mutable_data(platform::Place place, std::type_index type) {
77-
if (holder_ != nullptr) {
78-
holder_->set_type(type);
79-
}
80-
PADDLE_ENFORCE_GE(numel(), 0,
81-
"When calling this method, the Tensor's numel must be "
82-
"equal or larger than zero. "
83-
"Please check Tensor::Resize has been called first.");
84-
int64_t size = numel() * SizeOfType(type);
85-
/* some versions of boost::variant don't have operator!= */
86-
if (holder_ == nullptr || !(holder_->place() == place) ||
87-
holder_->size() < size + offset_) {
88-
if (platform::is_cpu_place(place)) {
89-
holder_.reset(new PlaceholderImpl<platform::CPUPlace>(
90-
boost::get<platform::CPUPlace>(place), size, type));
91-
} else if (platform::is_gpu_place(place) ||
92-
platform::is_cuda_pinned_place(place)) {
93-
#ifndef PADDLE_WITH_CUDA
94-
PADDLE_THROW(
95-
"CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
96-
}
97-
#else
98-
if (platform::is_gpu_place(place)) {
99-
holder_.reset(new PlaceholderImpl<platform::CUDAPlace>(
100-
boost::get<platform::CUDAPlace>(place), size, type));
101-
} else if (platform::is_cuda_pinned_place(place)) {
102-
holder_.reset(new PlaceholderImpl<platform::CUDAPinnedPlace>(
103-
boost::get<platform::CUDAPinnedPlace>(place), size, type));
104-
}
105-
}
106-
#endif
107-
offset_ = 0;
108-
}
109-
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
110-
offset_);
111-
}
112-
113-
inline void* Tensor::mutable_data(platform::Place place) {
114-
PADDLE_ENFORCE(this->holder_ != nullptr,
115-
"Cannot invoke mutable data if current hold nothing.");
116-
return mutable_data(place, holder_->type());
117-
}
118-
119-
inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
120-
src.check_memory_size();
121-
*this = src;
122-
return *this;
123-
}
124-
125-
inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
126-
check_memory_size();
127-
PADDLE_ENFORCE_GE(begin_idx, 0,
128-
"The start row index must be greater than 0.");
129-
PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound.");
130-
PADDLE_ENFORCE_LT(
131-
begin_idx, end_idx,
132-
"The start row index must be lesser than the end row index.");
133-
134-
if (dims_[0] == 1) {
135-
return *this;
136-
} else {
137-
size_t base = numel() / dims_[0];
138-
Tensor dst;
139-
dst.holder_ = holder_;
140-
dst.set_layout(layout_);
141-
DDim dst_dims = dims_;
142-
dst_dims[0] = end_idx - begin_idx;
143-
dst.Resize(dst_dims);
144-
dst.offset_ = offset_ + begin_idx * base * SizeOfType(type());
145-
return dst;
146-
}
147-
}
148-
149-
inline Tensor& Tensor::Resize(const DDim& dims) {
150-
dims_ = dims;
151-
return *this;
152-
}
153-
154-
inline const DDim& Tensor::dims() const { return dims_; }
155-
156-
inline int64_t Tensor::numel() const { return product(dims_); }
157-
15861
inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) {
15962
Tensor res;
16063
res.ShareDataWith(src);

0 commit comments

Comments
 (0)