Skip to content

Commit 788636f

Browse files
committed
update by comments
1 parent e2d5683 commit 788636f

File tree

3 files changed

+3
-38
lines changed

3 files changed

+3
-38
lines changed

paddle/fluid/framework/tensor.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -98,9 +98,6 @@ class Tensor {
9898
/*! The internal of two tensors share the same memory block. */
9999
inline Tensor& ShareDataWith(const Tensor& src);
100100

101-
/*! Share part of the memory of the two tensors */
102-
inline Tensor& ShareDataWith(const Tensor* src, size_t offset);
103-
104101
/**
105102
* @brief Return a sub-tensor of the given tensor.
106103
*

paddle/fluid/framework/tensor_impl.h

Lines changed: 0 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -162,37 +162,6 @@ inline Tensor& Tensor::ShareDataWith(const Tensor& src) {
162162
return *this;
163163
}
164164

165-
inline Tensor& Tensor::ShareDataWith(const Tensor* src, size_t offset) {
166-
// NOTE: data size is determined by current tensor shape and data type
167-
src->check_memory_size();
168-
PADDLE_ENFORCE_EQ(src->type(), this->type(),
169-
"tensor data type must be the same when sharing data");
170-
auto place = src->place();
171-
auto type = src->type();
172-
size_t size = src->numel() * SizeOfType(src->type());
173-
auto* ref = src->data<uint8_t>() + offset;
174-
if (platform::is_cpu_place(place)) {
175-
holder_.reset(new SharedPlaceholderImpl<platform::CPUPlace>(
176-
boost::get<platform::CPUPlace>(place), ref, size, type));
177-
} else if (platform::is_gpu_place(place) ||
178-
platform::is_cuda_pinned_place(place)) {
179-
#ifndef PADDLE_WITH_CUDA
180-
PADDLE_THROW(
181-
"CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode.");
182-
}
183-
#else
184-
if (platform::is_gpu_place(place)) {
185-
holder_.reset(new SharedPlaceholderImpl<platform::CUDAPlace>(
186-
boost::get<platform::CUDAPlace>(place), ref, size, type));
187-
} else if (platform::is_cuda_pinned_place(place)) {
188-
holder_.reset(new SharedPlaceholderImpl<platform::CUDAPinnedPlace>(
189-
boost::get<platform::CUDAPinnedPlace>(place), ref, size, type));
190-
}
191-
}
192-
#endif
193-
return *this;
194-
}
195-
196165
inline Tensor Tensor::Slice(int begin_idx, int end_idx) const {
197166
check_memory_size();
198167
PADDLE_ENFORCE_GE(begin_idx, 0,

paddle/fluid/operators/split_byref_op.h

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,15 +26,14 @@ class SplitByrefOpKernel : public framework::OpKernel<T> {
2626
void Compute(const framework::ExecutionContext& ctx) const override {
2727
auto* in = ctx.Input<framework::Tensor>("X");
2828
auto outs = ctx.MultiOutput<framework::Tensor>("Out");
29-
auto in_stride = framework::stride_numel(in->dims());
3029
auto place = ctx.GetPlace();
3130

32-
size_t input_offset = 0;
31+
size_t row_offset = 0;
3332
for (size_t i = 0; i < outs.size(); ++i) {
3433
// NOTE: no need to call mutable_data here to allocate memory.
3534
auto* out = outs[i];
36-
out->ShareDataWith(in, input_offset);
37-
input_offset += out->numel() * framework::SizeOfType(out->type());
35+
*out = std::move(in->Slice(row_offset, out->dims()[0]));
36+
row_offset += out->dims()[0];
3837
}
3938
}
4039
};

0 commit comments

Comments (0)