Skip to content

Commit 7b54f16

Browse files
author
chengduo
authored
Follow comment (#11845)
1 parent b037896 commit 7b54f16

File tree

2 files changed

+17
-15
lines changed

2 files changed

+17
-15
lines changed

paddle/fluid/framework/tensor_util.cc

Lines changed: 2 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -73,18 +73,12 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
7373
memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
7474
stream);
7575
} else {
76-
// NOTE(zcd): Because TensorCopy is an async operation, when the src_place
77-
// and dst_place are two different GPU, to ensure that the operation can
78-
// be carried out correctly, we should make ctx wait.
79-
// If ctx_place and src_place are the same, we should add ctx.Wait()
80-
// after memory::Copy; if ctx_place and dst_place are the same, we should
81-
// add ctx.Wait() before memory::Copy.
8276
if (platform::is_same_place(ctx_place, src_place)) {
8377
memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
8478
stream);
85-
ctx.Wait();
79+
platform::DeviceContextPool::Instance().Get(src.place())->Wait();
8680
} else if (platform::is_same_place(ctx_place, dst_place)) {
87-
ctx.Wait();
81+
platform::DeviceContextPool::Instance().Get(src.place())->Wait();
8882
memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
8983
stream);
9084
} else {
@@ -97,13 +91,6 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place,
9791

9892
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
9993
Tensor* dst) {
100-
// NOTE(zcd): If the src.place() and dst_place are two different GPU,
101-
// the copy operation is carried out on the dst_place's stream. This is
102-
// very important, because TensorCopy is an async operator, and in most
103-
// case, once this copy operator returns, dst is to be used in dst_place's
104-
// stream, if this copy operation is carried out on the src_place's stream,
105-
// when dst is used in dst_place's stream the copy operation may be
106-
// not completed.
10794
platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
10895
const platform::DeviceContext* dev_ctx;
10996
if (platform::is_gpu_place(dst_place)) {

paddle/fluid/framework/tensor_util.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,25 @@ limitations under the License. */
2323
namespace paddle {
2424
namespace framework {
2525

26+
// NOTE(zcd): Because TensorCopy is an async operation, when the src_place
27+
// and dst_place are two different GPU, to ensure that the operation can
28+
// be carried out correctly, there is a src_ctx wait operation in TensorCopy.
29+
// If ctx_place and src_place are the same, src_ctx.Wait() is added
30+
// after memory::Copy; if ctx_place and dst_place are the same,
31+
// src_ctx.Wait() is added before memory::Copy.
2632
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
2733
const platform::DeviceContext& ctx, Tensor* dst);
34+
35+
// NOTE(zcd): If the src.place() and dst_place are two different GPU,
36+
// the copy operation is carried out on the dst_place's stream. This is
37+
// very important, because TensorCopy is an async operator, and in most
38+
// case, once this copy operator returns, dst is to be used in dst_place's
39+
// stream, if this copy operation is carried out on the src_place's stream,
40+
// when dst is used in dst_place's stream the copy operation may be
41+
// not completed.
2842
void TensorCopy(const Tensor& src, const platform::Place& dst_place,
2943
Tensor* dst);
44+
3045
void TensorCopySync(const Tensor& src, const platform::Place& dst_place,
3146
Tensor* dst);
3247

0 commit comments

Comments (0)