Skip to content

Commit f79ca23

Browse files
committed
fix bugs
1 parent c501826 commit f79ca23

File tree

3 files changed

+9
-3
lines changed

3 files changed

+9
-3
lines changed

paddle/fluid/framework/rw_lock.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -71,13 +71,17 @@ class RWLockGuard {
         WRLock();
         break;
       }
+      case Status::kUnLock: {
+        break;
+      }
     }
   }

   void WRLock() {
     switch (status_) {
       case Status::kUnLock: {
         lock_->WRLock();
+        status_ = Status::kWRLock;
         break;
       }
       case Status::kWRLock: {
@@ -95,6 +99,7 @@ class RWLockGuard {
     switch (status_) {
       case Status::kUnLock: {
         lock_->RDLock();
+        status_ = Status::kRDLock;
         break;
       }
       case Status::kRDLock: {
@@ -111,6 +116,7 @@ class RWLockGuard {
   void UnLock() {
     if (status_ != Status::kUnLock) {
       lock_->UNLock();
+      status_ = Status::kUnLock;
     }
   }

paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
     // Because beta is zero, it is unnecessary to reset filter_grad.
     // Gradient with respect to the filter
     for (int g = 0; g < groups; g++) {
-      auto cudnn_func = [&](void* cudnn_func) {
+      auto cudnn_func = [&](void* cudnn_workspace) {
         CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter(
             handle, &alpha, cudnn_output_desc,
             output_grad_data + output_grad_offset * g, cudnn_input_desc,

paddle/fluid/platform/device_context.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -176,15 +176,15 @@ class CudnnHolder {
     if (required_workspace_len <= workspace_len_) {
       return;
     }
-    void* new_workspace = paddle::memory::Alloc(place_, required_len);
+    void* new_workspace = paddle::memory::Alloc(place_, required_workspace_len);
     if (workspace_ != nullptr) {
       // Maybe someone is using the current workspace
       PADDLE_ENFORCE(cudaStreamSynchronize(*stream_));
       PADDLE_ENFORCE(cudaGetLastError());
       paddle::memory::Free(place_, workspace_);
    }
    workspace_ = new_workspace;
-    workspace_len_ = required_len;
+    workspace_len_ = required_workspace_len;
  }

  cudnnHandle_t cudnn_handle_;

0 commit comments

Comments
 (0)