
Commit 83ca657

Merge branch 'develop' into resnet50_ut
2 parents: 21ee305 + 35b713c

12 files changed: +288, -195 lines

paddle/fluid/API.spec

Lines changed: 14 additions & 14 deletions
@@ -145,21 +145,27 @@ paddle.fluid.layers.unstack ArgSpec(args=['x', 'axis', 'num'], varargs=None, key
 paddle.fluid.layers.sequence_enumerate ArgSpec(args=['input', 'win_size', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0, None))
 paddle.fluid.layers.expand ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_concat ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,))
-paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'out', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None, None))
-paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
-paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'out', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, -1, False, None, None))
+paddle.fluid.layers.scale ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None))
+paddle.fluid.layers.elementwise_add ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_div ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_sub ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_mul ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_max ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_min ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
+paddle.fluid.layers.elementwise_pow ArgSpec(args=['x', 'y', 'axis', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, False, None, None))
 paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0))
 paddle.fluid.layers.gaussian_random ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32', False))
 paddle.fluid.layers.sampling_id ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32'))
 paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=['input', 'shape', 'input_dim_idx', 'output_dim_idx', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0, 0, 0.0, 1.0, 0, 'float32'))
 paddle.fluid.layers.sum ArgSpec(args=['x', 'use_mkldnn'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.layers.slice ArgSpec(args=['input', 'axes', 'starts', 'ends'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.shape ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.logical_and ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_or ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_xor ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.logical_not ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.clip ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.clip_by_norm ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True))
 paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None))
 paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None)
@@ -225,12 +231,6 @@ paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords=
 paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.clip_by_norm ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_or ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_xor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logical_not ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))

paddle/fluid/framework/scope.cc

Lines changed: 0 additions & 31 deletions
@@ -20,13 +20,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/threadpool.h"
 #include "paddle/fluid/string/printf.h"
 
-// The mutex is not needed by training and inference, only for distribution.
-#if PADDLE_WITH_DISTRIBUTE
-#define WITH_LOCK 1
-#else
-#define WITH_LOCK 0
-#endif
-
 DEFINE_bool(benchmark, false,
             "Doing memory benchmark. It will make deleting scope synchronized, "
             "and add some memory usage logs."
@@ -56,24 +49,18 @@ int64_t GetEagerDeletionThreshold() {
 Scope::~Scope() { DropKids(); }
 
 Scope& Scope::NewScope() const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   kids_.push_back(new Scope(this));
   return *kids_.back();
 }
 
 Variable* Scope::Var(const std::string& name) {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   return VarInternal(name);
 }
 
 Variable* Scope::Var(std::string* name) {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   auto new_name = string::Sprintf("%p.%d", this, vars_.size());
   if (name != nullptr) {
     *name = new_name;
@@ -82,39 +69,29 @@ Variable* Scope::Var(std::string* name) {
 }
 
 Variable* Scope::FindVar(const std::string& name) const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   return FindVarInternal(name);
 }
 
 const Scope* Scope::FindScope(const Variable* var) const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   return FindScopeInternal(var);
 }
 
 void Scope::DropKids() {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   for (Scope* s : kids_) delete s;
   kids_.clear();
 }
 
 bool Scope::HasKid(const Scope* scope) const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   auto it = std::find(this->kids_.begin(), this->kids_.end(), scope);
   return it != this->kids_.end();
 }
 
 std::vector<std::string> Scope::LocalVarNames() const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   std::vector<std::string> known_vars;
   known_vars.reserve(this->vars_.size());
   for (auto& p : vars_) {
@@ -124,9 +101,7 @@ std::vector<std::string> Scope::LocalVarNames() const {
 }
 
 void Scope::DeleteScope(Scope* scope) const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   auto it = std::find(this->kids_.begin(), this->kids_.end(), scope);
   PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope);
   this->kids_.erase(it);
@@ -139,9 +114,7 @@ void Scope::DeleteScope(Scope* scope) const {
 }
 
 void Scope::EraseVars(const std::vector<std::string>& var_names) {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   std::set<std::string> var_set(var_names.begin(), var_names.end());
   for (auto it = vars_.begin(); it != vars_.end();) {
     if (var_set.find(it->first) != var_set.end()) {
@@ -154,16 +127,12 @@ void Scope::EraseVars(const std::vector<std::string>& var_names) {
 
 void Scope::Rename(const std::string& origin_name,
                    const std::string& new_name) const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   RenameInternal(origin_name, new_name);
 }
 
 std::string Scope::Rename(const std::string& origin_name) const {
-#if WITH_LOCK
   std::unique_lock<std::mutex> lock(mutex_);
-#endif
   auto new_name = string::Sprintf("%p.%d", this, vars_.size());
   RenameInternal(origin_name, new_name);
   return new_name;
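Why the scope.cc locks are now unconditional: the removed WITH_LOCK switch compiled the mutex out unless PADDLE_WITH_DISTRIBUTE was set, which left non-distributed builds unsynchronized; after this change every public Scope method takes mutex_ on every call. A minimal sketch of the resulting pattern, with Variable as a stand-in type (illustrative only, not Paddle's real class):

#include <mutex>
#include <string>
#include <unordered_map>

// Illustrative stand-in for paddle::framework::Variable.
struct Variable {};

// Sketch of the post-change locking pattern: every public method guards
// the shared containers with the same mutex, with no compile-time switch.
class Scope {
 public:
  Variable* Var(const std::string& name) {
    std::unique_lock<std::mutex> lock(mutex_);  // always taken now
    auto& slot = vars_[name];
    if (slot == nullptr) slot = new Variable();
    return slot;
  }

  Variable* FindVar(const std::string& name) const {
    std::unique_lock<std::mutex> lock(mutex_);
    auto it = vars_.find(name);
    return it == vars_.end() ? nullptr : it->second;
  }

 private:
  mutable std::mutex mutex_;  // mutable so const lookups can lock too
  std::unordered_map<std::string, Variable*> vars_;
};

A std::lock_guard would serve equally well; std::unique_lock simply matches the style of the patched file.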

paddle/fluid/platform/dynload/cublas.h

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ extern void *cublas_dso_handle;
   struct DynLoad__##__name {                         \
     template <typename... Args>                      \
     inline cublasStatus_t operator()(Args... args) { \
-      return __name(args...);                        \
+      return ::__name(args...);                      \
     }                                                \
   };                                                 \
   extern DynLoad__##__name __name
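Why the :: matters here: the macro also declares an object named __name in the dynload namespace (extern DynLoad__##__name __name), so on compilers that defer template name lookup to instantiation time (older MSVC behavior), the unqualified call __name(args...) can resolve to that object and recurse into itself. Qualifying the call as ::__name pins it to the global cuBLAS symbol; curand.h and cudnn.h below receive the same fix. A stripped-down, self-contained sketch with a hypothetical global foo (the real macro declares the object extern; here it is defined so the example links and runs):

#include <iostream>

// Hypothetical global function standing in for a cuBLAS entry point.
int foo(int x) { return x + 1; }

namespace dynload {

// Same shape as DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP: a functor type plus an
// object that deliberately reuses the wrapped function's name.
#define DECLARE_WRAP(__name)       \
  struct DynLoad__##__name {       \
    template <typename... Args>    \
    int operator()(Args... args) { \
      return ::__name(args...);    \
    }                              \
  };                               \
  DynLoad__##__name __name

DECLARE_WRAP(foo);  // defines DynLoad__foo and an object dynload::foo

}  // namespace dynload

int main() {
  // Calls the wrapper object, which forwards to the global ::foo.
  std::cout << dynload::foo(41) << std::endl;  // prints 42
  return 0;
}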

paddle/fluid/platform/dynload/cudnn.h

Lines changed: 10 additions & 7 deletions
@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+#define GLOG_NO_ABBREVIATED_SEVERITIES
+#define GOOGLE_GLOG_DLL_DECL
+#include <glog/logging.h>
 
 #include <cudnn.h>
 #include <mutex>  // NOLINT
@@ -47,13 +50,13 @@ extern void EnforceCUDNNLoaded(const char* fn_name);
 
 #else
 
-#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name)                  \
-  struct DynLoad__##__name {                                     \
-    template <typename... Args>                                  \
-    auto operator()(Args... args) -> decltype(__name(args...)) { \
-      return __name(args...);                                    \
-    }                                                            \
-  };                                                             \
+#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name)      \
+  struct DynLoad__##__name {                         \
+    template <typename... Args>                      \
+    inline cudnnStatus_t operator()(Args... args) {  \
+      return ::__name(args...);                      \
+    }                                                \
+  };                                                 \
 extern DynLoad__##__name __name
 
 #endif
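What the new glog defines do: these are glog's documented Windows switches. GLOG_NO_ABBREVIATED_SEVERITIES keeps glog from defining a bare ERROR severity constant, which collides with the ERROR macro that windows.h drags in; an empty GOOGLE_GLOG_DLL_DECL strips the __declspec(dllimport) attribute so the header also works against a statically linked glog. Both must precede the first include of the header, as in this sketch (assumes glog is installed):

// The switches take effect only if defined before <glog/logging.h>
// is first included anywhere in the translation unit.
#define GLOG_NO_ABBREVIATED_SEVERITIES  // no bare ERROR constant (clashes with windows.h)
#define GOOGLE_GLOG_DLL_DECL            // empty: static glog, no __declspec(dllimport)
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  LOG(WARNING) << "glog initialized with the Windows-safe switches";
  return 0;
}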

paddle/fluid/platform/dynload/curand.h

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ extern void *curand_dso_handle;
   struct DynLoad__##__name {                   \
     template <typename... Args>                \
     curandStatus_t operator()(Args... args) {  \
-      return __name(args...);                  \
+      return ::__name(args...);                \
     }                                          \
   };                                           \
   extern DynLoad__##__name __name

paddle/fluid/platform/dynload/dynamic_loader.cc

Lines changed: 17 additions & 3 deletions
@@ -107,7 +107,11 @@ static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path,
 static inline void* GetDsoHandleFromSearchPath(const std::string& search_root,
                                                const std::string& dso_name,
                                                bool throw_on_error = true) {
+#if !defined(_WIN32)
   int dynload_flags = RTLD_LAZY | RTLD_LOCAL;
+#else
+  int dynload_flags = 0;
+#endif  // !_WIN32
   void* dso_handle = nullptr;
 
   std::string dlPath = dso_name;
@@ -117,10 +121,15 @@ static inline void* GetDsoHandleFromSearchPath(const std::string& search_root,
   // search xxx.so from custom path
   dlPath = join(search_root, dso_name);
   dso_handle = dlopen(dlPath.c_str(), dynload_flags);
+#if !defined(_WIN32)
+  auto errorno = dlerror();
+#else
+  auto errorno = GetLastError();
+#endif  // !_WIN32
   // if not found, search from default path
   if (nullptr == dso_handle) {
     LOG(WARNING) << "Failed to find dynamic library: " << dlPath << " ("
-                 << dlerror() << ")";
+                 << errorno << ")";
     if (dlPath.find("nccl") != std::string::npos) {
       std::cout
           << "You may need to install 'nccl2' from NVIDIA official website: "
@@ -139,10 +148,15 @@ static inline void* GetDsoHandleFromSearchPath(const std::string& search_root,
       "export LD_LIBRARY_PATH=... \n Note: After Mac OS 10.11, "
       "using the DYLD_LIBRARY_PATH is impossible unless System "
      "Integrity Protection (SIP) is disabled.";
+#if !defined(_WIN32)
+  auto errorno = dlerror();
+#else
+  auto errorno = GetLastError();
+#endif  // !_WIN32
   if (throw_on_error) {
-    PADDLE_ENFORCE(nullptr != dso_handle, error_msg, dlPath, dlerror());
+    PADDLE_ENFORCE(nullptr != dso_handle, error_msg, dlPath, errorno);
   } else if (nullptr == dso_handle) {
-    LOG(WARNING) << string::Sprintf(error_msg, dlPath, dlerror());
+    LOG(WARNING) << string::Sprintf(error_msg, dlPath, errorno);
   }
 
   return dso_handle;
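Why errorno replaces the raw dlerror() calls: Windows has no dlerror(), and the RTLD_* flags have no equivalent there, so the patch zeroes the flags on _WIN32 and captures the failure reason into errorno up front, via dlerror() on POSIX and GetLastError() on Windows (the code still calls dlopen on both paths, so a Windows dlopen shim is presumably provided elsewhere in the port). One subtlety the patch respects: dlerror() is only meaningful immediately after a failed dl* call, hence reading it right after dlopen rather than later inside the branch. A minimal portable sketch of the same idea, not Paddle's actual helper:

#include <iostream>
#include <string>

#if !defined(_WIN32)
#include <dlfcn.h>
#else
#include <windows.h>
#endif

// Sketch: load a shared library and report a platform-appropriate
// error message (POSIX) or error code (Windows) on failure.
void* LoadDso(const std::string& path, std::string* error) {
#if !defined(_WIN32)
  void* handle = dlopen(path.c_str(), RTLD_LAZY | RTLD_LOCAL);
  // dlerror() must be read immediately after the failing call.
  const char* msg = dlerror();
  if (handle == nullptr && error != nullptr) *error = msg ? msg : "unknown";
#else
  void* handle = reinterpret_cast<void*>(LoadLibraryA(path.c_str()));
  if (handle == nullptr && error != nullptr)
    *error = "GetLastError=" + std::to_string(GetLastError());
#endif
  return handle;
}

int main() {
  std::string err;
  if (LoadDso("libdoesnotexist.so", &err) == nullptr)
    std::cerr << "load failed: " << err << std::endl;
  return 0;
}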

paddle/scripts/paddle_build.sh

Lines changed: 2 additions & 2 deletions
@@ -395,7 +395,7 @@ EOF
     ctest --output-on-failure -j $1
     # make install should also be test when unittest
     make install -j 8
-    pip install /usr/local/opt/paddle/share/wheels/*.whl
+    pip install ${INSTALL_PREFIX:-/paddle/build}/opt/paddle/share/wheels/*.whl
     if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]] ; then
         paddle version
     fi
@@ -750,7 +750,7 @@ function main() {
       cmake_gen ${PYTHON_ABI:-""}
       build
       run_test
-      assert_api_not_changed
+      assert_api_not_changed ${PYTHON_ABI:-""}
       ;;
     *)
       print_usage

python/paddle/fluid/clip.py

Lines changed: 3 additions & 1 deletion
@@ -271,7 +271,8 @@ def _process_context(self, context, param, grad):
                 "All parameters' 'clip_norm' of a same group should be the same"
             )
 
-        local_norm_var = layers.reduce_sum(input=layers.pow(x=grad, factor=2.0))
+        square = grad * grad
+        local_norm_var = layers.cast(layers.reduce_sum(input=square), 'float64')
         context[self.group_name].append(local_norm_var)
 
         self.context = context
@@ -281,6 +282,7 @@ def _create_operators(self, param, grad):
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])
             group_norm_var = layers.sqrt(x=group_norm_var)
+            group_norm_var = layers.cast(group_norm_var, 'float32')
             clip_var = self.context[self.group_name + "_clip"]
             group_scale_var = layers.elementwise_div(
                 x=clip_var,
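Why clip.py widens to float64: layers.pow(x=grad, factor=2.0) becomes a plain grad * grad, and the per-group sums of squares are accumulated in float64, with a cast back to float32 only after the square root. The wider accumulator matters because summing many squared float32 gradients can overflow or lose precision. A standalone illustration of the failure mode (C++ for a self-contained binary; not Paddle code):

#include <cstdio>

// Summing squares of large float32 values overflows to inf, while the
// same accumulation in double stays finite. This is the hazard the
// float64 cast in clip.py guards against.
int main() {
  float f_acc = 0.0f;
  double d_acc = 0.0;
  const float g = 1.0e20f;  // large but representable (float max is ~3.4e38)
  for (int i = 0; i < 4; ++i) {
    f_acc += g * g;  // g*g is ~1e40, which overflows float to inf
    d_acc += static_cast<double>(g) * g;
  }
  std::printf("float32 sum of squares: %g\n", f_acc);  // inf
  std::printf("float64 sum of squares: %g\n", d_acc);  // 4e+40
  return 0;
}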

python/paddle/fluid/layers/control_flow.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
 from ..framework import Program, Variable, Operator
 from ..layer_helper import LayerHelper, unique_name
 from ..initializer import force_init_on_cpu
-from .ops import logical_and, logical_not, logical_or
+from .nn import logical_and, logical_not, logical_or
 import numpy
 import warnings
 import six
