Skip to content

Commit ba53801

Browse files
committed
Merge branch 'fix_Mac_compile_errors' into dev_data_balance
2 parents: 077434c + 15be513 — commit ba53801

File tree

8 files changed

+44
-28
lines changed

8 files changed

+44
-28
lines changed

paddle/contrib/inference/CMakeLists.txt

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ cc_library(paddle_inference_tensorrt_subgraph_engine
6161
inference_api_test(test_paddle_inference_api_tensorrt_subgraph_engine ARGS test_word2vec)
6262
endif()
6363

64-
if (WITH_ANAKIN AND WITH_TESTING) # only needed in CI
64+
if (WITH_ANAKIN) # only needed in CI
6565
# Because Anakin does not have official library releases and its protobuf and CUDA versions do not match Paddle's,
6666
# the Anakin library will not be merged into our official inference library. To use the Anakin prediction API, one needs
6767
# to compile libinference_anakin_api.a and link with anakin.so.
@@ -71,10 +71,12 @@ if (WITH_ANAKIN AND WITH_TESTING) # only needed in CI
7171
target_compile_options(inference_anakin_api_shared BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
7272
target_link_libraries(inference_anakin_api anakin anakin_saber_common)
7373
target_link_libraries(inference_anakin_api_shared anakin anakin_saber_common)
74-
cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
74+
if (WITH_TESTING)
75+
cc_test(inference_anakin_test SRCS paddle_inference_api_anakin_engine_tester.cc
7576
ARGS --model=${ANAKIN_INSTALL_DIR}/mobilenet_v2.anakin.bin
7677
DEPS inference_anakin_api)
77-
target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
78+
target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
79+
endif(WITH_TESTING)
7880
endif()
7981

8082
if(WITH_TESTING)

paddle/fluid/framework/parallel_executor.cc

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -253,6 +253,9 @@ void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
253253
t->set_lod(lod_tensors[j].lod());
254254
}
255255
}
256+
for (auto &p : member_->places_) {
257+
platform::DeviceContextPool::Instance().Get(p)->Wait();
258+
}
256259
}
257260

258261
ParallelExecutor::~ParallelExecutor() {

paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,10 @@
1616
* This file defines TensorRTSubgraphNodeMarkPass which helps to mark the ops
1717
* that supported by TensorRT engine.
1818
*/
19+
20+
#pragma once
21+
22+
#include <string>
1923
#include "paddle/fluid/inference/analysis/pass.h"
2024
#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
2125

@@ -30,16 +34,19 @@ class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass {
3034
public:
3135
using teller_t = SubGraphSplitter::NodeInsideSubgraphTeller;
3236

33-
TensorRTSubgraphNodeMarkPass(const teller_t& teller) : teller_(teller) {}
37+
explicit TensorRTSubgraphNodeMarkPass(const teller_t& teller)
38+
: teller_(teller) {}
3439

3540
bool Initialize(Argument* argument) override { return true; }
3641

3742
// This class get a sub-graph as input and determine whether to transform this
3843
// sub-graph into TensorRT.
3944
void Run(DataFlowGraph* graph) override;
4045

41-
std::string repr() const { return "tensorrt-sub-subgraph-mark"; }
42-
std::string description() const { return "tensorrt sub-graph mark pass"; }
46+
std::string repr() const override { return "tensorrt-sub-subgraph-mark"; }
47+
std::string description() const override {
48+
return "tensorrt sub-graph mark pass";
49+
}
4350

4451
Pass* CreateGraphvizDebugerPass() const override;
4552
bool Finalize() override;

paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ limitations under the License. */
1414

1515
#pragma once
1616

17+
#include <string>
1718
#include "paddle/fluid/inference/analysis/node.h"
1819
#include "paddle/fluid/inference/analysis/pass.h"
1920
#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
@@ -30,7 +31,7 @@ class TensorRTSubGraphPass : public DataFlowGraphPass {
3031
// Tell whether to transform a sub-graph into TensorRT.
3132
using NodeInsideSubgraphTeller = SubGraphFuse::NodeInsideSubgraphTeller;
3233

33-
TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller);
34+
explicit TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller);
3435

3536
bool Initialize(Argument* argument) override { return true; }
3637

@@ -40,13 +41,13 @@ class TensorRTSubGraphPass : public DataFlowGraphPass {
4041

4142
bool Finalize() override { return true; }
4243

43-
std::string repr() const { return "tensorrt-sub-graph"; }
44-
std::string description() const { return "tensorrt sub graph pass"; }
44+
std::string repr() const override { return "tensorrt-sub-graph"; }
45+
std::string description() const override { return "tensorrt sub graph pass"; }
4546

4647
private:
4748
NodeInsideSubgraphTeller node_inside_subgraph_teller_;
4849
};
4950

5051
} // namespace analysis
5152
} // namespace inference
52-
} // paddle
53+
} // namespace paddle

paddle/scripts/paddle_build.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,7 @@ function cmake_gen() {
106106
-DWITH_FLUID_ONLY=${WITH_FLUID_ONLY:-OFF}
107107
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON
108108
-DWITH_CONTRIB=${WITH_CONTRIB:-ON}
109+
-DWITH_ANAKIN=${WITH_ANAKIN:-ON}
109110
-DWITH_INFERENCE_DEMO=${WITH_INFERENCE_DEMO:-ON}
110111
========================================
111112
EOF

python/paddle/fluid/layers/nn.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5078,12 +5078,12 @@ def mean_iou(input, label, num_classes):
50785078
out_correct = helper.create_tmp_variable(dtype='int32')
50795079
helper.append_op(
50805080
type="mean_iou",
5081-
inputs={"predictions": input,
5082-
"labels": label},
5081+
inputs={"Predictions": input,
5082+
"Labels": label},
50835083
outputs={
5084-
"out_mean_iou": out_mean_iou,
5085-
"out_wrong": out_wrong,
5086-
"out_correct": out_correct
5084+
"OutMeanIou": out_mean_iou,
5085+
"OutWrong": out_wrong,
5086+
"OutCorrect": out_correct
50875087
},
50885088
attrs={"num_classes": num_classes})
50895089
return out_mean_iou, out_wrong, out_correct

python/paddle/fluid/optimizer.py

Lines changed: 5 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1113,7 +1113,6 @@ class ModelAverage(Optimizer):
11131113
11141114
Args:
11151115
average_window_rate: The rate of average window.
1116-
params_grads: A list of parameter-grad variable pairs.
11171116
min_average_window: The minimum size of average window.
11181117
max_average_window: The maximum size of average window.
11191118
@@ -1122,8 +1121,8 @@ class ModelAverage(Optimizer):
11221121
.. code-block:: python
11231122
11241123
optimizer = fluid.optimizer.Momentum()
1125-
_, params_grads = optimizer.minimize(cost)
1126-
model_average = fluid.optimizer.ModelAverage(params_grads, 0.15,
1124+
optimizer.minimize(cost)
1125+
model_average = fluid.optimizer.ModelAverage(0.15,
11271126
min_average_window=10000,
11281127
max_average_window=20000)
11291128
for pass_id in range(args.pass_num):
@@ -1137,7 +1136,6 @@ class ModelAverage(Optimizer):
11371136

11381137
def __init__(self,
11391138
average_window_rate,
1140-
params_grads=None,
11411139
min_average_window=10000,
11421140
max_average_window=10000,
11431141
**kwargs):
@@ -1146,21 +1144,16 @@ def __init__(self,
11461144
self.min_average_window = min_average_window
11471145
self.max_average_window = max_average_window
11481146

1149-
self.params_grads = [] if params_grads is None else params_grads
1150-
params = {}
1151-
for param, grad in self.params_grads:
1152-
if param.do_model_average != False:
1153-
params[param.name] = (param, grad)
1147+
self.params_grads = []
11541148
for param in framework.default_main_program().global_block(
11551149
).all_parameters():
1156-
if param.name not in params and param.do_model_average != False:
1150+
if param.do_model_average != False:
11571151
grad = param.block.create_var(
11581152
name=unique_name.generate(".".join([param.name, 'tmp'])),
11591153
dtype=param.dtype,
11601154
persistable=False,
11611155
stop_gradient=True)
1162-
params[param.name] = (param, grad)
1163-
self.params_grads = params.values()
1156+
self.params_grads.append((param, grad))
11641157

11651158
for param, grad in self.params_grads:
11661159
self._append_average_accumulate_op(param)

python/paddle/fluid/tests/unittests/test_layers.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -401,7 +401,7 @@ def test_maxout(self):
401401
self.assertIsNotNone(output)
402402
print(str(program))
403403

404-
def test_maxout(self):
404+
def test_crop(self):
405405
program = Program()
406406
with program_guard(program):
407407
x = layers.data(name='x', shape=[3, 5], dtype="float32")
@@ -410,6 +410,15 @@ def test_maxout(self):
410410
self.assertIsNotNone(output)
411411
print(str(program))
412412

413+
def test_mean_iou(self):
414+
program = Program()
415+
with program_guard(program):
416+
x = layers.data(name='x', shape=[16], dtype='float32')
417+
y = layers.data(name='label', shape=[1], dtype='int64')
418+
iou = layers.mean_iou(x, y, 2)
419+
self.assertIsNotNone(iou)
420+
print(str(program))
421+
413422

414423
if __name__ == '__main__':
415424
unittest.main()

0 commit comments

Comments
 (0)