
Commit 672f208

Merge branch 'develop' into python_map
2 parents: 4161328 + 1ac31d3

35 files changed: 774 additions, 466 deletions

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF)
 option(WITH_DISTRIBUTE "Compile with grpc distributed support" OFF)
 option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF)
 option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF)
-option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch overhead" ON)
+option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch overhead" OFF)

 # CMAKE_BUILD_TYPE
 if(NOT CMAKE_BUILD_TYPE)

doc/howto/capi/workflow_of_capi_cn.md

Lines changed: 1 addition & 0 deletions
@@ -65,6 +65,7 @@
 output_file = "output.paddle.model"
 merge_v2_model(net, param_file, output_file)
 ```
+
 For the [handwritten digit recognition](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense) example, you can directly run `python` [merge_v2_model.py](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/merge_v2_model.py). The serialized result is written to the `output.paddle.model` file in the current working directory. With this approach, the C-API can load the inference model at runtime by specifying the path to the `output.paddle.model` file.

 #### Notes

paddle/fluid/framework/executor.cc

Lines changed: 3 additions & 3 deletions
@@ -58,13 +58,13 @@ static void CreateTensor(Variable* var, proto::VarType::Type var_type) {
     var->GetMutable<ReaderHolder>();
   } else if (var_type == proto::VarType::CHANNEL) {
     var->GetMutable<ChannelHolder>();
-  } else if (var_type == proto::VarType::NCCL_COM) {
-    // GetMutable will be called in ncclInit
+  } else if (var_type == proto::VarType::RAW) {
+    // GetMutable will be called in operator
   } else {
     PADDLE_THROW(
         "Variable type %d is not in "
         "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, "
-        "LOD_RANK_TABLE, PLACE_LIST, READER, CHANNEL, NCCL_COM]",
+        "LOD_RANK_TABLE, PLACE_LIST, READER, CHANNEL, RAW]",
         var_type);
   }
 }

paddle/fluid/framework/framework.proto

Lines changed: 4 additions & 1 deletion
@@ -113,7 +113,10 @@ message VarType {
     PLACE_LIST = 14;
     READER = 15;
     CHANNEL = 16;
-    NCCL_COM = 17;
+    // Any runtime decided variable type is raw
+    // raw variables should manage their own allocations
+    // in operators like nccl_op
+    RAW = 17;
   }

   required Type type = 1;
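The new enum comment states the contract: a RAW variable's concrete type is only decided at runtime, and the executor allocates nothing for it; as the matching executor.cc change above notes, the owning operator creates whatever holder it needs on first use via `GetMutable<T>()`. A minimal sketch of that lazy-initialization pattern, using a hypothetical `CommunicatorHolder` struct as a stand-in for the state an operator like nccl_op would keep (the struct and function are illustrative, not part of this commit):

#include "paddle/fluid/framework/variable.h"

// Hypothetical stand-in for whatever a RAW variable owns at runtime
// (e.g. communicator state kept by nccl_op); not from this commit.
struct CommunicatorHolder {
  int device_count = 0;
};

void InitRawVariable(paddle::framework::Variable* var) {
  // For VarType::RAW, CreateTensor() allocates nothing up front;
  // GetMutable<T>() default-constructs the holder the first time the
  // operator touches the variable, so the operator manages the allocation.
  auto* holder = var->GetMutable<CommunicatorHolder>();
  holder->device_count = 1;
}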

paddle/fluid/framework/lod_tensor.cc

Lines changed: 7 additions & 1 deletion
@@ -31,8 +31,14 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) {
   os << "{";
   for (auto &v : lod) {
     os << "{";
+    bool is_first = true;
     for (auto &i : v) {
-      os << i << ",";
+      if (is_first) {
+        os << i;
+        is_first = false;
+      } else {
+        os << ", " << i;
+      }
     }
     os << "}";
   }
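The rewritten loop prints the first offset bare and prefixes every later offset with ", ", so an LoD level such as {0, 4, 10} no longer renders with a dangling comma as {0,4,10,}. A self-contained sketch of the same first-element trick, assuming only the standard library:

#include <iostream>
#include <vector>

// Prints {0, 4, 10} instead of the old {0,4,10,} style with a trailing comma.
void PrintLevel(const std::vector<size_t>& level) {
  std::cout << "{";
  bool is_first = true;
  for (size_t offset : level) {
    if (is_first) {
      std::cout << offset;
      is_first = false;
    } else {
      std::cout << ", " << offset;
    }
  }
  std::cout << "}" << std::endl;
}

int main() {
  PrintLevel({0, 4, 10});  // prints {0, 4, 10}
  return 0;
}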

paddle/fluid/inference/io.cc

Lines changed: 7 additions & 20 deletions
@@ -32,23 +32,11 @@ void ReadBinaryFile(const std::string& filename, std::string& contents) {
   inputfs.close();
 }

-bool IsParameter(const framework::VarDesc* var,
-                 const framework::ProgramDesc& main_program) {
-  if (var->Persistable()) {
-    // There are many unreachable variables in the program
-    for (size_t i = 0; i < main_program.Size(); ++i) {
-      const framework::BlockDesc& block = main_program.Block(i);
-      for (auto* op : block.AllOps()) {
-        if (op->Type() == framework::kFeedOpType) {
-          continue;
-        }
-        for (auto input_argument_name : op->InputArgumentNames()) {
-          if (input_argument_name == var->Name()) {
-            return true;
-          }
-        }
-      }
-    }
+bool IsPersistable(const framework::VarDesc* var) {
+  if (var->Persistable() &&
+      var->GetType() != framework::proto::VarType::FEED_MINIBATCH &&
+      var->GetType() != framework::proto::VarType::FETCH_LIST) {
+    return true;
   }
   return false;
 }
@@ -65,8 +53,8 @@ void LoadPersistables(framework::Executor& executor,
   std::vector<std::string> paramlist;

   for (auto* var : global_block.AllVars()) {
-    if (IsParameter(var, main_program)) {
-      VLOG(3) << "parameter's name: " << var->Name();
+    if (IsPersistable(var)) {
+      VLOG(3) << "persistable variable's name: " << var->Name();

       framework::VarDesc* new_var = load_block->Var(var->Name());
       new_var->SetShape(var->GetShape());
@@ -101,7 +89,6 @@ void LoadPersistables(framework::Executor& executor,

   executor.Run(*load_program, &scope, 0, true, true);

-  VLOG(3) << "Ran loading successfully";
   delete load_program;
 }
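The old IsParameter walked every op in every block to decide whether a persistable variable was actually consumed; the new IsPersistable simply trusts the persistable flag and filters out the feed/fetch bookkeeping variables, which are persistable but have no serialized data to load. A minimal sketch of that filter over stand-in descriptors (the struct and sample variable names are illustrative, not from the commit):

#include <iostream>
#include <string>
#include <vector>

// Illustrative stand-in for framework::VarDesc.
enum class VarKind { LOD_TENSOR, FEED_MINIBATCH, FETCH_LIST };
struct FakeVarDesc {
  std::string name;
  bool persistable;
  VarKind kind;
};

// Mirrors the new IsPersistable(): keep persistable variables except the
// feed/fetch holders, which have no parameter file behind them.
bool ShouldLoad(const FakeVarDesc& var) {
  return var.persistable && var.kind != VarKind::FEED_MINIBATCH &&
         var.kind != VarKind::FETCH_LIST;
}

int main() {
  std::vector<FakeVarDesc> vars = {
      {"fc_0.w_0", true, VarKind::LOD_TENSOR},  // loaded
      {"feed", true, VarKind::FEED_MINIBATCH},  // skipped
      {"fetch", true, VarKind::FETCH_LIST},     // skipped
      {"tmp_2", false, VarKind::LOD_TENSOR},    // skipped (not persistable)
  };
  for (const auto& v : vars) {
    std::cout << v.name << (ShouldLoad(v) ? ": load" : ": skip") << std::endl;
  }
  return 0;
}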

paddle/fluid/inference/tests/book/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -30,5 +30,5 @@ inference_test(label_semantic_roles)
 inference_test(recognize_digits ARGS mlp conv)
 inference_test(recommender_system)
 #inference_test(rnn_encoder_decoder)
-inference_test(understand_sentiment)
+inference_test(understand_sentiment ARGS conv)
 inference_test(word2vec)

paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc

Lines changed: 36 additions & 10 deletions
@@ -32,16 +32,42 @@ TEST(inference, label_semantic_roles) {
   paddle::framework::LoDTensor word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1,
       ctx_p2, mark;
   paddle::framework::LoD lod{{0, 4, 10}};
-
-  SetupLoDTensor(word, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(
-      predicate, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(mark, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
+  int64_t word_dict_len = 44068;
+  int64_t predicate_dict_len = 3162;
+  int64_t mark_dict_len = 2;
+
+  SetupLoDTensor(word,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(predicate,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(predicate_dict_len - 1));
+  SetupLoDTensor(ctx_n2,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_n1,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_0,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_p1,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_p2,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(mark,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(mark_dict_len - 1));

   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&word);

paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc

Lines changed: 6 additions & 1 deletion
@@ -31,7 +31,12 @@ TEST(inference, understand_sentiment) {

   paddle::framework::LoDTensor words;
   paddle::framework::LoD lod{{0, 4, 10}};
-  SetupLoDTensor(words, lod, static_cast<int64_t>(0), static_cast<int64_t>(10));
+  int64_t word_dict_len = 5147;
+
+  SetupLoDTensor(words,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));

   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&words);

paddle/fluid/inference/tests/book/test_inference_word2vec.cc

Lines changed: 5 additions & 5 deletions
@@ -31,12 +31,12 @@ TEST(inference, word2vec) {

   paddle::framework::LoDTensor first_word, second_word, third_word, fourth_word;
   paddle::framework::LoD lod{{0, 1}};
-  int64_t dict_size = 2072;  // Hard-coding the size of dictionary
+  int64_t dict_size = 2073;  // The size of dictionary

-  SetupLoDTensor(first_word, lod, static_cast<int64_t>(0), dict_size);
-  SetupLoDTensor(second_word, lod, static_cast<int64_t>(0), dict_size);
-  SetupLoDTensor(third_word, lod, static_cast<int64_t>(0), dict_size);
-  SetupLoDTensor(fourth_word, lod, static_cast<int64_t>(0), dict_size);
+  SetupLoDTensor(first_word, lod, static_cast<int64_t>(0), dict_size - 1);
+  SetupLoDTensor(second_word, lod, static_cast<int64_t>(0), dict_size - 1);
+  SetupLoDTensor(third_word, lod, static_cast<int64_t>(0), dict_size - 1);
+  SetupLoDTensor(fourth_word, lod, static_cast<int64_t>(0), dict_size - 1);

   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&first_word);
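The dictionary size and the upper bound change together: with 2073 entries the valid word ids run from 0 through 2072, so if SetupLoDTensor treats its bounds as an inclusive range (an assumption about the test helper, not shown in this diff), the upper bound must be `dict_size - 1` to avoid feeding an out-of-vocabulary id, as the other test changes above do as well. A small sketch of that inclusive sampling:

#include <cstdint>
#include <iostream>
#include <random>

// Draws a random word id in the closed range [0, dict_size - 1], the same
// bound the updated tests pass as the upper limit.
int64_t RandomWordId(int64_t dict_size) {
  static std::mt19937_64 rng(std::random_device{}());
  std::uniform_int_distribution<int64_t> dist(0, dict_size - 1);  // inclusive
  return dist(rng);
}

int main() {
  const int64_t dict_size = 2073;
  for (int i = 0; i < 4; ++i) {
    std::cout << RandomWordId(dict_size) << std::endl;  // always <= 2072
  }
  return 0;
}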
