Skip to content

Commit 7f4bdb1

Browse files
committed
Merge branch 'release/1.3' into test/picked
test=release/1.3
2 parents 8f3bf90 + 745f88b commit 7f4bdb1

33 files changed

+117
-187
lines changed

paddle/fluid/API.spec

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,13 @@ paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None
88
paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,))
99
paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
1010
paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
11-
paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
12-
paddle.fluid.name_scope ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
11+
paddle.fluid.program_guard ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,))
12+
paddle.fluid.name_scope ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,))
1313
paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None)
1414
paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
1515
paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False))
1616
paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
17-
paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
17+
paddle.fluid.scope_guard ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None)
1818
paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,))
1919
paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
2020
paddle.fluid.DistributeTranspiler.get_pserver_programs ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None)
@@ -66,7 +66,7 @@ paddle.fluid.initializer.XavierInitializer.__init__ ArgSpec(args=['self', 'unifo
6666
paddle.fluid.initializer.BilinearInitializer.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
6767
paddle.fluid.initializer.MSRAInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0))
6868
paddle.fluid.initializer.force_init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
69-
paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
69+
paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
7070
paddle.fluid.initializer.NumpyArrayInitializer.__init__ ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None)
7171
paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None))
7272
paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32'))
@@ -229,7 +229,7 @@ paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes',
229229
paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True))
230230
paddle.fluid.layers.create_py_reader_by_data ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True))
231231
paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,))
232-
paddle.fluid.layers.Preprocessor.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
232+
paddle.fluid.layers.Preprocessor.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
233233
paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
234234
paddle.fluid.layers.Preprocessor.outputs ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
235235
paddle.fluid.layers.load ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,))
@@ -270,7 +270,7 @@ paddle.fluid.layers.IfElse.input ArgSpec(args=['self', 'x'], varargs=None, keywo
270270
paddle.fluid.layers.IfElse.output ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
271271
paddle.fluid.layers.IfElse.true_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
272272
paddle.fluid.layers.DynamicRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
273-
paddle.fluid.layers.DynamicRNN.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
273+
paddle.fluid.layers.DynamicRNN.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
274274
paddle.fluid.layers.DynamicRNN.memory ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32'))
275275
paddle.fluid.layers.DynamicRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
276276
paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
@@ -346,12 +346,12 @@ paddle.fluid.contrib.StateCell.set_state ArgSpec(args=['self', 'state_name', 'st
346346
paddle.fluid.contrib.StateCell.state_updater ArgSpec(args=['self', 'updater'], varargs=None, keywords=None, defaults=None)
347347
paddle.fluid.contrib.StateCell.update_states ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
348348
paddle.fluid.contrib.TrainingDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'name'], varargs=None, keywords=None, defaults=(None,))
349-
paddle.fluid.contrib.TrainingDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
349+
paddle.fluid.contrib.TrainingDecoder.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
350350
paddle.fluid.contrib.TrainingDecoder.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None)
351351
paddle.fluid.contrib.TrainingDecoder.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
352352
paddle.fluid.contrib.TrainingDecoder.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
353353
paddle.fluid.contrib.BeamSearchDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'init_ids', 'init_scores', 'target_dict_dim', 'word_dim', 'input_var_dict', 'topk_size', 'sparse_emb', 'max_len', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=({}, 50, True, 100, 1, 1, None))
354-
paddle.fluid.contrib.BeamSearchDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
354+
paddle.fluid.contrib.BeamSearchDecoder.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
355355
paddle.fluid.contrib.BeamSearchDecoder.decode ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
356356
paddle.fluid.contrib.BeamSearchDecoder.early_stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
357357
paddle.fluid.contrib.BeamSearchDecoder.read_array ArgSpec(args=['self', 'init', 'is_ids', 'is_scores'], varargs=None, keywords=None, defaults=(False, False))
@@ -456,7 +456,7 @@ paddle.fluid.optimizer.AdadeltaOptimizer.apply_gradients ArgSpec(args=['self', '
456456
paddle.fluid.optimizer.AdadeltaOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
457457
paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
458458
paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window', 'regularization', 'name'], varargs=None, keywords=None, defaults=(10000, 10000, None, None))
459-
paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
459+
paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=['self', 'executor', 'need_restore'], varargs=None, keywords=None, defaults=(True,))
460460
paddle.fluid.optimizer.ModelAverage.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
461461
paddle.fluid.optimizer.ModelAverage.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
462462
paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
@@ -491,14 +491,14 @@ paddle.fluid.clip.ErrorClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'],
491491
paddle.fluid.clip.GradientClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,))
492492
paddle.fluid.clip.GradientClipByNorm.__init__ ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None)
493493
paddle.fluid.clip.GradientClipByGlobalNorm.__init__ ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',))
494-
paddle.fluid.profiler.cuda_profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
494+
paddle.fluid.profiler.cuda_profiler ArgSpec(args=['output_file', 'output_mode', 'config'], varargs=None, keywords=None, defaults=(None, None))
495495
paddle.fluid.profiler.reset_profiler ArgSpec(args=[], varargs=None, keywords=None, defaults=None)
496-
paddle.fluid.profiler.profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
496+
paddle.fluid.profiler.profiler ArgSpec(args=['state', 'sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
497497
paddle.fluid.profiler.start_profiler ArgSpec(args=['state'], varargs=None, keywords=None, defaults=None)
498498
paddle.fluid.profiler.stop_profiler ArgSpec(args=['sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile'))
499499
paddle.fluid.unique_name.generate ArgSpec(args=['key'], varargs=None, keywords=None, defaults=None)
500500
paddle.fluid.unique_name.switch ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
501-
paddle.fluid.unique_name.guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
501+
paddle.fluid.unique_name.guard ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,))
502502
paddle.fluid.recordio_writer.convert_reader_to_recordio_file ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
503503
paddle.fluid.recordio_writer.convert_reader_to_recordio_files ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None))
504504
paddle.fluid.Scope Scope() -> paddle.fluid.core._Scope

paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc

Lines changed: 0 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -460,77 +460,6 @@ inline bool CheckNodeIndegreeEquals(const Node &node, size_t n) {
460460
return node.inputs.size() == n;
461461
}
462462

463-
NodesTSIterator::NodesTSIterator(const std::vector<Node *> &source) {
464-
PADDLE_ENFORCE(!source.empty(),
465-
"Start points of topological sorting should not be empty!");
466-
// CHECK all the inputs' in-degree is 0
467-
for (auto *node : source) {
468-
PADDLE_ENFORCE(CheckNodeIndegreeEquals(*node, 0));
469-
}
470-
471-
std::unordered_set<Node *> visited;
472-
std::unordered_set<Node *> to_visit{source.begin(), source.end()};
473-
474-
std::vector<Node *> inlink_visited;
475-
while (!to_visit.empty()) {
476-
std::vector<Node *> queue(to_visit.begin(), to_visit.end());
477-
for (auto *p : queue) {
478-
if (Agent(p).deleted()) {
479-
visited.insert(p);
480-
to_visit.erase(p);
481-
}
482-
483-
inlink_visited.clear();
484-
485-
std::copy_if(p->inputs.begin(), p->inputs.end(),
486-
std::back_inserter(inlink_visited),
487-
[&](Node *x) -> bool { return visited.count(x) != 0; });
488-
489-
if (inlink_visited.size() == p->inputs.size()) {
490-
sorted_.push_back(p);
491-
for (auto *_ : p->outputs) {
492-
if (!visited.count(_)) {
493-
to_visit.insert(_);
494-
}
495-
}
496-
497-
to_visit.erase(p);
498-
visited.insert(p);
499-
}
500-
}
501-
}
502-
}
503-
504-
NodesTSIterator::NodesTSIterator(const NodesTSIterator &other)
505-
: sorted_(other.sorted_), cursor_(other.cursor_) {}
506-
507-
Node &NodesTSIterator::operator*() {
508-
PADDLE_ENFORCE_LT(cursor_, sorted_.size());
509-
return *sorted_[cursor_];
510-
}
511-
512-
NodesTSIterator &NodesTSIterator::operator++() {
513-
if (++cursor_ >= sorted_.size()) {
514-
sorted_.clear();
515-
cursor_ = 0;
516-
}
517-
return *this;
518-
}
519-
NodesTSIterator &NodesTSIterator::operator=(const NodesTSIterator &other) {
520-
cursor_ = other.cursor_;
521-
sorted_ = other.sorted_;
522-
return *this;
523-
}
524-
525-
bool NodesTSIterator::operator==(const NodesTSIterator &other) {
526-
return sorted_ == other.sorted_ && cursor_ == other.cursor_;
527-
}
528-
529-
Node *NodesTSIterator::operator->() {
530-
PADDLE_ENFORCE_LT(cursor_, sorted_.size());
531-
return sorted_[cursor_];
532-
}
533-
534463
} // namespace analysis
535464
} // namespace inference
536465
} // namespace paddle

paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h

Lines changed: 1 addition & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ namespace inference {
3030
namespace analysis {
3131

3232
using framework::ir::Graph;
33+
using framework::ir::NodesTSIterator;
3334

3435
const char kIsFunctionNode[] = "__is_function_node__";
3536
const char kFunctionNodeSubGraph[] = "__function_node_sub_graph__";
@@ -132,32 +133,6 @@ struct Agent {
132133
framework::ir::Node *x_;
133134
};
134135

135-
// Topological sorting iterator on nodes.
136-
struct NodesTSIterator
137-
: public std::iterator<std::forward_iterator_tag, framework::ir::Node *> {
138-
NodesTSIterator() = default;
139-
explicit NodesTSIterator(const std::vector<framework::ir::Node *> &source);
140-
NodesTSIterator(NodesTSIterator &&other)
141-
: sorted_(std::move(other.sorted_)), cursor_(other.cursor_) {
142-
other.cursor_ = 0;
143-
}
144-
NodesTSIterator(const NodesTSIterator &other);
145-
146-
framework::ir::Node &operator*();
147-
NodesTSIterator &operator++();
148-
// TODO(Superjomn) current implementation just compare the first
149-
// element, need to compare the graph and all the elements in the queue and
150-
// set.
151-
NodesTSIterator &operator=(const NodesTSIterator &other);
152-
bool operator==(const NodesTSIterator &other);
153-
bool operator!=(const NodesTSIterator &other) { return !(*this == other); }
154-
framework::ir::Node *operator->();
155-
156-
private:
157-
std::vector<framework::ir::Node *> sorted_;
158-
size_t cursor_{0};
159-
};
160-
161136
// The nodes those have no input will be treated as start points.
162137
static std::vector<framework::ir::Node *> ExtractStartPoints(const Graph &g) {
163138
std::vector<framework::ir::Node *> result;

paddle/fluid/operators/fake_quantize_op.cc

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -21,26 +21,17 @@ limitations under the License. */
2121
namespace paddle {
2222
namespace operators {
2323

24-
template <typename T, int MajorType = Eigen::RowMajor,
25-
typename IndexType = Eigen::DenseIndex>
26-
using EigenVectorArrayMap =
27-
Eigen::TensorMap<Eigen::Tensor<T, 1, MajorType, IndexType>>;
28-
29-
template <typename T, int MajorType = Eigen::RowMajor,
30-
typename IndexType = Eigen::DenseIndex>
31-
using ConstEigenVectorArrayMap =
32-
Eigen::TensorMap<const Eigen::Tensor<T, 1, MajorType, IndexType>>;
24+
template <typename T>
25+
struct Compare {
26+
public:
27+
bool operator()(const T a, const T b) { return (std::abs(a) < std::abs(b)); }
28+
};
3329

3430
template <typename T>
3531
struct FindAbsMaxFunctor<platform::CPUDeviceContext, T> {
3632
void operator()(const platform::CPUDeviceContext& ctx, const T* in,
3733
const int num, T* out) {
38-
Eigen::DSizes<Eigen::DenseIndex, 1> idim(num);
39-
Eigen::DSizes<Eigen::DenseIndex, 1> odim(1);
40-
Eigen::TensorMap<Eigen::Tensor<const T, 1, Eigen::RowMajor>> in_e(in, idim);
41-
Eigen::TensorMap<Eigen::Tensor<T, 1, Eigen::RowMajor>> out_e(out, odim);
42-
43-
out_e = in_e.abs().maximum();
34+
*out = *(std::max_element(in + 0, in + num, Compare<T>()));
4435
}
4536
};
4637

paddle/fluid/operators/jit/gen/act.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,6 @@ class VActFunc : public JitCode {
6363
public:
6464
explicit VActFunc(size_t code_size, void* code_ptr)
6565
: JitCode(code_size, code_ptr) {}
66-
virtual const char* name() const = 0;
6766
virtual void genCode() = 0;
6867

6968
protected:
@@ -269,7 +268,7 @@ class VActJitCode : public VActFunc {
269268
this->genCode();
270269
}
271270

272-
const char* name() const override {
271+
std::string name() const override {
273272
std::string base = "VActJitCode";
274273
switch (type_) {
275274
case operand_type::RELU:
@@ -293,7 +292,7 @@ class VActJitCode : public VActFunc {
293292
default:
294293
break;
295294
}
296-
return base.c_str();
295+
return base;
297296
}
298297
void genCode() override;
299298

paddle/fluid/operators/jit/gen/blas.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ class VXXJitCode : public JitCode {
4141
this->genCode();
4242
}
4343

44-
virtual const char* name() const {
44+
std::string name() const override {
4545
std::string base = "VXXJitCode";
4646
if (scalar_index_ == 1) {
4747
base += "_Scalar";
@@ -62,7 +62,7 @@ class VXXJitCode : public JitCode {
6262
}
6363
base += (with_relu_ ? "_Relu" : "");
6464
base += "_D" + std::to_string(num_);
65-
return base.c_str();
65+
return base;
6666
}
6767
void genCode() override;
6868

paddle/fluid/operators/jit/gen/gru.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ class GRUJitCode : public VActFunc {
4949
this->genCode();
5050
}
5151

52-
const char* name() const override {
52+
std::string name() const override {
5353
std::string base = "GRUJitCode";
5454
if (id_ == 0) {
5555
base += "_H1";
@@ -81,7 +81,7 @@ class GRUJitCode : public VActFunc {
8181
};
8282
AddTypeStr(act_gate_);
8383
AddTypeStr(act_cand_);
84-
return base.c_str();
84+
return base;
8585
}
8686
void genCode() override;
8787

paddle/fluid/operators/jit/gen/hopv.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,14 +35,14 @@ class HOPVJitCode : public JitCode {
3535
this->genCode();
3636
}
3737

38-
virtual const char* name() const {
38+
std::string name() const override {
3939
std::string base = "VXXJitCode";
4040
if (type_ == operand_type::MAX) {
4141
base += "_MAX";
4242
} else {
4343
base += "_SUM";
4444
}
45-
return base.c_str();
45+
return base;
4646
}
4747
void genCode() override;
4848

paddle/fluid/operators/jit/gen/jitcode.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
#pragma once
1616

17+
#include <string>
1718
#include <type_traits>
1819
#include "paddle/fluid/operators/jit/gen_base.h"
1920
#include "paddle/fluid/platform/cpu_info.h"
@@ -59,7 +60,7 @@ typedef enum {
5960
} operand_type;
6061

6162
#define DECLARE_JIT_CODE(codename) \
62-
const char* name() const override { return #codename; }
63+
std::string name() const override { return #codename; }
6364

6465
class JitCode : public GenBase, public Xbyak::CodeGenerator {
6566
public:
@@ -68,7 +69,6 @@ class JitCode : public GenBase, public Xbyak::CodeGenerator {
6869
(code_size % 4096 != 0 ? (code_size / 4096 + 1) * 4096 : code_size),
6970
code_ptr) {}
7071

71-
virtual const char* name() const = 0;
7272
virtual void genCode() = 0;
7373

7474
size_t getSize() const override { return CodeGenerator::getSize(); }

paddle/fluid/operators/jit/gen/lstm.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ class LSTMJitCode : public VActFunc {
5353
this->genCode();
5454
}
5555

56-
const char* name() const override {
56+
std::string name() const override {
5757
std::string base = "LSTMJitCode";
5858
if (use_peephole_) {
5959
base += "_Peephole";
@@ -85,7 +85,7 @@ class LSTMJitCode : public VActFunc {
8585
AddTypeStr(act_gate_);
8686
AddTypeStr(act_cand_);
8787
AddTypeStr(act_cell_);
88-
return base.c_str();
88+
return base;
8989
}
9090
void genCode() override;
9191

0 commit comments

Comments (0)