
Commit b9fc80a

Merge pull request #16287 from PaddlePaddle/revert-16002-runtime_context
Revert "cache runtime_context"
2 parents: 18911b6 + 7d2740d

10 files changed: +8 additions, -145 deletions

paddle/fluid/framework/ir/CMakeLists.txt

Lines changed: 0 additions & 1 deletion

@@ -70,7 +70,6 @@ pass_library(conv_affine_channel_fuse_pass inference)
 pass_library(transpose_flatten_concat_fuse_pass inference)
 pass_library(identity_scale_op_clean_pass base)
 pass_library(sync_batch_norm_pass base)
-pass_library(runtime_context_cache_pass base)
 
 # There may be many transpose-flatten structures in a model, and the output of
 # these structures will be used as inputs to the concat Op. This pattern will

paddle/fluid/framework/ir/runtime_context_cache_pass.cc

Lines changed: 0 additions & 39 deletions
This file was deleted.

paddle/fluid/framework/ir/runtime_context_cache_pass.h

Lines changed: 0 additions & 32 deletions
This file was deleted.

paddle/fluid/framework/operator.cc

Lines changed: 7 additions & 21 deletions

@@ -874,23 +874,9 @@ std::vector<KernelConfig>* OperatorWithKernel::GetKernelConfig(
   return kernel_configs;
 }
 
-RuntimeContext* OperatorWithKernel::GetRuntimeContext(
-    const Scope& scope) const {
-  if (!HasAttr(kEnableCacheRuntimeContext)) {
-    return new RuntimeContext(Inputs(), Outputs(), scope);
-  } else {
-    const Scope* cur_scope = &scope;
-    if (!runtime_ctx_ || pre_scope_ != cur_scope) {
-      runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
-      pre_scope_ = cur_scope;
-    }
-    return runtime_ctx_.get();
-  }
-}
-
 void OperatorWithKernel::RunImpl(const Scope& scope,
                                  const platform::Place& place) const {
-  auto runtime_ctx = GetRuntimeContext(scope);
+  RuntimeContext ctx(Inputs(), Outputs(), scope);
   platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
   auto* dev_ctx = pool.Get(place);
 
@@ -905,7 +891,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   OpKernelMap& kernels = kernels_iter->second;
 
   auto expected_kernel_key = this->GetExpectedKernelType(
-      ExecutionContext(*this, scope, *dev_ctx, *runtime_ctx, nullptr));
+      ExecutionContext(*this, scope, *dev_ctx, ctx, nullptr));
   VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
 
   auto kernel_iter = kernels.find(expected_kernel_key);
@@ -929,8 +915,8 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
 
   // do data transformScope &transfer_scope;
   std::vector<std::string> transfered_inplace_vars;
-  auto* transfer_scope = PrepareData(scope, expected_kernel_key,
-                                     &transfered_inplace_vars, runtime_ctx);
+  auto* transfer_scope =
+      PrepareData(scope, expected_kernel_key, &transfered_inplace_vars, &ctx);
 
   // exec scope is the scope that kernel actually executed on.
   const Scope& exec_scope =
@@ -941,13 +927,13 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
   }
 
   if (!HasAttr(kAllKernelsMustComputeRuntimeShape)) {
-    RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx);
+    RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
     this->InferShape(&infer_shape_ctx);
   }
   // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
   // not Scope. Imperative mode only pass inputs and get outputs.
-  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx,
-                                       *runtime_ctx, kernel_configs));
+  kernel_iter->second(
+      ExecutionContext(*this, exec_scope, *dev_ctx, ctx, kernel_configs));
 
   if (!transfered_inplace_vars.empty()) {
     // there is inplace variable has been transfered.
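For orientation: the reverted path built a RuntimeContext once per Scope and reused it across iterations, while the restored path constructs a fresh one on every RunImpl call. Below is a minimal, self-contained sketch of that caching idiom, assuming stub Scope/RuntimeContext types and an illustrative OpSketch class; it is not the framework code itself.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Stand-in types; the real Scope/RuntimeContext live in paddle/fluid/framework.
struct Scope {};

struct RuntimeContext {
  RuntimeContext(const std::vector<std::string>& ins,
                 const std::vector<std::string>& outs, const Scope& scope) {
    std::cout << "building RuntimeContext\n";
  }
};

// Illustrative operator: with caching enabled, the context is rebuilt only
// when the Scope pointer changes; with caching disabled (the behavior this
// commit restores), every Run() builds a fresh context.
class OpSketch {
 public:
  explicit OpSketch(bool cache_enabled) : cache_enabled_(cache_enabled) {}

  void Run(const Scope& scope) const {
    if (!cache_enabled_) {
      RuntimeContext ctx({"X"}, {"Out"}, scope);  // fresh context every call
      return;
    }
    if (!cached_ctx_ || pre_scope_ != &scope) {
      cached_ctx_.reset(new RuntimeContext({"X"}, {"Out"}, scope));
      pre_scope_ = &scope;
    }
    // cached_ctx_.get() would be handed to the kernel here.
  }

 private:
  bool cache_enabled_;
  mutable std::unique_ptr<RuntimeContext> cached_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
};

int main() {
  Scope scope;
  OpSketch cached(true), uncached(false);
  for (int i = 0; i < 3; ++i) cached.Run(scope);    // builds the context once
  for (int i = 0; i < 3; ++i) uncached.Run(scope);  // builds it three times
  return 0;
}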

paddle/fluid/framework/operator.h

Lines changed: 0 additions & 11 deletions

@@ -62,14 +62,6 @@ constexpr char kZeroVarSuffix[] = "@ZERO";
 /// Variables with this suffix are the new Gradient.
 constexpr char kNewGradSuffix[] = "@NEWGRAD@";
 
-/// RuntimeContext is used to relate input/output names of Operator with
-/// the corresponding variables in name scope.
-/// If an Op has attribute kEnableCacheRuntimeContext, it means that in a same
-/// name scope, since the input/output names of this Op do not change in the
-/// execution, RuntimeContext could be created only at the first iteration of
-/// this Op's execution to save the elapsed time.
-constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";
-
 /// If an Op has this attribute, all its kernels should calculate output
 /// variable's shape in the corresponding Compute() function. And
 /// OperatorWithKernel::RunImpl() would skip call this Op's InferShape()
@@ -464,7 +456,6 @@ class OperatorWithKernel : public OperatorBase {
   // same.
   proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
   void RunImpl(const Scope& scope, const platform::Place& place) const final;
-  RuntimeContext* GetRuntimeContext(const Scope& scope) const;
 
   /**
    * Transfer data from scope to a transfered scope. If there is no data need to
@@ -483,8 +474,6 @@ class OperatorWithKernel : public OperatorBase {
 
  protected:
   mutable OpKernelConfigsMap kernel_configs_map_;
-  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
-  mutable const Scope* pre_scope_ = nullptr;
 };
 
 extern bool OpSupportGPU(const std::string& op_type);

paddle/fluid/inference/api/analysis_config.cc

Lines changed: 0 additions & 8 deletions

@@ -118,9 +118,6 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 
   CP_MEMBER(serialized_info_cache_);
 
-  // framework related.
-  CP_MEMBER(enable_runtime_context_cache_);
-
   if (use_gpu_) {
     pass_builder_.reset(new GpuPassStrategy(
         *static_cast<GpuPassStrategy *>(other.pass_builder())));
@@ -235,10 +232,6 @@ void AnalysisConfig::Update() {
   if (ir_debug_) {
     pass_builder()->TurnOnDebug();
   }
-
-  if (enable_runtime_context_cache_) {
-    pass_builder()->AppendPass("runtime_context_cache_pass");
-  }
 }
 
 std::string AnalysisConfig::SerializeInfoCache() {
@@ -272,7 +265,6 @@ std::string AnalysisConfig::SerializeInfoCache() {
 
   ss << specify_input_name_;
   ss << cpu_math_library_num_threads_;
-  ss << enable_runtime_context_cache_;
 
   return ss.str();
 }

paddle/fluid/inference/api/paddle_analysis_config.h

Lines changed: 0 additions & 26 deletions

@@ -194,23 +194,6 @@ struct AnalysisConfig {
   /** Tell whether the memory optimization is activated. */
   bool enable_memory_optim() const;
 
-  // framework related
-  /** \brief Control whether to perform runtime context cache optimization.
-   *
-   * If turned off, in Op's every execution, RuntimeContext would be called to
-   * relate input/output names of this Op with the corresponding variables in
-   * Scope.
-   */
-  void SwitchRuntimeContextCache(int x = true) {
-    enable_runtime_context_cache_ = x;
-  }
-  /** A boolean state tell whether the runtime context cache optimization is
-   * actived.
-   */
-  bool runtime_context_cache_enabled() const {
-    return enable_runtime_context_cache_;
-  }
-
   friend class ::paddle::AnalysisPredictor;
 
   /** NOTE just for developer, not an official API, easily to be broken.
@@ -271,15 +254,6 @@
 
   int cpu_math_library_num_threads_{1};
 
-  // framework related
-  // RuntimeContext is used to relate input/output names of Operator with
-  // the corresponding variables in Scope.
-  // If enable_runtime_context_cache_ is true, it means that in a same Scope,
-  // since the input/output names of this Op do not change in the execution,
-  // RuntimeContext could be created only at the first iteration of this Op's
-  // execution to save the elapsed time.
-  bool enable_runtime_context_cache_{false};
-
   // A runtime cache, shouldn't be transferred to others.
   std::string serialized_info_cache_;
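Before this revert, the option was reachable through the public inference config shown above. A hedged usage sketch, assuming the pre-revert AnalysisConfig API (SwitchRuntimeContextCache / runtime_context_cache_enabled); with this commit applied, these calls no longer exist:

#include "paddle/fluid/inference/api/paddle_analysis_config.h"

// Pre-revert usage only; after this commit these members are removed.
void ConfigureWithRuntimeContextCache(paddle::AnalysisConfig* cfg) {
  cfg->SwitchIrOptim();
  cfg->SwitchRuntimeContextCache();  // turn the caching optimization on
  if (cfg->runtime_context_cache_enabled()) {
    // AnalysisConfig::Update() would then append runtime_context_cache_pass.
  }
}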

paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc

Lines changed: 0 additions & 1 deletion

@@ -107,7 +107,6 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
-  cfg->SwitchRuntimeContextCache();
   if (FLAGS_zero_copy) {
     cfg->SwitchUseFeedFetchOps(false);
   }

paddle/fluid/inference/tests/api/config_printer.h

Lines changed: 1 addition & 2 deletions

@@ -72,8 +72,7 @@ std::ostream &operator<<(std::ostream &os, const AnalysisConfig &config) {
   }
   os << GenSpaces(num_spaces) << "enable_ir_optim: " << config.ir_optim()
      << "\n";
-  os << GenSpaces(num_spaces)
-     << "use_runtime_context_cache: " << config.runtime_context_cache_enabled()
+  os << GenSpaces(num_spaces) << "enable_ir_optim: " << config.ir_optim()
      << "\n";
   os << GenSpaces(num_spaces)
      << "use_feed_fetch_ops: " << config.use_feed_fetch_ops_enabled() << "\n";

paddle/fluid/pybind/inference_api.cc

Lines changed: 0 additions & 4 deletions

@@ -242,10 +242,6 @@ void BindAnalysisConfig(py::module *m) {
       .def("set_mkldnn_op", &AnalysisConfig::SetMKLDNNOp)
      .def("set_model_buffer", &AnalysisConfig::SetModelBuffer)
      .def("model_from_memory", &AnalysisConfig::model_from_memory)
-      .def("runtime_context_cache_enabled",
-           &AnalysisConfig::runtime_context_cache_enabled)
-      .def("switch_runtime_context_cache",
-           &AnalysisConfig::SwitchRuntimeContextCache, py::arg("x") = true)
      .def("pass_builder", &AnalysisConfig::pass_builder,
           py::return_value_policy::reference);
 }
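The removed bindings above exposed the option to Python through pybind11. As a self-contained illustration of that binding pattern (using a hypothetical MiniConfig stand-in rather than the real AnalysisConfig), the registration looked roughly like this; after this commit the two `.def(...)` entries for the runtime context cache simply no longer exist:

#include <pybind11/pybind11.h>

namespace py = pybind11;

// Hypothetical stand-in for AnalysisConfig, used only to illustrate how the
// removed methods were exposed to Python.
struct MiniConfig {
  bool cache_{false};
  void SwitchRuntimeContextCache(int x = true) { cache_ = x; }
  bool runtime_context_cache_enabled() const { return cache_; }
};

PYBIND11_MODULE(mini_inference, m) {
  py::class_<MiniConfig>(m, "MiniConfig")
      .def(py::init<>())
      .def("switch_runtime_context_cache",
           &MiniConfig::SwitchRuntimeContextCache, py::arg("x") = true)
      .def("runtime_context_cache_enabled",
           &MiniConfig::runtime_context_cache_enabled);
}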
