Skip to content

Commit aca05d5

Browse files
committed
Merge branch 'release/1.0.0' of https://github.com/PaddlePaddle/Paddle into release/1.0.0
2 parents e8fbf82 + 587f3dd commit aca05d5

35 files changed

+1571
-426
lines changed

paddle/fluid/API.spec

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -61,12 +61,12 @@ paddle.fluid.layers.cos_sim ArgSpec(args=['X', 'Y'], varargs=None, keywords=None
6161
paddle.fluid.layers.cross_entropy ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100))
6262
paddle.fluid.layers.square_error_cost ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None)
6363
paddle.fluid.layers.chunk_eval ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types'], varargs=None, keywords=None, defaults=(None,))
64-
paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None))
64+
paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None, None))
6565
paddle.fluid.layers.conv2d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None))
6666
paddle.fluid.layers.conv3d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None))
6767
paddle.fluid.layers.sequence_pool ArgSpec(args=['input', 'pool_type'], varargs=None, keywords=None, defaults=None)
68-
paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'param_attr', 'bias_attr', 'use_cudnn'], varargs=None, keywords=None, defaults=(None, None, False))
69-
paddle.fluid.layers.softmax ArgSpec(args=['input', 'param_attr', 'bias_attr', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(None, None, True, None))
68+
paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None))
69+
paddle.fluid.layers.softmax ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(True, None))
7070
paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None))
7171
paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None))
7272
paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False))
@@ -95,8 +95,8 @@ paddle.fluid.layers.warpctc ArgSpec(args=['input', 'label', 'blank', 'norm_by_ti
9595
paddle.fluid.layers.sequence_reshape ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None)
9696
paddle.fluid.layers.transpose ArgSpec(args=['x', 'perm', 'name'], varargs=None, keywords=None, defaults=(None,))
9797
paddle.fluid.layers.im2sequence ArgSpec(args=['input', 'filter_size', 'stride', 'padding', 'input_image_size', 'out_stride', 'name'], varargs=None, keywords=None, defaults=(1, 1, 0, None, 1, None))
98-
paddle.fluid.layers.nce ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples'], varargs=None, keywords=None, defaults=(None, None, None, None))
99-
paddle.fluid.layers.hsigmoid ArgSpec(args=['input', 'label', 'num_classes', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None))
98+
paddle.fluid.layers.nce ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, None))
99+
paddle.fluid.layers.hsigmoid ArgSpec(args=['input', 'label', 'num_classes', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(None, None, None))
100100
paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'name'], varargs=None, keywords=None, defaults=(0, None))
101101
paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None))
102102
paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None)

paddle/fluid/inference/CMakeLists.txt

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -19,18 +19,26 @@ cc_library(paddle_fluid_origin DEPS ${fluid_modules} paddle_fluid_api)
1919

2020
add_subdirectory(api)
2121

22+
set(STATIC_INFERENCE_APIS paddle_fluid_api paddle_inference_api analysis_predictor)
23+
set(SHARED_INFERENCE_SRCS
24+
io.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api_impl.cc
25+
${CMAKE_CURRENT_SOURCE_DIR}/api/analysis_predictor.cc )
26+
if (WITH_GPU AND TENSORRT_FOUND)
27+
set(STATIC_INFERENCE_APIS ${STATIC_INFERENCE_APIS} paddle_inference_tensorrt_subgraph_engine)
28+
set(SHARED_INFERENCE_SRCS ${SHARED_INFERENCE_SRCS} ${CMAKE_CURRENT_SOURCE_DIR}/api/api_tensorrt_subgraph_engine.cc)
29+
endif()
30+
2231
# Create static library
23-
cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api paddle_inference_api analysis_predictor)
32+
cc_library(paddle_fluid DEPS ${fluid_modules} ${STATIC_INFERENCE_APIS} )
33+
2434
if(NOT APPLE)
2535
# TODO(liuyiqun): Temporarily disable the link flag because it is not supported on Mac.
2636
set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
2737
set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
2838
endif()
2939

3040
# Create shared library
31-
cc_library(paddle_fluid_shared SHARED
32-
SRCS io.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api_impl.cc
33-
${CMAKE_CURRENT_SOURCE_DIR}/api/analysis_predictor.cc
41+
cc_library(paddle_fluid_shared SHARED SRCS ${SHARED_INFERENCE_SRCS}
3442
DEPS ${fluid_modules} paddle_fluid_api)
3543

3644
set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid)

paddle/fluid/inference/api/demo_ci/CMakeLists.txt

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ project(cpp_inference_demo CXX C)
33
option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
44
option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
55
option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
6+
option(USE_TENSORRT "Compile demo with TensorRT." OFF)
67

78
macro(safe_set_static_flag)
89
foreach(flag_var
@@ -60,6 +61,13 @@ endif(NOT WIN32)
6061
include_directories("${PADDLE_LIB}/third_party/boost")
6162
include_directories("${PADDLE_LIB}/third_party/eigen3")
6263

64+
if (NOT WIN32)
65+
if (USE_TENSORRT AND WITH_GPU)
66+
include_directories("${TENSORRT_INCLUDE_DIR}")
67+
link_directories("${TENSORRT_LIB_DIR}")
68+
endif()
69+
endif(NOT WIN32)
70+
6371
if (NOT WIN32)
6472
link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
6573
link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
@@ -112,6 +120,10 @@ endif(NOT WIN32)
112120

113121
if(WITH_GPU)
114122
if(NOT WIN32)
123+
if (USE_TENSORRT)
124+
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
125+
set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
126+
endif()
115127
set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
116128
else()
117129
set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )

paddle/fluid/inference/api/demo_ci/run.sh

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,12 @@ set -x
22
PADDLE_ROOT=$1
33
TURN_ON_MKL=$2 # use MKL or Openblas
44
TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
5+
DATA_DIR=$4 # dataset
6+
TENSORRT_INCLUDE_DIR=$5 # TensorRT header file dir, default to /usr/local/TensorRT/include
7+
TENSORRT_LIB_DIR=$6 # TensorRT lib file dir, default to /usr/local/TensorRT/lib
8+
9+
cd `dirname $0`
10+
current_dir=`pwd`
511
if [ $2 == ON ]; then
612
# You can export the path yourself if you move the install directory
713
MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
@@ -13,6 +19,11 @@ else
1319
use_gpu_list='false'
1420
fi
1521

22+
USE_TENSORRT=OFF
23+
if [ [-d"$TENSORRT_INCLUDE_DIR"] -a [-d"$TENSORRT_LIB_DIR"] ]; then
24+
USE_TENSORRT=ON
25+
fi
26+
1627
PREFIX=inference-vis-demos%2F
1728
URL_ROOT=http://paddlemodels.cdn.bcebos.com/${PREFIX}
1829

@@ -29,15 +40,15 @@ function download() {
2940
fi
3041
cd ..
3142
}
32-
mkdir -p data
33-
cd data
43+
mkdir -p $DATA_DIR
44+
cd $DATA_DIR
3445
vis_demo_list='se_resnext50 ocr mobilenet'
3546
for vis_demo_name in $vis_demo_list; do
3647
download $vis_demo_name
3748
done
38-
cd ..
3949

4050
# compile and test the demo
51+
cd $current_dir
4152
mkdir -p build
4253
cd build
4354

@@ -73,15 +84,35 @@ for WITH_STATIC_LIB in ON OFF; do
7384
for use_gpu in $use_gpu_list; do
7485
for vis_demo_name in $vis_demo_list; do
7586
./vis_demo \
76-
--modeldir=../data/$vis_demo_name/model \
77-
--data=../data/$vis_demo_name/data.txt \
78-
--refer=../data/$vis_demo_name/result.txt \
87+
--modeldir=$DATA_DIR/$vis_demo_name/model \
88+
--data=$DATA_DIR/$vis_demo_name/data.txt \
89+
--refer=$DATA_DIR/$vis_demo_name/result.txt \
7990
--use_gpu=$use_gpu
8091
if [ $? -ne 0 ]; then
8192
echo "vis demo $vis_demo_name runs fail."
8293
exit 1
8394
fi
8495
done
8596
done
97+
98+
# --------tensorrt mobilenet------
99+
if [ $USE_TENSORRT == ON -a $TEST_GPU_CPU == ON ]; then
100+
rm -rf *
101+
cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
102+
-DWITH_MKL=$TURN_ON_MKL \
103+
-DDEMO_NAME=vis_demo \
104+
-DWITH_GPU=$TEST_GPU_CPU \
105+
-DWITH_STATIC_LIB=$WITH_STATIC_LIB \
106+
-DUSE_TENSORRT=$USE_TENSORRT \
107+
-DTENSORRT_INCLUDE_DIR=$TENSORRT_INCLUDE_DIR \
108+
-DTENSORRT_LIB_DIR=$TENSORRT_LIB_DIR
109+
make -j
110+
./vis_demo \
111+
--modeldir=$DATA_DIR/mobilenet/model \
112+
--data=$DATA_DIR/mobilenet/data.txt \
113+
--refer=$DATA_DIR/mobilenet/result.txt \
114+
--use_gpu=true \
115+
--use_trt=true
116+
fi
86117
done
87118
set +x

paddle/fluid/inference/api/demo_ci/vis_demo.cc

Lines changed: 31 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ DEFINE_string(
3333
"path of data; each line is a record, format is "
3434
"'<space splitted floats as data>\t<space splitted ints as shape'");
3535
DEFINE_bool(use_gpu, false, "Whether use gpu.");
36+
DEFINE_bool(use_trt, false, "Whether use trt.");
3637

3738
namespace paddle {
3839
namespace demo {
@@ -100,20 +101,32 @@ void CheckOutput(const std::string& referfile, const PaddleTensor& output) {
100101
/*
101102
* Use the native fluid engine to inference the demo.
102103
*/
103-
void Main(bool use_gpu) {
104-
NativeConfig config;
105-
config.param_file = FLAGS_modeldir + "/__params__";
106-
config.prog_file = FLAGS_modeldir + "/__model__";
107-
config.use_gpu = use_gpu;
108-
config.device = 0;
109-
if (FLAGS_use_gpu) {
104+
void Main(bool use_gpu, bool use_trt) {
105+
std::unique_ptr<PaddlePredictor> predictor;
106+
if (!use_trt) {
107+
NativeConfig config;
108+
config.param_file = FLAGS_modeldir + "/__params__";
109+
config.prog_file = FLAGS_modeldir + "/__model__";
110+
config.use_gpu = use_gpu;
111+
config.device = 0;
112+
if (FLAGS_use_gpu) {
113+
config.fraction_of_gpu_memory = 0.1; // set by yourself
114+
}
115+
116+
VLOG(3) << "init predictor";
117+
predictor =
118+
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
119+
} else {
120+
paddle::contrib::MixedRTConfig config;
121+
config.param_file = FLAGS_modeldir + "/__params__";
122+
config.prog_file = FLAGS_modeldir + "/__model__";
123+
config.use_gpu = true;
124+
config.device = 0;
125+
config.max_batch_size = 1;
110126
config.fraction_of_gpu_memory = 0.1; // set by yourself
127+
predictor = CreatePaddlePredictor<paddle::contrib::MixedRTConfig>(config);
111128
}
112129

113-
VLOG(3) << "init predictor";
114-
auto predictor =
115-
CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
116-
117130
VLOG(3) << "begin to process data";
118131
// Just a single batch of data.
119132
std::string line;
@@ -131,7 +144,7 @@ void Main(bool use_gpu) {
131144

132145
VLOG(3) << "run executor";
133146
std::vector<PaddleTensor> output;
134-
predictor->Run({input}, &output);
147+
predictor->Run({input}, &output, 1);
135148

136149
VLOG(3) << "output.size " << output.size();
137150
auto& tensor = output.front();
@@ -146,9 +159,12 @@ void Main(bool use_gpu) {
146159

147160
int main(int argc, char** argv) {
148161
google::ParseCommandLineFlags(&argc, &argv, true);
149-
paddle::demo::Main(false /* use_gpu*/);
150-
if (FLAGS_use_gpu) {
151-
paddle::demo::Main(true /*use_gpu*/);
162+
if (FLAGS_use_gpu && FLAGS_use_trt) {
163+
paddle::demo::Main(true /*use_gpu*/, true);
164+
} else if (FLAGS_use_gpu) {
165+
paddle::demo::Main(true /*use_gpu*/, false);
166+
} else {
167+
paddle::demo::Main(false /*use_gpu*/, false /*use_tensorrt*/);
152168
}
153169
return 0;
154170
}

paddle/fluid/operators/adadelta_op.cc

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ namespace paddle {
1818
namespace operators {
1919

2020
using Tensor = framework::Tensor;
21+
2122
class AdadeltaOp : public framework::OperatorWithKernel {
2223
public:
2324
using framework::OperatorWithKernel::OperatorWithKernel;
@@ -31,6 +32,16 @@ class AdadeltaOp : public framework::OperatorWithKernel {
3132
"Input(AvgSquaredGrad) of AdadeltaOp should not be null.");
3233
PADDLE_ENFORCE(ctx->HasInput("AvgSquaredUpdate"),
3334
"Input(AvgSquaredUpdate) of AdadeltaOp should not be null.");
35+
PADDLE_ENFORCE(
36+
ctx->GetInputsVarType("Param").front() ==
37+
framework::proto::VarType::LOD_TENSOR,
38+
"The input var's type should be LoDTensor, but the received is %s",
39+
ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
40+
PADDLE_ENFORCE(
41+
ctx->GetInputsVarType("Grad").front() ==
42+
framework::proto::VarType::LOD_TENSOR,
43+
"The input var's type should be LoDTensor, but the received is %s",
44+
ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front());
3445

3546
PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
3647
"Output(ParamOut) of AdadeltaOp should not be null.");
@@ -56,6 +67,7 @@ class AdadeltaOp : public framework::OperatorWithKernel {
5667
ctx->SetOutputDim("AvgSquaredGradOut", param_dim);
5768
ctx->SetOutputDim("AvgSquaredUpdateOut", param_dim);
5869
}
70+
5971
framework::OpKernelType GetExpectedKernelType(
6072
const framework::ExecutionContext &ctx) const override {
6173
auto input_data_type =

paddle/fluid/operators/adadelta_op.h

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,17 @@ template <typename DeviceContext, typename T>
2323
class AdadeltaOpKernel : public framework::OpKernel<T> {
2424
public:
2525
void Compute(const framework::ExecutionContext& ctx) const override {
26+
const auto* param_var = ctx.InputVar("Param");
27+
PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
28+
"The Var(%s)'s type should be LoDTensor, "
29+
"but the received is %s",
30+
ctx.Inputs("Param").front(), param_var->Type().name());
31+
const auto* grad_var = ctx.InputVar("Grad");
32+
PADDLE_ENFORCE(grad_var->IsType<framework::LoDTensor>(),
33+
"The Var(%s)'s type should be LoDTensor, "
34+
"but the received is %s",
35+
ctx.Inputs("Grad").front(), grad_var->Type().name());
36+
2637
auto param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
2738
auto avg_squared_grad_out_tensor =
2839
ctx.Output<framework::Tensor>("AvgSquaredGradOut");

paddle/fluid/operators/adagrad_op.h

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
1313
limitations under the License. */
1414

1515
#pragma once
16+
1617
#include "paddle/fluid/framework/eigen.h"
1718
#include "paddle/fluid/framework/op_registry.h"
1819

@@ -21,42 +22,48 @@ namespace operators {
2122

2223
template <typename DeviceContext, typename T>
2324
struct SparseAdagradFunctor {
24-
void operator()(const DeviceContext& context,
25-
const framework::SelectedRows& grad,
26-
const framework::Tensor& learning_rate, T epsilon,
27-
framework::Tensor* moment, framework::Tensor* param);
25+
void operator()(const DeviceContext &context,
26+
const framework::SelectedRows &grad,
27+
const framework::Tensor &learning_rate, T epsilon,
28+
framework::Tensor *moment, framework::Tensor *param);
2829
};
2930

3031
template <typename DeviceContext, typename T>
3132
class AdagradOpKernel : public framework::OpKernel<T> {
3233
public:
33-
void Compute(const framework::ExecutionContext& ctx) const override {
34-
auto* param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
35-
auto* moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
34+
void Compute(const framework::ExecutionContext &ctx) const override {
35+
const auto *param_var = ctx.InputVar("Param");
36+
PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
37+
"The Var(%s)'s type should be LoDTensor, "
38+
"but the received is %s",
39+
ctx.Inputs("Param").front(), param_var->Type().name());
40+
41+
auto *param_out_tensor = ctx.Output<framework::Tensor>("ParamOut");
42+
auto *moment_out_tensor = ctx.Output<framework::Tensor>("MomentOut");
3643

3744
param_out_tensor->mutable_data<T>(ctx.GetPlace());
3845
moment_out_tensor->mutable_data<T>(ctx.GetPlace());
3946

4047
T epsilon = static_cast<T>(ctx.Attr<float>("epsilon"));
4148

42-
auto* grad_var = ctx.InputVar("Grad");
49+
auto *grad_var = ctx.InputVar("Grad");
4350
if (grad_var->IsType<framework::LoDTensor>()) {
4451
auto param = framework::EigenVector<T>::Flatten(
4552
*ctx.Input<framework::Tensor>("Param"));
4653
auto grad = framework::EigenVector<T>::Flatten(
4754
*ctx.Input<framework::Tensor>("Grad"));
4855
auto moment = framework::EigenVector<T>::Flatten(
4956
*ctx.Input<framework::Tensor>("Moment"));
50-
auto* learning_rate = ctx.Input<framework::Tensor>("LearningRate");
57+
auto *learning_rate = ctx.Input<framework::Tensor>("LearningRate");
5158

5259
auto param_out = framework::EigenVector<T>::Flatten(*param_out_tensor);
5360
auto moment_out = framework::EigenVector<T>::Flatten(*moment_out_tensor);
54-
auto* place = ctx.template device_context<DeviceContext>().eigen_device();
61+
auto *place = ctx.template device_context<DeviceContext>().eigen_device();
5562

5663
moment_out.device(*place) = moment + grad * grad;
5764
Eigen::DSizes<int, 1> m_dsize(moment_out_tensor->numel());
5865
if (platform::is_cpu_place(ctx.GetPlace())) {
59-
auto* lr = learning_rate->data<T>();
66+
auto *lr = learning_rate->data<T>();
6067
param_out.device(*place) =
6168
param - lr[0] * grad / (moment_out.sqrt() + epsilon);
6269
} else {
@@ -66,10 +73,10 @@ class AdagradOpKernel : public framework::OpKernel<T> {
6673
lr.broadcast(m_dsize) * grad / (moment_out.sqrt() + epsilon);
6774
}
6875
} else if (grad_var->IsType<framework::SelectedRows>()) {
69-
auto* param_tensor = ctx.Input<framework::Tensor>("Param");
76+
auto *param_tensor = ctx.Input<framework::Tensor>("Param");
7077
PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor);
7178

72-
auto* moment_tensor = ctx.Input<framework::Tensor>("Moment");
79+
auto *moment_tensor = ctx.Input<framework::Tensor>("Moment");
7380
PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor);
7481

7582
SparseAdagradFunctor<DeviceContext, T> functor;

0 commit comments

Comments
 (0)