|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */
| 14 | +#include "paddle/framework/lod_rank_table.h" |
| 15 | +#include "paddle/operators/array_operator.h" |
| 16 | +#include "paddle/operators/math/math_function.h" |
| 17 | + |
| 18 | +namespace paddle { |
| 19 | +namespace operators { |
| 20 | + |
| 21 | +class ShrinkRNNMemoryOp : public ArrayOp { |
| 22 | + public: |
| 23 | + ShrinkRNNMemoryOp(const std::string &type, |
| 24 | + const framework::VariableNameMap &inputs, |
| 25 | + const framework::VariableNameMap &outputs, |
| 26 | + const framework::AttributeMap &attrs) |
| 27 | + : ArrayOp(type, inputs, outputs, attrs) {} |
| 28 | + |
| 29 | + void Run(const framework::Scope &scope, |
| 30 | + const platform::DeviceContext &dev_ctx) const override { |
| 31 | + auto *x_var = scope.FindVar(Input("X")); |
| 32 | + PADDLE_ENFORCE(x_var != nullptr, "Input X must be set"); |
| 33 | + auto &x_tensor = x_var->Get<framework::LoDTensor>(); |
| 34 | + size_t offset = this->GetOffset(scope, dev_ctx); |
| 35 | + auto *rank_table_var = scope.FindVar(Input("RankTable")); |
| 36 | + PADDLE_ENFORCE(rank_table_var != nullptr, "RankTable must be set"); |
| 37 | + auto &rank_table = rank_table_var->Get<framework::LoDRankTable>(); |
| 38 | + |
| 39 | + auto &rank_items = rank_table.items(); |
| 40 | + int dst_num_rows = |
| 41 | + std::lower_bound(rank_items.begin(), rank_items.end(), offset, |
| 42 | + [](const framework::LoDRankTable::TableItem &a, |
| 43 | + size_t b) { return a.length > b; }) - |
| 44 | + rank_items.begin(); |
| 45 | + |
| 46 | + auto *out_var = scope.FindVar(Output("Out")); |
| 47 | + PADDLE_ENFORCE(out_var != nullptr, "Output Out must be set"); |
| 48 | + auto &out_tensor = *out_var->GetMutable<framework::LoDTensor>(); |
| 49 | + if (dst_num_rows != 0) { |
| 50 | + out_tensor.ShareDataWith(x_tensor.Slice(0, dst_num_rows)); |
| 51 | + } |
| 52 | + } |
| 53 | +}; |
| 54 | + |
| 55 | +class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker { |
| 56 | + public: |
| 57 | + ShrinkRNNMemoryOpProtoMaker(framework::OpProto *proto, |
| 58 | + framework::OpAttrChecker *op_checker) |
| 59 | + : OpProtoAndCheckerMaker(proto, op_checker) { |
| 60 | + AddInput("X", ""); |
| 61 | + AddInput("RankTable", ""); |
| 62 | + AddInput("I", ""); |
| 63 | + AddOutput("Out", ""); |
| 64 | + AddComment(""); |
| 65 | + } |
| 66 | +}; |
| 67 | + |
| 68 | +class ShrinkRNNMemoryInferShape : public framework::InferShapeBase { |
| 69 | + public: |
| 70 | + void operator()(framework::InferShapeContext *context) const override { |
| 71 | + PADDLE_ENFORCE(context->HasInput("X")); |
| 72 | + PADDLE_ENFORCE(context->HasInput("I")); |
| 73 | + PADDLE_ENFORCE(context->HasInput("RankTable")); |
| 74 | + context->SetOutputDim("Out", context->GetInputDim("X")); |
| 75 | + } |
| 76 | +}; |
| 77 | + |
| 78 | +class ShrinkRNNMemoryGradOp : public ArrayOp { |
| 79 | + public: |
| 80 | + ShrinkRNNMemoryGradOp(const std::string &type, |
| 81 | + const framework::VariableNameMap &inputs, |
| 82 | + const framework::VariableNameMap &outputs, |
| 83 | + const framework::AttributeMap &attrs) |
| 84 | + : ArrayOp(type, inputs, outputs, attrs) {} |
| 85 | + |
| 86 | + void Run(const framework::Scope &scope, |
| 87 | + const platform::DeviceContext &dev_ctx) const override { |
| 88 | + auto *dout_var = scope.FindVar(Input(framework::GradVarName("Out"))); |
| 89 | + auto *dx_var = scope.FindVar(Output(framework::GradVarName("X"))); |
| 90 | + PADDLE_ENFORCE(dx_var != nullptr, "Input Gradient should not be nullptr"); |
| 91 | + auto *x_var = scope.FindVar(Input("X")); |
| 92 | + PADDLE_ENFORCE(x_var != nullptr); |
| 93 | + |
| 94 | + auto &x_tensor = x_var->Get<framework::LoDTensor>(); |
| 95 | + auto &dx_tensor = *dx_var->GetMutable<framework::LoDTensor>(); |
| 96 | + dx_tensor.Resize(x_tensor.dims()); |
| 97 | + dx_tensor.mutable_data(x_tensor.place(), x_tensor.type()); |
| 98 | + |
| 99 | + if (dout_var == nullptr) { // dx_tensor fill zero |
| 100 | + math::set_constant(dev_ctx, &dx_tensor, 0.0f); |
| 101 | + } else { |
| 102 | + auto &dout_tensor = dout_var->Get<framework::LoDTensor>(); |
| 103 | + auto height = dout_tensor.dims()[0]; |
| 104 | + dx_tensor.Slice(0, static_cast<int>(height)) |
| 105 | + .CopyFrom(dout_tensor, dout_tensor.place(), dev_ctx); |
| 106 | + if (dx_tensor.dims()[0] < height) { |
| 107 | + auto rest_tensor = dx_tensor.Slice( |
| 108 | + static_cast<int>(height), static_cast<int>(dout_tensor.dims()[0])); |
| 109 | + math::set_constant(dev_ctx, &rest_tensor, 0.0f); |
| 110 | + } |
| 111 | + } |
| 112 | + } |
| 113 | +}; |
| 114 | + |
| 115 | +class ShrinkRNNMemoryGradInferShape : public framework::InferShapeBase { |
| 116 | + public: |
| 117 | + void operator()(framework::InferShapeContext *context) const override { |
| 118 | + PADDLE_ENFORCE(context->HasInput("X")); |
| 119 | + PADDLE_ENFORCE(context->HasOutput(framework::GradVarName("X"))); |
| 120 | + context->SetOutputDim(framework::GradVarName("X"), |
| 121 | + context->GetInputDim("X")); |
| 122 | + } |
| 123 | +}; |
| 124 | + |
| 125 | +class ShrinkRNNGradOpMaker : public framework::SingleGradOpDescMaker { |
| 126 | + public: |
| 127 | + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; |
| 128 | + |
| 129 | + protected: |
| 130 | + std::unique_ptr<framework::OpDescBind> Apply() const override { |
| 131 | + auto *op = new framework::OpDescBind(); |
| 132 | + op->SetType("shrink_rnn_memory_grad"); |
| 133 | + op->SetInput("X", Input("X")); |
| 134 | + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); |
| 135 | + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); |
| 136 | + op->SetAttrMap(Attrs()); |
| 137 | + return std::unique_ptr<framework::OpDescBind>(op); |
| 138 | + } |
| 139 | +}; |
| 140 | + |
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
// Register forward and backward ops. Both ops implement Run() directly
// (no per-device kernels), so they are registered with REGISTER_OPERATOR
// rather than a kernel macro; the forward op also registers its custom
// shape inference, proto maker, and grad-op maker.
REGISTER_OPERATOR(shrink_rnn_memory, ops::ShrinkRNNMemoryOp,
                  ops::ShrinkRNNMemoryInferShape,
                  ops::ShrinkRNNMemoryOpProtoMaker, ops::ShrinkRNNGradOpMaker);
REGISTER_OPERATOR(shrink_rnn_memory_grad, ops::ShrinkRNNMemoryGradOp,
                  ops::ShrinkRNNMemoryGradInferShape);
0 commit comments