Skip to content

Commit 43ee997

Browse files
authored
Merge pull request #13106 from jacquesqiao/cherry-pick-scale-support-selectedrows
cherry-pick Scale support selectedrows (#12960)
2 parents 781dc72 + fb8d007 commit 43ee997

File tree

3 files changed

+94
-9
lines changed

3 files changed

+94
-9
lines changed

paddle/fluid/operators/scale_op.cc

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,11 @@ See the License for the specific language governing permissions and
1313
limitations under the License. */
1414

1515
#include "paddle/fluid/operators/scale_op.h"
16+
1617
#include <string>
1718

19+
#include "paddle/fluid/operators/detail/safe_ref.h"
20+
1821
namespace paddle {
1922
namespace operators {
2023

@@ -52,6 +55,21 @@ Multiply the input tensor with a float scalar to scale the input tensor.
5255
}
5356
};
5457

58+
// Propagates the input variable's type (e.g. LoDTensor vs. SelectedRows)
// and data type to the output variable, so the scale op can operate
// transparently on SelectedRows inputs as well as dense tensors.
class ScaleOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc &op_desc,
                  framework::BlockDesc *block) const override {
    auto &in_var_name = op_desc.Input("X").front();
    auto &in_var = detail::Ref(block->FindVarRecursive(in_var_name));

    auto out_var_name = op_desc.Output("Out").front();
    // Guard the output lookup with detail::Ref as well (consistent with the
    // input above): a missing output variable now fails with an explicit
    // enforce error instead of a null-pointer dereference.
    auto &out_var = detail::Ref(block->FindVarRecursive(out_var_name));

    out_var.SetType(in_var.GetType());
    out_var.SetDataType(in_var.GetDataType());
  }
};
5573
class ScaleGradMaker : public framework::SingleGradOpDescMaker {
5674
public:
5775
using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
@@ -71,7 +89,8 @@ class ScaleGradMaker : public framework::SingleGradOpDescMaker {
7189

7290
namespace ops = paddle::operators;
7391

74-
REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker, ops::ScaleGradMaker);
92+
REGISTER_OPERATOR(scale, ops::ScaleOp, ops::ScaleOpMaker, ops::ScaleGradMaker,
93+
ops::ScaleOpVarTypeInference);
7594
REGISTER_OP_CPU_KERNEL(
7695
scale, ops::ScaleKernel<paddle::platform::CPUDeviceContext, float>,
7796
ops::ScaleKernel<paddle::platform::CPUDeviceContext, double>,

paddle/fluid/operators/scale_op.h

Lines changed: 20 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -22,17 +22,29 @@ namespace operators {
2222
template <typename DeviceContext, typename T>
2323
class ScaleKernel : public framework::OpKernel<T> {
2424
public:
25-
virtual void Compute(const framework::ExecutionContext& context) const {
26-
auto* tensor = context.Output<framework::Tensor>("Out");
27-
auto* in = context.Input<framework::Tensor>("X");
28-
tensor->mutable_data<T>(in->place());
25+
virtual void Compute(const framework::ExecutionContext& ctx) const {
26+
auto* in_var = ctx.InputVar("X");
27+
auto* in = ctx.Input<framework::Tensor>("X");
2928

30-
auto scale = static_cast<T>(context.Attr<float>("scale"));
29+
auto* out_var = ctx.OutputVar("Out");
30+
auto* out = ctx.Output<framework::Tensor>("Out");
31+
out->mutable_data<T>(in->place());
3132

32-
auto eigen_out = framework::EigenVector<T>::Flatten(*tensor);
33+
PADDLE_ENFORCE_EQ(in->dims(), out->dims(),
34+
"in and out should have the same dim");
35+
36+
auto scale = static_cast<T>(ctx.Attr<float>("scale"));
37+
38+
if (in_var->IsType<framework::SelectedRows>() && in_var != out_var) {
39+
auto& in_slr = in_var->Get<framework::SelectedRows>();
40+
auto* out_slr = out_var->GetMutable<framework::SelectedRows>();
41+
out_slr->set_rows(in_slr.rows());
42+
out_slr->set_height(in_slr.height());
43+
}
44+
45+
auto eigen_out = framework::EigenVector<T>::Flatten(*out);
3346
auto eigen_in = framework::EigenVector<T>::Flatten(*in);
34-
auto& dev =
35-
*context.template device_context<DeviceContext>().eigen_device();
47+
auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
3648
eigen_out.device(dev) = scale * eigen_in;
3749
}
3850
};

python/paddle/fluid/tests/unittests/test_scale_op.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717
import unittest
1818
import numpy as np
1919
from op_test import OpTest
20+
import paddle.fluid.core as core
21+
from paddle.fluid.op import Operator
2022

2123

2224
class TestScaleOp(OpTest):
@@ -33,5 +35,57 @@ def test_check_grad(self):
3335
self.check_grad(['X'], 'Out')
3436

3537

38+
class TestScaleOpSelectedRows(unittest.TestCase):
    """Runs scale_op on SelectedRows inputs, out-of-place and in-place."""

    def check_with_place(self, place, in_name, out_name):
        scope = core.Scope()

        # Metadata of the input SelectedRows.
        in_height = 10
        in_rows = [0, 4, 7]
        in_row_numel = 12
        scale = 2.0

        # Create and initialize the input SelectedRows variable.
        # (The original comments said "Grad"/"Param"/"sgd operator" — copy-
        # pasted from an SGD test; this test exercises the scale op.)
        in_selected_rows = scope.var(in_name).get_selected_rows()
        in_selected_rows.set_height(in_height)
        in_selected_rows.set_rows(in_rows)
        in_array = np.random.random(
            (len(in_rows), in_row_numel)).astype("float32")

        in_tensor = in_selected_rows.get_tensor()
        in_tensor.set(in_array, place)

        # Create the output SelectedRows variable. When in_name == out_name
        # this resolves to the same variable, exercising the in-place path.
        out_selected_rows = scope.var(out_name).get_selected_rows()
        out_tensor = out_selected_rows.get_tensor()
        out_tensor._set_dims(in_tensor._get_dims())

        # Create and run the scale operator.
        scale_op = Operator("scale", X=in_name, Out=out_name, scale=scale)
        scale_op.run(scope, place)

        # Both the scaled values and the rows/height metadata must be
        # propagated to the output.
        out_height = out_selected_rows.height()
        out_rows = out_selected_rows.rows()
        result_array = np.array(out_tensor)

        self.assertTrue((in_array * scale == result_array).all())
        self.assertEqual(in_height, out_height)
        self.assertEqual(in_rows, out_rows)

    def _places(self):
        # CPU always; CUDA only when this build supports it.
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))
        return places

    def test_scale_selected_rows(self):
        for place in self._places():
            self.check_with_place(place, 'in', 'out')

    def test_scale_selected_rows_inplace(self):
        for place in self._places():
            self.check_with_place(place, 'in', 'in')
88+
89+
3690
if __name__ == "__main__":
3791
unittest.main()

0 commit comments

Comments
 (0)