Skip to content

Commit 6f93248

Browse files
author
Tomasz Patejko
committed
MKL elementwise_add: BLAS version compiles with integral types
1 parent e43c8f3 commit 6f93248

File tree

2 files changed

+34
-13
lines changed

2 files changed

+34
-13
lines changed

paddle/fluid/operators/elementwise_add_op.cc

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,10 +18,10 @@ namespace ops = paddle::operators;
1818
REGISTER_ELEMWISE_OP(elementwise_add, "Add", "Out = X + Y");
1919
// Register the CPU forward kernel for elementwise_add over all supported
// dtypes. Integral instantiations are valid again now that the BLAS path
// is SFINAE-gated to floating-point types only.
REGISTER_OP_CPU_KERNEL(
    elementwise_add,
    ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, float>,
    ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, double>,
    ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, int>,
    ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, int64_t>);
2525
REGISTER_OP_CPU_KERNEL(
2626
elementwise_add_grad,
2727
ops::ElementwiseAddGradKernel<paddle::platform::CPUDeviceContext, float>,

paddle/fluid/operators/elementwise_add_op.h

Lines changed: 30 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,34 @@ struct AddFunctor {
2626
inline HOSTDEVICE T operator()(T a, T b) const { return a + b; }
2727
};
2828

29+
template <typename DeviceContext, typename T>
30+
void default_elementwise_add(const framework::ExecutionContext& ctx,
31+
const framework::Tensor* x,
32+
const framework::Tensor* y, framework::Tensor* z) {
33+
int axis = ctx.Attr<int>("axis");
34+
ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(ctx, x, y, axis,
35+
AddFunctor<T>(), z);
36+
}
37+
38+
template <typename DeviceContext, typename T>
39+
typename std::enable_if<std::is_floating_point<T>::value>::type elementwise_add(
40+
const framework::ExecutionContext& ctx, const framework::Tensor* x,
41+
const framework::Tensor* y, framework::Tensor* z) {
42+
auto eigen_x = framework::EigenVector<T>::Flatten(*x);
43+
auto eigen_y = framework::EigenVector<T>::Flatten(*y);
44+
auto eigen_z = framework::EigenVector<T>::Flatten(*z);
45+
46+
auto blas = math::GetBlas<DeviceContext, T>(ctx);
47+
blas.VADD(x->numel(), eigen_x.data(), eigen_y.data(), eigen_z.data());
48+
}
49+
50+
template <typename DeviceContext, typename T>
51+
typename std::enable_if<std::is_integral<T>::value>::type elementwise_add(
52+
const framework::ExecutionContext& ctx, const framework::Tensor* x,
53+
const framework::Tensor* y, framework::Tensor* z) {
54+
default_elementwise_add<DeviceContext, T>(ctx, x, y, z);
55+
}
56+
2957
template <typename DeviceContext, typename T>
3058
class ElementwiseAddKernel : public framework::OpKernel<T> {
3159
public:
@@ -36,19 +64,12 @@ class ElementwiseAddKernel : public framework::OpKernel<T> {
3664
const auto y = ctx.Input<Tensor>("Y");
3765
auto z = ctx.Output<Tensor>("Out");
3866
z->mutable_data<T>(ctx.GetPlace());
39-
int axis = ctx.Attr<int>("axis");
4067

4168
auto dims_equal = x->dims() == y->dims();
4269
if (platform::is_cpu_place(ctx.GetPlace()) && dims_equal) {
43-
auto eigen_x = framework::EigenVector<T>::Flatten(*x);
44-
auto eigen_y = framework::EigenVector<T>::Flatten(*y);
45-
auto eigen_z = framework::EigenVector<T>::Flatten(*z);
46-
47-
auto blas = math::GetBlas<DeviceContext, T>(ctx);
48-
blas.VADD(x->numel(), eigen_x.data(), eigen_y.data(), eigen_z.data());
70+
elementwise_add<DeviceContext, T>(ctx, x, y, z);
4971
} else {
50-
ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(ctx, x, y, axis,
51-
AddFunctor<T>(), z);
72+
default_elementwise_add<DeviceContext, T>(ctx, x, y, z);
5273
}
5374
}
5475
};

0 commit comments

Comments
 (0)