Skip to content

Commit acb13e7

Browse files
committed
Merge branch 'develop' into fluid_infer
2 parents 5c05653 + 336e8db commit acb13e7

File tree

12 files changed

+77
-73
lines changed

12 files changed

+77
-73
lines changed

paddle/operators/compare_op.cc

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,11 @@ N-dim tensor. X and Y could be any type. The each element of the Out tensor is
3939
calculated by %s
4040
)DOC",
4141
comment.type, comment.equation));
42+
AddAttr<int>("axis",
43+
"(int, default -1). The start dimension index "
44+
"for broadcasting Y onto X.")
45+
.SetDefault(-1)
46+
.EqualGreaterThan(-1);
4247
}
4348
};
4449

@@ -95,11 +100,5 @@ REGISTER_LOGICAL_OP(less_than, "Out = X < Y");
95100
REGISTER_LOGICAL_KERNEL(less_than, CPU, paddle::operators::LessThanFunctor);
96101
REGISTER_LOGICAL_OP(less_equal, "Out = X <= Y");
97102
REGISTER_LOGICAL_KERNEL(less_equal, CPU, paddle::operators::LessEqualFunctor);
98-
REGISTER_LOGICAL_OP(greater_than, "Out = X > Y");
99-
REGISTER_LOGICAL_KERNEL(greater_than, CPU,
100-
paddle::operators::GreaterThanFunctor);
101-
REGISTER_LOGICAL_OP(greater_equal, "Out = X >= Y");
102-
REGISTER_LOGICAL_KERNEL(greater_equal, CPU,
103-
paddle::operators::GreaterEqualFunctor);
104103
REGISTER_LOGICAL_OP(equal, "Out = X == Y");
105104
REGISTER_LOGICAL_KERNEL(equal, CPU, paddle::operators::EqualFunctor);

paddle/operators/compare_op.cu

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,4 @@ limitations under the License. */
1616

1717
REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor);
1818
REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor);
19-
REGISTER_LOGICAL_KERNEL(greater_than, CUDA,
20-
paddle::operators::GreaterThanFunctor);
21-
REGISTER_LOGICAL_KERNEL(greater_equal, CUDA,
22-
paddle::operators::GreaterEqualFunctor);
2319
REGISTER_LOGICAL_KERNEL(equal, CUDA, paddle::operators::EqualFunctor);

paddle/operators/compare_op.h

Lines changed: 2 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ limitations under the License. */
1616
#include <math.h>
1717
#include <type_traits>
1818
#include "paddle/framework/op_registry.h"
19+
#include "paddle/operators/elementwise_op_function.h"
1920
#include "paddle/platform/transform.h"
2021

2122
namespace paddle {
@@ -33,18 +34,6 @@ struct LessEqualFunctor {
3334
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a <= b; }
3435
};
3536

36-
template <typename T>
37-
struct GreaterThanFunctor {
38-
using ELEM_TYPE = T;
39-
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a > b; }
40-
};
41-
42-
template <typename T>
43-
struct GreaterEqualFunctor {
44-
using ELEM_TYPE = T;
45-
HOSTDEVICE bool operator()(const T& a, const T& b) const { return a >= b; }
46-
};
47-
4837
template <typename T>
4938
struct EqualFunctor {
5039
using ELEM_TYPE = T;
@@ -65,14 +54,7 @@ class CompareOpKernel
6554
public:
6655
void Compute(const framework::ExecutionContext& context) const override {
6756
using T = typename Functor::ELEM_TYPE;
68-
auto* x = context.Input<framework::Tensor>("X");
69-
auto* y = context.Input<framework::Tensor>("Y");
70-
auto* out = context.Output<framework::Tensor>("Out");
71-
Functor binary_func;
72-
platform::Transform<DeviceContext> trans;
73-
trans(context.template device_context<DeviceContext>(), x->data<T>(),
74-
x->data<T>() + x->numel(), y->data<T>(),
75-
out->mutable_data<bool>(context.GetPlace()), binary_func);
57+
ElementwiseComputeEx<Functor, DeviceContext, T, bool>(context);
7658
}
7759
};
7860

paddle/operators/elementwise_op_function.h

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -176,14 +176,15 @@ class MidWiseTransformIterator<T, platform::CUDADeviceContext>
176176
};
177177
#endif
178178

179-
template <typename Functor, typename T, typename DeviceContext>
179+
template <typename Functor, typename T, typename DeviceContext,
180+
typename OutType = T>
180181
class TransformFunctor {
181182
public:
182183
TransformFunctor(const framework::Tensor* x, const framework::Tensor* y,
183184
framework::Tensor* z, const DeviceContext& ctx, Functor func)
184185
: x_(x->data<T>()),
185186
y_(y->data<T>()),
186-
z_(z->mutable_data<T>(ctx.GetPlace())),
187+
z_(z->mutable_data<OutType>(ctx.GetPlace())),
187188
nx_(x->numel()),
188189
ctx_(ctx),
189190
func_(func) {}
@@ -208,7 +209,7 @@ class TransformFunctor {
208209
private:
209210
const T* x_;
210211
const T* y_;
211-
T* z_;
212+
OutType* z_;
212213
int64_t nx_;
213214
const DeviceContext& ctx_;
214215
Functor func_;
@@ -364,15 +365,16 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx) {
364365
}
365366
}
366367

367-
template <typename Functor, typename DeviceContext, typename T>
368+
template <typename Functor, typename DeviceContext, typename T,
369+
typename OutType = T>
368370
void ElementwiseComputeEx(const framework::ExecutionContext& ctx) {
369371
using Tensor = framework::Tensor;
370372

371373
auto* x = ctx.Input<Tensor>("X");
372374
auto* y = ctx.Input<Tensor>("Y");
373375
auto* z = ctx.Output<Tensor>("Out");
374-
z->mutable_data<T>(ctx.GetPlace());
375-
TransformFunctor<Functor, T, DeviceContext> functor(
376+
z->mutable_data<OutType>(ctx.GetPlace());
377+
TransformFunctor<Functor, T, DeviceContext, OutType> functor(
376378
x, y, z, ctx.template device_context<DeviceContext>(), Functor());
377379

378380
auto x_dims = x->dims();

python/paddle/v2/fluid/layers/control_flow.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -11,13 +11,13 @@
1111
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
14+
import contextlib
1415

15-
from ..layer_helper import LayerHelper, unique_name
16-
from ..framework import Program, Variable, Operator
17-
from .. import core
16+
from layer_function_generator import autodoc
1817
from tensor import assign, fill_constant
19-
import contextlib
20-
from ..registry import autodoc
18+
from .. import core
19+
from ..framework import Program, Variable, Operator
20+
from ..layer_helper import LayerHelper, unique_name
2121

2222
__all__ = [
2323
'split_lod_tensor',
@@ -1477,7 +1477,7 @@ def _assert_in_rnn_block_(self, method):
14771477
method))
14781478

14791479

1480-
@autodoc
1480+
@autodoc()
14811481
def reorder_lod_tensor_by_rank(x, rank_table):
14821482
helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())
14831483
helper.is_instance('x', Variable)

python/paddle/v2/fluid/layers/device.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,14 +15,14 @@
1515
All util layers.
1616
"""
1717

18-
from ..layer_helper import LayerHelper
18+
from layer_function_generator import autodoc
1919
from ..framework import unique_name
20-
from ..registry import autodoc
20+
from ..layer_helper import LayerHelper
2121

2222
__all__ = ['get_places']
2323

2424

25-
@autodoc
25+
@autodoc()
2626
def get_places(device_count=None, device_type=None):
2727
helper = LayerHelper('get_places', **locals())
2828
out_places = helper.create_variable(name=unique_name(helper.name + ".out"))

python/paddle/v2/fluid/registry.py renamed to python/paddle/v2/fluid/layers/layer_function_generator.py

Lines changed: 16 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -13,17 +13,19 @@
1313
# limitations under the License.
1414
import re
1515
import cStringIO
16-
import warnings
1716
import functools
18-
import inspect
17+
import warnings
18+
19+
from .. import proto
1920

20-
import proto.framework_pb2 as framework_pb2
21-
from framework import OpProtoHolder, Variable, Program, Operator
22-
from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
21+
framework_pb2 = proto.framework_pb2
22+
23+
from ..framework import OpProtoHolder, Variable
24+
from ..layer_helper import LayerHelper
2325

2426
__all__ = [
2527
'deprecated',
26-
'register_layer',
28+
'generate_layer_fn',
2729
'autodoc',
2830
]
2931

@@ -96,7 +98,7 @@ def _type_to_str_(tp):
9698
return buf.getvalue()
9799

98100

99-
def register_layer(op_type):
101+
def generate_layer_fn(op_type):
100102
"""Register the Python layer for an Operator.
101103
102104
Args:
@@ -207,7 +209,10 @@ def func_wrapper(*args, **kwargs):
207209
return func_wrapper
208210

209211

210-
def autodoc(func):
211-
func.__doc__ = _generate_doc_string_(OpProtoHolder.instance().get_op_proto(
212-
func.__name__))
213-
return func
212+
def autodoc(comment=""):
213+
def __impl__(func):
214+
func.__doc__ = _generate_doc_string_(OpProtoHolder.instance(
215+
).get_op_proto(func.__name__)) + comment
216+
return func
217+
218+
return __impl__

python/paddle/v2/fluid/layers/ops.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,7 @@
1111
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
14-
15-
from ..registry import register_layer
14+
from layer_function_generator import generate_layer_fn
1615

1716
__activations__ = [
1817
'sigmoid',
@@ -53,4 +52,4 @@
5352
] + __activations__
5453

5554
for _OP in set(__all__):
56-
globals()[_OP] = register_layer(_OP)
55+
globals()[_OP] = generate_layer_fn(_OP)

python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,17 @@
1+
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
115
import numpy as np
216
import paddle.v2 as paddle
317
import paddle.v2.fluid as fluid

python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,17 @@
1+
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
115
from __future__ import print_function
216

317
import sys

0 commit comments

Comments (0)