Commit 98b14a3

Merge remote-tracking branch 'ups/develop' into fix

2 parents a427e76 + 566a940

13 files changed: +303 -114 lines changed

paddle/fluid/operators/activation_op.cc

Lines changed: 1 addition & 1 deletion

@@ -112,7 +112,7 @@ Sigmoid Activation Operator
 __attribute__((unused)) constexpr char LogSigmoidDoc[] = R"DOC(
 Logsigmoid Activation Operator
 
-$$out = \log \frac{1}{1 + e^{-x}}$$
+$$out = \\log \\frac{1}{1 + e^{-x}}$$
 
 )DOC";
 
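The escaped docstring still renders the same formula. For reference, a minimal numpy sketch (not part of the commit) of what the log-sigmoid operator computes, using the numerically stable form:

    import numpy as np

    def logsigmoid(x):
        # out = log(1 / (1 + e^{-x})) = -log(1 + e^{-x}), computed stably
        # as min(x, 0) - log(1 + e^{-|x|}) to avoid overflow for large |x|.
        return np.minimum(x, 0.) - np.log1p(np.exp(-np.abs(x)))

    print(logsigmoid(np.array([-3., 0., 3.])))  # ~[-3.0486, -0.6931, -0.0486]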

paddle/fluid/operators/detection/box_coder_op.cc

Lines changed: 27 additions & 14 deletions

@@ -106,23 +106,36 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker {
         "and M represents the number of decoded boxes.");
 
     AddComment(R"DOC(
-Bounding Box Coder Operator.
+
+Bounding Box Coder.
+
 Encode/Decode the target bounding box with the priorbox information.
+
 The Encoding schema described below:
-ox = (tx - px) / pw / pxv
-oy = (ty - py) / ph / pyv
-ow = log(abs(tw / pw)) / pwv
-oh = log(abs(th / ph)) / phv
+
+ox = (tx - px) / pw / pxv
+
+oy = (ty - py) / ph / pyv
+
+ow = log(abs(tw / pw)) / pwv
+
+oh = log(abs(th / ph)) / phv
+
 The Decoding schema described below:
-ox = (pw * pxv * tx * + px) - tw / 2
-oy = (ph * pyv * ty * + py) - th / 2
-ow = exp(pwv * tw) * pw + tw / 2
-oh = exp(phv * th) * ph + th / 2
-where tx, ty, tw, th denote the target box's center coordinates, width and
-height respectively. Similarly, px, py, pw, ph denote the priorbox's(anchor)
-center coordinates, width and height. pxv, pyv, pwv, phv denote the variance
-of the priorbox and ox, oy, ow, oh denote the encoded/decoded coordinates,
-width and height.
+
+ox = (pw * pxv * tx + px) - tw / 2
+
+oy = (ph * pyv * ty + py) - th / 2
+
+ow = exp(pwv * tw) * pw + tw / 2
+
+oh = exp(phv * th) * ph + th / 2
+
+where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, width
+and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote the
+priorbox's (anchor) center coordinates, width and height. `pxv`, `pyv`, `pwv`,
+`phv` denote the variance of the priorbox and `ox`, `oy`, `ow`, `oh` denote the
+encoded/decoded coordinates, width and height.
 )DOC");
  }
};
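To make the schema concrete, an illustrative numpy sketch (not part of the commit) that transcribes the documented encode/decode formulas for a single box given as (center_x, center_y, w, h); the helper names are hypothetical:

    import numpy as np

    def encode_box(target, prior, variance):
        # target, prior: (cx, cy, w, h); variance: (pxv, pyv, pwv, phv)
        tx, ty, tw, th = target
        px, py, pw, ph = prior
        pxv, pyv, pwv, phv = variance
        ox = (tx - px) / pw / pxv
        oy = (ty - py) / ph / pyv
        ow = np.log(abs(tw / pw)) / pwv
        oh = np.log(abs(th / ph)) / phv
        return ox, oy, ow, oh

    def decode_box(code, prior, variance):
        # Transcribed literally from the docstring above.
        tx, ty, tw, th = code
        px, py, pw, ph = prior
        pxv, pyv, pwv, phv = variance
        ox = (pw * pxv * tx + px) - tw / 2
        oy = (ph * pyv * ty + py) - th / 2
        ow = np.exp(pwv * tw) * pw + tw / 2
        oh = np.exp(phv * th) * ph + th / 2
        return ox, oy, ow, oh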

paddle/fluid/operators/gaussian_random_batch_size_like_op.cc

Lines changed: 6 additions & 3 deletions

@@ -36,11 +36,12 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
   void Apply() override {
     AddAttr<float>("mean",
                    "(float, default 0.0) "
-                   "mean of random tensor.")
+                   "The mean (or center) of the gaussian distribution.")
        .SetDefault(.0f);
    AddAttr<float>("std",
                   "(float, default 1.0) "
-                  "std of random tensor.")
+                  "The standard deviation (std, or spread) of the "
+                  "gaussian distribution.")
        .SetDefault(1.0f);
    AddAttr<int>("seed",
                 "(int, default 0) "
@@ -55,9 +56,11 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
        .SetDefault(framework::proto::VarType::FP32);
 
     AddComment(R"DOC(
-GaussianRandom Operator.
 
 Used to initialize tensors with gaussian random generator.
+The default mean of the distribution is 0, and the default standard
+deviation (std) of the distribution is 1. Users can set mean and std
+via input arguments.
 )DOC");
  }
};
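An illustrative numpy stand-in (not the operator itself) for what the op fills in with the documented defaults:

    import numpy as np

    # Draw a gaussian random tensor with the documented defaults,
    # mean=0.0 and std=1.0; in the op the shape comes from the
    # batch-size-like reference input.
    rng = np.random.RandomState(0)
    sample = rng.normal(loc=0.0, scale=1.0, size=(8, 16)).astype('float32')
    print(sample.mean(), sample.std())  # roughly 0 and 1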

paddle/fluid/operators/listen_and_serv_op.cc

Lines changed: 2 additions & 1 deletion

@@ -348,7 +348,8 @@ class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
 };
 
 void SignalHandler::StopAndExit(int signal_num) {
-  VLOG(3) << "Catch interrupt signal: " << signal_num << ", program will exit";
+  // Do not use VLOG here: the device used for printing may already be
+  // released. exit() will release internally allocated resources.
   exit(0);
 }
 

python/paddle/fluid/initializer.py

Lines changed: 100 additions & 2 deletions

@@ -15,11 +15,13 @@
 import framework
 import numpy as np
 import contextlib
+from framework import convert_np_dtype_to_dtype_
+from core import VarDesc
 
 __all__ = [
-    'Constant', 'Uniform', 'Normal', 'Xavier', 'force_init_on_cpu',
+    'Constant', 'Uniform', 'Normal', 'Xavier', 'Bilinear', 'force_init_on_cpu',
     'init_on_cpu', 'ConstantInitializer', 'UniformInitializer',
-    'NormalInitializer', 'XavierInitializer'
+    'NormalInitializer', 'XavierInitializer', 'BilinearInitializer'
 ]
 
 _force_init_on_cpu_ = False
@@ -422,6 +424,101 @@ def __call__(self, var, block):
         return op
 
 
+class BilinearInitializer(Initializer):
+    """Implements the bilinear initializer.
+
+    This initializer can be used in transposed convolution operator to
+    act as upsampling. Users can upsample a feature map with shape of
+    (B, C, H, W) by any integer factor. The usage is:
+
+    >>> factor = 2
+    >>> w_attr = ParamAttr(learning_rate=0., regularizer=L2Decay(0.),
+    >>>                    initializer=Bilinear())
+    >>> conv_up = fluid.layers.conv2d_transpose(
+    >>>     input,
+    >>>     num_filters=C,
+    >>>     output_size=None,
+    >>>     filter_size=2 * factor - factor % 2,
+    >>>     padding=ceil((factor - 1) / 2.),
+    >>>     stride=factor,
+    >>>     groups=C,
+    >>>     param_attr=w_attr,
+    >>>     bias_attr=False)
+
+    Here `num_filters=C` and `groups=C` mean this is a channel-wise transposed
+    convolution. The filter shape will be (C, 1, K, K) where K is `filter_size`.
+    This initializer will set a (K, K) interpolation kernel for every channel
+    of the filter identically. The resulting shape of the output feature map
+    will be (B, C, factor * H, factor * W). Note that the learning rate and the
+    weight decay are set to 0 in order to keep the coefficient values of the
+    bilinear interpolation unchanged during training.
+    """
+
+    def __init__(self):
+        """Constructor for BilinearInitializer.
+        """
+        super(BilinearInitializer, self).__init__()
+
+    def __call__(self, var, block):
+        """Add bilinear initialization ops for a variable
+
+        Args:
+            var (Variable): Variable that needs to be initialized.
+            block (Block): The block in which initialization ops should
+                           be added.
+
+        Returns:
+            the initialization op
+
+        Raises:
+            ValueError: If the type of `var` or `block` is not right, if
+                        the rank of `var` is not 4, or if
+                        var.shape[2] != var.shape[3].
+        """
+        if not isinstance(var, framework.Variable):
+            raise ValueError("var must be framework.Variable.")
+
+        if not isinstance(block, framework.Block):
+            raise ValueError("block must be framework.Block.")
+
+        shape = var.shape
+        if len(shape) != 4:
+            raise ValueError("the length of shape must be 4.")
+        if shape[2] != shape[3]:
+            raise ValueError("shape[2] must be equal to shape[3].")
+
+        weight = np.zeros(np.prod(var.shape), dtype='float32')
+        size = shape[3]
+        # upsampling factor
+        f = np.ceil(size / 2.)
+        # center of the interpolation kernel
+        c = (2 * f - 1 - f % 2) / (2. * f)
+        for i in range(np.prod(shape)):
+            x = i % size
+            y = (i / size) % size
+            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
+        weight = np.reshape(weight, shape)
+
+        if var.dtype == VarDesc.VarType.FP32:
+            value_name = "fp32_values"
+            values = [float(v) for v in weight.flat]
+        else:
+            raise ValueError("Unsupported dtype %s" % var.dtype)
+        if np.prod(shape) > 1024 * 1024:
+            raise ValueError("The size of input is too big.")
+        op = block.append_op(
+            type='assign_value',
+            outputs={'Out': [var]},
+            attrs={
+                'dtype': var.dtype,
+                'shape': list(shape),
+                value_name: values
+            })
+        var.op = op
+        return op
+
+
 # We shorten the class name, since users will use the initializer with the package
 # name. The sample code:
 #
@@ -436,3 +533,4 @@ def __call__(self, var, block):
 Normal = NormalInitializer
 Xavier = XavierInitializer
 MSRA = MSRAInitializer
+Bilinear = BilinearInitializer
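As a sanity check on the kernel formula above, a standalone sketch (illustrative only) that computes the (K, K) interpolation kernel for factor = 2, i.e. filter_size K = 4:

    import numpy as np

    factor = 2
    K = 2 * factor - factor % 2            # filter_size = 4
    f = np.ceil(K / 2.)                    # f = 2.0
    c = (2 * f - 1 - f % 2) / (2. * f)     # c = 0.75

    x = np.arange(K)
    kernel_1d = 1 - np.abs(x / f - c)      # [0.25, 0.75, 0.75, 0.25]
    kernel = np.outer(kernel_1d, kernel_1d)
    # kernel_1d sums to factor, so the 2-D kernel sums to factor**2;
    # a stride-2 transposed convolution with this filter reproduces
    # bilinear upsampling of each channel.
    print(kernel)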

python/paddle/fluid/layers/io.py

Lines changed: 30 additions & 35 deletions

@@ -22,9 +22,9 @@
 from layer_function_generator import generate_layer_fn, templatedoc
 
 __all__ = [
-    'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'open_recordio_file',
-    'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer',
-    'random_data_generator', 'Preprocessor', 'load'
+    'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'Recv',
+    'open_recordio_file', 'open_files', 'read_file', 'shuffle', 'batch',
+    'double_buffer', 'random_data_generator', 'Preprocessor', 'load'
 ]
 
 
@@ -177,59 +177,51 @@ def complete_op(self):
         })
 
 
-def Send(endpoints, send_vars, get_vars=None):
+def Send(endpoints, send_vars, sync=True):
     """
-    Send layer
+    Send variables to the server side, and get vars from the server
+    side when the server has finished running the server-side program.
 
     Args:
-        endpoints: comma seperated IP:PORT pairs in the order
+        endpoints (str): comma separated IP:PORT pairs in the order
             of send_vars to send
-        send_vars: vars to send
-        get_vars: vars to get from server after send completes.
-
-    Send variables to the server side, and get vars from server
-    side when server have finished running server side program.
+        send_vars (list): variables to send to the server
+        sync (bool): whether to wait for the request to finish
+
     """
     assert (type(send_vars) == list)
 
     epmap = endpoints.split(",")
     endpoints = list(set(epmap))
 
     helper = LayerHelper("Send", **locals())
-    if not get_vars:
-        get_vars = []
-        for s in send_vars:
-            v = helper.create_tmp_variable(dtype=s.dtype, stop_gradient=True)
-            get_vars.append(v)
     rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
 
     helper.append_op(
         type="send",
         inputs={"X": send_vars},
-        outputs={"Out": get_vars},
         attrs={
             "endpoints": endpoints,
             "epmap": epmap,
             rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
         })
-
-    return get_vars
+    if sync:
+        helper.append_op(type="send_barrier", attrs={"endpoints": endpoints})
 
 
-def Recv(endpoints, get_vars):
+def Recv(endpoints, get_vars, sync=True):
     """
-    Recv layer
+    Receive variables from the server side.
 
     Args:
-        endpoints: comma seperated IP:PORT pairs in the order
+        endpoints (str): comma separated IP:PORT pairs in the order
             of send_vars to send
-        send_vars: vars to send
-        get_vars: vars to get from server after send completes.
+        get_vars (list): vars to get from the server after the send completes.
+        sync (bool): whether to wait for the request to finish
 
-    Send variables to the server side, and get vars from server
-    side when server have finished running server side program.
+    Returns:
+        list: list of received variables
     """
-    assert (type(send_vars) == list)
     assert (type(get_vars) == list)
 
     epmap = endpoints.split(",")
@@ -242,6 +234,9 @@ def Recv(endpoints, get_vars):
         outputs={"Out": get_vars},
         attrs={"endpoints": endpoints,
                "epmap": epmap})
+    if sync:
+        helper.append_op(type="fetch_barrier", attrs={"endpoints": endpoints})
+    return get_vars
 
 
 def monkey_patch_reader_methods(reader):
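A hedged usage sketch of the new Send/Recv signatures (the endpoint and program are hypothetical; building the program does not require a live parameter server, but running it does):

    import paddle.fluid as fluid

    prog = fluid.Program()
    with fluid.program_guard(prog):
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        # sync=True appends a send_barrier op after the send op ...
        fluid.layers.Send("127.0.0.1:6170", send_vars=[x], sync=True)
        # ... and Recv appends a fetch_barrier op and returns get_vars.
        got = fluid.layers.Recv("127.0.0.1:6170", get_vars=[x], sync=True)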
@@ -383,16 +378,16 @@ def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
         Variable: A Reader Variable from which we can get random data.
 
     Examples:
-        .. code-block:: python
 
-            reader = fluid.layers.io.random_data_generator(
-                low=0.0,
-                high=1.0,
-                shapes=[(3,224,224), (1)],
-                lod_levels=[0, 0])
+        .. code-block:: python
 
-            # Via the reader, we can use 'read_file' layer to get data:
-            image, label = fluid.layers.io.read_file(reader)
+            reader = fluid.layers.random_data_generator(
+                low=0.0,
+                high=1.0,
+                shapes=[[3,224,224], [1]],
+                lod_levels=[0, 0])
+
+            # Via the reader, we can use 'read_file' layer to get data:
+            image, label = fluid.layers.read_file(reader)
     """
     dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
     shape_concat = []
