
Commit eebfb71

Merge pull request #8524 from reyoung/feature/unique_name_guard
Moving unique_name to python
2 parents: 044fb0f + 63563b2

12 files changed: +173 −66 lines changed

paddle/fluid/pybind/pybind.cc

Lines changed: 0 additions & 6 deletions
@@ -49,11 +49,6 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);
 
 namespace paddle {
 namespace pybind {
-static size_t UniqueIntegerGenerator(const std::string &prefix) {
-  static std::unordered_map<std::string, std::atomic<size_t>> generators;
-  return generators[prefix].fetch_add(1);
-}
-
 bool IsCompiledWithCUDA() {
 #ifndef PADDLE_WITH_CUDA
   return false;
@@ -410,7 +405,6 @@ All parameter, weight, gradient are variables in Paddle.
           (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) &
               Executor::Run);
 
-  m.def("unique_integer", UniqueIntegerGenerator);
  m.def("init_gflags", framework::InitGflags);
  m.def("init_glog", framework::InitGLOG);
  m.def("init_devices", &framework::InitDevices);

python/paddle/v2/fluid/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -39,6 +39,7 @@
 import clip
 from memory_optimization_transpiler import memory_optimize
 import profiler
+import unique_name
 
 Tensor = LoDTensor
 
@@ -63,6 +64,7 @@
     'DistributeTranspiler',
     'memory_optimize',
     'profiler',
+    'unique_name',
 ]
 
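Listing unique_name in __all__ exposes the new module on the fluid package itself, so user code and the modules touched below can reach the generator the same way. A small usage sketch (the prefix is only an example):

import paddle.v2.fluid as fluid

# Each call with the same prefix returns the next name in the sequence.
print(fluid.unique_name.generate("batch_norm"))  # e.g. "batch_norm_0"
print(fluid.unique_name.generate("batch_norm"))  # e.g. "batch_norm_1"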

python/paddle/v2/fluid/backward.py

Lines changed: 2 additions & 1 deletion
@@ -16,6 +16,7 @@
 from . import core
 import collections
 import copy
+import unique_name
 
 __all__ = [
     'append_backward',
@@ -391,7 +392,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
 
         for name in op_desc.output_arg_names():
             if block.desc.find_var(name.encode("ascii")):
-                new_name = "%s_%s" % (name, core.unique_integer(name))
+                new_name = unique_name.generate(name)
                 op_desc.rename_output(name, new_name)
                 var_map[name] = new_name
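In _rename_grad_, an output whose name already collides with a variable in the block now gets a fresh name by feeding the existing name itself to the generator as the prefix. A hedged illustration (the gradient name is made up):

import paddle.v2.fluid.unique_name as unique_name

# A duplicated gradient output such as "fc_0.w_0@GRAD" is renamed by appending
# a counter; var_map records old -> new so later ops can be rewired.
old_name = "fc_0.w_0@GRAD"                 # illustrative name
new_name = unique_name.generate(old_name)  # e.g. "fc_0.w_0@GRAD_0"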

python/paddle/v2/fluid/evaluator.py

Lines changed: 3 additions & 2 deletions
@@ -15,7 +15,8 @@
 import numpy as np
 
 import layers
-from framework import Program, unique_name, Variable, program_guard
+from framework import Program, Variable, program_guard
+import unique_name
 from layer_helper import LayerHelper
 
 __all__ = [
@@ -96,7 +97,7 @@ def create_state(self, suffix, dtype, shape):
 
         """
         state = self.helper.create_variable(
-            name="_".join([unique_name(self.helper.name), suffix]),
+            name="_".join([unique_name.generate(self.helper.name), suffix]),
             persistable=True,
             dtype=dtype,
             shape=shape)

python/paddle/v2/fluid/framework.py

Lines changed: 2 additions & 21 deletions
@@ -20,6 +20,7 @@
 
 import proto.framework_pb2 as framework_pb2
 from . import core
+import unique_name
 
 __all__ = [
     'Block',
@@ -47,20 +48,6 @@ def grad_var_name(var_name):
     return var_name + GRAD_VAR_SUFFIX
 
 
-def unique_name(prefix):
-    """
-    Generate unique names with prefix
-
-    Args:
-        prefix(str): The prefix of return string
-
-    Returns(str): A unique string with the prefix
-
-    """
-    uid = core.unique_integer(prefix)  # unique during whole process.
-    return "_".join([prefix, str(uid)])
-
-
 def convert_np_dtype_to_dtype_(np_dtype):
     """
     Convert the data type in numpy to the data type in Paddle
@@ -175,7 +162,7 @@ def __init__(self,
         self.error_clip = error_clip
 
         if name is None:
-            name = Variable._unique_var_name_()
+            name = unique_name.generate('_generated_var')
         is_new_var = False
         self.desc = self.block.desc.find_var(name)
 
@@ -307,12 +294,6 @@ def lod_level(self):
     def type(self):
         return self.desc.type()
 
-    @staticmethod
-    def _unique_var_name_():
-        prefix = "_generated_var"
-        uid = core.unique_integer(prefix)  # unique during whole process.
-        return "_".join([prefix, str(uid)])
-
     def set_error_clip(self, error_clip):
         self.error_clip = error_clip
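With the module-level framework.unique_name() and Variable._unique_var_name_() removed, every call site switches to the new module; the rename is mechanical. A hedged before/after sketch (the prefix string is only an example):

# Before this commit: a helper in framework.py backed by the C++ counter
# exposed to Python as core.unique_integer.
#   from framework import unique_name
#   name = unique_name("fc.w")            # e.g. "fc.w_0"

# After this commit: the counter lives in its own Python module.
import paddle.v2.fluid.unique_name as unique_name

name = unique_name.generate("fc.w")       # e.g. "fc.w_0"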

python/paddle/v2/fluid/layer_helper.py

Lines changed: 17 additions & 12 deletions
@@ -15,8 +15,8 @@
 import copy
 import itertools
 
-from framework import Variable, Parameter, default_main_program, default_startup_program, \
-    unique_name, dtype_is_floating
+from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
+import unique_name
 from paddle.v2.fluid.initializer import Constant, Xavier
 from param_attr import ParamAttr, WeightNormParamAttr
 
@@ -27,7 +27,7 @@ def __init__(self, layer_type, **kwargs):
         self.layer_type = layer_type
         name = self.kwargs.get('name', None)
         if name is None:
-            self.kwargs['name'] = unique_name(self.layer_type)
+            self.kwargs['name'] = unique_name.generate(self.layer_type)
 
     @property
     def name(self):
@@ -117,17 +117,20 @@ def __norm_op(x,
                       block=self.startup_program.global_block()):
             if out is None:
                 out = block.create_var(
-                    name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                    name=unique_name.generate(".".join(
+                        [self.name, 'weight_norm_norm'])),
                     dtype=dtype,
                     persistable=False)
             abs_out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_abs'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_abs'])),
                 dtype=dtype,
                 persistable=False)
             block.append_op(
                 type='abs', inputs={'X': x}, outputs={'Out': abs_out})
             pow_out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_pow'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_pow'])),
                 dtype=dtype,
                 persistable=False)
             block.append_op(
@@ -136,7 +139,8 @@ def __norm_op(x,
                 outputs={'Out': pow_out},
                 attrs={'factor': float(p)})
             sum_out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_sum'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_sum'])),
                 dtype=dtype,
                 persistable=False)
             block.append_op(
@@ -161,7 +165,7 @@ def __reshape_op(x,
                          block=self.startup_program.global_block()):
             if out is None:
                 out = block.create_var(
-                    name=unique_name(".".join(
+                    name=unique_name.generate(".".join(
                         [self.name, 'weight_norm_reshape'])),
                     dtype=dtype,
                     persistable=False)
@@ -178,7 +182,7 @@ def __transpose_op(x,
                            block=self.startup_program.global_block()):
             if out is None:
                 out = block.create_var(
-                    name=unique_name(".".join(
+                    name=unique_name.generate(".".join(
                         [self.name, 'weight_norm_transpose'])),
                     dtype=dtype,
                     persistable=False)
@@ -196,7 +200,8 @@ def __norm_except_dim(x,
             """Computes the norm over all dimensions except dim"""
             if out is None:
                 out = block.create_var(
-                    name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                    name=unique_name.generate(".".join(
+                        [self.name, 'weight_norm_norm'])),
                     dtype=dtype,
                     persistable=False)
             if dim is None:
@@ -286,7 +291,7 @@ def create_parameter(self,
         assert isinstance(attr, ParamAttr)
         suffix = 'b' if is_bias else 'w'
         if attr.name is None:
-            attr.name = unique_name(".".join([self.name, suffix]))
+            attr.name = unique_name.generate(".".join([self.name, suffix]))
 
         if default_initializer is None and attr.initializer is None:
             if is_bias:
@@ -316,7 +321,7 @@ def get_parameter(self, name):
 
     def create_tmp_variable(self, dtype, stop_gradient=False):
         return self.main_program.current_block().create_var(
-            name=unique_name(".".join([self.name, 'tmp'])),
+            name=unique_name.generate(".".join([self.name, 'tmp'])),
             dtype=dtype,
             persistable=False,
             stop_gradient=stop_gradient)
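
LayerHelper builds the prefix it hands to the generator out of the layer name plus a role suffix ('w' or 'b' for parameters, 'tmp' for temporaries), so names stay grouped per layer. A hedged illustration of the resulting names (the layer type and the exact outputs are examples, not taken from a real run):

import paddle.v2.fluid.unique_name as unique_name

layer = unique_name.generate("fc")                       # e.g. "fc_0"
weight = unique_name.generate(".".join([layer, "w"]))    # e.g. "fc_0.w_0"
bias = unique_name.generate(".".join([layer, "b"]))      # e.g. "fc_0.b_0"
tmp = unique_name.generate(".".join([layer, "tmp"]))     # e.g. "fc_0.tmp_0"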

python/paddle/v2/fluid/layers/control_flow.py

Lines changed: 20 additions & 16 deletions
@@ -428,7 +428,8 @@ def memory(self,
                 raise ValueError(
                     "if init is None, memory at least need shape and batch_ref")
             parent_block = self.parent_block()
-            var_name = unique_name("@".join([self.helper.name, "memory_boot"]))
+            var_name = unique_name.generate("@".join(
+                [self.helper.name, "memory_boot"]))
             boot_var = parent_block.create_var(
                 name=var_name,
                 shape=shape,
@@ -450,7 +451,7 @@ def memory(self,
             return self.memory(init=boot_var)
         else:
             pre_mem = self.helper.create_variable(
-                name=unique_name("@".join([self.helper.name, "mem"])),
+                name=unique_name.generate("@".join([self.helper.name, "mem"])),
                 dtype=init.dtype,
                 shape=init.shape)
             self.memories[pre_mem.name] = StaticRNNMemoryLink(
@@ -710,7 +711,7 @@ def lod_rank_table(x, level=0):
     helper = LayerHelper("lod_rank_table", **locals())
     table = helper.create_variable(
         type=core.VarDesc.VarType.LOD_RANK_TABLE,
-        name=unique_name("lod_rank_table"))
+        name=unique_name.generate("lod_rank_table"))
     helper.append_op(
         type='lod_rank_table',
         inputs={'X': x},
@@ -808,7 +809,7 @@ def lod_tensor_to_array(x, table):
     """
     helper = LayerHelper("lod_tensor_to_array", **locals())
     array = helper.create_variable(
-        name=unique_name("lod_tensor_to_array"),
+        name=unique_name.generate("lod_tensor_to_array"),
         type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
         dtype=x.dtype)
     helper.append_op(
@@ -1265,11 +1266,11 @@ def input(self, x):
         if id(x) not in self.input_table:
             parent_block = self.parent_block()
             out_true = parent_block.create_var(
-                name=unique_name('ifelse_input' + self.helper.name),
+                name=unique_name.generate('ifelse_input' + self.helper.name),
                 dtype=x.dtype)
 
             out_false = parent_block.create_var(
-                name=unique_name('ifelse_input' + self.helper.name),
+                name=unique_name.generate('ifelse_input' + self.helper.name),
                 dtype=x.dtype)
             parent_block.append_op(
                 type='split_lod_tensor',
@@ -1311,7 +1312,8 @@ def output(self, *outs):
                 raise TypeError("Each output should be a variable")
             # create outside tensor
             outside_out = parent_block.create_var(
-                name=unique_name("_".join([self.helper.name, 'output'])),
+                name=unique_name.generate("_".join(
+                    [self.helper.name, 'output'])),
                 dtype=each_out.dtype)
             out_table.append(outside_out)
 
@@ -1374,15 +1376,16 @@ def step_input(self, x):
         parent_block = self._parent_block_()
         if self.lod_rank_table is None:
             self.lod_rank_table = parent_block.create_var(
-                name=unique_name('lod_rank_table'),
+                name=unique_name.generate('lod_rank_table'),
                 type=core.VarDesc.VarType.LOD_RANK_TABLE)
             self.lod_rank_table.stop_gradient = True
             parent_block.append_op(
                 type='lod_rank_table',
                 inputs={"X": x},
                 outputs={"Out": self.lod_rank_table})
             self.max_seq_len = parent_block.create_var(
-                name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64')
+                name=unique_name.generate('dynamic_rnn_max_seq_len'),
+                dtype='int64')
             self.max_seq_len.stop_gradient = False
             parent_block.append_op(
                 type='max_sequence_len',
@@ -1396,7 +1399,7 @@ def step_input(self, x):
             outputs={'Out': self.cond})
 
         input_array = parent_block.create_var(
-            name=unique_name('dynamic_rnn_input_array'),
+            name=unique_name.generate('dynamic_rnn_input_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
         self.input_array.append((input_array, x.dtype))
@@ -1417,7 +1420,7 @@ def static_input(self, x):
                 "static_input() must be called after step_input().")
         parent_block = self._parent_block_()
         x_reordered = parent_block.create_var(
-            name=unique_name("dynamic_rnn_static_input_reordered"),
+            name=unique_name.generate("dynamic_rnn_static_input_reordered"),
             type=core.VarDesc.VarType.LOD_TENSOR,
             dtype=x.dtype)
         parent_block.append_op(
@@ -1479,7 +1482,7 @@ def memory(self,
                         'invoked before '
                         'memory(init=init, need_reordered=True, ...).')
                 init_reordered = parent_block.create_var(
-                    name=unique_name('dynamic_rnn_mem_init_reordered'),
+                    name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
                     type=core.VarDesc.VarType.LOD_TENSOR,
                     dtype=init.dtype)
                 parent_block.append_op(
@@ -1491,7 +1494,7 @@ def memory(self,
                     outputs={'Out': [init_reordered]})
                 init_tensor = init_reordered
             mem_array = parent_block.create_var(
-                name=unique_name('dynamic_rnn_mem_array'),
+                name=unique_name.generate('dynamic_rnn_mem_array'),
                 type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                 dtype=init.dtype)
             parent_block.append_op(
@@ -1511,9 +1514,10 @@ def memory(self,
             )
             parent_block = self._parent_block_()
             init = parent_block.create_var(
-                name=unique_name('mem_init'), dtype=dtype)
+                name=unique_name.generate('mem_init'), dtype=dtype)
             arr, dtype = self.input_array[0]
-            in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype)
+            in0 = parent_block.create_var(
+                name=unique_name.generate('in0'), dtype=dtype)
             parent_block.append_op(
                 type='read_from_array',
                 inputs={'X': [arr],
@@ -1552,7 +1556,7 @@ def output(self, *outputs):
         parent_block = self._parent_block_()
         for each in outputs:
             outside_array = parent_block.create_var(
-                name=unique_name("_".join(
+                name=unique_name.generate("_".join(
                     [self.helper.name, "output_array", each.name])),
                 type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                 dtype=each.dtype)

python/paddle/v2/fluid/layers/device.py

Lines changed: 2 additions & 1 deletion
@@ -25,7 +25,8 @@
 @autodoc()
 def get_places(device_count=None, device_type=None):
     helper = LayerHelper('get_places', **locals())
-    out_places = helper.create_variable(name=unique_name(helper.name + ".out"))
+    out_places = helper.create_variable(
+        name=unique_name.generate(helper.name + ".out"))
     attrs = dict()
     if device_count is not None:
         attrs['device_count'] = int(device_count)

python/paddle/v2/fluid/layers/math_op_patch.py

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@
 
 def monkey_patch_variable():
     def unique_tmp_name():
-        return unique_name("tmp")
+        return unique_name.generate("tmp")
 
     def safe_get_dtype(var):
         try:
