
Commit e8cb97b

Moving unique_name to python
* Add reset and guard to unique_name
1 parent 78cc64a commit e8cb97b

File tree

11 files changed (+134, −86 lines)

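The new python/paddle/v2/fluid/unique_name.py module itself is not among the hunks shown below, so the following is only a minimal sketch of what it might contain, inferred from the commit message (a generate function plus reset and guard) and from the unique_name.generate(prefix) call sites in the diffs; the counter layout and names such as _generators are assumptions, not the actual implementation.

import collections
import contextlib

__all__ = ['generate', 'reset', 'guard']

# One monotonically increasing counter per prefix, shared process-wide.
_generators = collections.defaultdict(int)


def generate(prefix):
    """Return a name unique within the process, e.g. 'fc_0', 'fc_1', ..."""
    uid = _generators[prefix]
    _generators[prefix] += 1
    return "_".join([prefix, str(uid)])


def reset():
    """Drop all counters so generated names start from zero again."""
    _generators.clear()


@contextlib.contextmanager
def guard():
    """Run a block with fresh counters, then restore the previous state."""
    global _generators
    saved = _generators
    _generators = collections.defaultdict(int)
    try:
        yield
    finally:
        _generators = saved

Compared with the C++ UniqueIntegerGenerator removed below, this keeps the same prefix-to-counter naming behaviour but lives in Python, which is what makes reset and guard possible.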

paddle/fluid/pybind/pybind.cc

Lines changed: 0 additions & 6 deletions
@@ -48,11 +48,6 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);

 namespace paddle {
 namespace pybind {
-static size_t UniqueIntegerGenerator(const std::string &prefix) {
-  static std::unordered_map<std::string, std::atomic<size_t>> generators;
-  return generators[prefix].fetch_add(1);
-}
-
 bool IsCompiledWithCUDA() {
 #ifndef PADDLE_WITH_CUDA
   return false;
@@ -409,7 +404,6 @@ All parameter, weight, gradient are variables in Paddle.
       (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) &
           Executor::Run);

-  m.def("unique_integer", UniqueIntegerGenerator);
   m.def("init_gflags", framework::InitGflags);
   m.def("init_glog", framework::InitGLOG);
   m.def("init_devices", &framework::InitDevices);

python/paddle/v2/fluid/__init__.py

Lines changed: 6 additions & 20 deletions
@@ -39,30 +39,16 @@
 import clip
 from memory_optimization_transpiler import memory_optimize
 import profiler
+import unique_name

 Tensor = LoDTensor

 __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [
-    'io',
-    'initializer',
-    'layers',
-    'nets',
-    'optimizer',
-    'learning_rate_decay',
-    'backward',
-    'regularizer',
-    'LoDTensor',
-    'CPUPlace',
-    'CUDAPlace',
-    'Tensor',
-    'ParamAttr',
-    'WeightNormParamAttr',
-    'DataFeeder',
-    'clip',
-    'SimpleDistributeTranspiler',
-    'DistributeTranspiler',
-    'memory_optimize',
-    'profiler',
+    'io', 'initializer', 'layers', 'nets', 'optimizer', 'learning_rate_decay',
+    'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor',
+    'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip',
+    'SimpleDistributeTranspiler', 'DistributeTranspiler', 'memory_optimize',
+    'profiler', 'unique_name'
 ]

python/paddle/v2/fluid/backward.py

Lines changed: 2 additions & 1 deletion
@@ -16,6 +16,7 @@
 from . import core
 import collections
 import copy
+import unique_name

 __all__ = [
     'append_backward',
@@ -388,7 +389,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):

        for name in op_desc.output_arg_names():
            if block.desc.find_var(name.encode("ascii")):
-                new_name = "%s_%s" % (name, core.unique_integer(name))
+                new_name = unique_name.generate(name)
                op_desc.rename_output(name, new_name)
                var_map[name] = new_name

python/paddle/v2/fluid/evaluator.py

Lines changed: 3 additions & 2 deletions
@@ -15,7 +15,8 @@
 import numpy as np

 import layers
-from framework import Program, unique_name, Variable, program_guard
+from framework import Program, Variable, program_guard
+import unique_name
 from layer_helper import LayerHelper

 __all__ = [
@@ -96,7 +97,7 @@ def create_state(self, suffix, dtype, shape):

        """
        state = self.helper.create_variable(
-            name="_".join([unique_name(self.helper.name), suffix]),
+            name="_".join([unique_name.generate(self.helper.name), suffix]),
            persistable=True,
            dtype=dtype,
            shape=shape)

python/paddle/v2/fluid/framework.py

Lines changed: 2 additions & 21 deletions
@@ -20,6 +20,7 @@

 import proto.framework_pb2 as framework_pb2
 from . import core
+import unique_name

 __all__ = [
     'Block',
@@ -47,20 +48,6 @@ def grad_var_name(var_name):
     return var_name + GRAD_VAR_SUFFIX


-def unique_name(prefix):
-    """
-    Generate unique names with prefix
-
-    Args:
-        prefix(str): The prefix of return string
-
-    Returns(str): A unique string with the prefix
-
-    """
-    uid = core.unique_integer(prefix)  # unique during whole process.
-    return "_".join([prefix, str(uid)])
-
-
 def convert_np_dtype_to_dtype_(np_dtype):
     """
     Convert the data type in numpy to the data type in Paddle
@@ -175,7 +162,7 @@ def __init__(self,
        self.error_clip = error_clip

        if name is None:
-            name = Variable._unique_var_name_()
+            name = unique_name.generate('_generated_var')
        is_new_var = False
        self.desc = self.block.desc.find_var(name)

@@ -303,12 +290,6 @@ def lod_level(self):
    def type(self):
        return self.desc.type()

-    @staticmethod
-    def _unique_var_name_():
-        prefix = "_generated_var"
-        uid = core.unique_integer(prefix)  # unique during whole process.
-        return "_".join([prefix, str(uid)])
-
    def set_error_clip(self, error_clip):
        self.error_clip = error_clip

python/paddle/v2/fluid/layer_helper.py

Lines changed: 17 additions & 12 deletions
@@ -15,8 +15,8 @@
 import copy
 import itertools

-from framework import Variable, Parameter, default_main_program, default_startup_program, \
-    unique_name, dtype_is_floating
+from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
+import unique_name
 from paddle.v2.fluid.initializer import Constant, Xavier
 from param_attr import ParamAttr, WeightNormParamAttr

@@ -27,7 +27,7 @@ def __init__(self, layer_type, **kwargs):
        self.layer_type = layer_type
        name = self.kwargs.get('name', None)
        if name is None:
-            self.kwargs['name'] = unique_name(self.layer_type)
+            self.kwargs['name'] = unique_name.generate(self.layer_type)

    @property
    def name(self):
@@ -117,17 +117,20 @@ def __norm_op(x,
                      block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
-                    name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                    name=unique_name.generate(".".join(
+                        [self.name, 'weight_norm_norm'])),
                    dtype=dtype,
                    persistable=False)
            abs_out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_abs'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_abs'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
                type='abs', inputs={'X': x}, outputs={'Out': abs_out})
            pow_out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_pow'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_pow'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
@@ -136,7 +139,8 @@ def __norm_op(x,
                outputs={'Out': pow_out},
                attrs={'factor': float(p)})
            sum_out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_sum'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_sum'])),
                dtype=dtype,
                persistable=False)
            block.append_op(
@@ -161,7 +165,7 @@ def __reshape_op(x,
                         block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
-                    name=unique_name(".".join(
+                    name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_reshape'])),
                    dtype=dtype,
                    persistable=False)
@@ -178,7 +182,7 @@ def __transpose_op(x,
                           block=self.startup_program.global_block()):
            if out is None:
                out = block.create_var(
-                    name=unique_name(".".join(
+                    name=unique_name.generate(".".join(
                        [self.name, 'weight_norm_transpose'])),
                    dtype=dtype,
                    persistable=False)
@@ -196,7 +200,8 @@ def __norm_except_dim(x,
            """Computes the norm over all dimensions except dim"""
            if out is None:
                out = block.create_var(
-                    name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                    name=unique_name.generate(".".join(
+                        [self.name, 'weight_norm_norm'])),
                    dtype=dtype,
                    persistable=False)
            if dim is None:
@@ -286,7 +291,7 @@ def create_parameter(self,
        assert isinstance(attr, ParamAttr)
        suffix = 'b' if is_bias else 'w'
        if attr.name is None:
-            attr.name = unique_name(".".join([self.name, suffix]))
+            attr.name = unique_name.generate(".".join([self.name, suffix]))

        if default_initializer is None and attr.initializer is None:
            if is_bias:
@@ -316,7 +321,7 @@ def get_parameter(self, name):

    def create_tmp_variable(self, dtype, stop_gradient=False):
        return self.main_program.current_block().create_var(
-            name=unique_name(".".join([self.name, 'tmp'])),
+            name=unique_name.generate(".".join([self.name, 'tmp'])),
            dtype=dtype,
            persistable=False,
            stop_gradient=stop_gradient)

python/paddle/v2/fluid/layers/control_flow.py

Lines changed: 20 additions & 16 deletions
@@ -428,7 +428,8 @@ def memory(self,
                raise ValueError(
                    "if init is None, memory at least need shape and batch_ref")
            parent_block = self.parent_block()
-            var_name = unique_name("@".join([self.helper.name, "memory_boot"]))
+            var_name = unique_name.generate("@".join(
+                [self.helper.name, "memory_boot"]))
            boot_var = parent_block.create_var(
                name=var_name,
                shape=shape,
@@ -450,7 +451,7 @@ def memory(self,
            return self.memory(init=boot_var)
        else:
            pre_mem = self.helper.create_variable(
-                name=unique_name("@".join([self.helper.name, "mem"])),
+                name=unique_name.generate("@".join([self.helper.name, "mem"])),
                dtype=init.dtype,
                shape=init.shape)
            self.memories[pre_mem.name] = StaticRNNMemoryLink(
@@ -709,7 +710,7 @@ def lod_rank_table(x, level=0):
    helper = LayerHelper("lod_rank_table", **locals())
    table = helper.create_variable(
        type=core.VarDesc.VarType.LOD_RANK_TABLE,
-        name=unique_name("lod_rank_table"))
+        name=unique_name.generate("lod_rank_table"))
    helper.append_op(
        type='lod_rank_table',
        inputs={'X': x},
@@ -807,7 +808,7 @@ def lod_tensor_to_array(x, table):
    """
    helper = LayerHelper("lod_tensor_to_array", **locals())
    array = helper.create_variable(
-        name=unique_name("lod_tensor_to_array"),
+        name=unique_name.generate("lod_tensor_to_array"),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=x.dtype)
    helper.append_op(
@@ -1264,11 +1265,11 @@ def input(self, x):
        if id(x) not in self.input_table:
            parent_block = self.parent_block()
            out_true = parent_block.create_var(
-                name=unique_name('ifelse_input' + self.helper.name),
+                name=unique_name.generate('ifelse_input' + self.helper.name),
                dtype=x.dtype)

            out_false = parent_block.create_var(
-                name=unique_name('ifelse_input' + self.helper.name),
+                name=unique_name.generate('ifelse_input' + self.helper.name),
                dtype=x.dtype)
            parent_block.append_op(
                type='split_lod_tensor',
@@ -1310,7 +1311,8 @@ def output(self, *outs):
                raise TypeError("Each output should be a variable")
            # create outside tensor
            outside_out = parent_block.create_var(
-                name=unique_name("_".join([self.helper.name, 'output'])),
+                name=unique_name.generate("_".join(
+                    [self.helper.name, 'output'])),
                dtype=each_out.dtype)
            out_table.append(outside_out)

@@ -1373,15 +1375,16 @@ def step_input(self, x):
        parent_block = self._parent_block_()
        if self.lod_rank_table is None:
            self.lod_rank_table = parent_block.create_var(
-                name=unique_name('lod_rank_table'),
+                name=unique_name.generate('lod_rank_table'),
                type=core.VarDesc.VarType.LOD_RANK_TABLE)
            self.lod_rank_table.stop_gradient = True
            parent_block.append_op(
                type='lod_rank_table',
                inputs={"X": x},
                outputs={"Out": self.lod_rank_table})
            self.max_seq_len = parent_block.create_var(
-                name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64')
+                name=unique_name.generate('dynamic_rnn_max_seq_len'),
+                dtype='int64')
            self.max_seq_len.stop_gradient = False
            parent_block.append_op(
                type='max_sequence_len',
@@ -1395,7 +1398,7 @@ def step_input(self, x):
                outputs={'Out': self.cond})

        input_array = parent_block.create_var(
-            name=unique_name('dynamic_rnn_input_array'),
+            name=unique_name.generate('dynamic_rnn_input_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
        self.input_array.append((input_array, x.dtype))
@@ -1416,7 +1419,7 @@ def static_input(self, x):
                "static_input() must be called after step_input().")
        parent_block = self._parent_block_()
        x_reordered = parent_block.create_var(
-            name=unique_name("dynamic_rnn_static_input_reordered"),
+            name=unique_name.generate("dynamic_rnn_static_input_reordered"),
            type=core.VarDesc.VarType.LOD_TENSOR,
            dtype=x.dtype)
        parent_block.append_op(
@@ -1478,7 +1481,7 @@ def memory(self,
                    'invoked before '
                    'memory(init=init, need_reordered=True, ...).')
                init_reordered = parent_block.create_var(
-                    name=unique_name('dynamic_rnn_mem_init_reordered'),
+                    name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
                    type=core.VarDesc.VarType.LOD_TENSOR,
                    dtype=init.dtype)
                parent_block.append_op(
@@ -1490,7 +1493,7 @@ def memory(self,
                    outputs={'Out': [init_reordered]})
                init_tensor = init_reordered
            mem_array = parent_block.create_var(
-                name=unique_name('dynamic_rnn_mem_array'),
+                name=unique_name.generate('dynamic_rnn_mem_array'),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=init.dtype)
            parent_block.append_op(
@@ -1510,9 +1513,10 @@ def memory(self,
            )
            parent_block = self._parent_block_()
            init = parent_block.create_var(
-                name=unique_name('mem_init'), dtype=dtype)
+                name=unique_name.generate('mem_init'), dtype=dtype)
            arr, dtype = self.input_array[0]
-            in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype)
+            in0 = parent_block.create_var(
+                name=unique_name.generate('in0'), dtype=dtype)
            parent_block.append_op(
                type='read_from_array',
                inputs={'X': [arr],
@@ -1551,7 +1555,7 @@ def output(self, *outputs):
        parent_block = self._parent_block_()
        for each in outputs:
            outside_array = parent_block.create_var(
-                name=unique_name("_".join(
+                name=unique_name.generate("_".join(
                    [self.helper.name, "output_array", each.name])),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=each.dtype)

python/paddle/v2/fluid/layers/device.py

Lines changed: 2 additions & 1 deletion
@@ -25,7 +25,8 @@
 @autodoc()
 def get_places(device_count=None, device_type=None):
     helper = LayerHelper('get_places', **locals())
-    out_places = helper.create_variable(name=unique_name(helper.name + ".out"))
+    out_places = helper.create_variable(
+        name=unique_name.generate(helper.name + ".out"))
     attrs = dict()
     if device_count is not None:
         attrs['device_count'] = int(device_count)

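None of the hunks above exercise the reset/guard behaviour named in the commit message; a hypothetical usage sketch, assuming the API behaves roughly like the module sketched near the top of this page:

import paddle.v2.fluid as fluid

# guard() is assumed to give each block a fresh, isolated counter table,
# so two identical programs built in separate guards get identical names.
with fluid.unique_name.guard():
    first = fluid.unique_name.generate("fc")   # e.g. "fc_0"

with fluid.unique_name.guard():
    second = fluid.unique_name.generate("fc")  # counters restart, "fc_0" again

assert first == second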
0 commit comments
