@@ -249,7 +249,7 @@ cpu_places

Creates a list of ``fluid.CPUPlace`` objects.

- If ``device_count`` is None, the number of devices is determined by the environment variable ``CPU_NUM``. If ``CPU_NUM`` is not set, the number of devices is determined by ``multiprocessing.cpu_count()``.
+ If ``device_count`` is None, the number of devices is determined by the environment variable ``CPU_NUM``. If ``CPU_NUM`` is not set, the number of devices defaults to 1, i.e. ``CPU_NUM`` = 1.

Parameters:
    - **device_count** (None|int) - the number of devices
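A minimal sketch of the rule above (an illustration, assuming ``CPU_NUM`` is read from the environment each time ``fluid.cpu_places`` is called):

.. code-block:: python

    import os
    import paddle.fluid as fluid

    # With CPU_NUM unset, the count now defaults to 1 (the changed line above).
    places = fluid.cpu_places()                  # a list with one CPUPlace

    # With CPU_NUM set, the environment variable decides the count.
    os.environ['CPU_NUM'] = '4'
    places = fluid.cpu_places()                  # a list of 4 CPUPlace objects

    # An explicit device_count bypasses CPU_NUM entirely.
    places = fluid.cpu_places(device_count=2)    # a list of 2 CPUPlace objects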
@@ -262,6 +262,7 @@ cpu_places

.. code-block:: python

+ import paddle.fluid as fluid
cpu_places = fluid.cpu_places()
@@ -279,6 +280,7 @@ CPUPlace is a device descriptor. It represents a CPU, and the memory a CPUPlace corresponds

.. code-block:: python

+ import paddle.fluid as fluid
cpu_place = fluid.CPUPlace()
@@ -397,6 +399,7 @@ cuda_pinned_places

.. code-block:: python

+ import paddle.fluid as fluid
cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()
# or
cuda_pinned_places = fluid.cuda_pinned_places(1)
@@ -428,6 +431,7 @@ cuda_places

.. code-block:: python

+ import paddle.fluid as fluid
cuda_places = fluid.cuda_places()

.. _cn_api_fluid_CUDAPinnedPlace:
@@ -443,6 +447,7 @@ CUDAPinnedPlace is a device descriptor; the memory it refers to can be accessed by the GPU

.. code-block:: python

+ import paddle.fluid as fluid
place = fluid.CUDAPinnedPlace()

.. _cn_api_fluid_CUDAPlace:
@@ -458,6 +463,7 @@ CUDAPlace is a device descriptor; it represents a GPU, and each CUDAPlace

.. code-block:: python

+ import paddle.fluid as fluid
gpu_place = fluid.CUDAPlace(0)
@@ -482,6 +488,7 @@ A DataFeedDesc should be initialized from a valid protobuf message on disk.

.. code-block:: python

+ import paddle.fluid as fluid
f = open("data.proto", "w")
print >> f, 'name: "MultiSlotDataFeed"'
print >> f, 'batch_size: 2'
@@ -508,6 +515,7 @@ A DataFeedDesc can also be changed at runtime. Once you are familiar with the meaning of each field

.. code-block:: python

+ import paddle.fluid as fluid
data_feed = fluid.DataFeedDesc('data.proto')
data_feed.set_batch_size(128)
data_feed.set_dense_slots('wd')    # the slot named 'wd' will be set to dense
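A short, self-contained continuation of this runtime-modification example (a sketch; ``set_use_slots`` and ``desc`` are assumed to be available alongside the setters shown, and the setters are given lists of slot names):

.. code-block:: python

    import paddle.fluid as fluid

    # Assumes data.proto was written as in the earlier example.
    data_feed = fluid.DataFeedDesc('data.proto')
    data_feed.set_batch_size(128)
    data_feed.set_dense_slots(['wd'])   # slots named here are fed as dense tensors
    data_feed.set_use_slots(['wd'])     # only slots named here are fed to the model
    print(data_feed.desc())             # inspect the resulting protobuf text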
@@ -534,6 +542,7 @@ A DataFeedDesc can also be changed at runtime. Once you are familiar with the meaning of each field

.. code-block:: python

+ import paddle.fluid as fluid
f = open("data.proto", "w")
print >> f, 'name: "MultiSlotDataFeed"'
print >> f, 'batch_size: 2'
@@ -569,6 +578,7 @@ A DataFeedDesc can also be changed at runtime. Once you are familiar with the meaning of each field

.. code-block:: python

+ import paddle.fluid as fluid
f = open("data.proto", "w")
print >> f, 'name: "MultiSlotDataFeed"'
print >> f, 'batch_size: 2'
@@ -606,6 +616,7 @@ A DataFeedDesc can also be changed at runtime. Once you are familiar with the meaning of each field

.. code-block:: python

+ import paddle.fluid as fluid
f = open("data.proto", "w")
print >> f, 'name: "MultiSlotDataFeed"'
print >> f, 'batch_size: 2'
@@ -642,6 +653,7 @@ A DataFeedDesc can also be changed at runtime. Once you are familiar with the meaning of each field

.. code-block:: python

+ import paddle.fluid as fluid
f = open("data.proto", "w")
print >> f, 'name: "MultiSlotDataFeed"'
print >> f, 'batch_size: 2'
@@ -993,6 +1005,7 @@ DistributeTranspiler

.. code-block:: python

+ import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
@@ -1053,6 +1066,7 @@ DistributeTranspiler

.. code-block:: python

+ import paddle.fluid as fluid
transpiler = fluid.DistributeTranspiler()
transpiler.transpile(
    trainer_id=0,
@@ -1162,6 +1176,7 @@ DistributeTranspiler

.. code-block:: python

+ import paddle.fluid as fluid
pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
trainer_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
current_endpoint = "192.168.0.1:6174"
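A minimal sketch of how these endpoints are typically consumed on the parameter-server side (assuming the ``transpile``, ``get_pserver_program`` and ``get_startup_program`` methods of ``DistributeTranspiler``, and that a main program containing an optimizer has already been built):

.. code-block:: python

    import paddle.fluid as fluid

    pserver_endpoints = "192.168.0.1:6174,192.168.0.2:6174"
    current_endpoint = "192.168.0.1:6174"

    t = fluid.DistributeTranspiler()
    # Rewrite the default main program for distributed training.
    t.transpile(trainer_id=0, pservers=pserver_endpoints, trainers=2)

    # On a pserver node, fetch the program this endpoint should run,
    # plus a matching startup program.
    pserver_program = t.get_pserver_program(current_endpoint)
    pserver_startup = t.get_startup_program(current_endpoint, pserver_program)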
@@ -1207,6 +1222,7 @@ the minimum number of elements split out of a block

.. code-block:: python

+ import paddle.fluid as fluid
config = fluid.DistributeTranspilerConfig()
config.slice_var_up = True
@@ -1226,6 +1242,7 @@ ExecutionStrategy

.. code-block:: python

+ import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
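For context, a small sketch of the strategy object itself (``num_threads`` and ``num_iteration_per_drop_scope`` are assumed fields; the strategy is later handed to ``fluid.ParallelExecutor`` through its ``exec_strategy`` argument):

.. code-block:: python

    import paddle.fluid as fluid

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 4                    # worker threads for op execution
    exec_strategy.num_iteration_per_drop_scope = 10  # how often temporary scopes are cleared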
@@ -1578,6 +1595,7 @@ in_dygraph_mode

.. code-block:: python

+ import paddle.fluid as fluid
if fluid.in_dygraph_mode():
    pass
@@ -1875,6 +1893,7 @@ name_scope

.. code-block:: python

+ import paddle.fluid as fluid
with fluid.name_scope("s1"):
    a = fluid.layers.data(name='data', shape=[1], dtype='int32')
    b = a + 1
@@ -2043,6 +2062,7 @@ ParallelExecutor

.. code-block:: python

+ import paddle.fluid as fluid
pe = fluid.ParallelExecutor(use_cuda=use_cuda,
                            loss_name=avg_cost.name,
                            main_program=fluid.default_main_program())
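The snippet above references ``use_cuda`` and ``avg_cost`` defined elsewhere; a self-contained sketch of the same pattern (the network, names and shapes are illustrative, not from the diff):

.. code-block:: python

    import numpy
    import paddle.fluid as fluid

    # A small linear-regression network, mirroring the earlier examples.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    avg_cost = fluid.layers.mean(cost)
    fluid.optimizer.SGD(learning_rate=0.01).minimize(avg_cost)

    # Initialize parameters before constructing the ParallelExecutor.
    use_cuda = False
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    fluid.Executor(place).run(fluid.default_startup_program())

    pe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                loss_name=avg_cost.name,
                                main_program=fluid.default_main_program())

    # One training step on random data.
    feed = {'x': numpy.random.random((8, 13)).astype('float32'),
            'y': numpy.random.random((8, 1)).astype('float32')}
    loss, = pe.run(feed=feed, fetch_list=[avg_cost.name])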
@@ -2211,6 +2231,7 @@ Program

.. code-block:: python

+ import paddle.fluid as fluid
test_program = fluid.default_main_program().clone(for_test=True)
optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize()
@@ -2538,6 +2559,7 @@ scope_guard

.. code-block:: python

+ import paddle.fluid as fluid
import numpy

new_scope = fluid.Scope()
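A completed sketch of the usual ``scope_guard`` pattern (inside the ``with`` block, ``global_scope()`` resolves to the guarded scope):

.. code-block:: python

    import numpy
    import paddle.fluid as fluid

    new_scope = fluid.Scope()
    with fluid.scope_guard(new_scope):
        # Inside the guard, global_scope() is new_scope.
        var = fluid.global_scope().var("data")
        var.get_tensor().set(numpy.ones((2, 2), dtype='float32'), fluid.CPUPlace())

    # The variable persists in new_scope after the guard exits.
    print(numpy.array(new_scope.find_var("data").get_tensor()))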