@@ -47,7 +47,7 @@ paddle.fluid.AsyncExecutor.run (ArgSpec(args=['self', 'program', 'data_feed', 'f
paddle.fluid.AsyncExecutor.save_model (ArgSpec(args=['self', 'save_path'], varargs=None, keywords=None, defaults=None), ('document', 'c8ac0dfcb3b187aba25d03af7fea56b2'))
paddle.fluid.AsyncExecutor.stop (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '5f23d043607bb5d55e466ec3f578e093'))
paddle.fluid.CompiledProgram.__init__ (ArgSpec(args=['self', 'program_or_graph'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', 'dbf542d1384741650a1238ddb05daa37'))
+paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', '5e8cca4619a5d7c3280fb3cae7021b14'))
paddle.fluid.CompiledProgram.with_inference_optimize (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=None), ('document', '9e5b009d850191a010e859189c127fd8'))
paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.ExecutionStrategy) -> None
paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None
@@ -61,8 +61,8 @@ paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program'
paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '582d87b8df75a5a639a107db8ff86f9c'))
paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a5255386075dac3c75b7058254fcdcb'))
-paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', 'b3d72958b2568aae3f90f72abdcb7d1a'))
-paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd10224fef1095247063b6976da793021'))
+paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
+paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a3fefec8bacd6ce83f49906a9d05e779'))
paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '7abd9cf7d695bab5bb6cf7ded5903cb2'))
paddle.fluid.io.PyReader.decorate_sample_list_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'faef298f73e91aedcfaf5d184f3109b7'))
paddle.fluid.io.PyReader.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'ff1cc1e2beb8824d453656c72c28ddfb'))
@@ -521,7 +521,7 @@ paddle.fluid.unique_name.guard (ArgSpec(args=['new_generator'], varargs=None, ke
paddle.fluid.recordio_writer.convert_reader_to_recordio_file (ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', '65c7523e86f0c50bb729b01667f36310'))
paddle.fluid.recordio_writer.convert_reader_to_recordio_files (ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', 'bc643f0f5f1b9db57ff0d8a57d379bd7'))
paddle.fluid.Scope Scope() -> paddle.fluid.core._Scope
-paddle.reader.cache (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '83b94750674c6a04b5f96599d4bf3105'))
+paddle.reader.cache (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '1676886070eb607cb608f7ba47be0d3c'))
paddle.reader.map_readers (ArgSpec(args=['func'], varargs='readers', keywords=None, defaults=None), ('document', '77cbadb09df588e21e5cc0819b69c87d'))
paddle.reader.buffered (ArgSpec(args=['reader', 'size'], varargs=None, keywords=None, defaults=None), ('document', '0d6186f109feceb99f60ec50a0a624cb'))
paddle.reader.compose (ArgSpec(args=[], varargs='readers', keywords='kwargs', defaults=None), ('document', '884291104e1c3f37f33aae44b7deeb0d'))