4 files changed: +13 -10 lines

paddle/fluid/framework/threadpool.cc
@@ -14,8 +14,12 @@
 
 #include "paddle/fluid/framework/threadpool.h"
 
+#include "gflags/gflags.h"
 #include "paddle/fluid/platform/enforce.h"
 
+DEFINE_int32(io_threadpool_size, 100,
+             "number of threads used for doing IO, default 100");
+
 namespace paddle {
 namespace framework {
 
@@ -94,15 +98,15 @@ void ThreadPool::TaskLoop() {
 std::unique_ptr<ThreadPool> MultiStreamThreadPool::io_threadpool_(nullptr);
 std::once_flag MultiStreamThreadPool::io_init_flag_;
 
-MultiStreamThreadPool* MultiStreamThreadPool::GetInstanceIO() {
+ThreadPool* MultiStreamThreadPool::GetInstanceIO() {
   std::call_once(io_init_flag_, &MultiStreamThreadPool::InitIO);
-  return static_cast<MultiStreamThreadPool*>(io_threadpool_.get());
+  return io_threadpool_.get();
 }
 
 void MultiStreamThreadPool::InitIO() {
   if (io_threadpool_.get() == nullptr) {
     // TODO(typhoonzero1986): make this configurable
-    io_threadpool_.reset(new ThreadPool(100));
+    io_threadpool_.reset(new ThreadPool(FLAGS_io_threadpool_size));
   }
 }
 
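The DEFINE_int32 / FLAGS_io_threadpool_size pair above is the standard gflags pattern: the macro defines a global integer flag with a default of 100, and command-line parsing can override it at startup. Below is a minimal standalone sketch of that pattern reusing the flag from this diff; the main() harness is illustrative, not Paddle code.

#include <iostream>

#include "gflags/gflags.h"

// Same shape as the flag added in the diff: name, default, help string.
DEFINE_int32(io_threadpool_size, 100,
             "number of threads used for doing IO, default 100");

int main(int argc, char* argv[]) {
  // After parsing, running `./demo --io_threadpool_size=32` changes the
  // value; with no flag given, FLAGS_io_threadpool_size keeps its default.
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  std::cout << "IO threadpool size: " << FLAGS_io_threadpool_size << "\n";
  return 0;
}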
paddle/fluid/framework/threadpool.h
@@ -14,12 +14,12 @@ limitations under the License. */
 
 #pragma once
 
-#include <condition_variable>
+#include <condition_variable>  // NOLINT
 #include <functional>
-#include <future>
-#include <mutex>
+#include <future>  // NOLINT
+#include <mutex>   // NOLINT
 #include <queue>
-#include <thread>
+#include <thread>  // NOLINT
 #include <vector>
 #include "glog/logging.h"
 #include "paddle/fluid/platform/enforce.h"
@@ -137,7 +137,7 @@ class ThreadPool {
 
 class MultiStreamThreadPool : ThreadPool {
  public:
-  static MultiStreamThreadPool* GetInstanceIO();
+  static ThreadPool* GetInstanceIO();
   static void InitIO();
 
  private:
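Two notes on this header change. The // NOLINT markers silence cpplint, whose build/c++11 check rejects <thread>, <mutex>, <future>, and <condition_variable> outright; the annotation keeps the includes while passing the linter. The new return type also removes a real hazard: io_threadpool_ is a std::unique_ptr<ThreadPool> and InitIO constructs a plain ThreadPool, so the old static_cast claimed a derived type the stored object never had. Returning ThreadPool* matches what is actually stored. For reference, here is a minimal sketch of the std::call_once lazy-initialization pattern the accessor relies on; Pool and PoolHolder are illustrative stand-ins, not the Paddle types.

#include <memory>
#include <mutex>

class Pool {};

class PoolHolder {
 public:
  // Thread-safe lazy init: Init runs exactly once even if many threads
  // race here, and every caller sees the same Pool instance.
  static Pool* GetInstance() {
    std::call_once(init_flag_, &PoolHolder::Init);
    return pool_.get();
  }

 private:
  static void Init() { pool_.reset(new Pool()); }

  static std::unique_ptr<Pool> pool_;
  static std::once_flag init_flag_;
};

std::unique_ptr<Pool> PoolHolder::pool_(nullptr);
std::once_flag PoolHolder::init_flag_;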
paddle/fluid/operators/detail/grpc_server.cc
@@ -216,10 +216,10 @@ void AsyncGRPCServer::RunSyncUpdate() {
   std::function<void()> prefetch_register =
       std::bind(&AsyncGRPCServer::TryToRegisterNewPrefetchOne, this);
 
+  // TODO(wuyi): Run these "HandleRequest" in thread pool
   t_send_.reset(
       new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this,
                                 cq_send_.get(), "cq_send", send_register)));
-
   t_get_.reset(
       new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this,
                                 cq_get_.get(), "cq_get", get_register)));
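The new TODO only records intent: each completion queue still gets its own dedicated std::thread. Below is a rough sketch of the refactor it hints at, with an assumed pool->Run(task) interface; this is not the actual Paddle ThreadPool API.

#include <functional>
#include <iostream>
#include <string>
#include <thread>
#include <vector>

// Stand-in for AsyncGRPCServer::HandleRequest; the real method polls one
// gRPC completion queue until shutdown.
void HandleRequest(const std::string& queue_name) {
  std::cout << "handling " << queue_name << "\n";
}

int main() {
  // Today: one dedicated thread per queue, exactly as in the diff.
  std::vector<std::thread> handlers;
  handlers.emplace_back(std::bind(HandleRequest, "cq_send"));
  handlers.emplace_back(std::bind(HandleRequest, "cq_get"));
  // The TODO suggests submitting the same bound callables to a shared
  // pool instead, e.g. pool->Run(std::bind(HandleRequest, ...)).
  for (std::thread& t : handlers) t.join();
  return 0;
}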
python/paddle/fluid/tests/book/…
@@ -157,7 +157,6 @@ def train_loop(main_program):
     for ip in pserver_ips.split(","):
         eplist.append(':'.join([ip, port]))
     pserver_endpoints = ",".join(eplist)  # ip:port,ip:port...
-    pserver_endpoints = os.getenv("PSERVERS")
     trainers = int(os.getenv("TRAINERS"))
     current_endpoint = os.getenv("POD_IP") + ":" + port
     trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID"))
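The removed assignment made the endpoint construction above it dead code: the ip:port list joined from pserver_ips was immediately overwritten by the raw PSERVERS environment variable. Dropping the override lets the computed endpoint string actually take effect.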