 #include <vector>
 #include <mutex>
 #include <condition_variable>
+#include "dali/core/semaphore.h"
 #include "dali/core/api_helper.h"
 #include "dali/core/multi_error.h"
 #include "dali/core/mm/detail/aux_alloc.h"
@@ -66,10 +67,14 @@ class DLL_PUBLIC Job {
 
         if (--num_pending_tasks_ == 0) {
           num_pending_tasks_.notify_all();
-          std::cerr << make_string((void *)this, " notified.") << std::endl;
+          (void)std::lock_guard(mtx_);
           cv_.notify_all();
+          // We need this second flag to avoid a race condition where the
+          // destructor is called between decrementing num_pending_tasks_ and the notification,
+          // without excessive use of mutexes. This must be the very last operation in the task
+          // function that touches `this`.
+          running_ = false;
         }
-        assert(num_pending_tasks_ >= 0);
       };
     } catch (...) {  // if, for whatever reason, we cannot initialize the task, we should erase it
       tasks_.erase(it);
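
The race being closed here: a waiter may destroy the Job the moment it observes num_pending_tasks_ reach zero, while the worker is still between the decrement and the notification. The running_ flag, cleared as the very last access to `this`, gives the waiter something safe to poll. A minimal sketch of the waiting side that pairs with this change, assuming a Wait() roughly of this shape (its body is not part of this diff):

// Hypothetical waiter, for illustration only; the members used here (mtx_, cv_,
// num_pending_tasks_, running_) are the ones shown or added in this diff, but the
// function body is an assumption.  Needs <thread> for std::this_thread::yield().
void Job::Wait() {
  std::unique_lock lock(mtx_);
  // The worker briefly takes mtx_ before notifying, so a waiter that checked the
  // predicate under the lock cannot miss the wake-up.
  cv_.wait(lock, [&] { return num_pending_tasks_ == 0; });
  lock.unlock();
  // running_ is cleared only after the notification, as the last access to `this`;
  // once it reads false, no task can still be inside the completion lambda and the
  // Job can be destroyed safely.
  while (running_)
    std::this_thread::yield();
}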
@@ -88,8 +93,10 @@ class DLL_PUBLIC Job {
 
  private:
   // atomic wait has no timeout, so we're stuck with condvar for reentrance
+  std::mutex mtx_;
   std::condition_variable cv_;
   std::atomic_int num_pending_tasks_{0};
+  std::atomic_bool running_{false};
   bool started_ = false;
   bool waited_for_ = false;
 
@@ -139,8 +146,10 @@ class DLL_PUBLIC IncrementalJob {
   const void *executor_ = nullptr;
   bool waited_for_ = false;
   // atomic wait has no timeout, so we're stuck with condvar for reentrance
+  std::mutex mtx_;
   std::condition_variable cv_;
   std::atomic_int num_pending_tasks_{0};
+  std::atomic_bool running_{false};
   using task_list_t = std::list<Task, mm::detail::object_pool_allocator<Task>>;
   task_list_t tasks_;
   std::optional<task_list_t::iterator> last_task_run_;
@@ -192,18 +201,20 @@ class DLL_PUBLIC ThreadPoolBase {
     void Submit() {
       if (lock.owns_lock()) {
         lock.unlock();
-        if (tasks_added > 1)
-          owner->cv_.notify_all();
-        else
-          owner->cv_.notify_one();
+        owner->sem_.release(tasks_added);
       }
     }
+
+    int Size() const {
+      return tasks_added;
+    }
+
    private:
     friend class ThreadPoolBase;
     explicit TaskBulkAdd(ThreadPoolBase *o) : owner(o), lock(o->mtx_, std::defer_lock) {}
-    ThreadPoolBase *owner;
+    ThreadPoolBase *owner = nullptr;
     std::unique_lock<std::mutex> lock;
-    int tasks_added;
+    int tasks_added = 0;
   };
   friend class TaskBulkAdd;
 
@@ -247,7 +258,7 @@ class DLL_PUBLIC ThreadPoolBase {
   void Run(int index, const std::function<OnThreadStartFn> &on_thread_start) noexcept;
 
   std::mutex mtx_;
-  std::condition_variable cv_;
+  counting_semaphore sem_{0};
   bool shutdown_pending_ = false;
   std::queue<TaskFunc> tasks_;
   std::vector<std::thread> threads_;
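
The condition-variable wake-ups in ThreadPoolBase are replaced with a counting semaphore so that a bulk submission can release exactly one permit per queued task instead of choosing between notify_one and notify_all. A self-contained sketch of that scheme, using C++20 std::counting_semaphore as a stand-in for the counting_semaphore pulled in from "dali/core/semaphore.h" (whose interface beyond release(n) is not shown in this diff):

#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>
#include <semaphore>
#include <thread>

int main() {
  std::mutex mtx;
  std::queue<std::function<void()>> tasks;
  std::counting_semaphore<> sem{0};   // zero permits initially, like sem_{0} above
  bool shutdown = false;

  std::thread worker([&] {
    for (;;) {
      sem.acquire();                  // one permit per queued task (or per shutdown signal)
      std::unique_lock lock(mtx);
      if (tasks.empty() && shutdown)
        return;
      auto task = std::move(tasks.front());
      tasks.pop();
      lock.unlock();
      task();
    }
  });

  {
    // Bulk add: queue the tasks under the lock, then release exactly as many permits
    // as tasks were added, mirroring owner->sem_.release(tasks_added) above.
    std::unique_lock lock(mtx);
    int added = 0;
    for (int i = 0; i < 3; i++, added++)
      tasks.push([i] { std::printf("task %d\n", i); });
    lock.unlock();
    sem.release(added);
  }

  {
    std::lock_guard lock(mtx);
    shutdown = true;
  }
  sem.release();                      // extra permit so the worker can observe shutdown
  worker.join();
}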
@@ -289,13 +300,15 @@ void Job::Run(Executor &executor, bool wait) {
   if (started_)
     throw std::logic_error("This job has already been started.");
   started_ = true;
+  running_ = !tasks_.empty();
   for (auto &x : tasks_) {
     num_pending_tasks_++;
     try {
       executor.AddTask(std::move(x.second.func));
     } catch (...) {
       if (--num_pending_tasks_ == 0) {
         num_pending_tasks_.notify_all();
+        (void)std::lock_guard(mtx_);
         cv_.notify_all();
       }
       throw;
@@ -325,9 +338,14 @@ IncrementalJob::AddTask(Runnable &&runnable) {
 
       if (--num_pending_tasks_ == 0) {
         num_pending_tasks_.notify_all();
+        (void)std::lock_guard(mtx_);
         cv_.notify_all();
+        // We need this second flag to avoid a race condition where the
+        // destructor is called between decrementing num_pending_tasks_ and the notification,
+        // without excessive use of mutexes. This must be the very last operation in the task
+        // function that touches `this`.
+        running_ = false;
       }
-      assert(num_pending_tasks_ >= 0);
     };
   } catch (...) {  // if, for whatever reason, we cannot initialize the task, we should erase it
     tasks_.erase(it);
@@ -346,6 +364,7 @@ void IncrementalJob::Run(Executor &executor, bool wait) {
   executor_ = &executor;
   auto it = last_task_run_.has_value() ? std::next(*last_task_run_) : tasks_.begin();
   for (; it != tasks_.end(); ++it) {
+    running_ = true;
     executor.AddTask(std::move(it->func));
     last_task_run_ = it;
   }
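
For reference, the call pattern these changes serve, in a hypothetical caller. Only AddTask() and Run(executor, wait) appear in this diff; the ThreadPoolBase constructor arguments, the include path, and the dali namespace qualification are assumptions made for the example:

#include <atomic>
#include <cstdio>
#include "dali/core/exec/thread_pool_base.h"    // assumed include path

void Example() {
  dali::ThreadPoolBase pool(4);                 // assumed: a pool with 4 worker threads
  dali::Job job;
  std::atomic_int sum{0};
  for (int i = 1; i <= 10; i++)
    job.AddTask([i, &sum] { sum += i; });       // tasks are only queued here
  job.Run(pool, true);                          // submit to the pool and wait (wait == true)
  std::printf("sum = %d\n", sum.load());        // 55 once every task has completed
}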