#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>

19 |
| -/* |
| 19 | +namespace xss { |
| 20 | +namespace tp { |
| 21 | + |
| 22 | + /* |
20 | 23 | * ThreadPool class and doc: Generated by copilot
|
21 | 24 | * This thread pool implementation is a simple and efficient way to manage a
|
22 | 25 | * pool of threads for executing tasks concurrently. It uses a std::queue to store
|
|
25 | 28 | * execute it. The thread pool can be stopped gracefully, and it also provides
|
26 | 29 | * a way to wait for all tasks to complete before stopping.
|
27 | 30 | * */
|
28 |
| -class ThreadPool { |
29 |
| -private: |
30 |
| - std::vector<std::thread> workers; |
31 |
| - std::queue<std::function<void()>> tasks; |
32 |
| - std::mutex queue_mutex; |
33 |
| - std::condition_variable condition; // Condition variable for task queue |
34 |
| - std::condition_variable done_condition; // Condition variable for waiting |
35 |
| - int active_tasks {0}; |
36 |
| - bool stop; |
| 31 | + class ThreadPool { |
| 32 | + private: |
| 33 | + std::vector<std::thread> workers; |
| 34 | + std::queue<std::function<void()>> tasks; |
| 35 | + std::mutex queue_mutex; |
| 36 | + std::condition_variable condition; // Condition variable for task queue |
| 37 | + std::condition_variable |
| 38 | + done_condition; // Condition variable for waiting |
| 39 | + int active_tasks {0}; |
| 40 | + bool stop; |
37 | 41 |
|
38 |
| -public: |
39 |
| - ThreadPool(size_t num_threads) : stop(false) |
40 |
| - { |
41 |
| - for (size_t i = 0; i < num_threads; ++i) { |
42 |
| - // Create a worker thread and add it to the pool |
43 |
| - // Each thread will run a lambda function that waits for tasks |
44 |
| - workers.emplace_back([this] { |
45 |
| - while (true) { |
46 |
| - // Lock the queue mutex and wait for a task to be available |
47 |
| - std::unique_lock<std::mutex> lock(queue_mutex); |
48 |
| - // Wait until there is a task or the pool is stopped |
49 |
| - condition.wait(lock, |
50 |
| - [this] { return stop || !tasks.empty(); }); |
| 42 | + public: |
| 43 | + ThreadPool(size_t num_threads) : stop(false) |
| 44 | + { |
| 45 | + for (size_t i = 0; i < num_threads; ++i) { |
| 46 | + // Create a worker thread and add it to the pool |
| 47 | + // Each thread will run a lambda function that waits for tasks |
| 48 | + workers.emplace_back([this] { |
| 49 | + while (true) { |
| 50 | + // Lock the queue mutex and wait for a task to be available |
| 51 | + std::unique_lock<std::mutex> lock(queue_mutex); |
| 52 | + // Wait until there is a task or the pool is stopped |
| 53 | + condition.wait(lock, [this] { |
| 54 | + return stop || !tasks.empty(); |
| 55 | + }); |
51 | 56 |
|
52 |
| - // Check if we need to terminate the thread |
53 |
| - if (stop && tasks.empty()) { return; } |
| 57 | + // Check if we need to terminate the thread |
| 58 | + if (stop && tasks.empty()) { return; } |
54 | 59 |
|
55 |
| - // Extract the next task from the queue |
56 |
| - auto task = std::move(tasks.front()); |
57 |
| - tasks.pop(); |
58 |
| - // Unlock the mutex before executing the task |
59 |
| - lock.unlock(); |
60 |
| - // Execute the task: |
61 |
| - task(); |
62 |
| - } |
63 |
| - }); |
| 60 | + // Extract the next task from the queue |
| 61 | + auto task = std::move(tasks.front()); |
| 62 | + tasks.pop(); |
| 63 | + // Unlock the mutex before executing the task |
| 64 | + lock.unlock(); |
| 65 | + // Execute the task: |
| 66 | + task(); |
| 67 | + } |
| 68 | + }); |
| 69 | + } |
64 | 70 | }
|
65 |
| - } |
66 | 71 |
|
67 |
| - template <class F> |
68 |
| - void enqueue(F &&func) |
69 |
| - { |
70 |
| - // Add a new task to the queue and notify one of the worker threads |
71 |
| - std::unique_lock<std::mutex> lock(queue_mutex); |
72 |
| - tasks.emplace(std::forward<F>(func)); |
73 |
| - condition.notify_one(); |
74 |
| - } |
| 72 | + template <class F> |
| 73 | + void enqueue(F &&func) |
| 74 | + { |
| 75 | + // Add a new task to the queue and notify one of the worker threads |
| 76 | + std::unique_lock<std::mutex> lock(queue_mutex); |
| 77 | + tasks.emplace(std::forward<F>(func)); |
| 78 | + condition.notify_one(); |
| 79 | + } |
75 | 80 |
|
76 |
| - ~ThreadPool() |
77 |
| - { |
78 |
| - // Stop the thread pool and join all threads |
79 |
| - std::unique_lock<std::mutex> lock(queue_mutex); |
80 |
| - stop = true; |
81 |
| - lock.unlock(); |
82 |
| - condition.notify_all(); |
83 |
| - for (std::thread &worker : workers) { |
84 |
| - worker.join(); |
| 81 | + ~ThreadPool() |
| 82 | + { |
| 83 | + // Stop the thread pool and join all threads |
| 84 | + std::unique_lock<std::mutex> lock(queue_mutex); |
| 85 | + stop = true; |
| 86 | + lock.unlock(); |
| 87 | + condition.notify_all(); |
| 88 | + for (std::thread &worker : workers) { |
| 89 | + worker.join(); |
| 90 | + } |
85 | 91 | }
|
86 |
| - } |
87 | 92 |
|
88 |
| - // Wait for all tasks to complete before stopping the pool |
89 |
| - void wait_all() |
90 |
| - { |
91 |
| - std::unique_lock<std::mutex> lock(queue_mutex); |
92 |
| - done_condition.wait( |
93 |
| - lock, [this] { return tasks.empty() && (active_tasks == 0); }); |
94 |
| - // lock is automatically released here |
95 |
| - } |
| 93 | + // Wait for all tasks to complete before stopping the pool |
| 94 | + void wait_all() |
| 95 | + { |
| 96 | + std::unique_lock<std::mutex> lock(queue_mutex); |
| 97 | + done_condition.wait(lock, [this] { |
| 98 | + return tasks.empty() && (active_tasks == 0); |
| 99 | + }); |
| 100 | + // lock is automatically released here |
| 101 | + } |
96 | 102 |
|
97 |
| - // Track the number of active tasks |
98 |
| - void task_start() |
99 |
| - { |
100 |
| - std::unique_lock<std::mutex> lock(queue_mutex); |
101 |
| - active_tasks++; |
102 |
| - // lock is automatically released here |
103 |
| - } |
| 103 | + // Track the number of active tasks |
| 104 | + void task_start() |
| 105 | + { |
| 106 | + std::unique_lock<std::mutex> lock(queue_mutex); |
| 107 | + active_tasks++; |
| 108 | + // lock is automatically released here |
| 109 | + } |
| 110 | + |
| 111 | + // Decrement the active task count and notify if all tasks are done |
| 112 | + void task_end() |
| 113 | + { |
| 114 | + std::unique_lock<std::mutex> lock(queue_mutex); |
| 115 | + active_tasks--; |
| 116 | + if (tasks.empty() && active_tasks == 0) { |
| 117 | + done_condition.notify_all(); |
| 118 | + } |
| 119 | + // lock is automatically released here |
| 120 | + } |
| 121 | + }; |
104 | 122 |
|
105 |
| - // Decrement the active task count and notify if all tasks are done |
106 |
| - void task_end() |
| 123 | + // Wrapper for submitting tasks to the thread pool with automatic tracking |
| 124 | + template <typename F> |
| 125 | + void submit_task(ThreadPool &pool, F &&f) |
107 | 126 | {
|
108 |
| - std::unique_lock<std::mutex> lock(queue_mutex); |
109 |
| - active_tasks--; |
110 |
| - if (tasks.empty() && active_tasks == 0) { done_condition.notify_all(); } |
111 |
| - // lock is automatically released here |
| 127 | + pool.task_start(); |
| 128 | + pool.enqueue([f = std::forward<F>(f), &pool]() { |
| 129 | + try { |
| 130 | + f(); |
| 131 | + } catch (...) { |
| 132 | + // Ensure task_end is called even if the task throws an exception |
| 133 | + pool.task_end(); |
| 134 | + throw; // Re-throw the exception |
| 135 | + } |
| 136 | + pool.task_end(); |
| 137 | + }); |
112 | 138 | }
|
113 |
| -}; |
114 | 139 |
|
115 |
| -// Wrapper for submitting tasks to the thread pool with automatic tracking |
116 |
| -template <typename F> |
117 |
| -void submit_task(ThreadPool &pool, F &&f) |
118 |
| -{ |
119 |
| - pool.task_start(); |
120 |
| - pool.enqueue([f = std::forward<F>(f), &pool]() { |
121 |
| - f(); |
122 |
| - pool.task_end(); |
123 |
| - }); |
124 |
| -} |
| 140 | +} // namespace tp |
| 141 | +} // namespace xss |
125 | 142 |
|
126 | 143 | #endif // XSS_THREAD_POOL
|
0 commit comments