|
9 | 9 | #include "llvm/Support/Parallel.h" |
10 | 10 | #include "llvm/ADT/ScopeExit.h" |
11 | 11 | #include "llvm/Config/llvm-config.h" |
| 12 | +#include "llvm/Support/ExponentialBackoff.h" |
12 | 13 | #include "llvm/Support/Jobserver.h" |
13 | 14 | #include "llvm/Support/ManagedStatic.h" |
14 | 15 | #include "llvm/Support/Threading.h" |
@@ -122,32 +123,62 @@ class ThreadPoolExecutor : public Executor { |
122 | 123 | void work(ThreadPoolStrategy S, unsigned ThreadID) { |
123 | 124 | threadIndex = ThreadID; |
124 | 125 | S.apply_thread_strategy(ThreadID); |
125 | | - while (true) { |
126 | | - std::unique_lock<std::mutex> Lock(Mutex); |
127 | | - Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); }); |
128 | | - if (Stop) |
129 | | - break; |
130 | | - auto Task = std::move(WorkStack.back()); |
131 | | - WorkStack.pop_back(); |
132 | | - Lock.unlock(); |
| 126 | + // Note on jobserver deadlock avoidance: |
| 127 | + // GNU Make grants each invoked process one implicit job slot. Our |
| 128 | + // JobserverClient models this by returning an implicit JobSlot on the |
| 129 | + // first successful tryAcquire() in a process. This guarantees forward |
| 130 | + // progress without requiring a dedicated "always-on" thread here. |
133 | 131 |
|
| 134 | + while (true) { |
134 | 135 | if (TheJobserver) { |
135 | | - JobSlot Slot = TheJobserver->tryAcquire(); |
136 | | - if (Slot.isValid()) { |
| 136 | + // Jobserver-mode scheduling: |
| 137 | + // - Acquire one job slot (with exponential backoff to avoid busy-wait). |
| 138 | + // - While holding the slot, drain and run tasks from the local queue. |
| 139 | + // - Release the slot on shutdown (or when the queue is found empty). |
| 140 | + // Rationale: Holding a slot amortizes acquire/release overhead over |
| 141 | + // multiple tasks and avoids requeue/yield churn, while still enforcing |
| 142 | + // the jobserver's global concurrency limit. With K available slots, |
| 143 | + // up to K workers run tasks in parallel; within each worker tasks run |
| 144 | + // sequentially until the local queue is empty. |
| 145 | + ExponentialBackoff Backoff(std::chrono::hours(24)); |
| 146 | + JobSlot Slot; |
| 147 | + do { |
| 148 | + if (Stop) |
| 149 | + return; |
| 150 | + Slot = TheJobserver->tryAcquire(); |
| 151 | + if (Slot.isValid()) |
| 152 | + break; |
| 153 | + } while (Backoff.waitForNextAttempt()); |
| 154 | + |
| 155 | + auto SlotReleaser = llvm::make_scope_exit( |
| 156 | + [&] { TheJobserver->release(std::move(Slot)); }); |
| 157 | + |
| 158 | + while (true) { |
| 159 | + std::function<void()> Task; |
| 160 | + { |
| 161 | + std::unique_lock<std::mutex> Lock(Mutex); |
| 162 | + Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); }); |
| 163 | + if (Stop && WorkStack.empty()) |
| 164 | + return; |
| 165 | + if (WorkStack.empty()) |
| 166 | + break; |
| 167 | + Task = std::move(WorkStack.back()); |
| 168 | + WorkStack.pop_back(); |
| 169 | + } |
137 | 170 | Task(); |
138 | | - TheJobserver->release(std::move(Slot)); |
139 | | - } else { |
140 | | - // The task could not be run because no job slot was |
141 | | - // available. Re-queue the task so that another thread can try |
142 | | - // to run it later. |
143 | | - std::lock_guard<std::mutex> RequeueLock(Mutex); |
144 | | - WorkStack.push_back(std::move(Task)); |
145 | | - Cond.notify_one(); |
146 | | - // Yield to give another thread a chance to release a token. |
147 | | - std::this_thread::yield(); |
148 | 171 | } |
149 | | - } else |
| 172 | + } else { |
| 173 | + std::unique_lock<std::mutex> Lock(Mutex); |
| 174 | + Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); }); |
| 175 | + if (Stop) |
| 176 | + break; |
| 177 | + auto Task = std::move(WorkStack.back()); |
| 178 | + WorkStack.pop_back(); |
| 179 | + Lock.unlock(); |
150 | 180 | Task(); |
| 181 | + } |
151 | 182 | } |
152 | 183 | } |
153 | 184 |
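
For readers following the scheduling rationale in the comments above, the jobserver-mode control flow boils down to the standalone sketch below. This is a simplified illustration, not the committed code: `runJobserverWorker`, `Queue`, `QueueMutex`, `QueueCV`, and `ShuttingDown` are hypothetical stand-ins for the executor's `work()`, `WorkStack`, `Mutex`, `Cond`, and `Stop` members, and the `JobserverClient`, `JobSlot`, `ExponentialBackoff`, and `make_scope_exit` interfaces are assumed to behave exactly as they are used in the diff.

```cpp
// Standalone sketch of the slot-holding drain loop (hypothetical names).
// JobserverClient, JobSlot, ExponentialBackoff and make_scope_exit are used
// as they appear in the diff above.
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/ExponentialBackoff.h"
#include "llvm/Support/Jobserver.h"

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <utility>
#include <vector>

// ShuttingDown is assumed to be set (while holding QueueMutex) and QueueCV
// notified by whatever shutdown path the surrounding executor provides.
void runJobserverWorker(llvm::JobserverClient &Client,
                        std::vector<std::function<void()>> &Queue,
                        std::mutex &QueueMutex,
                        std::condition_variable &QueueCV,
                        std::atomic<bool> &ShuttingDown) {
  while (true) {
    // Acquire one job slot, sleeping with exponential backoff between
    // attempts rather than spinning. The 24-hour budget effectively means
    // "keep retrying until shutdown". Per the deadlock-avoidance note in
    // the diff, the first successful tryAcquire() in a process hands back
    // the implicit slot GNU Make grants every child, so at least one
    // worker always makes progress.
    llvm::ExponentialBackoff Backoff(std::chrono::hours(24));
    llvm::JobSlot Slot;
    do {
      if (ShuttingDown)
        return;
      Slot = Client.tryAcquire();
      if (Slot.isValid())
        break;
    } while (Backoff.waitForNextAttempt());

    // Hand the slot back however the drain loop below exits.
    auto Releaser =
        llvm::make_scope_exit([&] { Client.release(std::move(Slot)); });

    // Run queued tasks back-to-back under this single slot, amortizing the
    // acquire/release cost over many tasks while still respecting the
    // jobserver's global concurrency limit.
    while (true) {
      std::function<void()> Task;
      {
        std::unique_lock<std::mutex> Lock(QueueMutex);
        QueueCV.wait(Lock, [&] { return ShuttingDown || !Queue.empty(); });
        if (ShuttingDown && Queue.empty())
          return; // Releaser returns the slot on the way out.
        if (Queue.empty())
          break; // Release the slot and start over.
        Task = std::move(Queue.back());
        Queue.pop_back();
      }
      Task();
    }
  }
}
```

With K slots available from the jobserver, at most K such workers execute tasks concurrently; the rest sleep in the backoff loop rather than busy-waiting, and the implicit slot described in the deadlock-avoidance note keeps at least one of them runnable at all times.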
|
|