|
5 | 5 |
|
6 | 6 | #include <algorithm> |
7 | 7 | #include <array> |
| 8 | +#include <barrier> |
8 | 9 | #include <iostream> |
9 | 10 | #include <numeric> |
10 | 11 | #include <random> |
| 12 | +#include <shared_mutex> |
11 | 13 | #include <string> |
12 | 14 | #include <thread> |
13 | 15 |
|
@@ -560,3 +562,86 @@ TEST_CASE("Initialization function is called") { |
560 | 562 | } |
561 | 563 | CHECK_EQ(counter.load(), 4); |
562 | 564 | } |
| 565 | + |
TEST_CASE("Check clear_tasks() can be called from a task") {
    // Verifies that clear_tasks() is safe to invoke from inside a running task — here, from the
    // barrier's completion function, which executes on one of the pool's own worker threads.
    //
    // Strategy:
    // - we use a barrier to trigger clear_tasks() once all worker threads are busy in a task;
    // - to prevent race conditions (e.g. clear_tasks() getting called whilst we are still adding
    //   tasks), we use a shared_mutex to prevent the tasks from running, until all tasks have
    //   been added to the pool.

    unsigned int thread_count = 0;

    SUBCASE("with single thread") { thread_count = 1; }
    SUBCASE("with multiple threads") { thread_count = 4; }

    std::atomic<unsigned int> counter = 0;
    dp::thread_pool pool(thread_count);
    std::shared_mutex mutex;

    {
        /* Clear thread_pool when barrier is hit, this must not throw — std::barrier requires a
         * noexcept completion function, so any exception from clear_tasks() is swallowed. */
        auto clear_func = [&pool]() noexcept {
            try {
                pool.clear_tasks();
            } catch (...) {
            }
        };
        std::barrier sync_point(thread_count, clear_func);

        auto func = [&counter, &sync_point, &mutex]() {
            // Parks here until the enqueue loop below drops its unique_lock.
            std::shared_lock lock(mutex);
            counter.fetch_add(1);
            // Last thread to arrive runs clear_func, discarding the still-queued tasks.
            sync_point.arrive_and_wait();
        };

        {
            // Writer lock: no task can start until all 10 have been enqueued.
            std::unique_lock lock(mutex);
            for (int i = 0; i < 10; i++) pool.enqueue_detach(func);
        }

        pool.wait_for_tasks();
    }

    // Exactly one task per worker thread ran; the remaining tasks were cleared at the barrier.
    CHECK_EQ(counter.load(), thread_count);
}
| 608 | + |
| 609 | +TEST_CASE("Check clear_tasks() clears tasks") { |
| 610 | + // Here we: |
| 611 | + // - add twice as many tasks to the pool as can be run simultaniously |
| 612 | + // - use a lock to prevent race conditions (e.g. clear_task() running whilst the another task is |
| 613 | + // being added) |
| 614 | + |
| 615 | + unsigned int thread_count{4}; |
| 616 | + size_t cleared_tasks{0}; |
| 617 | + std::atomic<unsigned int> counter{0}; |
| 618 | + |
| 619 | + SUBCASE("with no thread") { thread_count = 0; } |
| 620 | + SUBCASE("with single thread") { thread_count = 1; } |
| 621 | + SUBCASE("with multiple threads") { thread_count = 4; } |
| 622 | + |
| 623 | + { |
| 624 | + std::mutex mutex; |
| 625 | + dp::thread_pool pool(thread_count); |
| 626 | + |
| 627 | + std::function<void(void)> func; |
| 628 | + func = [&counter, &mutex]() { |
| 629 | + counter.fetch_add(1); |
| 630 | + std::lock_guard lock(mutex); |
| 631 | + }; |
| 632 | + |
| 633 | + { |
| 634 | + /* fill the thread_pool twice over, and wait until all threads running and locked in a |
| 635 | + * task */ |
| 636 | + std::lock_guard lock(mutex); |
| 637 | + for (unsigned int i = 0; i < 2 * thread_count; i++) pool.enqueue_detach(func); |
| 638 | + |
| 639 | + while (counter != thread_count) |
| 640 | + std::this_thread::sleep_for(std::chrono::milliseconds(100)); |
| 641 | + |
| 642 | + cleared_tasks = pool.clear_tasks(); |
| 643 | + } |
| 644 | + } |
| 645 | + CHECK_EQ(cleared_tasks, static_cast<size_t>(thread_count)); |
| 646 | + CHECK_EQ(thread_count, counter.load()); |
| 647 | +} |
0 commit comments