Commit 2358bb3

max-krasnyansky authored and fmz committed
threadpool: better naming for thread/cpumask related functions
1 parent 63a0dad commit 2358bb3

1 file changed (+21 −21 lines)


ggml/src/ggml.c

Lines changed: 21 additions & 21 deletions
@@ -3053,15 +3053,15 @@ static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size
 
 // Helpers for polling loops
 #if defined(__aarch64__) && ( defined(__clang__) || defined(__GNUC__) )
-static inline void __cpu_relax(void) {
+static inline void ggml_thread_cpu_relax(void) {
     __asm__ volatile("yield" ::: "memory");
 }
 #elif defined(__x86_64__)
-static inline void __cpu_relax(void) {
+static inline void ggml_thread_cpu_relax(void) {
     _mm_pause();
 }
 #else
-static inline void __cpu_relax(void) {;}
+static inline void ggml_thread_cpu_relax(void) {;}
 #endif
 
 //
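The renamed ggml_thread_cpu_relax() is the per-architecture pause hint used in the busy-wait loops later in this diff. As a minimal sketch of the calling pattern (the wait_for_flag helper and its atomic flag are hypothetical and not part of ggml; it assumes the relax helper defined above is in scope):

#include <stdatomic.h>
#include <stdbool.h>

// Hypothetical caller: spin until another thread sets the flag,
// relaxing the CPU between loads (yield on aarch64, _mm_pause on x86_64, no-op otherwise).
static void wait_for_flag(atomic_bool * flag) {
    while (!atomic_load_explicit(flag, memory_order_acquire)) {
        ggml_thread_cpu_relax();
    }
}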
@@ -3140,7 +3140,7 @@ static void ggml_barrier(struct ggml_compute_threadpool * threadpool) {
             if (atomic_load_explicit(n_barrier_passed, memory_order_relaxed) != passed_old) {
                 return;
             }
-            __cpu_relax();
+            ggml_thread_cpu_relax();
         }
     }
 }
@@ -18667,7 +18667,7 @@ enum {
 #include "windows.h"
 
 // TODO: support > 64 CPUs
-static bool __thread_affinity(bool * mask) {
+static bool ggml_thread_apply_affinity(bool * mask) {
     HANDLE h = GetCurrentThread();
     uint64_t bitmask = 0ULL;
 
@@ -18701,7 +18701,7 @@ static bool __thread_affinity(bool * mask) {
     return m != 0;
 }
 
-static bool __process_priority(int32_t prio) {
+static bool ggml_thread_apply_process_priority(int32_t prio) {
     DWORD p = NORMAL_PRIORITY_CLASS;
 
     switch (prio) {
@@ -18714,7 +18714,7 @@ static bool __process_priority(int32_t prio) {
     return SetPriorityClass(GetCurrentProcess(), p);
 }
 
-static bool __thread_priority(int32_t prio) {
+static bool ggml_thread_apply_thread_priority(int32_t prio) {
     DWORD p = NORMAL_PRIORITY_CLASS;
 
     switch (prio) {
@@ -18732,12 +18732,12 @@ static bool __thread_priority(int32_t prio) {
 #include <sys/types.h>
 #include <sys/resource.h>
 
-static bool __thread_affinity(const bool * mask) {
+static bool ggml_thread_apply_affinity(const bool * mask) {
     UNUSED(mask);
     return true;
 }
 
-static bool __process_priority(int32_t prio) {
+static bool ggml_thread_apply_process_prio(int32_t prio) {
     int32_t p = 0;
 
     switch (prio) {
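On this branch (where thread affinity is a no-op) the process-priority helper maps an abstract prio level to a nice value; the switch body is truncated in the hunk above. A hedged sketch of that setpriority() pattern, with an illustrative mapping that is not taken from ggml.c:

#include <sys/resource.h>
#include <stdbool.h>
#include <stdint.h>

// Illustrative sketch only: map a prio level to a nice value and apply it
// to the current process. The concrete mapping used by ggml is not shown here.
static bool apply_process_prio_sketch(int32_t prio) {
    int32_t p = 0;                // 0 = normal niceness
    switch (prio) {
        case 1:  p = -5;  break;  // medium
        case 2:  p = -10; break;  // high
        case 3:  p = -20; break;  // highest
        default: p = 0;   break;  // normal
    }
    int32_t r = setpriority(PRIO_PROCESS, 0, p);
    return r != -1;
}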
@@ -18751,14 +18751,14 @@ static bool __process_priority(int32_t prio) {
     return r != -1;
 }
 
-static bool __thread_priority(int32_t prio) {
+static bool ggml_thread_apply_thread_priority(int32_t prio) {
     UNUSED(prio);
     return true;
 }
 
 #else // posix?
 
-static bool __thread_affinity(const bool * mask) {
+static bool ggml_thread_apply_affinity(const bool * mask) {
     cpu_set_t cpuset;
     int32_t err;
 
@@ -18787,7 +18787,7 @@ static bool __thread_affinity(const bool * mask) {
     return true;
 }
 
-static bool __process_priority(int32_t prio) {
+static bool ggml_thread_apply_process_prio(int32_t prio) {
     struct sched_param p;
     int32_t policy = SCHED_OTHER;
 
@@ -18807,7 +18807,7 @@ static bool __process_priority(int32_t prio) {
     return true;
 }
 
-static bool __thread_priority(int32_t prio) {
+static bool ggml_thread_apply_thread_priority(int32_t prio) {
     struct sched_param p;
     int32_t policy = SCHED_OTHER;
     switch (prio) {
@@ -18828,7 +18828,7 @@ static bool __thread_priority(int32_t prio) {
 
 #endif
 
-static void __cpumask_next(const bool * global_mask, bool * local_mask, bool strict, int32_t* iter) {
+static void ggml_thread_cpumask_next(const bool * global_mask, bool * local_mask, bool strict, int32_t* iter) {
     if (!global_mask) {
         memset(local_mask, 1, GGML_MAX_N_THREADS);
         return;
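ggml_thread_cpumask_next hands each worker its portion of the global CPU mask, advancing *iter between calls; only its early-out is visible in this hunk. A hedged sketch of the non-strict round-robin idea (names and the CPU-count bound are illustrative, not the actual ggml code):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define N_CPUS_SKETCH 16  // illustrative bound, stands in for GGML_MAX_N_THREADS

// Illustrative sketch only: enable the next CPU that is set in global_mask,
// starting the search at *iter, and remember where to resume for the next worker.
static void cpumask_next_sketch(const bool * global_mask, bool * local_mask, int32_t * iter) {
    memset(local_mask, 0, N_CPUS_SKETCH);
    for (int32_t k = 0; k < N_CPUS_SKETCH; k++) {
        int32_t idx = (*iter + k) % N_CPUS_SKETCH;
        if (global_mask[idx]) {
            local_mask[idx] = true;
            *iter = idx + 1;
            return;
        }
    }
}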
@@ -19160,7 +19160,7 @@ static inline bool ggml_graph_compute_poll_for_work(struct ggml_compute_state *
 
     for (uint64_t i=0; !ggml_graph_compute_ready(state) && i<n_rounds; i++) {
         // No new work. Keep polling.
-        __cpu_relax();
+        ggml_thread_cpu_relax();
     }
 
     return state->pending;
@@ -19188,9 +19188,9 @@ static thread_ret_t ggml_graph_compute_secondary_thread(void* data) {
     struct ggml_compute_state * state = (struct ggml_compute_state *) data;
     struct ggml_compute_threadpool * threadpool = state->threadpool;
 
-    __thread_priority(threadpool->prio);
+    ggml_thread_apply_thread_priority(threadpool->prio);
     if (state->mask_specified)
-        __thread_affinity(state->cpumask);
+        ggml_thread_apply_affinity(state->cpumask);
 
     while (true) {
         // Check if we need to sleep
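Each secondary worker applies its priority and, if a mask was specified, pins itself with ggml_thread_apply_affinity. On the Linux path that helper builds a cpu_set_t from the boolean mask (its full body is truncated above); a minimal sketch of that pattern, assuming _GNU_SOURCE and an illustrative CPU count:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

// Illustrative sketch only: pin the calling thread to the CPUs enabled in `mask`.
static bool apply_affinity_sketch(const bool * mask, int n_cpus) {
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    for (int i = 0; i < n_cpus; i++) {
        if (mask[i]) {
            CPU_SET(i, &cpuset);
        }
    }
    return pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset) == 0;
}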
@@ -19306,8 +19306,8 @@ static struct ggml_compute_threadpool * ggml_create_threadpool_impl(
 #else // Not using OPENMP
     int32_t cpumask_iter = 0;
 
-    __process_priority(tpp->prio);
-    __thread_priority(tpp->prio);
+    ggml_thread_apply_process_prio(tpp->prio);
+    ggml_thread_apply_thread_priority(tpp->prio);
 
     for (int j = 0; j < tpp->n_threads; j++) {
         workers[j] = (struct ggml_compute_state) {
@@ -19320,7 +19320,7 @@ static struct ggml_compute_threadpool * ggml_create_threadpool_impl(
         };
 
         if (tpp->mask_specified) {
-            __cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter);
+            ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter);
         }
 
         // Spin threads for all secondary workers
@@ -19408,7 +19408,7 @@ enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cpl
 #else
     // Update main thread affinity to match the current threadpool
     if (threadpool->workers[0].mask_specified) {
-        __thread_affinity(threadpool->workers[0].cpumask);
+        ggml_thread_apply_affinity(threadpool->workers[0].cpumask);
     }
 
     // Kick all threads to start the new graph
