Commit e14d9d8

llama : add thread safety test
llama : ignore main_gpu <= 0 if there are no GPUs

ggml-ci
1 parent 669c13e commit e14d9d8

4 files changed, +152 -2 lines changed

ci/run.sh
Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@ sd=`dirname $0`
 cd $sd/../
 SRC=`pwd`
 
-CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=OFF"
+CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON"
 
 if [ ! -z ${GG_BUILD_METAL} ]; then
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON -DGGML_METAL_USE_BF16=ON"

src/llama.cpp
Lines changed: 1 addition & 1 deletion

@@ -197,7 +197,7 @@ static struct llama_model * llama_model_load_from_file_impl(
     }
 
     // if using single GPU mode, remove all except the main GPU
-    if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
+    if (params.split_mode == LLAMA_SPLIT_MODE_NONE && !model->devices.empty() && params.main_gpu >= 0) {
         if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) {
             LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size());
             llama_model_free(model);
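
With this change, the device-filtering block is skipped entirely when there are no devices or when main_gpu is negative, instead of failing with the "invalid value for main_gpu" error. A minimal sketch (not part of the commit) of the call pattern this permits, using the same llama.cpp API that appears in the new test below; "model.gguf" is a placeholder path:

    llama_model_params mparams = llama_model_default_params();
    mparams.split_mode = LLAMA_SPLIT_MODE_NONE; // single-device mode
    mparams.main_gpu   = -1;                    // negative main_gpu now skips the GPU-selection block
                                                // rather than being rejected as invalid
    llama_model * model = llama_model_load_from_file("model.gguf", mparams); // placeholder path

This mirrors how the new thread-safety test loads its CPU-only model copy (main_gpu = -1).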

tests/CMakeLists.txt
Lines changed: 2 additions & 0 deletions

@@ -151,6 +151,8 @@ llama_build_and_test(test-json-partial.cpp)
 llama_build_and_test(test-log.cpp)
 llama_build_and_test(test-regex-partial.cpp)
 
+llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/Qwen3-0.6B-GGUF:Q8_0 -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4)
+
 # this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
 if (NOT WIN32)
     llama_build_and_test(test-arg-parser.cpp)

tests/test-thread-safety.cpp
Lines changed: 148 additions & 0 deletions (new file)

// thread safety test
// - Loads a copy of the same model on each GPU, plus a copy on the CPU
// - Creates n_parallel (--parallel) contexts per model
// - Runs inference in parallel on each context

#include <thread>
#include <vector>
#include <atomic>
#include "llama.h"
#include "arg.h"
#include "common.h"
#include "log.h"
#include "sampling.h"

int main(int argc, char ** argv) {
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    common_init();

    llama_backend_init();
    llama_numa_init(params.numa);

    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
        if (level == GGML_LOG_LEVEL_ERROR) {
            common_log_add(common_log_main(), level, "%s", text);
        }
    }, NULL);

    auto mparams = common_model_params_to_llama(params);
    auto cparams = common_context_params_to_llama(params);

    int dev_count = ggml_backend_dev_count();
    int gpu_dev_count = 0;
    for (int i = 0; i < dev_count; ++i) {
        auto * dev = ggml_backend_dev_get(i);
        if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
            gpu_dev_count++;
        }
    }
    const int num_models = gpu_dev_count + 1; // GPUs + 1 CPU model
    //const int num_models = std::max(1, gpu_dev_count);
    const int num_contexts = std::max(1, params.n_parallel);

    struct model_context {
        llama_model_ptr model;
        std::vector<llama_context_ptr> contexts;
        std::vector<std::unique_ptr<common_sampler, decltype(&common_sampler_free)>> samplers;
    };

    std::vector<model_context> models;
    std::vector<std::thread> threads;
    std::atomic<bool> failed = false;

    for (int m = 0; m < num_models; ++m) {
        model_context this_model;

        mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
        mparams.main_gpu = m < gpu_dev_count ? m : -1;

        llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
        if (model == NULL) {
            LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
            return 1;
        }

        this_model.model.reset(model);

        for (int c = 0; c < num_contexts; ++c) {
            LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models);
            llama_context * ctx = llama_init_from_model(model, cparams);
            if (ctx == NULL) {
                LOG_ERR("%s: failed to create context\n", __func__);
                return 1;
            }
            this_model.contexts.emplace_back(ctx);

            common_sampler * sampler = common_sampler_init(model, params.sampling);
            if (sampler == NULL) {
                LOG_ERR("%s: failed to create sampler\n", __func__);
                return 1;
            }
            this_model.samplers.emplace_back(sampler, common_sampler_free);

            threads.emplace_back([model, ctx, sampler, &params, &failed, m, c, num_models, num_contexts]() {
                llama_batch batch = {};
                {
                    auto prompt = common_tokenize(ctx, params.prompt, true);
                    if (prompt.empty()) {
                        LOG_ERR("failed to tokenize prompt\n");
                        failed.store(true);
                        return;
                    }
                    batch = llama_batch_get_one(prompt.data(), prompt.size());
                    if (llama_decode(ctx, batch)) {
                        LOG_ERR("failed to decode prompt\n");
                        failed.store(true);
                        return;
                    }
                }

                const auto * vocab = llama_model_get_vocab(model);
                std::string result = params.prompt;

                for (int i = 0; i < params.n_predict; i++) {
                    llama_token token;
                    if (batch.n_tokens > 0) {
                        token = common_sampler_sample(sampler, ctx, batch.n_tokens - 1);
                    } else {
                        token = llama_vocab_bos(vocab);
                    }

                    if (llama_vocab_is_eog(vocab, token)) {
                        break;
                    }
                    result += common_token_to_piece(ctx, token);

                    batch = llama_batch_get_one(&token, 1);
                    if (llama_decode(ctx, batch)) {
                        LOG_ERR("failed to decode\n");
                        failed.store(true);
                        return;
                    }
                }

                LOG_INF("Model %d/%d, Context %d/%d: Result: '%s'\n", m + 1, num_models, c + 1, num_contexts, result.c_str());
            });
        }

        models.emplace_back(std::move(this_model));
    }

    for (auto & thread : threads) {
        thread.join();
    }

    if (failed) {
        LOG_ERR("One or more threads failed.\n");
        return 1;
    }

    LOG_INF("All threads completed successfully.\n");
    return 0;
}
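
The contract the test exercises, distilled: a single llama_model shared by several llama_context objects, with each context driven by exactly one thread. Below is a minimal standalone sketch of that pattern (not part of the commit), using only C API calls that also appear in the test above; the model path and thread count are placeholders:

    #include <thread>
    #include <vector>

    #include "llama.h"

    int main() {
        llama_backend_init();

        // "model.gguf" is a placeholder path
        llama_model * model = llama_model_load_from_file("model.gguf", llama_model_default_params());
        if (model == NULL) {
            return 1;
        }

        std::vector<std::thread> workers;
        for (int i = 0; i < 4; ++i) { // 4 threads, arbitrary
            workers.emplace_back([model]() {
                // the model (weights) is shared; each thread owns its own context, which holds the mutable state
                llama_context * ctx = llama_init_from_model(model, llama_context_default_params());
                if (ctx == NULL) {
                    return;
                }

                // decode a single BOS token (assumes the model defines one);
                // a real worker, like the test above, runs a full generation loop
                llama_token bos = llama_vocab_bos(llama_model_get_vocab(model));
                llama_batch batch = llama_batch_get_one(&bos, 1);
                llama_decode(ctx, batch);

                llama_free(ctx);
            });
        }

        for (auto & w : workers) {
            w.join();
        }

        llama_model_free(model);
        llama_backend_free();
        return 0;
    }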
