Skip to content

Commit e1ce7ab

Browse files
committed
优化打印 (Optimize printing)
1 parent 19858f9 commit e1ce7ab

File tree

1 file changed

+7
-7
lines changed

1 file changed

+7
-7
lines changed

src/runner/LLM.cpp

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -139,7 +139,7 @@ struct LLM::Impl {
139139
llama_layers.resize(attr.axmodel_num);
140140
auto dev_assign = distributeModels((int)_attr.dev_ids.size(), attr.axmodel_num);
141141
std::vector<int> rets(attr.axmodel_num, 0);
142-
std::atomic<int> process_idx(2);
142+
std::atomic<int> process_idx(1);
143143
#pragma omp parallel for if (_attr.dev_ids.size() > 1)
144144
for (int i = 0; i < attr.axmodel_num; i++)
145145
{
@@ -159,7 +159,7 @@ struct LLM::Impl {
159159
if (ret != 0) { ALOGE("init post axmodel(%s) failed", attr.filename_post_axmodel.c_str()); return false; }
160160
char path[1024];
161161
sprintf(path, "init post axmodel ok,remain_cmm(%d MB)", axcl_GetCMMRemain(post_devid));
162-
update_cqdm(&cqdm, attr.axmodel_num + 2, "count", path);
162+
update_cqdm(&cqdm, attr.axmodel_num + 1, "count", path);
163163
}
164164
#else
165165
llama_layers.resize(attr.axmodel_num);
@@ -172,17 +172,17 @@ struct LLM::Impl {
172172
if (ret != 0) { ALOGE("init axmodel(%s) failed", llama_layers[i].filename.c_str()); return false; }
173173
int remain_cmm = get_remaining_cmm_size();
174174
sprintf(axmodel_path, "init %d axmodel ok,remain_cmm(%d MB)", i, remain_cmm);
175-
update_cqdm(&cqdm, i + 2, "count", axmodel_path);
175+
update_cqdm(&cqdm, i + 1, "count", axmodel_path);
176176
}
177177
{
178178
int ret = llama_post.init(attr.filename_post_axmodel.c_str(), -1);
179179
if (ret != 0) { ALOGE("init post axmodel(%s) failed", attr.filename_post_axmodel.c_str()); return false; }
180180
int remain_cmm = get_remaining_cmm_size();
181181
sprintf(axmodel_path, "init post axmodel ok,remain_cmm(%d MB)", remain_cmm);
182-
update_cqdm(&cqdm, attr.axmodel_num + 2, "count", axmodel_path);
182+
update_cqdm(&cqdm, attr.axmodel_num + 1, "count", axmodel_path);
183183
}
184184
#endif
185-
185+
printf("\n");
186186
{
187187
_attr.max_token_len = llama_layers[0].layer.get_input("mask").nSize / sizeof(unsigned short) - 1;
188188
ALOGI("max_token_len : %d", _attr.max_token_len);
@@ -217,9 +217,9 @@ struct LLM::Impl {
217217
ALOGE("embed_selector.Init(%s, %d, %d) failed", _attr.filename_tokens_embed.c_str(), _attr.tokens_embed_num, _attr.tokens_embed_size);
218218
return false;
219219
}
220-
update_cqdm(&cqdm, 1, "count", "embed_selector init ok");
220+
update_cqdm(&cqdm, attr.axmodel_num + 2, "count", "embed_selector init ok");
221221
}
222-
222+
printf("\n");
223223
if (!postprocess.load_config(attr.post_config_path)) { ALOGW("load postprocess config(%s) failed", attr.post_config_path.c_str()); }
224224
ALOGI("LLM init ok");
225225
return true;

0 commit comments

Comments (0)