
Commit 5ae5b31

Merge pull request #194 from menloresearch/update-dev-from-master-2025-08-05-00-13
Sync master with upstream release b6089
2 parents adb4fe2 + ec428b0 commit 5ae5b31

28 files changed: +1,164 −470 lines

.github/workflows/build.yml

Lines changed: 16 additions & 48 deletions
@@ -159,31 +159,15 @@ jobs:
       - name: Dawn Dependency
         id: dawn-depends
         run: |
-          ARTIFACTS_JSON=$(curl -s -L \
-            -H "Accept: application/vnd.github+json" \
-            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-            -H "X-GitHub-Api-Version: 2022-11-28" \
-            "https://api.github.com/repos/google/dawn/actions/artifacts")
-          echo "Finding latest macos-latest-Release artifact..."
-          DOWNLOAD_URL=$(echo "$ARTIFACTS_JSON" | jq -r '.artifacts
-            | sort_by(.created_at)
-            | reverse
-            | map(select(.name | test("macos-latest-Release$")))
-            | .[0].archive_download_url')
-          if [ "$DOWNLOAD_URL" = "null" ] || [ -z "$DOWNLOAD_URL" ]; then
-            echo "No suitable Dawn artifact found!"
-            exit 1
-          fi
-          echo "Downloading from: $DOWNLOAD_URL"
-          curl -L \
-            -H "Accept: application/vnd.github+json" \
-            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-            -o artifact.zip "$DOWNLOAD_URL"
-          unzip artifact.zip
+          DAWN_VERSION="v1.0.0"
+          DAWN_OWNER="reeselevine"
+          DAWN_REPO="dawn"
+          DAWN_ASSET_NAME="Dawn-a1a6b45cced25a3b7f4fb491e0ae70796cc7f22b-macos-latest-Release.tar.gz"
+          echo "Fetching release asset from https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}"
+          curl -L -o artifact.tar.gz \
+            "https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}"
           mkdir dawn
-          tar_file=$(find . -name '*.tar.gz' | head -n 1)
-          echo "Extracting: $tar_file"
-          tar -xvf "$tar_file" -C dawn --strip-components=1
+          tar -xvf artifact.tar.gz -C dawn --strip-components=1

       - name: Build
         id: cmake_build
@@ -433,31 +417,15 @@ jobs:
         id: dawn-depends
         run: |
           sudo apt-get install -y libxrandr-dev libxinerama-dev libxcursor-dev mesa-common-dev libx11-xcb-dev libxi-dev
-          ARTIFACTS_JSON=$(curl -s -L \
-            -H "Accept: application/vnd.github+json" \
-            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-            -H "X-GitHub-Api-Version: 2022-11-28" \
-            "https://api.github.com/repos/google/dawn/actions/artifacts")
-          echo "Finding latest ubuntu-latest-Release artifact..."
-          DOWNLOAD_URL=$(echo "$ARTIFACTS_JSON" | jq -r '.artifacts
-            | sort_by(.created_at)
-            | reverse
-            | map(select(.name | test("ubuntu-latest-Release$")))
-            | .[0].archive_download_url')
-          if [ "$DOWNLOAD_URL" = "null" ] || [ -z "$DOWNLOAD_URL" ]; then
-            echo "No suitable Dawn artifact found!"
-            exit 1
-          fi
-          echo "Downloading from: $DOWNLOAD_URL"
-          curl -L \
-            -H "Accept: application/vnd.github+json" \
-            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
-            -o artifact.zip "$DOWNLOAD_URL"
-          unzip artifact.zip
+          DAWN_VERSION="v1.0.0"
+          DAWN_OWNER="reeselevine"
+          DAWN_REPO="dawn"
+          DAWN_ASSET_NAME="Dawn-a1a6b45cced25a3b7f4fb491e0ae70796cc7f22b-ubuntu-latest-Release.tar.gz"
+          echo "Fetching release asset from https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}"
+          curl -L -o artifact.tar.gz \
+            "https://github.com/${DAWN_OWNER}/${DAWN_REPO}/releases/download/${DAWN_VERSION}/${DAWN_ASSET_NAME}"
           mkdir dawn
-          tar_file=$(find . -name '*.tar.gz' | head -n 1)
-          echo "Extracting: $tar_file"
-          tar -xvf "$tar_file" -C dawn --strip-components=1
+          tar -xvf artifact.tar.gz -C dawn --strip-components=1

       - name: Build
         id: cmake_build
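
For local debugging outside of Actions, the same pinned Dawn asset can be fetched and unpacked by hand; a minimal sketch mirroring the macOS step above (the working directory is arbitrary):

  # fetch the pinned prebuilt Dawn release and unpack it, as the CI step now does
  DAWN_VERSION="v1.0.0"
  DAWN_ASSET="Dawn-a1a6b45cced25a3b7f4fb491e0ae70796cc7f22b-macos-latest-Release.tar.gz"
  curl -L -o artifact.tar.gz \
    "https://github.com/reeselevine/dawn/releases/download/${DAWN_VERSION}/${DAWN_ASSET}"
  mkdir -p dawn
  tar -xf artifact.tar.gz -C dawn --strip-components=1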

common/arg.cpp

Lines changed: 26 additions & 10 deletions
@@ -24,6 +24,7 @@
 #include <cstdarg>
 #include <filesystem>
 #include <fstream>
+#include <list>
 #include <regex>
 #include <set>
 #include <string>
@@ -2375,20 +2376,35 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                     }
                     throw std::invalid_argument("unknown buffer type");
                 }
-                // FIXME: this leaks memory
-                params.tensor_buft_overrides.push_back({strdup(tensor_name.c_str()), buft_list.at(buffer_type)});
+                // keep strings alive and avoid leaking memory by storing them in a static vector
+                static std::list<std::string> buft_overrides;
+                buft_overrides.push_back(tensor_name);
+                params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), buft_list.at(buffer_type)});
             }
         }
     ));
     add_opt(common_arg(
-        {"--cpu-moe"},
-        "use CPU for Mixture of Experts (MoE) weights",
+        {"--cpu-moe", "-cmoe"},
+        "keep all Mixture of Experts (MoE) weights in the CPU",
         [](common_params & params) {
-            params.tensor_buft_overrides.push_back({"\\.ffn_up_exps\\.weight$", ggml_backend_cpu_buffer_type()});
-            params.tensor_buft_overrides.push_back({"\\.ffn_down_exps\\.weight$", ggml_backend_cpu_buffer_type()});
-            params.tensor_buft_overrides.push_back({"\\.ffn_gate_exps\\.weight$", ggml_backend_cpu_buffer_type()});
+            params.tensor_buft_overrides.push_back({"\\.ffn_(up|down|gate)_exps", ggml_backend_cpu_buffer_type()});
         }
     ).set_env("LLAMA_ARG_CPU_MOE"));
+    add_opt(common_arg(
+        {"--n-cpu-moe", "-ncmoe"}, "N",
+        "keep the Mixture of Experts (MoE) weights of the first N layers in the CPU",
+        [](common_params & params, int value) {
+            if (value < 0) {
+                throw std::invalid_argument("invalid value");
+            }
+            for (int i = 0; i < value; ++i) {
+                // keep strings alive and avoid leaking memory by storing them in a static vector
+                static std::list<std::string> buft_overrides;
+                buft_overrides.push_back(string_format("blk\\.%d\\.ffn_(up|down|gate)_exps", i));
+                params.tensor_buft_overrides.push_back({buft_overrides.back().c_str(), ggml_backend_cpu_buffer_type()});
+            }
+        }
+    ).set_env("LLAMA_ARG_N_CPU_MOE"));
     add_opt(common_arg(
         {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
         "number of layers to store in VRAM",
@@ -2649,10 +2665,10 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
     add_opt(common_arg(
         {"--output-format"}, "{gguf,dat}",
-        string_format("output format for imatrix file (default: %s)", params.imat_dat ? "dat" : "gguf"),
+        string_format("output format for imatrix file (default: %s)", params.imat_dat > 0 ? "dat" : "gguf"),
         [](common_params & params, const std::string & value) {
-            /**/ if (value == "gguf") { params.imat_dat = false; }
-            else if (value == "dat") { params.imat_dat = true; }
+            /**/ if (value == "gguf") { params.imat_dat = -1; }
+            else if (value == "dat") { params.imat_dat = 1; }
             else { throw std::invalid_argument("invalid output format"); }
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
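
A usage sketch for the MoE offload flags added above; the model path and layer count are placeholders, and the same options are picked up by the other llama.cpp tools that share these common args:

  # keep all MoE expert weights in system RAM while offloading the rest to the GPU
  llama-cli -m ./model.gguf -ngl 99 --cpu-moe

  # keep only the expert weights of the first 10 layers on the CPU
  llama-cli -m ./model.gguf -ngl 99 --n-cpu-moe 10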

common/common.h

Lines changed: 1 addition & 1 deletion
@@ -439,7 +439,7 @@ struct common_params {
     int32_t n_out_freq = 10; // output the imatrix every n_out_freq iterations
     int32_t n_save_freq = 0; // save the imatrix every n_save_freq iterations
     int32_t i_chunk = 0; // start processing from this chunk
-    bool imat_dat = false; // whether the legacy imatrix.dat format should be output
+    int8_t imat_dat = 0; // whether the legacy imatrix.dat format should be output (gguf <= 0 < dat)

     bool process_output = false; // collect data for the output tensor
     bool compute_ppl = true; // whether to compute perplexity
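
With imat_dat now a signed tristate (per the header comment, gguf <= 0 < dat), the --output-format switch from the arg.cpp hunk above sets it to -1 or 1. A usage sketch with placeholder file names:

  # write the imatrix in the legacy imatrix.dat format
  llama-imatrix -m ./model.gguf -f calibration.txt -o imatrix.dat --output-format dat

  # GGUF output (also the default when --output-format is not given)
  llama-imatrix -m ./model.gguf -f calibration.txt -o imatrix.gguf --output-format gguf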

convert_hf_to_gguf.py

Lines changed: 136 additions & 0 deletions
@@ -678,6 +678,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
             # ref: https://huggingface.co/THUDM/glm-4-9b-hf
             res = "glm4"
+        if chkhsh == "9ca2dd618e8afaf09731a7cf6e2105b373ba6a1821559f258b272fe83e6eb902":
+            # ref: https://huggingface.co/zai-org/GLM-4.5-Air
+            res = "glm4"
         if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35":
             # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0
             res = "minerva-7b"
@@ -6696,6 +6699,139 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter
         return super().modify_tensors(data_torch, name, bid)


+@ModelBase.register("Glm4MoeForCausalLM")
+class Glm4MoeModel(TextModel):
+    model_arch = gguf.MODEL_ARCH.GLM4_MOE
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # GLM4_MOE has num_hidden_layers + 1 actual layers (including NextN layer)
+        self.block_count = self.hparams["num_hidden_layers"] + self.hparams.get("num_nextn_predict_layers", 0)
+        self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
+
+    def set_vocab(self):
+        from transformers import AutoTokenizer
+
+        tokenizer = AutoTokenizer.from_pretrained(self.dir_model)
+        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
+        tokens, toktypes, tokpre = self.get_vocab_base()
+        self.gguf_writer.add_tokenizer_model("gpt2")
+        self.gguf_writer.add_tokenizer_pre(tokpre)
+        self.gguf_writer.add_token_list(tokens)
+        self.gguf_writer.add_token_types(toktypes)
+
+        # Special tokens
+        # Note: Using <|endoftext|> (151329) for eot causes endless generation
+        special_vocab._set_special_token("bos", tokenizer.get_added_vocab()["[gMASK]"])  # 151331
+        special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])  # 151336
+        special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])  # 151329
+        special_vocab._set_special_token("eom", tokenizer.get_added_vocab()["<|observation|>"])  # 151338
+
+        # Patch broken chat template
+        if isinstance(special_vocab.chat_template, str) and "visible_text(m.content).endswith" in special_vocab.chat_template:
+            special_vocab.chat_template = special_vocab.chat_template.replace(
+                """{{ visible_text(m.content) }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}""",
+                """{% set content = visible_text(m.content) %}{{ content }}\n{{- '/nothink' if (enable_thinking is defined and not enable_thinking and not content.endswith("/nothink")) else '' -}}""")
+
+        special_vocab.add_to_gguf(self.gguf_writer)
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if (rope_dim := self.hparams.get("head_dim")) is None:
+            rope_dim = (
+                self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
+            )
+        self.gguf_writer.add_rope_dimension_count(
+            int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5))
+        )
+
+        # MoE parameters - Use only routed expert count (shared experts handled separately)
+        if (n_routed_experts := self.hparams.get("n_routed_experts")) is not None:
+            self.gguf_writer.add_expert_count(n_routed_experts)
+        if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
+            self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
+        if (n_shared_experts := self.hparams.get("n_shared_experts")) is not None:
+            self.gguf_writer.add_expert_shared_count(n_shared_experts)
+        if (first_k_dense_replace := self.hparams.get("first_k_dense_replace")) is not None:
+            self.gguf_writer.add_leading_dense_block_count(first_k_dense_replace)
+
+        # Expert gating function (sigmoid for GLM4_MOE)
+        self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
+
+        # Routed scaling factor
+        if (routed_scaling_factor := self.hparams.get("routed_scaling_factor")) is not None:
+            self.gguf_writer.add_expert_weights_scale(routed_scaling_factor)
+
+        # Normalise topk probabilities
+        if (norm_topk_prob := self.hparams.get("norm_topk_prob")) is not None:
+            self.gguf_writer.add_expert_weights_norm(norm_topk_prob)
+
+        # NextN/MTP prediction layers
+        if (num_nextn_predict_layers := self.hparams.get("num_nextn_predict_layers")) is not None:
+            self.gguf_writer.add_nextn_predict_layers(num_nextn_predict_layers)
+
+    _experts: list[dict[str, Tensor]] | None = None
+
+    def modify_tensors(
+        self, data_torch: Tensor, name: str, bid: int | None
+    ) -> Iterable[tuple[str, Tensor]]:
+        if name.startswith("model.visual."):  # ignore visual part
+            return []
+        elif name.startswith("model.language_model."):
+            name = name.replace("language_model.", "")  # for multimodal variants
+
+        # Handle main token embedding (but not layer-specific NextN embeddings)
+        if name == "model.embed_tokens.weight" and ".layers." not in name:
+            return [(self.map_tensor_name("token_embd.weight"), data_torch)]
+
+        # Handle routed experts
+        if name.find("mlp.experts") != -1:
+            n_experts = self.hparams["n_routed_experts"]
+            assert bid is not None
+
+            if self._experts is None:
+                self._experts = [{} for _ in range(self.block_count)]
+
+            self._experts[bid][name] = data_torch
+
+            if len(self._experts[bid]) >= n_experts * 3:
+                tensors: list[tuple[str, Tensor]] = []
+
+                # merge the experts into a single 3d tensor
+                for w_name in ["down_proj", "gate_proj", "up_proj"]:
+                    datas: list[Tensor] = []
+
+                    for xid in range(n_experts):
+                        ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight"
+                        datas.append(self._experts[bid][ename])
+                        del self._experts[bid][ename]
+
+                    data_torch = torch.stack(datas, dim=0)
+
+                    merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight"
+
+                    new_name = self.map_tensor_name(merged_name)
+                    tensors.append((new_name, data_torch))
+                return tensors
+            else:
+                return []
+
+        if name.endswith("e_score_correction_bias"):
+            name = name.replace("e_score_correction_bias", "e_score_correction.bias")
+
+        new_name = self.map_tensor_name(name)
+
+        return [(new_name, data_torch)]
+
+    def prepare_tensors(self):
+        super().prepare_tensors()
+        if self._experts is not None:
+            # flatten `list[dict[str, Tensor]]` into `list[str]`
+            experts = [k for d in self._experts for k in d.keys()]
+            if len(experts) > 0:
+                raise ValueError(f"Unprocessed experts: {experts}")
+
+
 @ModelBase.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
 class ChatGLMModel(TextModel):
     model_arch = gguf.MODEL_ARCH.CHATGLM
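
With Glm4MoeForCausalLM registered, a GLM-4.5 style checkpoint can be converted directly; a sketch assuming a local checkout of the Hugging Face repo (paths and output type are placeholders):

  # convert a GLM-4.5-Air checkout to GGUF using the new Glm4MoeModel mapping
  python convert_hf_to_gguf.py /path/to/GLM-4.5-Air \
    --outfile glm-4.5-air-bf16.gguf --outtype bf16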

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
@@ -147,6 +147,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b"},
     {"name": "chatglm-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-chat", "chkhsh": "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516"},
     {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", "chkhsh": "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2"},
+    {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/zai-org/GLM-4.5-Air", "chkhsh": "9ca2dd618e8afaf09731a7cf6e2105b373ba6a1821559f258b272fe83e6eb902"},
     {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", "chkhsh": "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35"},
     {"name": "hunyuan", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tencent/Hunyuan-A13B-Instruct", "chkhsh": "7e57df22b1fe23a7b1e1c7f3dc4e3f96d43a4eb0836d0c6bdc3436d7b2f1c664"},
     {"name": "hunyuan-dense", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tencent/Hunyuan-4B-Instruct", "chkhsh": "bba3b3366b646dbdded5dbc42d59598b849371afc42f7beafa914afaa5b70aa6"},

ggml/CMakeLists.txt

Lines changed: 3 additions & 2 deletions
@@ -39,8 +39,9 @@ if (WIN32)
     set(CMAKE_SHARED_MODULE_PREFIX "")
 endif()

-option(BUILD_SHARED_LIBS "ggml: build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})
-option(GGML_BACKEND_DL "ggml: build backends as dynamic libraries (requires BUILD_SHARED_LIBS)" OFF)
+option(BUILD_SHARED_LIBS "ggml: build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})
+option(GGML_BACKEND_DL   "ggml: build backends as dynamic libraries (requires BUILD_SHARED_LIBS)" OFF)
+set   (GGML_BACKEND_DIR  "" CACHE PATH "ggml: directory to load dynamic backends from (requires GGML_BACKEND_DL")

 #
 # option list

ggml/cmake/ggml-config.cmake.in

Lines changed: 1 addition & 1 deletion
@@ -106,7 +106,7 @@ if(NOT TARGET ggml::ggml)

     find_library(GGML_LIBRARY ggml
         REQUIRED
-        HINTS ${GGML_LIB_DIR}
+        HINTS ${GGML_LIB_DIR} ${GGML_BACKEND_DIR}
         NO_CMAKE_FIND_ROOT_PATH)

     add_library(ggml::ggml UNKNOWN IMPORTED)

ggml/src/CMakeLists.txt

Lines changed: 12 additions & 1 deletion
@@ -214,6 +214,13 @@ add_library(ggml
             ggml-backend-reg.cpp)
 add_library(ggml::ggml ALIAS ggml)

+if (GGML_BACKEND_DIR)
+    if (NOT GGML_BACKEND_DL)
+        message(FATAL_ERROR "GGML_BACKEND_DIR requires GGML_BACKEND_DL")
+    endif()
+    target_compile_definitions(ggml PUBLIC GGML_BACKEND_DIR="${GGML_BACKEND_DIR}")
+endif()
+
 target_link_libraries(ggml PUBLIC ggml-base)

 if (CMAKE_SYSTEM_NAME MATCHES "Linux")
@@ -227,7 +234,11 @@ function(ggml_add_backend_library backend)
         set_target_properties(${backend} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
         target_compile_definitions(${backend} PRIVATE GGML_BACKEND_DL)
         add_dependencies(ggml ${backend})
-        install(TARGETS ${backend} LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR})
+        if (GGML_BACKEND_DIR)
+            install(TARGETS ${backend} LIBRARY DESTINATION ${GGML_BACKEND_DIR})
+        else()
+            install(TARGETS ${backend} LIBRARY DESTINATION ${CMAKE_INSTALL_BINDIR})
+        endif()
     else()
         add_library(${backend} ${ARGN})
         target_link_libraries(ggml PUBLIC ${backend})

ggml/src/ggml-backend-reg.cpp

Lines changed: 3 additions & 0 deletions
@@ -498,6 +498,9 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent,

     std::vector<fs::path> search_paths;
     if (user_search_path == nullptr) {
+#ifdef GGML_BACKEND_DIR
+        search_paths.push_back(fs::u8path(GGML_BACKEND_DIR));
+#endif
         // default search paths: executable directory, current directory
         search_paths.push_back(get_executable_path());
         search_paths.push_back(fs::current_path());
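
Taken together, the CMake and loader changes let a packager install dynamic backends into a fixed directory that is searched before the executable and current directories. A configure sketch; the backend directory is only an example:

  # build backends as dynamic libraries and bake in a system-wide backend search path
  cmake -B build -DBUILD_SHARED_LIBS=ON -DGGML_BACKEND_DL=ON \
        -DGGML_BACKEND_DIR=/usr/lib/ggml-backends
  cmake --build build --config Release
  cmake --install build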
