|
18 | 18 | #include <cstdarg> |
19 | 19 | #include <cstring> |
20 | 20 | #include <ctime> |
| 21 | +#include <filesystem> |
21 | 22 | #include <fstream> |
22 | 23 | #include <iostream> |
23 | 24 | #include <iterator> |
|
62 | 63 | #ifdef __linux__ |
63 | 64 | #include <linux/limits.h> |
64 | 65 | #elif defined(_WIN32) |
65 | | -#define PATH_MAX MAX_PATH |
| 66 | +# if !defined(PATH_MAX) |
| 67 | +# define PATH_MAX MAX_PATH |
| 68 | +# endif |
66 | 69 | #else |
67 | 70 | #include <sys/syslimits.h> |
68 | 71 | #endif |
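A note on why the guard was added rather than keeping the bare `#define`: MinGW-w64's `<limits.h>` already defines `PATH_MAX`, so the old unconditional define could trigger a macro-redefinition warning there, while MSVC leaves it undefined and needs the `MAX_PATH` fallback. A minimal sketch of how the guard resolves on the two Windows toolchains (the `windows.h` include is an assumption about what the surrounding file pulls in):

```cpp
// Sketch only: behavior of the guarded define on Windows toolchains.
// MSVC:      PATH_MAX is not predefined -> the guard defines it as MAX_PATH (260).
// MinGW-w64: <limits.h> already defines PATH_MAX -> the guard is a no-op,
//            avoiding a C4005 / -Wmacro-redefined style warning.
#include <windows.h> // provides MAX_PATH

#if !defined(PATH_MAX)
#  define PATH_MAX MAX_PATH
#endif

static_assert(PATH_MAX >= 260, "expect at least the classic Windows path limit");
```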
@@ -1148,8 +1151,7 @@ static bool common_download_file(const std::string & url, const std::string & pa |
1148 | 1151 | #endif |
1149 | 1152 |
|
1150 | 1153 | // Check if the file already exists locally |
1151 | | - struct stat model_file_info; |
1152 | | - auto file_exists = (stat(path.c_str(), &model_file_info) == 0); |
| 1154 | + auto file_exists = std::filesystem::exists(path); |
1153 | 1155 |
|
1154 | 1156 | // If the file exists, check its JSON metadata companion file. |
1155 | 1157 | std::string metadata_path = path + ".json"; |
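One behavioral note on the `stat` → `std::filesystem::exists` swap: the single-argument overload can throw `std::filesystem::filesystem_error` on underlying OS failures, whereas the old `stat` call simply returned non-zero. If that matters here, the non-throwing `std::error_code` overload is the closest match to the previous semantics; a minimal sketch (the helper name is illustrative, not part of the patch):

```cpp
#include <filesystem>
#include <string>
#include <system_error>

// Non-throwing existence check, mirroring the old stat()-based behavior:
// any OS-level error is folded into "does not exist" instead of throwing.
static bool file_exists_noexcept(const std::string & path) {
    std::error_code ec;
    const bool exists = std::filesystem::exists(path, ec);
    return !ec && exists;
}
```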
@@ -1612,6 +1614,18 @@ std::string common_detokenize(llama_context * ctx, const std::vector<llama_token |
1612 | 1614 | // Chat template utils |
1613 | 1615 | // |
1614 | 1616 |
|
| 1617 | +std::string common_get_builtin_chat_template(const struct llama_model * model) { |
| 1618 | + static const char * template_key = "tokenizer.chat_template"; |
| 1619 | + // call with NULL buffer to get the total size of the string |
| 1620 | + int32_t res = llama_model_meta_val_str(model, template_key, NULL, 0); |
| 1621 | + if (res > 0) { |
| 1622 | + std::vector<char> model_template(res + 1, 0); |
| 1623 | + llama_model_meta_val_str(model, template_key, model_template.data(), model_template.size()); |
| 1624 | + return std::string(model_template.data(), model_template.size() - 1); |
| 1625 | + } |
| 1626 | + return ""; |
| 1627 | +} |
| 1628 | + |
1615 | 1629 | bool common_chat_verify_template(const std::string & tmpl) { |
1616 | 1630 | llama_chat_message chat[] = {{"user", "test"}}; |
1617 | 1631 | int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0); |
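For reviewers, a sketch of how the new getter composes with the existing verifier shown above; the `"chatml"` fallback is an assumption for illustration, not something this patch prescribes:

```cpp
// Hypothetical caller: prefer the template embedded in the GGUF metadata,
// validate it, and fall back to a named built-in when absent or unusable.
std::string tmpl = common_get_builtin_chat_template(model);
if (tmpl.empty() || !common_chat_verify_template(tmpl)) {
    tmpl = "chatml"; // assumed fallback; real callers pick their own default
}
```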
|