From 16c6291b220008256e1ee9a99651932515fb17eb Mon Sep 17 00:00:00 2001 From: firecoperana Date: Sat, 3 May 2025 14:22:32 -0500 Subject: [PATCH 01/20] Add RPC backend in device list to override tensors. --- CMakeLists.txt | 1 - common/common.cpp | 51 +++++++---- examples/server/server.cpp | 36 +------- ggml/src/ggml-backend.c | 11 +++ ggml/src/ggml-rpc.cpp | 64 +++++++++---- ggml/src/ggml.c | 4 +- src/unicode.cpp | 179 ++++++++++++++++++++++--------------- 7 files changed, 201 insertions(+), 145 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3e9c3cc02..edb9e6570 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,7 +6,6 @@ include(CheckIncludeFileCXX) set(CMAKE_WARN_UNUSED_CLI YES) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED true) diff --git a/common/common.cpp b/common/common.cpp index 2df8d4d4c..057c3211e 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -81,7 +81,9 @@ #endif #define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083 #endif // LLAMA_USE_CURL - +#ifdef GGML_USE_RPC +# include "ggml-rpc.h" +#endif using json = nlohmann::ordered_json; // @@ -989,6 +991,35 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa if (arg == "--rpc") { CHECK_ARG params.rpc_servers = argv[i]; + std::string servers(params.rpc_servers); + size_t pos = 0; + while ((pos = servers.find(",")) != std::string::npos) { + std::string server = servers.substr(0, pos); + ggml_backend_rpc_buffer_type(server.c_str()); + servers.erase(0, pos + 1); + } + ggml_backend_rpc_buffer_type(servers.c_str()); + return true; + } + if (arg == "--override-kv") { + CHECK_ARG + if (!string_parse_kv_override(argv[i], params.kv_overrides)) { + fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]); + invalid_param = true; + return true; + } + return true; + } + if (arg == "--override-tensor" || arg == "-ot") { + CHECK_ARG + /*for (auto endpoint : params.rpc_servers.split) + { + + }*/ + if (!parse_buft_overrides(std::string{ argv[i] }, params.tensor_buft_overrides)) { + fprintf(stderr, "error: Invalid tensor buffer type override: %s\n", argv[i]); + invalid_param = true; + } return true; } if (arg == "--no-mmap") { @@ -1196,23 +1227,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa sparams.grammar = json_schema_to_grammar(json::parse(argv[i])); return true; } - if (arg == "--override-kv") { - CHECK_ARG - if (!string_parse_kv_override(argv[i], params.kv_overrides)) { - fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]); - invalid_param = true; - return true; - } - return true; - } - if (arg == "--override-tensor" || arg == "-ot") { - CHECK_ARG - if (!parse_buft_overrides(std::string{argv[i]}, params.tensor_buft_overrides)) { - fprintf(stderr, "error: Invalid tensor buffer type override: %s\n", argv[i]); - invalid_param = true; - } - return true; - } + if (arg == "--offload-policy" || arg == "-op") { CHECK_ARG auto p = string_split_pairs(argv[i], ','); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 466bb339c..ca60ebbcb 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1,3 +1,4 @@ +#pragma warning(disable : 4996) #include "utils.hpp" #include "common.h" @@ -17,22 +18,6 @@ #include "json.hpp" // auto generated files (update with ./deps.sh) -#include "colorthemes.css.hpp" -#include "style.css.hpp" -#include "theme-beeninorder.css.hpp" -#include "theme-ketivah.css.hpp" -#include 
"theme-mangotango.css.hpp" -#include "theme-playground.css.hpp" -#include "theme-polarnight.css.hpp" -#include "theme-snowstorm.css.hpp" -#include "index.html.hpp" -#include "index-new.html.hpp" -#include "index.js.hpp" -#include "completion.js.hpp" -#include "system-prompts.js.hpp" -#include "prompt-formats.js.hpp" -#include "json-schema-to-grammar.mjs.hpp" - #include #include #include @@ -3407,25 +3392,6 @@ int main(int argc, char ** argv) { svr->set_base_dir(params.public_path); } - // using embedded static files - svr->Get("/", handle_static_file(index_html, index_html_len, "text/html; charset=utf-8")); - svr->Get("/index.js", handle_static_file(index_js, index_js_len, "text/javascript; charset=utf-8")); - svr->Get("/completion.js", handle_static_file(completion_js, completion_js_len, "text/javascript; charset=utf-8")); - svr->Get("/json-schema-to-grammar.mjs", handle_static_file(json_schema_to_grammar_mjs, json_schema_to_grammar_mjs_len, "text/javascript; charset=utf-8")); - - // add new-ui files - svr->Get("/colorthemes.css", handle_static_file(colorthemes_css, colorthemes_css_len, "text/css; charset=utf-8")); - svr->Get("/style.css", handle_static_file(style_css, style_css_len, "text/css; charset=utf-8")); - svr->Get("/theme-beeninorder.css", handle_static_file(theme_beeninorder_css, theme_beeninorder_css_len, "text/css; charset=utf-8")); - svr->Get("/theme-ketivah.css", handle_static_file(theme_ketivah_css, theme_ketivah_css_len, "text/css; charset=utf-8")); - svr->Get("/theme-mangotango.css", handle_static_file(theme_mangotango_css, theme_mangotango_css_len, "text/css; charset=utf-8")); - svr->Get("/theme-playground.css", handle_static_file(theme_playground_css, theme_playground_css_len, "text/css; charset=utf-8")); - svr->Get("/theme-polarnight.css", handle_static_file(theme_polarnight_css, theme_polarnight_css_len, "text/css; charset=utf-8")); - svr->Get("/theme-snowstorm.css", handle_static_file(theme_snowstorm_css, theme_snowstorm_css_len, "text/css; charset=utf-8")); - svr->Get("/index-new.html", handle_static_file(index_new_html, index_new_html_len, "text/html; charset=utf-8")); - svr->Get("/system-prompts.js", handle_static_file(system_prompts_js, system_prompts_js_len, "text/javascript; charset=utf-8")); - svr->Get("/prompt-formats.js", handle_static_file(prompt_formats_js, prompt_formats_js_len, "text/javascript; charset=utf-8")); - // register API routes svr->Get ("/health", handle_health); svr->Get ("/metrics", handle_metrics); diff --git a/ggml/src/ggml-backend.c b/ggml/src/ggml-backend.c index e191c2d99..44688c36a 100644 --- a/ggml/src/ggml-backend.c +++ b/ggml/src/ggml-backend.c @@ -468,6 +468,10 @@ GGML_CALL static void ggml_backend_registry_init(void) { extern GGML_CALL int ggml_backend_cann_reg_devices(void); ggml_backend_cann_reg_devices(); #endif +#ifdef GGML_USE_RPC + extern GGML_CALL void ggml_backend_rpc_reg_devices(void); + ggml_backend_rpc_reg_devices(); +#endif } GGML_CALL void ggml_backend_register(const char * name, ggml_backend_init_fn init_fn, ggml_backend_buffer_type_t default_buffer_type, void * user_data) { @@ -943,6 +947,13 @@ GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, v GGML_UNUSED(user_data); } +GGML_CALL static ggml_backend_t ggml_backend_reg_rpc_init(const char* params, void* user_data) { + return ggml_backend_rpc_init(); + + GGML_UNUSED(params); + GGML_UNUSED(user_data); +} + // multi-buffer buffer struct ggml_backend_multi_buffer_context { diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 
7757615f5..60983920c 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -79,7 +79,7 @@ struct rpc_tensor { #pragma pack(pop) static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of 8"); - +static std::unordered_map rpc_server_map; // RPC commands enum rpc_cmd { ALLOC_BUFFER = 0, @@ -331,8 +331,8 @@ GGML_CALL static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); std::vector output; bool status = send_rpc_cmd(ctx->sock, FREE_BUFFER, input, output); - GGML_ASSERT(status); - GGML_ASSERT(output.empty()); + //GGML_ASSERT(status); + //GGML_ASSERT(output.empty()); delete ctx; } @@ -347,8 +347,8 @@ GGML_CALL static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t b memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); std::vector output; bool status = send_rpc_cmd(ctx->sock, BUFFER_GET_BASE, input, output); - GGML_ASSERT(status); - GGML_ASSERT(output.size() == sizeof(uint64_t)); + //GGML_ASSERT(status); + //GGML_ASSERT(output.size() == sizeof(uint64_t)); // output serialization format: | base_ptr (8 bytes) | uint64_t base_ptr; memcpy(&base_ptr, output.data(), sizeof(base_ptr)); @@ -391,7 +391,7 @@ GGML_CALL static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t UNUSED(buffer); if (ggml_is_quantized(tensor->type)) { // TODO: this check is due to MATRIX_ROW_PADDING in CUDA and should be generalized - GGML_ASSERT(tensor->ne[0] % 512 == 0 && "unsupported quantized tensor"); + //GGML_ASSERT(tensor->ne[0] % 512 == 0 && "unsupported quantized tensor"); } } @@ -406,7 +406,7 @@ GGML_CALL static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t b memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size); std::vector output; bool status = send_rpc_cmd(ctx->sock, SET_TENSOR, input, output); - GGML_ASSERT(status); + //GGML_ASSERT(status); } GGML_CALL static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { @@ -420,8 +420,8 @@ GGML_CALL static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t b memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), &size, sizeof(size)); std::vector output; bool status = send_rpc_cmd(ctx->sock, GET_TENSOR, input, output); - GGML_ASSERT(status); - GGML_ASSERT(output.size() == size); + //GGML_ASSERT(status); + //GGML_ASSERT(output.size() == size); // output serialization format: | data (size bytes) | memcpy(data, output.data(), size); } @@ -445,9 +445,9 @@ GGML_CALL static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t b memcpy(input.data() + sizeof(rpc_src), &rpc_dst, sizeof(rpc_dst)); std::vector output; bool status = send_rpc_cmd(ctx->sock, COPY_TENSOR, input, output); - GGML_ASSERT(status); + //GGML_ASSERT(status); // output serialization format: | result (1 byte) | - GGML_ASSERT(output.size() == 1); + //GGML_ASSERT(output.size() == 1); return output[0]; } @@ -460,7 +460,7 @@ GGML_CALL static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer memcpy(input.data() + sizeof(ctx->remote_ptr), &value, sizeof(value)); std::vector output; bool status = send_rpc_cmd(ctx->sock, BUFFER_CLEAR, input, output); - GGML_ASSERT(status); + //GGML_ASSERT(status); } static ggml_backend_buffer_i ggml_backend_rpc_buffer_interface = { @@ -489,7 +489,7 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer std::vector output; auto sock = get_socket(buft_ctx->endpoint); bool 
status = send_rpc_cmd(sock, ALLOC_BUFFER, input, output); - GGML_ASSERT(status); + //GGML_ASSERT(status); GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) | uint64_t remote_ptr; @@ -512,7 +512,7 @@ static size_t get_alignment(const std::shared_ptr<socket_t> & sock) { std::vector<uint8_t> input; std::vector<uint8_t> output; bool status = send_rpc_cmd(sock, GET_ALIGNMENT, input, output); - GGML_ASSERT(status); + //GGML_ASSERT(status); GGML_ASSERT(output.size() == sizeof(uint64_t)); // output serialization format: | alignment (8 bytes) | uint64_t alignment; @@ -530,7 +530,7 @@ static size_t get_max_size(const std::shared_ptr<socket_t> & sock) { std::vector<uint8_t> input; std::vector<uint8_t> output; bool status = send_rpc_cmd(sock, GET_MAX_SIZE, input, output); - GGML_ASSERT(status); + //GGML_ASSERT(status); GGML_ASSERT(output.size() == sizeof(uint64_t)); // output serialization format: | max_size (8 bytes) | uint64_t max_size; @@ -623,8 +623,8 @@ GGML_CALL static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t std::vector<uint8_t> output; auto sock = get_socket(rpc_ctx->endpoint); bool status = send_rpc_cmd(sock, GRAPH_COMPUTE, input, output); - GGML_ASSERT(status); - GGML_ASSERT(output.size() == 1); + //GGML_ASSERT(status); + //GGML_ASSERT(output.size() == 1); return (enum ggml_status)output[0]; } @@ -688,15 +688,41 @@ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const /* .alignment = */ alignment, /* .max_size = */ max_size }; - + rpc_server_map[endpoint] = "RPC[" + std::string(endpoint) + "]"; ggml_backend_buffer_type_t buft = new ggml_backend_buffer_type { /* .iface = */ ggml_backend_rpc_buffer_type_interface, /* .context = */ buft_ctx }; buft_map[endpoint] = buft; return buft; } +// backend registry +GGML_CALL static ggml_backend_t ggml_backend_reg_rpc_init(const char* params, void* user_data) { + ggml_backend_t backend = ggml_backend_rpc_init((const char*)user_data); + return backend; + + GGML_UNUSED(params); +} + + +extern "C" GGML_CALL int ggml_backend_rpc_reg_devices(); + +GGML_CALL int ggml_backend_rpc_reg_devices() { + int device_count = (int)rpc_server_map.size(); + for (auto& it : rpc_server_map) + { + // pass the map key itself as user_data: unlike a loop-local copy, it stays alive + // for as long as the (static) rpc_server_map does + const std::string & endpoint = it.first; + auto name = "RPC[" + endpoint + "]"; + ggml_backend_register(name.c_str(), ggml_backend_reg_rpc_init, ggml_backend_rpc_buffer_type(endpoint.c_str()), (void *)endpoint.c_str()); + } + return device_count; +}
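+ +// NOTE: each endpoint registered above shows up in the ggml device list under the name +// "RPC[<endpoint>]", which is what lets device-list options such as --override-tensor +// address a remote server; the endpoint string travels through user_data and is recovered +// in ggml_backend_reg_rpc_init() when the backend is created. GGML_CALL ggml_backend_t ggml_backend_rpc_init(const char * endpoint) { ggml_backend_rpc_context * ctx = new ggml_backend_rpc_context { /* .endpoint = */ endpoint, @@ -720,7 +746,7 @@ static void get_device_memory(const std::shared_ptr<socket_t> & sock, size_t * f std::vector<uint8_t> input; std::vector<uint8_t> output; bool status = send_rpc_cmd(sock, GET_DEVICE_MEMORY, input, output); - GGML_ASSERT(status); + //GGML_ASSERT(status); GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); // output serialization format: | free (8 bytes) | total (8 bytes) | uint64_t free_mem; diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index d8025a5a4..9dda8e65a 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -4924,8 +4924,8 @@ static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) { GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", __func__, cur_end + size_needed, ctx->mem_size); - 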
assert(false); - return NULL; + //assert(false); + //return NULL; } *obj_new = (struct ggml_object) { diff --git a/src/unicode.cpp b/src/unicode.cpp index cfffde0d9..a57456ea5 100644 --- a/src/unicode.cpp +++ b/src/unicode.cpp @@ -18,6 +18,7 @@ #include #include #include +#include size_t unicode_len_utf8(char src) { const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 }; @@ -25,7 +26,7 @@ size_t unicode_len_utf8(char src) { return lookup[highbits]; } -static std::string unicode_cpts_to_utf8(const std::vector & cps) { +static std::string unicode_cpts_to_utf8(const std::vector& cps) { std::string result; for (size_t i = 0; i < cps.size(); ++i) { result.append(unicode_cpt_to_utf8(cps[i])); @@ -33,7 +34,7 @@ static std::string unicode_cpts_to_utf8(const std::vector & cps) { return result; } -uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { +uint32_t unicode_cpt_from_utf8(const std::string& utf8, size_t& offset) { assert(offset < utf8.size()); if (!(utf8[offset + 0] & 0x80)) { auto result = utf8[offset + 0]; @@ -44,7 +45,7 @@ uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { throw std::invalid_argument("invalid character"); } if (!(utf8[offset + 0] & 0x20)) { - if (offset + 1 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80)) { + if (offset + 1 >= utf8.size() || !((utf8[offset + 1] & 0xc0) == 0x80)) { throw std::invalid_argument("invalid character"); } auto result = ((utf8[offset + 0] & 0x1f) << 6) | (utf8[offset + 1] & 0x3f); @@ -52,7 +53,7 @@ uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { return result; } if (!(utf8[offset + 0] & 0x10)) { - if (offset + 2 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80)) { + if (offset + 2 >= utf8.size() || !((utf8[offset + 1] & 0xc0) == 0x80) || !((utf8[offset + 2] & 0xc0) == 0x80)) { throw std::invalid_argument("invalid character"); } auto result = ((utf8[offset + 0] & 0x0f) << 12) | ((utf8[offset + 1] & 0x3f) << 6) | (utf8[offset + 2] & 0x3f); @@ -60,7 +61,7 @@ uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { return result; } if (!(utf8[offset + 0] & 0x08)) { - if (offset + 3 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! 
((utf8[offset + 2] & 0xc0) == 0x80) || !((utf8[offset + 3] & 0xc0) == 0x80)) { + if (offset + 3 >= utf8.size() || !((utf8[offset + 1] & 0xc0) == 0x80) || !((utf8[offset + 2] & 0xc0) == 0x80) || !((utf8[offset + 3] & 0xc0) == 0x80)) { throw std::invalid_argument("invalid character"); } auto result = ((utf8[offset + 0] & 0x07) << 18) | ((utf8[offset + 1] & 0x3f) << 12) | ((utf8[offset + 2] & 0x3f) << 6) | (utf8[offset + 3] & 0x3f); @@ -122,10 +123,10 @@ uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { static std::vector<codepoint_flags> unicode_cpt_flags_array() { std::vector<codepoint_flags> cpt_flags(MAX_CODEPOINTS, codepoint_flags::UNDEFINED); - assert (unicode_ranges_flags.front().first == 0); - assert (unicode_ranges_flags.back().first == MAX_CODEPOINTS); + assert(unicode_ranges_flags.front().first == 0); + assert(unicode_ranges_flags.back().first == MAX_CODEPOINTS); for (size_t i = 1; i < unicode_ranges_flags.size(); ++i) { - const auto range_ini = unicode_ranges_flags[i-1]; // codepoint_ini, flags + const auto range_ini = unicode_ranges_flags[i - 1]; // codepoint_ini, flags const auto range_end = unicode_ranges_flags[i]; // codepoint_end, flags for (uint32_t cpt = range_ini.first; cpt < range_end.first; ++cpt) { cpt_flags[cpt] = range_ini.second; @@ -144,7 +145,7 @@ static std::vector<codepoint_flags> unicode_cpt_flags_array() { cpt_flags[p.second].is_uppercase = true; } - for (auto &range : unicode_ranges_nfd) { // start, last, nfd + for (auto& range : unicode_ranges_nfd) { // start, last, nfd cpt_flags[range.nfd].is_nfd = true; } @@ -199,22 +200,55 @@ static std::unordered_map<std::string, uint8_t> unicode_utf8_to_byte_map() { return map; } -static inline std::wstring unicode_wstring_from_utf8(const std::string & s) { +static inline bool is_valid_utf8(const std::string& str) { + int remaining_bytes = 0; // bytes still expected in the current multi-byte character + for (unsigned char c : str) { + if (remaining_bytes == 0) { + if ((c & 0x80) == 0x00) continue; // 1-byte character + else if ((c & 0xE0) == 0xC0) remaining_bytes = 1; // 2 bytes + else if ((c & 0xF0) == 0xE0) remaining_bytes = 2; // 3 bytes + else if ((c & 0xF8) == 0xF0) remaining_bytes = 3; // 4 bytes + else return false; // invalid leading byte + } + else { + // check that the continuation byte is 10xxxxxx + if ((c & 0xC0) != 0x80) + { + return false; + } + remaining_bytes--; + } + } + return (remaining_bytes == 0); // make sure the last multi-byte character is complete } + +static inline std::wstring unicode_wstring_from_utf8(const std::string& s) { +#if defined(__clang__) + // disable C++17 deprecation warning for std::codecvt_utf8 +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + bool isvalid = is_valid_utf8(s); std::wstring_convert<std::codecvt_utf8<wchar_t>> conv; + +#if defined(__clang__) +# pragma clang diagnostic pop +#endif + return conv.from_bytes(s); } -static std::vector<std::string> unicode_byte_encoding_process(const std::vector<std::string> & bpe_words) { +static std::vector<std::string> unicode_byte_encoding_process(const std::vector<std::string>& bpe_words) { std::vector<std::string> bpe_encoded_words; - for (const auto & word : bpe_words) { + for (const auto& word : bpe_words) { std::string text_utf; - auto utf_word = unicode_cpts_from_utf8(word); + auto utf_word = unicode_cpts_from_utf8(word); for (size_t i = 0; i < utf_word.size(); ++i) { text_utf += unicode_cpt_to_utf8(utf_word[i]); } std::string encoded_token; - for (char & c : text_utf) { + for (char& c : text_utf) { encoded_token += unicode_byte_to_utf8(c); } bpe_encoded_words.emplace_back(encoded_token); @@ -223,7 +257,7 @@ static std::vector<std::string> unicode_byte_encoding_process(const std::vector< }
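// NOTE: the custom splitters below hand-implement the GPT-2 and LLaMA 3 pretokenizer regexes // on top of the codepoint flags, because std::regex cannot evaluate \p{L}/\p{N} classes; // unicode_regex_split() tries them first and only falls back to std::regex / std::wregex. // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| 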
?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+ -static std::vector unicode_regex_split_custom_gpt2(const std::string & text, const std::vector & offsets) { +static std::vector unicode_regex_split_custom_gpt2(const std::string& text, const std::vector& offsets) { std::vector bpe_offsets; // store the offset of each word bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size @@ -237,16 +271,16 @@ static std::vector unicode_regex_split_custom_gpt2(const std::string & t start = offset_end; static const uint32_t OUT_OF_RANGE = 0xFFFFFFFF; - auto _get_cpt = [&] (const size_t pos) -> uint32_t { + auto _get_cpt = [&](const size_t pos) -> uint32_t { return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE; }; - auto _get_flags = [&] (const size_t pos) -> codepoint_flags { + auto _get_flags = [&](const size_t pos) -> codepoint_flags { return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : codepoint_flags{}; }; size_t _prev_end = offset_ini; - auto _add_token = [&] (const size_t end) -> size_t { + auto _add_token = [&](const size_t end) -> size_t { assert(_prev_end <= end && end <= offset_end); size_t len = end - _prev_end; if (len > 0) { @@ -262,29 +296,29 @@ static std::vector unicode_regex_split_custom_gpt2(const std::string & t return len; }; - for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) { + for (size_t pos = offset_ini; pos < offset_end; /*pos++*/) { const uint32_t cpt = _get_cpt(pos); const auto flags = _get_flags(pos); // regex: 's|'t|'re|'ve|'m|'ll|'d - if (cpt == '\'' && pos+1 < offset_end) { - uint32_t cpt_next = _get_cpt(pos+1); + if (cpt == '\'' && pos + 1 < offset_end) { + uint32_t cpt_next = _get_cpt(pos + 1); if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') { - pos += _add_token(pos+2); + pos += _add_token(pos + 2); continue; } - if (pos+2 < offset_end) { - uint32_t cpt_next_next = _get_cpt(pos+2); + if (pos + 2 < offset_end) { + uint32_t cpt_next_next = _get_cpt(pos + 2); if ((cpt_next == 'r' && cpt_next_next == 'e') || (cpt_next == 'v' && cpt_next_next == 'e') || (cpt_next == 'l' && cpt_next_next == 'l')) { - pos += _add_token(pos+3); + pos += _add_token(pos + 3); continue; } } } - auto flags2 = (cpt == ' ' ? _get_flags(pos+1) : flags); + auto flags2 = (cpt == ' ' ? 
_get_flags(pos + 1) : flags); // regex: ?\p{L}+ if (flags2.is_letter) { pos += (cpt == ' '); @@ -314,12 +348,12 @@ static std::vector unicode_regex_split_custom_gpt2(const std::string & t } size_t num_whitespaces = 0; - while (_get_flags(pos+num_whitespaces).is_whitespace) { + while (_get_flags(pos + num_whitespaces).is_whitespace) { num_whitespaces++; } // regex: \s+(?!\S) - if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != OUT_OF_RANGE) { + if (num_whitespaces > 1 && _get_cpt(pos + num_whitespaces) != OUT_OF_RANGE) { pos += num_whitespaces - 1; _add_token(pos); continue; @@ -341,7 +375,7 @@ static std::vector unicode_regex_split_custom_gpt2(const std::string & t } // LLAMA3 system regex: "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+" -static std::vector unicode_regex_split_custom_llama3(const std::string & text, const std::vector & offsets) { +static std::vector unicode_regex_split_custom_llama3(const std::string& text, const std::vector& offsets) { std::vector bpe_offsets; // store the offset of each word bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size @@ -355,16 +389,16 @@ static std::vector unicode_regex_split_custom_llama3(const std::string & start = offset_end; static const uint32_t OUT_OF_RANGE = 0xFFFFFFFF; - auto _get_cpt = [&] (const size_t pos) -> uint32_t { + auto _get_cpt = [&](const size_t pos) -> uint32_t { return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE; }; - auto _get_flags = [&] (const size_t pos) -> codepoint_flags { + auto _get_flags = [&](const size_t pos) -> codepoint_flags { return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : codepoint_flags{}; }; size_t _prev_end = offset_ini; - auto _add_token = [&] (const size_t end) -> size_t { + auto _add_token = [&](const size_t end) -> size_t { assert(_prev_end <= end && end <= offset_end); size_t len = end - _prev_end; if (len > 0) { @@ -380,23 +414,23 @@ static std::vector unicode_regex_split_custom_llama3(const std::string & return len; }; - for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) { + for (size_t pos = offset_ini; pos < offset_end; /*pos++*/) { const uint32_t cpt = _get_cpt(pos); const auto flags = _get_flags(pos); // regex: (?i:'s|'t|'re|'ve|'m|'ll|'d) // case insensitive - if (cpt == '\'' && pos+1 < offset_end) { - uint32_t cpt_next = unicode_tolower(_get_cpt(pos+1)); + if (cpt == '\'' && pos + 1 < offset_end) { + uint32_t cpt_next = unicode_tolower(_get_cpt(pos + 1)); if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') { - pos += _add_token(pos+2); + pos += _add_token(pos + 2); continue; } - if (pos+2 < offset_end) { - uint32_t cpt_next_next = unicode_tolower(_get_cpt(pos+2)); + if (pos + 2 < offset_end) { + uint32_t cpt_next_next = unicode_tolower(_get_cpt(pos + 2)); if ((cpt_next == 'r' && cpt_next_next == 'e') || (cpt_next == 'v' && cpt_next_next == 'e') || (cpt_next == 'l' && cpt_next_next == 'l')) { - pos += _add_token(pos+3); + pos += _add_token(pos + 3); continue; } } @@ -404,7 +438,7 @@ static std::vector unicode_regex_split_custom_llama3(const std::string & // regex: [^\r\n\p{L}\p{N}]?\p{L}+ if (!(cpt == '\r' || cpt == '\n' || flags.is_number)) { - if (flags.is_letter || _get_flags(pos+1).is_letter) { // one or more letters + if (flags.is_letter || _get_flags(pos + 1).is_letter) { // one or more letters pos++; while (_get_flags(pos).is_letter) { pos++; @@ -418,7 +452,7 @@ static std::vector 
unicode_regex_split_custom_llama3(const std::string & if (flags.is_number) { size_t ini = pos; while (_get_flags(pos).is_number) { - if (++pos - ini >= 3 ) { + if (++pos - ini >= 3) { _add_token(pos); ini = pos; } @@ -428,7 +462,7 @@ static std::vector unicode_regex_split_custom_llama3(const std::string & } // regex: ?[^\s\p{L}\p{N}]+[\r\n]* - auto flags2 = (cpt == ' ' ? _get_flags(pos+1) : flags); + auto flags2 = (cpt == ' ' ? _get_flags(pos + 1) : flags); if (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags.as_uint()) { pos += (cpt == ' '); while (!(flags2.is_whitespace | flags2.is_letter | flags2.is_number) && flags2.as_uint()) { @@ -444,8 +478,8 @@ static std::vector unicode_regex_split_custom_llama3(const std::string & size_t num_whitespaces = 0; size_t last_end_r_or_n = 0; - while (_get_flags(pos+num_whitespaces).is_whitespace) { - uint32_t cpt2 = _get_cpt(pos+num_whitespaces); + while (_get_flags(pos + num_whitespaces).is_whitespace) { + uint32_t cpt2 = _get_cpt(pos + num_whitespaces); if (cpt2 == '\r' || cpt2 == '\n') { last_end_r_or_n = pos + num_whitespaces + 1; } @@ -460,7 +494,7 @@ static std::vector unicode_regex_split_custom_llama3(const std::string & } // regex: \s+(?!\S) - if (num_whitespaces > 1 && _get_cpt(pos+num_whitespaces) != OUT_OF_RANGE) { + if (num_whitespaces > 1 && _get_cpt(pos + num_whitespaces) != OUT_OF_RANGE) { pos += num_whitespaces - 1; _add_token(pos); continue; @@ -482,7 +516,7 @@ static std::vector unicode_regex_split_custom_llama3(const std::string & } // use std::wregex to split the text -static std::vector unicode_regex_split_stl(const std::wstring & wtext, const std::wstring & regex_expr, const std::vector & offsets) { +static std::vector unicode_regex_split_stl(const std::wstring& wtext, const std::wstring& regex_expr, const std::vector& offsets) { std::wregex expr(regex_expr); std::vector bpe_offsets; // store the offset of each word bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size @@ -502,7 +536,7 @@ static std::vector unicode_regex_split_stl(const std::wstring & wtext, c ++it; } - if (start_idx < (int64_t) offset) { + if (start_idx < (int64_t)offset) { bpe_offsets.emplace_back(offset - start_idx); } start += offset; @@ -512,7 +546,7 @@ static std::vector unicode_regex_split_stl(const std::wstring & wtext, c } // use std::regex to split the text -static std::vector unicode_regex_split_stl(const std::string & text, const std::string & regex_expr, const std::vector & offsets) { +static std::vector unicode_regex_split_stl(const std::string& text, const std::string& regex_expr, const std::vector& offsets) { std::regex expr(regex_expr); std::vector bpe_offsets; // store the offset of each word bpe_offsets.reserve(offsets.size()); // Reserve memory for the approximate size @@ -532,7 +566,7 @@ static std::vector unicode_regex_split_stl(const std::string & text, con ++it; } - if (start_idx < (int64_t) offset) { + if (start_idx < (int64_t)offset) { bpe_offsets.emplace_back(offset - start_idx); } start += offset; @@ -541,14 +575,15 @@ static std::vector unicode_regex_split_stl(const std::string & text, con return bpe_offsets; } -static std::vector unicode_regex_split_custom(const std::string & text, const std::string & regex_expr, const std::vector & offsets) { +static std::vector unicode_regex_split_custom(const std::string& text, const std::string& regex_expr, const std::vector& offsets) { std::vector bpe_offsets; if (regex_expr == "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| 
?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)") { bpe_offsets = unicode_regex_split_custom_gpt2(text, offsets); - } else if ( - regex_expr == "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" || - regex_expr == "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+") { + } + else if ( + regex_expr == "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" || + regex_expr == "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+") { bpe_offsets = unicode_regex_split_custom_llama3(text, offsets); } @@ -589,8 +624,8 @@ std::string unicode_cpt_to_utf8(uint32_t cp) { throw std::invalid_argument("invalid codepoint"); } -std::vector unicode_cpts_normalize_nfd(const std::vector & cpts) { - auto comp = [] (const uint32_t cpt, const range_nfd & range) { +std::vector unicode_cpts_normalize_nfd(const std::vector& cpts) { + auto comp = [](const uint32_t cpt, const range_nfd& range) { return cpt < range.first; }; std::vector result(cpts.size()); @@ -602,7 +637,7 @@ std::vector unicode_cpts_normalize_nfd(const std::vector & c return result; } -std::vector unicode_cpts_from_utf8(const std::string & utf8) { +std::vector unicode_cpts_from_utf8(const std::string& utf8) { std::vector result; result.reserve(utf8.size()); size_t offset = 0; @@ -618,7 +653,7 @@ codepoint_flags unicode_cpt_flags(const uint32_t cp) { return cp < cpt_flags.size() ? cpt_flags[cp] : undef; } -codepoint_flags unicode_cpt_flags(const std::string & utf8) { +codepoint_flags unicode_cpt_flags(const std::string& utf8) { static const codepoint_flags undef(codepoint_flags::UNDEFINED); if (utf8.empty()) { return undef; // undefined @@ -632,7 +667,7 @@ std::string unicode_byte_to_utf8(uint8_t byte) { return map.at(byte); } -uint8_t unicode_utf8_to_byte(const std::string & utf8) { +uint8_t unicode_utf8_to_byte(const std::string& utf8) { static std::unordered_map map = unicode_utf8_to_byte_map(); return map.at(utf8); } @@ -642,7 +677,7 @@ uint32_t unicode_tolower(uint32_t cp) { return it == unicode_map_lowercase.end() ? cp : it->second; } -std::vector unicode_regex_split(const std::string & text, const std::vector & regex_exprs) { +std::vector unicode_regex_split(const std::string& text, const std::vector& regex_exprs) { // unicode categories static const std::map k_ucat_enum = { { "\\p{N}", codepoint_flags::NUMBER }, @@ -671,9 +706,9 @@ std::vector unicode_regex_split(const std::string & text, const std // compute collapsed codepoints only if needed by at least one regex bool need_collapse = false; - for (auto & regex_expr : regex_exprs) { + for (auto& regex_expr : regex_exprs) { // search for unicode categories - for (const auto & ucat : k_ucat_enum) { + for (const auto& ucat : k_ucat_enum) { if (std::string::npos != regex_expr.find(ucat.first)) { need_collapse = true; break; @@ -702,18 +737,20 @@ std::vector unicode_regex_split(const std::string & text, const std if (flags.is_whitespace) { //NOTE: C++ std::regex \s does not mach 0x85, Rust and Python regex does. 
//text_collapsed[i] = (char) 0x85; // as whitespace fallback - text_collapsed[i] = (char) 0x0B; // as whitespace fallback - } else if (k_ucat_cpt.find(flags.category_flag()) != k_ucat_cpt.end()) { + text_collapsed[i] = (char)0x0B; // as whitespace fallback + } + else if (k_ucat_cpt.find(flags.category_flag()) != k_ucat_cpt.end()) { text_collapsed[i] = k_ucat_cpt.at(flags.category_flag()); - } else { - text_collapsed[i] = (char) 0xD0; // fallback + } + else { + text_collapsed[i] = (char)0xD0; // fallback } } } std::vector bpe_offsets = { cpts.size() }; - for (auto & regex_expr : regex_exprs) { + for (auto& regex_expr : regex_exprs) { // first, see if we have an efficient custom regex implementation auto tmp = unicode_regex_split_custom(text, regex_expr, bpe_offsets); @@ -727,7 +764,7 @@ std::vector unicode_regex_split(const std::string & text, const std // if a unicode category is used in the regex, we use the collapsed text and replace the unicode category // with the corresponding collapsed representation bool use_collapsed = false; - for (auto & ucat : k_ucat_enum) { + for (auto& ucat : k_ucat_enum) { if (std::string::npos != regex_expr.find(ucat.first)) { use_collapsed = true; break; @@ -786,7 +823,8 @@ std::vector unicode_regex_split(const std::string & text, const std //printf("text_collapsed: %s\n", text_collapsed.c_str()); //printf("regex_expr_collapsed: %s\n", regex_expr_collapsed.c_str()); bpe_offsets = unicode_regex_split_stl(text_collapsed, regex_expr_collapsed, bpe_offsets); - } else { + } + else { // no unicode category used, we can use std::wregex directly const std::wstring wregex_expr = unicode_wstring_from_utf8(regex_expr); @@ -802,7 +840,8 @@ std::vector unicode_regex_split(const std::string & text, const std //printf("regex_expr: %s\n", regex_expr.c_str()); bpe_offsets = unicode_regex_split_stl(wtext, wregex_expr, bpe_offsets); } - } catch (std::regex_error & e) { + } + catch (std::regex_error& e) { fprintf(stderr, "Failed to process regex: '%s'\n", regex_expr.c_str()); fprintf(stderr, "Regex error: %s\n", e.what()); throw std::runtime_error("Failed to process regex"); @@ -813,7 +852,7 @@ std::vector unicode_regex_split(const std::string & text, const std bpe_words.reserve(bpe_offsets.size()); // reserve memory for the approximate size size_t start = 0; - for (size_t & offset : bpe_offsets) { + for (size_t& offset : bpe_offsets) { bpe_words.emplace_back(); for (size_t i = start; i < start + offset; ++i) { bpe_words.back() += unicode_cpt_to_utf8(cpts[i]); From 490fb899bbd6d53347a4dc65b46b44778642c134 Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Mon, 19 Aug 2024 10:10:21 +0300 Subject: [PATCH 02/20] rpc : prevent crashes on invalid input (#9040) Add more checks which prevent RPC server from crashing if invalid input is received from client # Conflicts: # ggml/src/ggml-rpc.cpp --- ggml/src/ggml-rpc.cpp | 81 +++++++++++++++++++++++++------------------ 1 file changed, 47 insertions(+), 34 deletions(-) diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 60983920c..bbd0b374c 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -82,17 +82,18 @@ static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of static std::unordered_map rpc_server_map; // RPC commands enum rpc_cmd { - ALLOC_BUFFER = 0, - GET_ALIGNMENT, - GET_MAX_SIZE, - BUFFER_GET_BASE, - FREE_BUFFER, - BUFFER_CLEAR, - SET_TENSOR, - GET_TENSOR, - COPY_TENSOR, - GRAPH_COMPUTE, - GET_DEVICE_MEMORY, + RPC_CMD_ALLOC_BUFFER = 0, + RPC_CMD_GET_ALIGNMENT, + 
RPC_CMD_GET_MAX_SIZE, + RPC_CMD_BUFFER_GET_BASE, + RPC_CMD_FREE_BUFFER, + RPC_CMD_BUFFER_CLEAR, + RPC_CMD_SET_TENSOR, + RPC_CMD_GET_TENSOR, + RPC_CMD_COPY_TENSOR, + RPC_CMD_GRAPH_COMPUTE, + RPC_CMD_GET_DEVICE_MEMORY, + RPC_CMD_COUNT, }; // RPC data structures @@ -330,7 +331,7 @@ GGML_CALL static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t uint64_t remote_ptr = ctx->remote_ptr; memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); std::vector output; - bool status = send_rpc_cmd(ctx->sock, FREE_BUFFER, input, output); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, input, output); //GGML_ASSERT(status); //GGML_ASSERT(output.empty()); delete ctx; @@ -346,7 +347,7 @@ GGML_CALL static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t b uint64_t remote_ptr = ctx->remote_ptr; memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); std::vector output; - bool status = send_rpc_cmd(ctx->sock, BUFFER_GET_BASE, input, output); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, input, output); //GGML_ASSERT(status); //GGML_ASSERT(output.size() == sizeof(uint64_t)); // output serialization format: | base_ptr (8 bytes) | @@ -405,7 +406,7 @@ GGML_CALL static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t b memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size); std::vector output; - bool status = send_rpc_cmd(ctx->sock, SET_TENSOR, input, output); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input, output); //GGML_ASSERT(status); } @@ -419,7 +420,7 @@ GGML_CALL static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t b memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), &size, sizeof(size)); std::vector output; - bool status = send_rpc_cmd(ctx->sock, GET_TENSOR, input, output); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, input, output); //GGML_ASSERT(status); //GGML_ASSERT(output.size() == size); // output serialization format: | data (size bytes) | @@ -444,7 +445,7 @@ GGML_CALL static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t b memcpy(input.data(), &rpc_src, sizeof(rpc_src)); memcpy(input.data() + sizeof(rpc_src), &rpc_dst, sizeof(rpc_dst)); std::vector output; - bool status = send_rpc_cmd(ctx->sock, COPY_TENSOR, input, output); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, input, output); //GGML_ASSERT(status); // output serialization format: | result (1 byte) | //GGML_ASSERT(output.size() == 1); @@ -459,7 +460,7 @@ GGML_CALL static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer memcpy(input.data(), &ctx->remote_ptr, sizeof(ctx->remote_ptr)); memcpy(input.data() + sizeof(ctx->remote_ptr), &value, sizeof(value)); std::vector output; - bool status = send_rpc_cmd(ctx->sock, BUFFER_CLEAR, input, output); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, input, output); //GGML_ASSERT(status); } @@ -488,7 +489,7 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer memcpy(input.data(), &size, sizeof(size)); std::vector output; auto sock = get_socket(buft_ctx->endpoint); - bool status = send_rpc_cmd(sock, ALLOC_BUFFER, input, output); + bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, input, output); //GGML_ASSERT(status); GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); // output serialization format: | remote_ptr (8 bytes) | remote_size 
(8 bytes) | @@ -511,7 +512,7 @@ static size_t get_alignment(const std::shared_ptr & sock) { // input serialization format: | 0 bytes | std::vector input; std::vector output; - bool status = send_rpc_cmd(sock, GET_ALIGNMENT, input, output); + bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, input, output); //GGML_ASSERT(status); GGML_ASSERT(output.size() == sizeof(uint64_t)); // output serialization format: | alignment (8 bytes) | @@ -529,7 +530,7 @@ static size_t get_max_size(const std::shared_ptr & sock) { // input serialization format: | 0 bytes | std::vector input; std::vector output; - bool status = send_rpc_cmd(sock, GET_MAX_SIZE, input, output); + bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, input, output); //GGML_ASSERT(status); GGML_ASSERT(output.size() == sizeof(uint64_t)); // output serialization format: | max_size (8 bytes) | @@ -622,7 +623,7 @@ GGML_CALL static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t serialize_graph(cgraph, input); std::vector output; auto sock = get_socket(rpc_ctx->endpoint); - bool status = send_rpc_cmd(sock, GRAPH_COMPUTE, input, output); + bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input, output); //GGML_ASSERT(status); //GGML_ASSERT(output.size() == 1); return (enum ggml_status)output[0]; @@ -745,7 +746,7 @@ static void get_device_memory(const std::shared_ptr & sock, size_t * f // input serialization format: | 0 bytes | std::vector input; std::vector output; - bool status = send_rpc_cmd(sock, GET_DEVICE_MEMORY, input, output); + bool status = send_rpc_cmd(sock, RPC_CMD_GET_DEVICE_MEMORY, input, output); //GGML_ASSERT(status); GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); // output serialization format: | free (8 bytes) | total (8 bytes) | @@ -1124,59 +1125,69 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre if (!recv_data(sockfd, &cmd, 1)) { break; } + if (cmd >= RPC_CMD_COUNT) { + // fail fast if the command is invalid + fprintf(stderr, "Unknown command: %d\n", cmd); + break; + } std::vector input; std::vector output; uint64_t input_size; if (!recv_data(sockfd, &input_size, sizeof(input_size))) { break; } - input.resize(input_size); + try { + input.resize(input_size); + } catch (const std::bad_alloc & e) { + fprintf(stderr, "Failed to allocate input buffer of size %" PRIu64 "\n", input_size); + break; + } if (!recv_data(sockfd, input.data(), input_size)) { break; } bool ok = true; switch (cmd) { - case ALLOC_BUFFER: { + case RPC_CMD_ALLOC_BUFFER: { ok = server.alloc_buffer(input, output); break; } - case GET_ALIGNMENT: { + case RPC_CMD_GET_ALIGNMENT: { server.get_alignment(output); break; } - case GET_MAX_SIZE: { + case RPC_CMD_GET_MAX_SIZE: { server.get_max_size(output); break; } - case BUFFER_GET_BASE: { + case RPC_CMD_BUFFER_GET_BASE: { ok = server.buffer_get_base(input, output); break; } - case FREE_BUFFER: { + case RPC_CMD_FREE_BUFFER: { ok = server.free_buffer(input); break; } - case BUFFER_CLEAR: { + case RPC_CMD_BUFFER_CLEAR: { ok = server.buffer_clear(input); break; } - case SET_TENSOR: { + case RPC_CMD_SET_TENSOR: { ok = server.set_tensor(input); break; } - case GET_TENSOR: { + case RPC_CMD_GET_TENSOR: { ok = server.get_tensor(input, output); break; } - case COPY_TENSOR: { + case RPC_CMD_COPY_TENSOR: { ok = server.copy_tensor(input, output); break; } - case GRAPH_COMPUTE: { + case RPC_CMD_GRAPH_COMPUTE: { ok = server.graph_compute(input, output); break; } - case GET_DEVICE_MEMORY: { + case RPC_CMD_GET_DEVICE_MEMORY: { // output serialization format: | 
free (8 bytes) | total (8 bytes) | output.resize(2*sizeof(uint64_t), 0); memcpy(output.data(), &free_mem, sizeof(free_mem)); @@ -1229,8 +1240,10 @@ void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free return; } printf("Accepted client connection, free_mem=%zu, total_mem=%zu\n", free_mem, total_mem); + fflush(stdout); rpc_serve_client(backend, client_socket->fd, free_mem, total_mem); printf("Client connection closed\n"); + fflush(stdout); } #ifdef _WIN32 WSACleanup(); From 99a8f0ec42bf1fd14537e4103128bc3a1bd7046f Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Mon, 19 Aug 2024 10:11:45 +0300 Subject: [PATCH 03/20] rpc : print error message when failed to connect endpoint (#9042) --- ggml/src/ggml-rpc.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index bbd0b374c..55c33972d 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -637,7 +637,7 @@ GGML_CALL static bool ggml_backend_rpc_supports_op(ggml_backend_t backend, const } GGML_CALL static bool ggml_backend_rpc_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) { - if (buft->iface.get_name != ggml_backend_rpc_buffer_type_name) { + if (!buft || buft->iface.get_name != ggml_backend_rpc_buffer_type_name) { return false; } ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; @@ -679,6 +679,7 @@ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const } auto sock = get_socket(endpoint); if (sock == nullptr) { + fprintf(stderr, "Failed to connect to %s\n", endpoint); return nullptr; } size_t alignment = get_alignment(sock); From beccddbad4cf440ecae9d3749968fb2d357b40cd Mon Sep 17 00:00:00 2001 From: firecoperana Date: Wed, 7 May 2025 19:50:46 -0500 Subject: [PATCH 04/20] Fix RPC error --- ggml/src/ggml.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 9dda8e65a..a09512dc1 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -4923,9 +4923,9 @@ static struct ggml_object * ggml_new_object(struct ggml_context * ctx, enum ggml if (cur_end + size_needed + GGML_OBJECT_SIZE > ctx->mem_size) { GGML_PRINT("%s: not enough space in the context's memory pool (needed %zu, available %zu)\n", - __func__, cur_end + size_needed, ctx->mem_size); - //assert(false); - //return NULL; + __func__, cur_end + size_needed + GGML_OBJECT_SIZE, ctx->mem_size); + assert(false); + return NULL; } *obj_new = (struct ggml_object) { From 9d6ee73bf2ab2416878b8906cb72d26349df23dc Mon Sep 17 00:00:00 2001 From: firecoperana Date: Wed, 7 May 2025 20:04:23 -0500 Subject: [PATCH 05/20] Add vulkan, sycl to rpc backend --- examples/rpc/rpc-server.cpp | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 6342e6488..ef6ac9f77 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -5,6 +5,12 @@ #ifdef GGML_USE_METAL #include "ggml-metal.h" #endif +#ifdef GGML_USE_VULKAN +#include "ggml-vulkan.h" +#endif +#ifdef GGML_USE_SYCL +#include "ggml-sycl.h" +#endif #include "ggml-rpc.h" #ifdef _WIN32 @@ -79,6 +85,18 @@ static ggml_backend_t create_backend() { if (!backend) { fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); } +#elif GGML_USE_VULKAN + fprintf(stderr, "%s: using Vulkan backend\n", __func__); + backend = ggml_backend_vk_init(0); // init device 0 + if (!backend) { + fprintf(stderr, "%s: 
ggml_backend_vulkan_init() failed\n", __func__); + } +#elif GGML_USE_SYCL + fprintf(stderr, "%s: using SYCL backend\n", __func__); + backend = ggml_backend_sycl_init(0); // init device 0 + if (!backend) { + fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__); + } #endif // if there aren't GPU Backends fallback to CPU backend @@ -92,6 +110,10 @@ static ggml_backend_t create_backend() { static void get_backend_memory(size_t * free_mem, size_t * total_mem) { #ifdef GGML_USE_CUDA ggml_backend_cuda_get_device_memory(0, free_mem, total_mem); +#elif GGML_USE_VULKAN + ggml_backend_vk_get_device_memory(0, free_mem, total_mem); +#elif GGML_USE_SYCL + ggml_backend_sycl_get_device_memory(0, free_mem, total_mem); #else #ifdef _WIN32 MEMORYSTATUSEX status; From 18cb9e8fcd395f310b57f127440ea779161b4f5e Mon Sep 17 00:00:00 2001 From: firecoperana Date: Sun, 4 May 2025 19:56:15 -0500 Subject: [PATCH 06/20] add thread in rpc cpu backend --- examples/rpc/rpc-server.cpp | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index ef6ac9f77..34138a9b8 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -14,23 +14,28 @@ #include "ggml-rpc.h" #ifdef _WIN32 +# define NOMINMAX # include #else # include #endif #include #include +#include +#include struct rpc_server_params { std::string host = "127.0.0.1"; int port = 50052; size_t backend_mem = 0; + int n_threads = std::max(1U, std::thread::hardware_concurrency() / 2); }; static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) { fprintf(stderr, "Usage: %s [options]\n\n", argv[0]); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -t, --threads number of threads for the CPU backend (default: %d)\n", params.n_threads); fprintf(stderr, " -H HOST, --host HOST host to bind to (default: %s)\n", params.host.c_str()); fprintf(stderr, " -p PORT, --port PORT port to bind to (default: %d)\n", params.port); fprintf(stderr, " -m MEM, --mem MEM backend memory size (in MB)\n"); @@ -46,6 +51,16 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & return false; } params.host = argv[i]; + } + else if (arg == "-t" || arg == "--threads") { + if (++i >= argc) { + return false; + } + params.n_threads = std::stoi(argv[i]); + if (params.n_threads <= 0) { + fprintf(stderr, "error: invalid number of threads: %d\n", params.n_threads); + return false; + } } else if (arg == "-p" || arg == "--port") { if (++i >= argc) { return false; @@ -71,7 +86,7 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & return true; } -static ggml_backend_t create_backend() { +static ggml_backend_t create_backend(const rpc_server_params& params) { ggml_backend_t backend = NULL; #ifdef GGML_USE_CUDA fprintf(stderr, "%s: using CUDA backend\n", __func__); @@ -103,6 +118,7 @@ static ggml_backend_t create_backend() { if (!backend) { fprintf(stderr, "%s: using CPU backend\n", __func__); backend = ggml_backend_cpu_init(); + ggml_backend_cpu_set_n_threads(backend, params.n_threads); } return backend; } @@ -147,7 +163,7 @@ int main(int argc, char * argv[]) { fprintf(stderr, "\n"); } - ggml_backend_t backend = create_backend(); + ggml_backend_t backend = create_backend(params); if (!backend) { fprintf(stderr, "Failed to create backend\n"); return 1; From bfe126185357b4ac60aafcf60b286a412d30f9d0 Mon Sep 17 00:00:00 2001 From: firecoperana Date: Sun, 
4 May 2025 21:08:15 -0500 Subject: [PATCH 07/20] add cache folder and other improvement in rpc --- examples/rpc/CMakeLists.txt | 6 +- examples/rpc/rpc-server.cpp | 168 +++++- ggml/include/ggml-rpc.h | 7 +- ggml/src/ggml-rpc.cpp | 1026 +++++++++++++++++++++++------------ 4 files changed, 832 insertions(+), 375 deletions(-) diff --git a/examples/rpc/CMakeLists.txt b/examples/rpc/CMakeLists.txt index ae48fb98d..41b22863e 100644 --- a/examples/rpc/CMakeLists.txt +++ b/examples/rpc/CMakeLists.txt @@ -1,2 +1,4 @@ -add_executable(rpc-server rpc-server.cpp) -target_link_libraries(rpc-server PRIVATE ggml llama) +set(TARGET rpc-server) +add_executable(${TARGET} rpc-server.cpp) +target_link_libraries(${TARGET} PRIVATE ggml) +target_compile_features(${TARGET} PRIVATE cxx_std_17) \ No newline at end of file diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 34138a9b8..77901ad13 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -14,35 +14,153 @@ #include "ggml-rpc.h" #ifdef _WIN32 +# define DIRECTORY_SEPARATOR '\\' # define NOMINMAX # include #else +# define DIRECTORY_SEPARATOR '/' # include #endif #include #include #include #include +#include +#include +#include + +namespace fs = std::filesystem; + +// NOTE: this is copied from common.cpp to avoid linking with libcommon +// returns true if successful, false otherwise +static bool fs_create_directory_with_parents(const std::string& path) { +#ifdef _WIN32 + std::wstring_convert> converter; + std::wstring wpath = converter.from_bytes(path); + + // if the path already exists, check whether it's a directory + const DWORD attributes = GetFileAttributesW(wpath.c_str()); + if ((attributes != INVALID_FILE_ATTRIBUTES) && (attributes & FILE_ATTRIBUTE_DIRECTORY)) { + return true; + } + + size_t pos_slash = 0; + + // process path from front to back, procedurally creating directories + while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) { + const std::wstring subpath = wpath.substr(0, pos_slash); + const wchar_t* test = subpath.c_str(); + + const bool success = CreateDirectoryW(test, NULL); + if (!success) { + const DWORD error = GetLastError(); + + // if the path already exists, ensure that it's a directory + if (error == ERROR_ALREADY_EXISTS) { + const DWORD attributes = GetFileAttributesW(subpath.c_str()); + if (attributes == INVALID_FILE_ATTRIBUTES || !(attributes & FILE_ATTRIBUTE_DIRECTORY)) { + return false; + } + } + else { + return false; + } + } + + pos_slash += 1; + } + + return true; +#else + // if the path already exists, check whether it's a directory + struct stat info; + if (stat(path.c_str(), &info) == 0) { + return S_ISDIR(info.st_mode); + } + + size_t pos_slash = 1; // skip leading slashes for directory creation + + // process path from front to back, procedurally creating directories + while ((pos_slash = path.find('/', pos_slash)) != std::string::npos) { + const std::string subpath = path.substr(0, pos_slash); + struct stat info; + + // if the path already exists, ensure that it's a directory + if (stat(subpath.c_str(), &info) == 0) { + if (!S_ISDIR(info.st_mode)) { + return false; + } + } + else { + // create parent directories + const int ret = mkdir(subpath.c_str(), 0755); + if (ret != 0) { + return false; + } + } + + pos_slash += 1; + } + + return true; +#endif // _WIN32 +} + +// NOTE: this is copied from common.cpp to avoid linking with libcommon +static std::string fs_get_cache_directory() { + std::string cache_directory = ""; + auto ensure_trailing_slash = 
[](std::string p) { + // Make sure to add trailing slash + if (p.back() != DIRECTORY_SEPARATOR) { + p += DIRECTORY_SEPARATOR; + } + return p; + }; + if (getenv("LLAMA_CACHE")) { + cache_directory = std::getenv("LLAMA_CACHE"); + } + else { +#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX) + if (std::getenv("XDG_CACHE_HOME")) { + cache_directory = std::getenv("XDG_CACHE_HOME"); + } + else { + cache_directory = std::getenv("HOME") + std::string("/.cache/"); + } +#elif defined(__APPLE__) + cache_directory = std::getenv("HOME") + std::string("/Library/Caches/"); +#elif defined(_WIN32) + cache_directory = std::getenv("LOCALAPPDATA"); +#else +# error Unknown architecture +#endif + cache_directory = ensure_trailing_slash(cache_directory); + cache_directory += "llama.cpp"; + } + return ensure_trailing_slash(cache_directory); +} struct rpc_server_params { std::string host = "127.0.0.1"; int port = 50052; size_t backend_mem = 0; + bool use_cache = false; int n_threads = std::max(1U, std::thread::hardware_concurrency() / 2); }; -static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) { +static void print_usage(int /*argc*/, char** argv, rpc_server_params params) { fprintf(stderr, "Usage: %s [options]\n\n", argv[0]); fprintf(stderr, "options:\n"); - fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -h, --help show this help message and exit\n"); fprintf(stderr, " -t, --threads number of threads for the CPU backend (default: %d)\n", params.n_threads); - fprintf(stderr, " -H HOST, --host HOST host to bind to (default: %s)\n", params.host.c_str()); - fprintf(stderr, " -p PORT, --port PORT port to bind to (default: %d)\n", params.port); - fprintf(stderr, " -m MEM, --mem MEM backend memory size (in MB)\n"); + fprintf(stderr, " -H HOST, --host HOST host to bind to (default: %s)\n", params.host.c_str()); + fprintf(stderr, " -p PORT, --port PORT port to bind to (default: %d)\n", params.port); + fprintf(stderr, " -m MEM, --mem MEM backend memory size (in MB)\n"); + fprintf(stderr, " -c, --cache enable local file cache\n"); fprintf(stderr, "\n"); } -static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & params) { +static bool rpc_server_params_parse(int argc, char** argv, rpc_server_params& params) { std::string arg; for (int i = 1; i < argc; i++) { arg = argv[i]; @@ -61,7 +179,8 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & fprintf(stderr, "error: invalid number of threads: %d\n", params.n_threads); return false; } - } else if (arg == "-p" || arg == "--port") { + } + else if (arg == "-p" || arg == "--port") { if (++i >= argc) { return false; } @@ -69,15 +188,21 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & if (params.port <= 0 || params.port > 65535) { return false; } - } else if (arg == "-m" || arg == "--mem") { + } + else if (arg == "-c" || arg == "--cache") { + params.use_cache = true; + } + else if (arg == "-m" || arg == "--mem") { if (++i >= argc) { return false; } params.backend_mem = std::stoul(argv[i]) * 1024 * 1024; - } else if (arg == "-h" || arg == "--help") { + } + else if (arg == "-h" || arg == "--help") { print_usage(argc, argv, params); exit(0); - } else { + } + else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); print_usage(argc, argv, params); exit(0); @@ -173,11 +298,28 @@ int main(int argc, char * argv[]) { if (params.backend_mem > 0) { free_mem = params.backend_mem; total_mem = params.backend_mem; - } 
else { + } + else { get_backend_memory(&free_mem, &total_mem); } - printf("Starting RPC server on %s, backend memory: %zu MB\n", endpoint.c_str(), free_mem / (1024 * 1024)); - start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem); + const char* cache_dir = nullptr; + std::string cache_dir_str; + if (params.use_cache) { + cache_dir_str = fs_get_cache_directory() + "rpc/"; + if (!fs_create_directory_with_parents(cache_dir_str)) { + fprintf(stderr, "Failed to create cache directory: %s\n", cache_dir_str.c_str()); + return 1; + } + cache_dir = cache_dir_str.c_str(); + } + printf("Starting RPC server v%d.%d.%d\n", + RPC_PROTO_MAJOR_VERSION, + RPC_PROTO_MINOR_VERSION, + RPC_PROTO_PATCH_VERSION); + printf(" endpoint : %s\n", endpoint.c_str()); + printf(" local cache : %s\n", cache_dir ? cache_dir : "n/a"); + printf(" backend memory : %zu MB\n", free_mem / (1024 * 1024)); + ggml_backend_rpc_start_server(backend, endpoint.c_str(), cache_dir, free_mem, total_mem); ggml_backend_free(backend); return 0; } diff --git a/ggml/include/ggml-rpc.h b/ggml/include/ggml-rpc.h index aa144832a..8e388d442 100644 --- a/ggml/include/ggml-rpc.h +++ b/ggml/include/ggml-rpc.h @@ -7,6 +7,9 @@ extern "C" { #endif +#define RPC_PROTO_MAJOR_VERSION 2 +#define RPC_PROTO_MINOR_VERSION 0 +#define RPC_PROTO_PATCH_VERSION 0 #define GGML_RPC_MAX_SERVERS 16 // backend API @@ -17,7 +20,9 @@ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total); -GGML_API GGML_CALL void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem); +GGML_API GGML_CALL void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint, + const char * cache_dir, + size_t free_mem, size_t total_mem); #ifdef __cplusplus } diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 55c33972d..0fe04e703 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -26,7 +26,10 @@ # include #endif #include +#include +#include +namespace fs = std::filesystem; #define UNUSED GGML_UNUSED #define GGML_DEBUG 0 @@ -89,15 +92,95 @@ enum rpc_cmd { RPC_CMD_FREE_BUFFER, RPC_CMD_BUFFER_CLEAR, RPC_CMD_SET_TENSOR, + RPC_CMD_SET_TENSOR_HASH, RPC_CMD_GET_TENSOR, RPC_CMD_COPY_TENSOR, RPC_CMD_GRAPH_COMPUTE, RPC_CMD_GET_DEVICE_MEMORY, + RPC_CMD_INIT_TENSOR, + RPC_CMD_GET_ALLOC_SIZE, RPC_CMD_COUNT, }; -// RPC data structures +// Try RPC_CMD_SET_TENSOR_HASH first when data size is larger than this threshold +const size_t HASH_THRESHOLD = 10 * 1024 * 1024; + +struct rpc_msg_get_alloc_size_req { + rpc_tensor tensor; +}; + +struct rpc_msg_get_alloc_size_rsp { + uint64_t alloc_size; +}; + +struct rpc_msg_init_tensor_req { + rpc_tensor tensor; +}; + +struct rpc_msg_alloc_buffer_req { + uint64_t size; +}; + +struct rpc_msg_alloc_buffer_rsp { + uint64_t remote_ptr; + uint64_t remote_size; +}; + +struct rpc_msg_get_alignment_rsp { + uint64_t alignment; +}; + +struct rpc_msg_get_max_size_rsp { + uint64_t max_size; +}; + +struct rpc_msg_buffer_get_base_req { + uint64_t remote_ptr; +}; + +struct rpc_msg_buffer_get_base_rsp { + uint64_t base_ptr; +}; + +struct rpc_msg_free_buffer_req { + uint64_t remote_ptr; +}; + +struct rpc_msg_buffer_clear_req { + uint64_t remote_ptr; + uint8_t value; +}; +struct rpc_msg_set_tensor_hash_rsp { + uint8_t result; +}; + +struct rpc_msg_get_tensor_req { + rpc_tensor tensor; + uint64_t offset; + uint64_t size; +}; + +struct rpc_msg_copy_tensor_req { 
+ rpc_tensor src; + rpc_tensor dst; +}; + +struct rpc_msg_copy_tensor_rsp { + uint8_t result; +}; + +struct rpc_msg_graph_compute_rsp { + uint8_t result; +}; + +struct rpc_msg_get_device_memory_rsp { + uint64_t free_mem; + uint64_t total_mem; +}; +#pragma pack(pop) + +// RPC data structures static ggml_guid_t ggml_backend_rpc_guid() { static ggml_guid guid = {0x99, 0x68, 0x5b, 0x6c, 0xd2, 0x83, 0x3d, 0x24, 0x25, 0x36, 0x72, 0xe1, 0x5b, 0x0e, 0x14, 0x03}; return &guid; @@ -117,13 +200,26 @@ struct ggml_backend_rpc_context { struct ggml_backend_rpc_buffer_context { std::shared_ptr sock; - std::unordered_map base_cache; + //std::unordered_map base_cache; + void* base_ptr; uint64_t remote_ptr; std::string name; }; // RPC helper functions +// Computes FNV-1a hash of the data +static uint64_t fnv_hash(const uint8_t* data, size_t len) { + const uint64_t fnv_prime = 0x100000001b3ULL; + uint64_t hash = 0xcbf29ce484222325ULL; + + for (size_t i = 0; i < len; ++i) { + hash ^= data[i]; + hash *= fnv_prime; + } + return hash; +} + static std::shared_ptr make_socket(sockfd_t fd) { #ifdef _WIN32 if (fd == INVALID_SOCKET) { @@ -240,6 +336,39 @@ static bool recv_data(sockfd_t sockfd, void * data, size_t size) { return true; } +static bool send_msg(sockfd_t sockfd, const void* msg, size_t msg_size) { + if (!send_data(sockfd, &msg_size, sizeof(msg_size))) { + return false; + } + return send_data(sockfd, msg, msg_size); +} + +static bool recv_msg(sockfd_t sockfd, void* msg, size_t msg_size) { + uint64_t size; + if (!recv_data(sockfd, &size, sizeof(size))) { + return false; + } + if (size != msg_size) { + return false; + } + return recv_data(sockfd, msg, msg_size); +} + +static bool recv_msg(sockfd_t sockfd, std::vector& input) { + uint64_t size; + if (!recv_data(sockfd, &size, sizeof(size))) { + return false; + } + try { + input.resize(size); + } + catch (const std::bad_alloc& e) { + fprintf(stderr, "Failed to allocate input buffer of size %" PRIu64 "\n", size); + return false; + } + return recv_data(sockfd, input.data(), size); +} + static bool parse_endpoint(const std::string & endpoint, std::string & host, int & port) { size_t pos = endpoint.find(':'); if (pos == std::string::npos) { @@ -252,28 +381,27 @@ static bool parse_endpoint(const std::string & endpoint, std::string & host, int // RPC request : | rpc_cmd (1 byte) | request_size (8 bytes) | request_data (request_size bytes) | // RPC response: | response_size (8 bytes) | response_data (response_size bytes) | -static bool send_rpc_cmd(const std::shared_ptr & sock, enum rpc_cmd cmd, const std::vector & input, std::vector & output) { +static bool send_rpc_cmd(const std::shared_ptr& sock, enum rpc_cmd cmd, const void* input, size_t input_size, void* output, size_t output_size) { uint8_t cmd_byte = cmd; if (!send_data(sock->fd, &cmd_byte, sizeof(cmd_byte))) { return false; } - uint64_t input_size = input.size(); if (!send_data(sock->fd, &input_size, sizeof(input_size))) { return false; } - if (!send_data(sock->fd, input.data(), input.size())) { + if (!send_data(sock->fd, input, input_size)) { return false; } - uint64_t output_size; - if (!recv_data(sock->fd, &output_size, sizeof(output_size))) { + // TODO: currently the output_size is always known, do we need support for commands with variable output size? 
+ // even if we do, we can skip sending output_size from the server for commands with known output size + uint64_t out_size; + if (!recv_data(sock->fd, &out_size, sizeof(out_size))) { return false; } - if (output_size == 0) { - output.clear(); - return true; + if (out_size != output_size) { + return false; } - output.resize(output_size); - if (!recv_data(sock->fd, output.data(), output_size)) { + if (!recv_data(sock->fd, output, output_size)) { return false; } return true; @@ -324,40 +452,29 @@ GGML_CALL static const char * ggml_backend_rpc_buffer_get_name(ggml_backend_buff return ctx->name.c_str(); } -GGML_CALL static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) { - ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; - // input serialization format: | remote_ptr (8 bytes) | - std::vector input(sizeof(uint64_t), 0); - uint64_t remote_ptr = ctx->remote_ptr; - memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); - std::vector output; - bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, input, output); - //GGML_ASSERT(status); - //GGML_ASSERT(output.empty()); + +static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) { + ggml_backend_rpc_buffer_context* ctx = (ggml_backend_rpc_buffer_context*)buffer->context; + rpc_msg_free_buffer_req request = { ctx->remote_ptr }; + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_FREE_BUFFER, &request, sizeof(request), nullptr, 0); + GGML_ASSERT(status); delete ctx; } -GGML_CALL static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) { - ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; - if (ctx->base_cache.find(buffer) != ctx->base_cache.end()) { - return ctx->base_cache[buffer]; - } - // input serialization format: | remote_ptr (8 bytes) | - std::vector input(sizeof(uint64_t), 0); - uint64_t remote_ptr = ctx->remote_ptr; - memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); - std::vector output; - bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, input, output); - //GGML_ASSERT(status); - //GGML_ASSERT(output.size() == sizeof(uint64_t)); - // output serialization format: | base_ptr (8 bytes) | - uint64_t base_ptr; - memcpy(&base_ptr, output.data(), sizeof(base_ptr)); - void * base = reinterpret_cast(base_ptr); - ctx->base_cache[buffer] = base; - return base; +static void* ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) { + ggml_backend_rpc_buffer_context* ctx = (ggml_backend_rpc_buffer_context*)buffer->context; + if (ctx->base_ptr != nullptr) { + return ctx->base_ptr; + } + rpc_msg_buffer_get_base_req request = { ctx->remote_ptr }; + rpc_msg_buffer_get_base_rsp response; + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_GET_BASE, &request, sizeof(request), &response, sizeof(response)); + GGML_ASSERT(status); + ctx->base_ptr = reinterpret_cast(response.base_ptr); + return ctx->base_ptr; } + static rpc_tensor serialize_tensor(const ggml_tensor * tensor) { rpc_tensor result; result.id = reinterpret_cast(tensor); @@ -396,74 +513,74 @@ GGML_CALL static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t } } -GGML_CALL static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; - // input serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) | +static void 
ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor* tensor, const void* data, size_t offset, size_t size) { + ggml_backend_rpc_buffer_context* ctx = (ggml_backend_rpc_buffer_context*)buffer->context; + rpc_tensor rpc_tensor = serialize_tensor(tensor); + if (size > HASH_THRESHOLD) { + // input serialization format: | rpc_tensor | offset (8 bytes) | hash (8 bytes) + size_t input_size = sizeof(rpc_tensor) + sizeof(uint64_t) + sizeof(uint64_t); + std::vector input(input_size, 0); + uint64_t hash = fnv_hash((const uint8_t*)data, size); + memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor)); + memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); + memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), &hash, sizeof(hash)); + rpc_msg_set_tensor_hash_rsp response; + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR_HASH, input.data(), input.size(), &response, sizeof(response)); + GGML_ASSERT(status); + if (response.result) { + // the server has the same data, no need to send it + return; + } + } + // input serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) size_t input_size = sizeof(rpc_tensor) + sizeof(uint64_t) + size; std::vector input(input_size, 0); - rpc_tensor rpc_tensor = serialize_tensor(tensor); memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor)); memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size); - std::vector output; - bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input, output); - //GGML_ASSERT(status); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input.data(), input.size(), nullptr, 0); + GGML_ASSERT(status); } -GGML_CALL static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { - ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; - // input serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) | - int input_size = sizeof(rpc_tensor) + 2*sizeof(uint64_t); - std::vector input(input_size, 0); - rpc_tensor rpc_tensor = serialize_tensor(tensor); - memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor)); - memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); - memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), &size, sizeof(size)); - std::vector output; - bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, input, output); - //GGML_ASSERT(status); - //GGML_ASSERT(output.size() == size); - // output serialization format: | data (size bytes) | - memcpy(data, output.data(), size); + +static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor* tensor, void* data, size_t offset, size_t size) { + ggml_backend_rpc_buffer_context* ctx = (ggml_backend_rpc_buffer_context*)buffer->context; + rpc_msg_get_tensor_req request; + request.tensor = serialize_tensor(tensor); + request.offset = offset; + request.size = size; + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_GET_TENSOR, &request, sizeof(request), data, size); + GGML_ASSERT(status); } -GGML_CALL static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { + +static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor* src, ggml_tensor* dst) { // check if src and dst are on the same server ggml_backend_buffer_t src_buffer = src->buffer; - 
ggml_backend_rpc_buffer_context * src_ctx = (ggml_backend_rpc_buffer_context *)src_buffer->context; + ggml_backend_rpc_buffer_context* src_ctx = (ggml_backend_rpc_buffer_context*)src_buffer->context; ggml_backend_buffer_t dst_buffer = dst->buffer; - ggml_backend_rpc_buffer_context * dst_ctx = (ggml_backend_rpc_buffer_context *)dst_buffer->context; + ggml_backend_rpc_buffer_context* dst_ctx = (ggml_backend_rpc_buffer_context*)dst_buffer->context; if (src_ctx->sock != dst_ctx->sock) { return false; } - ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; - // input serialization format: | rpc_tensor src | rpc_tensor dst | - int input_size = 2*sizeof(rpc_tensor); - std::vector input(input_size, 0); - rpc_tensor rpc_src = serialize_tensor(src); - rpc_tensor rpc_dst = serialize_tensor(dst); - memcpy(input.data(), &rpc_src, sizeof(rpc_src)); - memcpy(input.data() + sizeof(rpc_src), &rpc_dst, sizeof(rpc_dst)); - std::vector output; - bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, input, output); - //GGML_ASSERT(status); - // output serialization format: | result (1 byte) | - //GGML_ASSERT(output.size() == 1); - return output[0]; -} - -GGML_CALL static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { - ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; - // serialization format: | bufptr (8 bytes) | value (1 byte) | - int input_size = sizeof(uint64_t) + sizeof(uint8_t); - std::vector input(input_size, 0); - memcpy(input.data(), &ctx->remote_ptr, sizeof(ctx->remote_ptr)); - memcpy(input.data() + sizeof(ctx->remote_ptr), &value, sizeof(value)); - std::vector output; - bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, input, output); - //GGML_ASSERT(status); + ggml_backend_rpc_buffer_context* ctx = (ggml_backend_rpc_buffer_context*)buffer->context; + rpc_msg_copy_tensor_req request; + request.src = serialize_tensor(src); + request.dst = serialize_tensor(dst); + rpc_msg_copy_tensor_rsp response; + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_COPY_TENSOR, &request, sizeof(request), &response, sizeof(response)); + GGML_ASSERT(status); + return response.result; +} + +static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + ggml_backend_rpc_buffer_context* ctx = (ggml_backend_rpc_buffer_context*)buffer->context; + rpc_msg_buffer_clear_req request = { ctx->remote_ptr, value }; + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_BUFFER_CLEAR, &request, sizeof(request), nullptr, 0); + GGML_ASSERT(status); } + static ggml_backend_buffer_i ggml_backend_rpc_buffer_interface = { /* .get_name = */ ggml_backend_rpc_buffer_get_name, /* .free_buffer = */ ggml_backend_rpc_buffer_free_buffer, @@ -481,62 +598,44 @@ GGML_CALL static const char * ggml_backend_rpc_buffer_type_name(ggml_backend_buf return buft_ctx->name.c_str(); } -GGML_CALL static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; - // input serialization format: | size (8 bytes) | - int input_size = sizeof(uint64_t); - std::vector input(input_size, 0); - memcpy(input.data(), &size, sizeof(size)); - std::vector output; +static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + ggml_backend_rpc_buffer_type_context* buft_ctx = 
(ggml_backend_rpc_buffer_type_context*)buft->context; + rpc_msg_alloc_buffer_req request = { size }; + rpc_msg_alloc_buffer_rsp response; auto sock = get_socket(buft_ctx->endpoint); - bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, input, output); - //GGML_ASSERT(status); - GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); - // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) | - uint64_t remote_ptr; - memcpy(&remote_ptr, output.data(), sizeof(remote_ptr)); - size_t remote_size; - memcpy(&remote_size, output.data() + sizeof(uint64_t), sizeof(remote_size)); - if (remote_ptr != 0) { + bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, &request, sizeof(request), &response, sizeof(response)); + GGML_ASSERT(status); + if (response.remote_ptr != 0) { ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft, ggml_backend_rpc_buffer_interface, - new ggml_backend_rpc_buffer_context{sock, {}, remote_ptr, "RPC[" + std::string(buft_ctx->endpoint) + "]"}, - remote_size); + new ggml_backend_rpc_buffer_context{ sock, nullptr, response.remote_ptr }, + response.remote_size); return buffer; - } else { + } + else { return nullptr; } } -static size_t get_alignment(const std::shared_ptr & sock) { - // input serialization format: | 0 bytes | - std::vector input; - std::vector output; - bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, input, output); - //GGML_ASSERT(status); - GGML_ASSERT(output.size() == sizeof(uint64_t)); - // output serialization format: | alignment (8 bytes) | - uint64_t alignment; - memcpy(&alignment, output.data(), sizeof(alignment)); - return alignment; + +static size_t get_alignment(const std::shared_ptr& sock) { + rpc_msg_get_alignment_rsp response; + bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALIGNMENT, nullptr, 0, &response, sizeof(response)); + GGML_ASSERT(status); + return response.alignment; } + GGML_CALL static size_t ggml_backend_rpc_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; return buft_ctx->alignment; } -static size_t get_max_size(const std::shared_ptr & sock) { - // input serialization format: | 0 bytes | - std::vector input; - std::vector output; - bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, input, output); - //GGML_ASSERT(status); - GGML_ASSERT(output.size() == sizeof(uint64_t)); - // output serialization format: | max_size (8 bytes) | - uint64_t max_size; - memcpy(&max_size, output.data(), sizeof(max_size)); - return max_size; +static size_t get_max_size(const std::shared_ptr& sock) { + rpc_msg_get_max_size_rsp response; + bool status = send_rpc_cmd(sock, RPC_CMD_GET_MAX_SIZE, nullptr, 0, &response, sizeof(response)); + GGML_ASSERT(status); + return response.max_size; } GGML_CALL static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t buft) { @@ -617,16 +716,15 @@ static void serialize_graph(const ggml_cgraph * cgraph, std::vector & o memcpy(out_tensors, tensors.data(), n_tensors * sizeof(rpc_tensor)); } -GGML_CALL static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { - ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; +static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t backend, ggml_cgraph* cgraph) { + ggml_backend_rpc_context* rpc_ctx = (ggml_backend_rpc_context*)backend->context; std::vector input; serialize_graph(cgraph, input); - std::vector output; + 
rpc_msg_graph_compute_rsp response; auto sock = get_socket(rpc_ctx->endpoint); - bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input, output); - //GGML_ASSERT(status); - //GGML_ASSERT(output.size() == 1); - return (enum ggml_status)output[0]; + bool status = send_rpc_cmd(sock, RPC_CMD_GRAPH_COMPUTE, input.data(), input.size(), &response, sizeof(response)); + GGML_ASSERT(status); + return (enum ggml_status)response.result; } GGML_CALL static bool ggml_backend_rpc_supports_op(ggml_backend_t backend, const ggml_tensor * op) { @@ -743,20 +841,12 @@ GGML_API GGML_CALL bool ggml_backend_is_rpc(ggml_backend_t backend) { return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_rpc_guid()); } -static void get_device_memory(const std::shared_ptr & sock, size_t * free, size_t * total) { - // input serialization format: | 0 bytes | - std::vector input; - std::vector output; - bool status = send_rpc_cmd(sock, RPC_CMD_GET_DEVICE_MEMORY, input, output); - //GGML_ASSERT(status); - GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); - // output serialization format: | free (8 bytes) | total (8 bytes) | - uint64_t free_mem; - memcpy(&free_mem, output.data(), sizeof(free_mem)); - uint64_t total_mem; - memcpy(&total_mem, output.data() + sizeof(uint64_t), sizeof(total_mem)); - *free = free_mem; - *total = total_mem; +static void get_device_memory(const std::shared_ptr& sock, size_t* free, size_t* total) { + rpc_msg_get_device_memory_rsp response; + bool status = send_rpc_cmd(sock, RPC_CMD_GET_DEVICE_MEMORY, nullptr, 0, &response, sizeof(response)); + GGML_ASSERT(status); + *free = response.free_mem; + *total = response.total_mem; } GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total) { @@ -773,21 +863,27 @@ GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint class rpc_server { public: - rpc_server(ggml_backend_t backend) : backend(backend) {} + rpc_server(ggml_backend_t backend, const char* cache_dir) + : backend(backend), cache_dir(cache_dir) { + } ~rpc_server(); - bool alloc_buffer(const std::vector & input, std::vector & output); - void get_alignment(std::vector & output); - void get_max_size(std::vector & output); - bool buffer_get_base(const std::vector & input, std::vector & output); - bool free_buffer(const std::vector & input); - bool buffer_clear(const std::vector & input); - bool set_tensor(const std::vector & input); - bool get_tensor(const std::vector & input, std::vector & output); - bool copy_tensor(const std::vector & input, std::vector & output); - bool graph_compute(const std::vector & input, std::vector & output); + void alloc_buffer(const rpc_msg_alloc_buffer_req& request, rpc_msg_alloc_buffer_rsp& response); + void get_alignment(rpc_msg_get_alignment_rsp& response); + void get_max_size(rpc_msg_get_max_size_rsp& response); + bool buffer_get_base(const rpc_msg_buffer_get_base_req& request, rpc_msg_buffer_get_base_rsp& response); + bool free_buffer(const rpc_msg_free_buffer_req& request); + bool buffer_clear(const rpc_msg_buffer_clear_req& request); + bool set_tensor(const std::vector& input); + bool set_tensor_hash(const std::vector& input, rpc_msg_set_tensor_hash_rsp& response); + bool get_tensor(const rpc_msg_get_tensor_req& request, std::vector& response); + bool copy_tensor(const rpc_msg_copy_tensor_req& request, rpc_msg_copy_tensor_rsp& response); + bool graph_compute(const std::vector& input, rpc_msg_graph_compute_rsp& response); + bool init_tensor(const 
rpc_msg_init_tensor_req& request); + bool get_alloc_size(const rpc_msg_get_alloc_size_req& request, rpc_msg_get_alloc_size_rsp& response); private: + bool get_cached_file(uint64_t hash, std::vector& data); ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor); ggml_tensor * create_node(uint64_t id, struct ggml_context * ctx, @@ -796,85 +892,86 @@ class rpc_server { ggml_backend_t backend; + const char* cache_dir; std::unordered_set buffers; }; -bool rpc_server::alloc_buffer(const std::vector & input, std::vector & output) { - // input serialization format: | size (8 bytes) | - if (input.size() != sizeof(uint64_t)) { +bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req& request, rpc_msg_get_alloc_size_rsp& response) { + ggml_backend_buffer_type_t buft; + struct ggml_init_params params { + /*.mem_size =*/ ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + struct ggml_context* ctx = ggml_init(params); + ggml_tensor* tensor = deserialize_tensor(ctx, &request.tensor); + + if (tensor == nullptr) { + GGML_ABORT("Null tensor pointer passed to server get_alloc_size function.\n"); + ggml_free(ctx); return false; } - uint64_t size; - memcpy(&size, input.data(), sizeof(size)); + + if (tensor->buffer == nullptr) { + //No buffer allocated. + buft = ggml_backend_get_default_buffer_type(backend); + } + else { + buft = tensor->buffer->buft; + } + + response.alloc_size = ggml_backend_buft_get_alloc_size(buft, tensor); + + ggml_free(ctx); + return true; +} +void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req& request, rpc_msg_alloc_buffer_rsp& response) { ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend); - ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size); - uint64_t remote_ptr = 0; - uint64_t remote_size = 0; + ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, request.size); + response.remote_ptr = 0; + response.remote_size = 0; if (buffer != nullptr) { - remote_ptr = reinterpret_cast(buffer); - remote_size = buffer->size; - GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, size, remote_ptr, remote_size); + response.remote_ptr = reinterpret_cast(buffer); + response.remote_size = buffer->size; + GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, request.size, response.remote_ptr, response.remote_size); buffers.insert(buffer); - } else { - GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, size); } - // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) | - output.resize(2*sizeof(uint64_t), 0); - memcpy(output.data(), &remote_ptr, sizeof(remote_ptr)); - memcpy(output.data() + sizeof(uint64_t), &remote_size, sizeof(remote_size)); - return true; + else { + GGML_ABORT("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size); + } } - -void rpc_server::get_alignment(std::vector & output) { +void rpc_server::get_alignment(rpc_msg_get_alignment_rsp& response) { ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend); size_t alignment = ggml_backend_buft_get_alignment(buft); GGML_PRINT_DEBUG("[%s] alignment: %lu\n", __func__, alignment); - // output serialization format: | alignment (8 bytes) | - output.resize(sizeof(uint64_t), 0); - memcpy(output.data(), &alignment, sizeof(alignment)); + response.alignment = alignment; } -void rpc_server::get_max_size(std::vector & output) { 
+void rpc_server::get_max_size(rpc_msg_get_max_size_rsp& response) { ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend); size_t max_size = ggml_backend_buft_get_max_size(buft); GGML_PRINT_DEBUG("[%s] max_size: %lu\n", __func__, max_size); - // output serialization format: | max_size (8 bytes) | - output.resize(sizeof(uint64_t), 0); - memcpy(output.data(), &max_size, sizeof(max_size)); + response.max_size = max_size; } -bool rpc_server::buffer_get_base(const std::vector & input, std::vector & output) { - // input serialization format: | remote_ptr (8 bytes) | - if (input.size() != sizeof(uint64_t)) { - return false; - } - uint64_t remote_ptr; - memcpy(&remote_ptr, input.data(), sizeof(remote_ptr)); - GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr); - ggml_backend_buffer_t buffer = reinterpret_cast(remote_ptr); +bool rpc_server::buffer_get_base(const rpc_msg_buffer_get_base_req& request, rpc_msg_buffer_get_base_rsp& response) { + GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr); + ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr); if (buffers.find(buffer) == buffers.end()) { - GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__); + GGML_ABORT("[%s] buffer not found\n", __func__); return false; } - void * base = ggml_backend_buffer_get_base(buffer); - // output serialization format: | base_ptr (8 bytes) | - uint64_t base_ptr = reinterpret_cast(base); - output.resize(sizeof(uint64_t), 0); - memcpy(output.data(), &base_ptr, sizeof(base_ptr)); + void* base = ggml_backend_buffer_get_base(buffer); + response.base_ptr = reinterpret_cast(base); return true; } -bool rpc_server::free_buffer(const std::vector & input) { - // input serialization format: | remote_ptr (8 bytes) | - if (input.size() != sizeof(uint64_t)) { - return false; - } - uint64_t remote_ptr; - memcpy(&remote_ptr, input.data(), sizeof(remote_ptr)); - GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr); - ggml_backend_buffer_t buffer = reinterpret_cast(remote_ptr); +bool rpc_server::free_buffer(const rpc_msg_free_buffer_req& request) { + GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr); + ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr); if (buffers.find(buffer) == buffers.end()) { - GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__); + GGML_ABORT("[%s] buffer not found\n", __func__); return false; } ggml_backend_buffer_free(buffer); @@ -882,22 +979,14 @@ bool rpc_server::free_buffer(const std::vector & input) { return true; } -bool rpc_server::buffer_clear(const std::vector & input) { - // input serialization format: | remote_ptr (8 bytes) | value (1 byte) | - if (input.size() != sizeof(uint64_t) + sizeof(uint8_t)) { - return false; - } - uint64_t remote_ptr; - memcpy(&remote_ptr, input.data(), sizeof(remote_ptr)); - uint8_t value; - memcpy(&value, input.data() + sizeof(uint64_t), sizeof(value)); - GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, remote_ptr, value); - ggml_backend_buffer_t buffer = reinterpret_cast(remote_ptr); +bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req& request) { + GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, request.remote_ptr, request.value); + ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr); if (buffers.find(buffer) == buffers.end()) { - GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__); + GGML_ABORT("[%s] buffer not found\n", __func__); return 
false; } - ggml_backend_buffer_clear(buffer, value); + ggml_backend_buffer_clear(buffer, request.value); return true; } @@ -909,15 +998,17 @@ ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rp } result->buffer = reinterpret_cast(tensor->buffer); if (result->buffer && buffers.find(result->buffer) == buffers.end()) { - return nullptr; + result->buffer = nullptr; } - // require that the tensor data does not go beyond the buffer end - uint64_t tensor_size = (uint64_t) ggml_nbytes(result); - uint64_t buffer_start = (uint64_t) ggml_backend_buffer_get_base(result->buffer); - uint64_t buffer_size = (uint64_t) ggml_backend_buffer_get_size(result->buffer); - GGML_ASSERT(tensor->data + tensor_size >= tensor->data); // check for overflow - GGML_ASSERT(tensor->data >= buffer_start && tensor->data + tensor_size <= buffer_start + buffer_size); + if (result->buffer) { + // require that the tensor data does not go beyond the buffer end + uint64_t tensor_size = (uint64_t) ggml_nbytes(result); + uint64_t buffer_start = (uint64_t) ggml_backend_buffer_get_base(result->buffer); + uint64_t buffer_size = (uint64_t) ggml_backend_buffer_get_size(result->buffer); + GGML_ASSERT(tensor->data + tensor_size >= tensor->data); // check for overflow + GGML_ASSERT(tensor->data >= buffer_start && tensor->data + tensor_size <= buffer_start + buffer_size); + } result->op = (ggml_op) tensor->op; for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) { @@ -930,25 +1021,25 @@ ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rp } -bool rpc_server::set_tensor(const std::vector & input) { +bool rpc_server::set_tensor(const std::vector& input) { // serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) | if (input.size() < sizeof(rpc_tensor) + sizeof(uint64_t)) { return false; } - const rpc_tensor * in_tensor = (const rpc_tensor *)input.data(); + const rpc_tensor* in_tensor = (const rpc_tensor*)input.data(); uint64_t offset; memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset)); const size_t size = input.size() - sizeof(rpc_tensor) - sizeof(offset); struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead(), - /*.mem_buffer =*/ NULL, - /*.no_alloc =*/ true, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); - ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor); + struct ggml_context* ctx = ggml_init(params); + ggml_tensor* tensor = deserialize_tensor(ctx, in_tensor); if (tensor == nullptr) { - GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__); + GGML_ABORT("[%s] error deserializing tensor\n", __func__); ggml_free(ctx); return false; } @@ -956,104 +1047,214 @@ bool rpc_server::set_tensor(const std::vector & input) { // sanitize tensor->data { - const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer); + const size_t p0 = (size_t)ggml_backend_buffer_get_base(tensor->buffer); const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); - if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) { + if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size >(p1 - in_tensor->data - offset)) { GGML_ABORT("[%s] tensor->data out of bounds\n", __func__); } } - const void * data = input.data() + sizeof(rpc_tensor) + sizeof(offset); + const void* data = input.data() + sizeof(rpc_tensor) + sizeof(offset); + if (cache_dir && size > HASH_THRESHOLD) { + uint64_t hash = 
fnv_hash((const uint8_t*)data, size); + char hash_str[17]; + snprintf(hash_str, sizeof(hash_str), "%016" PRIx64, hash); + // save to cache_dir/hash_str + fs::path cache_file = fs::path(cache_dir) / hash_str; + std::ofstream ofs(cache_file, std::ios::binary); + ofs.write((const char*)data, size); + printf("[%s] saved to '%s'\n", __func__, cache_file.c_str()); + } ggml_backend_tensor_set(tensor, data, offset, size); ggml_free(ctx); return true; } -bool rpc_server::get_tensor(const std::vector & input, std::vector & output) { - // serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) | - if (input.size() != sizeof(rpc_tensor) + 2*sizeof(uint64_t)) { +bool rpc_server::get_cached_file(uint64_t hash, std::vector& data) { + if (!cache_dir) { return false; } - const rpc_tensor * in_tensor = (const rpc_tensor *)input.data(); + char hash_str[17]; + snprintf(hash_str, sizeof(hash_str), "%016" PRIx64, hash); + fs::path cache_file = fs::path(cache_dir) / hash_str; + if (!fs::exists(cache_file)) { + return false; + } + std::ifstream ifs(cache_file, std::ios::binary); + ifs.seekg(0, std::ios::end); + size_t size = ifs.tellg(); + ifs.seekg(0, std::ios::beg); + data.resize(size); + ifs.read((char*)data.data(), size); + return true; +} + +bool rpc_server::set_tensor_hash(const std::vector& input, rpc_msg_set_tensor_hash_rsp& response) +{ + // serialization format: | rpc_tensor | offset (8 bytes) | hash (8 bytes) | + if (input.size() != sizeof(rpc_tensor) + 16) { + return false; + } + const rpc_tensor* in_tensor = (const rpc_tensor*)input.data(); uint64_t offset; memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset)); - uint64_t size; - memcpy(&size, input.data() + sizeof(rpc_tensor) + sizeof(offset), sizeof(size)); - + const uint64_t* hash = (const uint64_t*)(input.data() + sizeof(rpc_tensor) + sizeof(offset)); + std::vector cached_file; + if (!get_cached_file(*hash, cached_file)) { + response.result = 0; + return true; + } + size_t size = cached_file.size(); struct ggml_init_params params { /*.mem_size =*/ ggml_tensor_overhead(), - /*.mem_buffer =*/ NULL, - /*.no_alloc =*/ true, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); - ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor); + struct ggml_context* ctx = ggml_init(params); + ggml_tensor* tensor = deserialize_tensor(ctx, in_tensor); if (tensor == nullptr) { - GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__); + GGML_ABORT("[%s] error deserializing tensor\n", __func__); ggml_free(ctx); return false; } - GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size); + GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu, hash: %" PRIx64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size, *hash); // sanitize tensor->data { - const size_t p0 = (size_t) ggml_backend_buffer_get_base(tensor->buffer); + const size_t p0 = (size_t)ggml_backend_buffer_get_base(tensor->buffer); const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); - if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size > (p1 - in_tensor->data - offset)) { + if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size >(p1 - in_tensor->data - offset)) { GGML_ABORT("[%s] tensor->data out of bounds\n", __func__); } } + ggml_backend_tensor_set(tensor, cached_file.data(), offset, size); + response.result = 1; + ggml_free(ctx); + 
return true; +} + +bool rpc_server::init_tensor(const rpc_msg_init_tensor_req& request) { + struct ggml_init_params params { + /*.mem_size =*/ ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + struct ggml_context* ctx = ggml_init(params); + ggml_tensor* tensor = deserialize_tensor(ctx, &request.tensor); + if (tensor == nullptr) { + GGML_ABORT("Null tensor pointer passed to server init_tensor function.\n"); + ggml_free(ctx); + return false; + } + + // Call the backend's buffer_init_tensor function + ggml_backend_buffer_t buffer = tensor->buffer; + if (buffer && buffer->iface.init_tensor) { + buffer->iface.init_tensor(buffer, tensor); + } + else { + GGML_ABORT("Null buffer for tensor passed to init_tensor function\n"); + } + + if (tensor->extra != nullptr) { + // This pointer can either be passed around client/server, or probably better stored server-side and kept track of. + // Currently unimplemented. + GGML_ABORT("tensor->extra populated by the backend, this is currently unsupported.\n"); + ggml_free(ctx); + return false; + } - // output serialization format: | data (size bytes) | - output.resize(size, 0); - ggml_backend_tensor_get(tensor, output.data(), offset, size); ggml_free(ctx); return true; } -bool rpc_server::copy_tensor(const std::vector & input, std::vector & output) { - // serialization format: | rpc_tensor src | rpc_tensor dst | - if (input.size() != 2*sizeof(rpc_tensor)) { +bool rpc_server::get_tensor(const rpc_msg_get_tensor_req& request, std::vector& response) { + struct ggml_init_params params { + /*.mem_size =*/ ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + struct ggml_context* ctx = ggml_init(params); + ggml_tensor* tensor = deserialize_tensor(ctx, &request.tensor); + if (tensor == nullptr) { + GGML_ABORT("[%s] error deserializing tensor\n", __func__); + ggml_free(ctx); return false; } - const rpc_tensor * rpc_src = (const rpc_tensor *)input.data(); - const rpc_tensor * rpc_dst = (const rpc_tensor *)(input.data() + sizeof(rpc_src)); + GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, request.offset, request.size); + + // sanitize tensor->data + { + const size_t p0 = (size_t)ggml_backend_buffer_get_base(tensor->buffer); + const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); + + if (request.tensor.data + request.offset < p0 || + request.tensor.data + request.offset >= p1 || + request.size >(p1 - request.tensor.data - request.offset)) { + GGML_ABORT("[%s] tensor->data out of bounds\n", __func__); + } + } + response.resize(request.size, 0); + ggml_backend_tensor_get(tensor, response.data(), request.offset, request.size); + ggml_free(ctx); + return true; +} +bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req& request, rpc_msg_copy_tensor_rsp& response) { struct ggml_init_params params { - /*.mem_size =*/ 2*ggml_tensor_overhead(), - /*.mem_buffer =*/ NULL, - /*.no_alloc =*/ true, + /*.mem_size =*/ 2 * ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, }; - struct ggml_context * ctx = ggml_init(params); - ggml_tensor * src = deserialize_tensor(ctx, rpc_src); - ggml_tensor * dst = deserialize_tensor(ctx, rpc_dst); + struct ggml_context* ctx = ggml_init(params); + ggml_tensor* src = deserialize_tensor(ctx, &request.src); + ggml_tensor* dst = deserialize_tensor(ctx, &request.dst); if (src == nullptr || dst == nullptr) { - GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__); 
+ GGML_ABORT("[%s] error deserializing tensors\n", __func__); + ggml_free(ctx); + return false; + } + + uint64_t src_size = (uint64_t)ggml_nbytes(src); + uint64_t dst_data = (uint64_t)dst->data; + uint64_t dst_base = (uint64_t)ggml_backend_buffer_get_base(dst->buffer); + uint64_t dst_buf_sz = (uint64_t)ggml_backend_buffer_get_size(dst->buffer); + + if (dst_data + src_size > dst_base + dst_buf_sz) { + GGML_PRINT_DEBUG("[%s] out-of-bounds write in rpc_server::copy_tensor:\n" + " write range : [0x%" PRIx64 ", 0x%" PRIx64 "]\n" + " buffer base: [0x%" PRIx64 ", 0x%" PRIx64 "]\n", + __func__, + dst_data, + dst_data + src_size, + dst_base, + dst_base + dst_buf_sz); ggml_free(ctx); return false; } - GGML_PRINT_DEBUG("[%s] src->buffer: %p, dst->buffer: %p\n", __func__, (void*)src->buffer, (void*)dst->buffer); - bool result = ggml_backend_buffer_copy_tensor(src, dst); - // output serialization format: | result (1 byte) | - output.resize(1, 0); - output[0] = result; + + GGML_PRINT_DEBUG("[%s] src->buffer: %p, dst->buffer: %p\n", + __func__, (void*)src->buffer, (void*)dst->buffer); + + response.result = ggml_backend_buffer_copy_tensor(src, dst); ggml_free(ctx); return true; } -ggml_tensor * rpc_server::create_node(uint64_t id, - struct ggml_context * ctx, - const std::unordered_map & tensor_ptrs, - std::unordered_map & tensor_map) { +ggml_tensor* rpc_server::create_node(uint64_t id, + struct ggml_context* ctx, + const std::unordered_map& tensor_ptrs, + std::unordered_map& tensor_map) { if (id == 0) { return nullptr; } if (tensor_map.find(id) != tensor_map.end()) { return tensor_map[id]; } - const rpc_tensor * tensor = tensor_ptrs.at(id); - struct ggml_tensor * result = deserialize_tensor(ctx, tensor); + const rpc_tensor* tensor = tensor_ptrs.at(id); + struct ggml_tensor* result = deserialize_tensor(ctx, tensor); if (result == nullptr) { return nullptr; } @@ -1066,7 +1267,7 @@ ggml_tensor * rpc_server::create_node(uint64_t id, return result; } -bool rpc_server::graph_compute(const std::vector & input, std::vector & output) { +bool rpc_server::graph_compute(const std::vector& input, rpc_msg_graph_compute_rsp& response) { // serialization format: // | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) | if (input.size() < sizeof(uint32_t)) { @@ -1074,26 +1275,26 @@ bool rpc_server::graph_compute(const std::vector & input, std::vectorn_nodes = n_nodes; std::unordered_map tensor_ptrs; for (uint32_t i = 0; i < n_tensors; i++) { @@ -1106,9 +1307,7 @@ bool rpc_server::graph_compute(const std::vector & input, std::vectornodes[i] = create_node(id, ctx, tensor_ptrs, tensor_map); } ggml_status status = ggml_backend_graph_compute(backend, graph); - // output serialization format: | status (1 byte) | - output.resize(1, 0); - output[0] = status; + response.result = status; ggml_free(ctx); return true; } @@ -1118,9 +1317,9 @@ rpc_server::~rpc_server() { ggml_backend_buffer_free(buffer); } } - -static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t free_mem, size_t total_mem) { - rpc_server server(backend); +static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, + sockfd_t sockfd, size_t free_mem, size_t total_mem) { + rpc_server server(backend, cache_dir); while (true) { uint8_t cmd; if (!recv_data(sockfd, &cmd, 1)) { @@ -1131,89 +1330,198 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre fprintf(stderr, "Unknown command: %d\n", cmd); break; } - std::vector input; - 
std::vector output; - uint64_t input_size; - if (!recv_data(sockfd, &input_size, sizeof(input_size))) { + switch (cmd) { + case RPC_CMD_ALLOC_BUFFER: { + rpc_msg_alloc_buffer_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; + } + rpc_msg_alloc_buffer_rsp response; + server.alloc_buffer(request, response); + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } break; } - try { - input.resize(input_size); - } catch (const std::bad_alloc & e) { - fprintf(stderr, "Failed to allocate input buffer of size %" PRIu64 "\n", input_size); + case RPC_CMD_GET_ALLOC_SIZE: { + rpc_msg_get_alloc_size_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; + } + rpc_msg_get_alloc_size_rsp response; + server.get_alloc_size(request, response); + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } break; } - if (!recv_data(sockfd, input.data(), input_size)) { + case RPC_CMD_GET_ALIGNMENT: { + if (!recv_msg(sockfd, nullptr, 0)) { + return; + } + rpc_msg_get_alignment_rsp response; + server.get_alignment(response); + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } break; } - bool ok = true; - switch (cmd) { - case RPC_CMD_ALLOC_BUFFER: { - ok = server.alloc_buffer(input, output); - break; + case RPC_CMD_GET_MAX_SIZE: { + if (!recv_msg(sockfd, nullptr, 0)) { + return; } - case RPC_CMD_GET_ALIGNMENT: { - server.get_alignment(output); - break; + rpc_msg_get_max_size_rsp response; + server.get_max_size(response); + if (!send_msg(sockfd, &response, sizeof(response))) { + return; } - case RPC_CMD_GET_MAX_SIZE: { - server.get_max_size(output); - break; + break; + } + case RPC_CMD_BUFFER_GET_BASE: { + rpc_msg_buffer_get_base_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; } - case RPC_CMD_BUFFER_GET_BASE: { - ok = server.buffer_get_base(input, output); - break; + rpc_msg_buffer_get_base_rsp response; + if (!server.buffer_get_base(request, response)) { + return; } - case RPC_CMD_FREE_BUFFER: { - ok = server.free_buffer(input); - break; + if (!send_msg(sockfd, &response, sizeof(response))) { + return; } - case RPC_CMD_BUFFER_CLEAR: { - ok = server.buffer_clear(input); - break; + break; + } + case RPC_CMD_FREE_BUFFER: { + rpc_msg_free_buffer_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; } - case RPC_CMD_SET_TENSOR: { - ok = server.set_tensor(input); - break; + if (!server.free_buffer(request)) { + return; } - case RPC_CMD_GET_TENSOR: { - ok = server.get_tensor(input, output); - break; + if (!send_msg(sockfd, nullptr, 0)) { + return; } - case RPC_CMD_COPY_TENSOR: { - ok = server.copy_tensor(input, output); - break; + break; + } + case RPC_CMD_BUFFER_CLEAR: { + rpc_msg_buffer_clear_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; } - case RPC_CMD_GRAPH_COMPUTE: { - ok = server.graph_compute(input, output); - break; + if (!server.buffer_clear(request)) { + return; } - case RPC_CMD_GET_DEVICE_MEMORY: { - // output serialization format: | free (8 bytes) | total (8 bytes) | - output.resize(2*sizeof(uint64_t), 0); - memcpy(output.data(), &free_mem, sizeof(free_mem)); - memcpy(output.data() + sizeof(uint64_t), &total_mem, sizeof(total_mem)); - break; + if (!send_msg(sockfd, nullptr, 0)) { + return; } - default: { - fprintf(stderr, "Unknown command: %d\n", cmd); - ok = false; + break; + } + case RPC_CMD_SET_TENSOR: { + std::vector input; + if (!recv_msg(sockfd, input)) { + return; + } + if (!server.set_tensor(input)) { + return; } + if 
(!send_msg(sockfd, nullptr, 0)) { + return; + } + break; } - if (!ok) { + case RPC_CMD_SET_TENSOR_HASH: { + std::vector input; + if (!recv_msg(sockfd, input)) { + return; + } + rpc_msg_set_tensor_hash_rsp response; + if (!server.set_tensor_hash(input, response)) { + return; + } + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } break; } - uint64_t output_size = output.size(); - if (!send_data(sockfd, &output_size, sizeof(output_size))) { + case RPC_CMD_INIT_TENSOR: { + rpc_msg_init_tensor_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; + } + if (!server.init_tensor(request)) { + return; + } + if (!send_msg(sockfd, nullptr, 0)) { + return; + } break; } - if (!send_data(sockfd, output.data(), output_size)) { + case RPC_CMD_GET_TENSOR: { + rpc_msg_get_tensor_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; + } + std::vector response; + if (!server.get_tensor(request, response)) { + return; + } + if (!send_msg(sockfd, response.data(), response.size())) { + return; + } break; } + case RPC_CMD_COPY_TENSOR: { + rpc_msg_copy_tensor_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; + } + rpc_msg_copy_tensor_rsp response; + if (!server.copy_tensor(request, response)) { + return; + } + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } + break; + } + case RPC_CMD_GRAPH_COMPUTE: { + std::vector input; + if (!recv_msg(sockfd, input)) { + return; + } + rpc_msg_graph_compute_rsp response; + if (!server.graph_compute(input, response)) { + return; + } + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } + break; + } + case RPC_CMD_GET_DEVICE_MEMORY: { + if (!recv_msg(sockfd, nullptr, 0)) { + return; + } + rpc_msg_get_device_memory_rsp response; + response.free_mem = free_mem; + response.total_mem = total_mem; + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } + break; + } + default: { + fprintf(stderr, "Unknown command: %d\n", cmd); + return; + } + } } } -void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem) { +void ggml_backend_rpc_start_server(ggml_backend_t backend, const char* endpoint, + const char* cache_dir, + size_t free_mem, size_t total_mem) { std::string host; int port; if (!parse_endpoint(endpoint, host, port)) { @@ -1242,7 +1550,7 @@ void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free } printf("Accepted client connection, free_mem=%zu, total_mem=%zu\n", free_mem, total_mem); fflush(stdout); - rpc_serve_client(backend, client_socket->fd, free_mem, total_mem); + rpc_serve_client(backend, cache_dir, client_socket->fd, free_mem, total_mem); printf("Client connection closed\n"); fflush(stdout); } From d093b3742795391969ea6457b44b0105c94e5a4d Mon Sep 17 00:00:00 2001 From: firecoperana Date: Mon, 5 May 2025 19:37:20 -0500 Subject: [PATCH 08/20] add header file --- examples/rpc/rpc-server.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 77901ad13..56db02b58 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -16,10 +16,14 @@ #ifdef _WIN32 # define DIRECTORY_SEPARATOR '\\' # define NOMINMAX +# include # include +# include +# include #else # define DIRECTORY_SEPARATOR '/' # include +# include #endif #include #include From 04ac48a6fc9bbf3bb6d200d758a68c7014b096ff Mon Sep 17 00:00:00 2001 From: matt23456 Date: Wed, 7 May 2025 20:40:54 -0500 Subject: [PATCH 09/20] support for 
 models with non-512 aligned tensors

---
 ggml/src/ggml-rpc.cpp | 50 +++++++++++++++++++++++++++++++++----------
 1 file changed, 39 insertions(+), 11 deletions(-)

diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp
index 0fe04e703..8cbcae538 100644
--- a/ggml/src/ggml-rpc.cpp
+++ b/ggml/src/ggml-rpc.cpp
@@ -505,11 +505,20 @@ static rpc_tensor serialize_tensor(const ggml_tensor * tensor) {
     return result;
 }
 
-GGML_CALL static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
-    UNUSED(buffer);
-    if (ggml_is_quantized(tensor->type)) {
-        // TODO: this check is due to MATRIX_ROW_PADDING in CUDA and should be generalized
-        //GGML_ASSERT(tensor->ne[0] % 512 == 0 && "unsupported quantized tensor");
+
+GGML_CALL static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor* tensor) {
+    ggml_backend_rpc_buffer_context* ctx = (ggml_backend_rpc_buffer_context*)buffer->context;
+
+    // CUDA backend on the server pads everything to 512 due to CUDA limitations.
+    // Due to bandwidth constraints, we only call the server init tensor functions if necessary.
+    // In particular, only quantized tensors need padding
+    if (ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr)) {
+        rpc_msg_init_tensor_req request;
+
+        request.tensor = serialize_tensor(tensor);
+
+        bool status = send_rpc_cmd(ctx->sock, RPC_CMD_INIT_TENSOR, &request, sizeof(request), nullptr, 0);
+        GGML_ASSERT(status);
     }
 }
 
@@ -643,11 +652,29 @@ GGML_CALL static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t
     return buft_ctx->max_size;
 }
 
-GGML_CALL static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
-    UNUSED(buft);
-    return ggml_nbytes(tensor);
+GGML_CALL static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
+    // See comments in init_tensor.
+    if (ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr)) {
+        ggml_backend_rpc_buffer_type_context* buft_ctx = (ggml_backend_rpc_buffer_type_context*)buft->context;
+        auto sock = get_socket(buft_ctx->endpoint);
+
+        rpc_msg_get_alloc_size_req request;
+
+        request.tensor = serialize_tensor(tensor);
+
+        rpc_msg_get_alloc_size_rsp response;
+        bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALLOC_SIZE, &request, sizeof(request), &response, sizeof(response));
+        GGML_ASSERT(status);
+
+        return response.alloc_size;
+    }
+    else {
+        return ggml_nbytes(tensor);
+    }
 }
 
+
+
 static ggml_backend_buffer_type_i ggml_backend_rpc_buffer_type_interface = {
     /* .get_name         = */ ggml_backend_rpc_buffer_type_name,
     /* .alloc_buffer     = */ ggml_backend_rpc_buffer_type_alloc_buffer,
@@ -1071,6 +1098,7 @@ bool rpc_server::set_tensor(const std::vector<uint8_t>& input) {
     return true;
 }
 
+
 bool rpc_server::get_cached_file(uint64_t hash, std::vector<uint8_t>& data) {
     if (!cache_dir) {
         return false;
@@ -1144,7 +1172,7 @@ bool rpc_server::init_tensor(const rpc_msg_init_tensor_req& request) {
     struct ggml_context* ctx = ggml_init(params);
     ggml_tensor* tensor = deserialize_tensor(ctx, &request.tensor);
     if (tensor == nullptr) {
-        GGML_ABORT("Null tensor pointer passed to server init_tensor function.\n");
+        GGML_PRINT_DEBUG("Null tensor pointer passed to server init_tensor function.\n");
         ggml_free(ctx);
         return false;
     }
@@ -1155,13 +1183,13 @@ bool rpc_server::init_tensor(const rpc_msg_init_tensor_req& request) {
         buffer->iface.init_tensor(buffer, tensor);
     }
     else {
-        GGML_ABORT("Null buffer for tensor passed to init_tensor function\n");
+        GGML_PRINT_DEBUG("Null buffer for tensor passed to init_tensor function\n");
     }
 
     if (tensor->extra != nullptr) {
         // This pointer can either be passed around client/server, or probably better stored server-side and kept track of.
         // Currently unimplemented.
-        GGML_ABORT("tensor->extra populated by the backend, this is currently unsupported.\n");
+        GGML_PRINT_DEBUG("tensor->extra populated by the backend, this is currently unsupported.\n");
         ggml_free(ctx);
         return false;
     }

From 2bb12323a5fa36eab5d59c77990c3b128c1e44f5 Mon Sep 17 00:00:00 2001
From: Radoslav Gerganov
Date: Fri, 25 Apr 2025 10:08:08 +0300
Subject: [PATCH 10/20] rpc : do not wait for response when sending
 RPC_CMD_SET_TENSOR (#12943)

RPC_CMD_SET_TENSOR always returns an empty response and we send this
4 times per token. We can improve TG speed if we don't wait for this
empty response.

The performance impact of this change depends on the network latency.
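As a rough sketch of what this saves, here is a one-way send using the
`| rpc_cmd (1 byte) | request_size (8 bytes) | request_data |` framing and the
`send_data` helper already in ggml-rpc.cpp (`send_cmd_no_reply` is an
illustrative name only; the actual change below adds an overload of
`send_rpc_cmd` with the same effect):

    // Sketch only: fire-and-forget command send. Error handling trimmed.
    static bool send_cmd_no_reply(sockfd_t fd, uint8_t cmd, const void * payload, uint64_t size) {
        return send_data(fd, &cmd,    sizeof(cmd))   // | rpc_cmd (1 byte)       |
            && send_data(fd, &size,   sizeof(size))  // | request_size (8 bytes) |
            && send_data(fd, payload, size);         // | request_data           |
        // no recv_data() here: SET_TENSOR's response is always empty, so
        // skipping the read removes one network round-trip per call
    }

Command ordering is still preserved because both sides share a single
connection: the server reads commands sequentially, so a later
GRAPH_COMPUTE (which does wait for its response) cannot overtake a
pending SET_TENSOR.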
# Conflicts: # ggml/src/ggml-rpc.cpp --- ggml/src/ggml-rpc.cpp | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-) diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 8cbcae538..09e52f295 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -380,8 +380,8 @@ static bool parse_endpoint(const std::string & endpoint, std::string & host, int } // RPC request : | rpc_cmd (1 byte) | request_size (8 bytes) | request_data (request_size bytes) | -// RPC response: | response_size (8 bytes) | response_data (response_size bytes) | -static bool send_rpc_cmd(const std::shared_ptr<socket_t>& sock, enum rpc_cmd cmd, const void* input, size_t input_size, void* output, size_t output_size) { +// No response +static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cmd, const void * input, size_t input_size) { uint8_t cmd_byte = cmd; if (!send_data(sock->fd, &cmd_byte, sizeof(cmd_byte))) { return false; } @@ -392,6 +392,15 @@ static bool send_rpc_cmd(const std::shared_ptr<socket_t>& sock, enum rpc_cmd cmd if (!send_data(sock->fd, input, input_size)) { return false; } + return true; +} + +// RPC request : | rpc_cmd (1 byte) | request_size (8 bytes) | request_data (request_size bytes) | +// RPC response: | response_size (8 bytes) | response_data (response_size bytes) | +static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cmd, const void * input, size_t input_size, void * output, size_t output_size) { + if (!send_rpc_cmd(sock, cmd, input, input_size)) { + return false; + } // TODO: currently the output_size is always known, do we need support for commands with variable output size? // even if we do, we can skip sending output_size from the server for commands with known output size uint64_t out_size; @@ -547,7 +556,7 @@ static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggm memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor)); memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size); - bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input.data(), input.size(), nullptr, 0); + bool status = send_rpc_cmd(ctx->sock, RPC_CMD_SET_TENSOR, input.data(), input.size()); GGML_ASSERT(status); } @@ -1405,20 +1414,6 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, } break; } - case RPC_CMD_BUFFER_GET_BASE: { - rpc_msg_buffer_get_base_req request; - if (!recv_msg(sockfd, &request, sizeof(request))) { - return; - } - rpc_msg_buffer_get_base_rsp response; - if (!server.buffer_get_base(request, response)) { - return; - } - if (!send_msg(sockfd, &response, sizeof(response))) { - return; - } - break; - } case RPC_CMD_FREE_BUFFER: { rpc_msg_free_buffer_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { @@ -1453,9 +1448,6 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, if (!server.set_tensor(input)) { return; } - if (!send_msg(sockfd, nullptr, 0)) { - return; - } break; } case RPC_CMD_SET_TENSOR_HASH: { From aed21b7622599262dde17590b83e16a7a303463a Mon Sep 17 00:00:00 2001 From: Ville Vesilehto Date: Mon, 28 Apr 2025 21:00:20 +0300 Subject: [PATCH 11/20] fix(rpc): Improve input validation and error handling (#13069) * fix(rpc): Improve input validation and error handling The `rpc-server` was vulnerable to Denial of Service attacks via several RPC commands (`SET_TENSOR`, `GRAPH_COMPUTE`, etc.).
Malformed messages could trigger failed assertions (e.g., invalid `ggml_type`) or out-of-bounds reads/writes leading to `GGML_ABORT` calls, crashing the server process. This PR introduces robust input validation and replaces `abort()` calls with graceful error handling: - **Type Validation:** `deserialize_tensor` now checks if the `tensor->type` is within the valid `GGML_TYPE_COUNT` range *before* calling `ggml_new_tensor_4d`. Returns `nullptr` on invalid type. - **Bounds Checks:** Replaced `GGML_ABORT` in `set_tensor`, `set_tensor_hash`, and `get_tensor` handlers with error logging and returning `false` when data/offset parameters are out of buffer bounds. - **Size Checks:** Added safe arithmetic checks (for overflow) in `graph_compute` when calculating required message sizes based on client-provided `n_nodes` and `n_tensors`. Returns early if the reported sizes conflict with the actual message size or would lead to overflow. - **Error Propagation:** - `create_node` now checks for `nullptr` return values from `deserialize_tensor` and its recursive calls, propagating `nullptr` upwards on failure. Uses `find` instead of `at` for safer map access. - `copy_tensor` now checks for `nullptr` from `deserialize_tensor` and sets the response status to failure if deserialization or bounds checks fail. - `graph_compute` now checks for `nullptr` return from `create_node` and returns failure status correctly. The final return value now reflects the actual computation status. These changes improve the RPC server's resilience against malformed client requests, preventing crashes and ensuring errors are handled more gracefully. Signed-off-by: Ville Vesilehto * refactor(rpc): address pr comments removed comments and unnecessary returns Signed-off-by: Ville Vesilehto * refactor(rpc): ambiguous nullptr from create_node rpc_server::create_node could previously return nullptr if the input ID was 0 (valid) or if an internal error (deserialization, recursion failure) occurred (invalid). This ambiguity made error handling difficult for the caller (`graph_compute`). This commit clarifies the meaning of nullptr: - `graph_compute` now checks if the input 'id' was non-zero when `create_node` returns nullptr, correctly identifying failures versus intentional null links. - `create_node` avoids recursive calls for zero IDs and propagates nullptr unambiguously on failure during recursion. Signed-off-by: Ville Vesilehto * refactor(rpc): initial zero check in create_node The caller (`graph_compute`) already checks `id != 0` when handling a `nullptr` return from `create_node`, correctly distinguishing intentional null links from actual errors. This makes the initial `if (id == 0)` check redundant. Also removes the log message when a tensor ID is not found in the provided map which was added in this branch. Signed-off-by: Ville Vesilehto * fix(rpc): Handle get_alloc_size failure in server Check the return value of `server.get_alloc_size` in the RPC server loop. If the call fails, return early to close the connection. Signed-off-by: Ville Vesilehto * refactor(rpc): input size validation in graph_compute Removes detailed, step-by-step size calculations and overflow checks in favor of simpler direct comparisons, assuming 64-bit overflow is unlikely. Signed-off-by: Ville Vesilehto * refactor(rpc): remove extra status code setting Removes the explicit setting of `response.result = GGML_STATUS_FAILED` when `create_node` returns `nullptr` within `graph_compute`. Primary signal is the `false` return value in case of failure. 
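All of the bounds checks introduced above share one shape; a condensed sketch with illustrative names (the real checks live inline in rpc_server::set_tensor, set_tensor_hash and get_tensor):

    #include <cstdint>

    // p0/p1 delimit the server-side buffer; data, offset and len come from the
    // untrusted client. Like the patch itself (see the graph_compute refactor
    // note above), this assumes the 64-bit additions do not wrap.
    static bool region_in_bounds(uint64_t p0, uint64_t p1,
                                 uint64_t data, uint64_t offset, uint64_t len) {
        if (data + offset < p0 || data + offset >= p1) {
            return false;                   // region starts outside the buffer
        }
        return len <= p1 - (data + offset); // compare against the space left
    }

Returning false with a logged error instead of calling GGML_ABORT means one malformed request drops a connection rather than the whole server.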
Signed-off-by: Ville Vesilehto * refactor(rpc): remove redundant check for tensor->type Breaks CI on ubuntu-cpu-make. Tensor type is uint32_t, thus the check is not needed. Signed-off-by: Ville Vesilehto --------- Signed-off-by: Ville Vesilehto # Conflicts: # ggml/src/ggml-rpc.cpp --- ggml/src/ggml-rpc.cpp | 97 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 80 insertions(+), 17 deletions(-) diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 09e52f295..e6693a11c 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -1027,8 +1027,21 @@ bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req& request) { } ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor) { + // Validate tensor type before using it + if (tensor->type >= GGML_TYPE_COUNT) { + GGML_LOG_ERROR("[%s] invalid tensor type received: %u\n", __func__, tensor->type); + return nullptr; + } + ggml_tensor * result = ggml_new_tensor_4d(ctx, (ggml_type) tensor->type, tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]); + + // ggml_new_tensor_4d might fail if dimensions are invalid, although less likely to crash than invalid type + if (result == nullptr) { + GGML_LOG_ERROR("[%s] ggml_new_tensor_4d failed for type %u\n", __func__, tensor->type); + return nullptr; + } + for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) { result->nb[i] = tensor->nb[i]; } @@ -1087,7 +1100,9 @@ bool rpc_server::set_tensor(const std::vector<uint8_t>& input) { const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size >(p1 - in_tensor->data - offset)) { - GGML_ABORT("[%s] tensor->data out of bounds\n", __func__); + GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu) out of buffer bounds [0x%zx, 0x%zx)\n", + __func__, in_tensor->data, offset, size, p0, p1); + return false; } } @@ -1163,7 +1178,9 @@ bool rpc_server::set_tensor_hash(const std::vector<uint8_t>& input, rpc_msg_set_ const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size >(p1 - in_tensor->data - offset)) { - GGML_ABORT("[%s] tensor->data out of bounds\n", __func__); + GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu, hash=0x%" PRIx64 ") out of buffer bounds [0x%zx, 0x%zx)\n", + __func__, in_tensor->data, offset, size, *hash, p0, p1); + return false; } } ggml_backend_tensor_set(tensor, cached_file.data(), offset, size); @@ -1229,8 +1246,10 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req& request, std::vector<uint8_t>& response) { if (request.tensor.data + request.offset < p0 || request.tensor.data + request.offset >= p1 || - request.size >(p1 - request.tensor.data - request.offset)) { - GGML_ABORT("[%s] tensor->data out of bounds\n", __func__); + request.size > (p1 - request.tensor.data - request.offset)) { + GGML_LOG_ERROR("[%s] requested tensor region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%" PRIu64 ") out of buffer bounds [0x%zx, 0x%zx)\n", + __func__, request.tensor.data, request.offset, request.size, p0, p1); + return false; } } @@ -1284,22 +1303,50 @@ ggml_tensor* rpc_server::create_node(uint64_t id, struct ggml_context* ctx, const std::unordered_map<uint64_t, const rpc_tensor*>& tensor_ptrs, std::unordered_map<uint64_t, struct ggml_tensor*>& tensor_map) { - if (id == 0) { - return nullptr; - } if (tensor_map.find(id) != tensor_map.end()) { return tensor_map[id]; } - const rpc_tensor* tensor = tensor_ptrs.at(id); - struct ggml_tensor* result = deserialize_tensor(ctx, tensor); + // Safely find the tensor pointer + auto
it_ptr = tensor_ptrs.find(id); + if (it_ptr == tensor_ptrs.end()) { + return nullptr; + } + const rpc_tensor * tensor = it_ptr->second; + + struct ggml_tensor * result = deserialize_tensor(ctx, tensor); if (result == nullptr) { return nullptr; } tensor_map[id] = result; for (int i = 0; i < GGML_MAX_SRC; i++) { - result->src[i] = create_node(tensor->src[i], ctx, tensor_ptrs, tensor_map); + // Check if the source ID is 0 before calling create_node recursively + if (tensor->src[i] == 0) { + result->src[i] = nullptr; + } else { + result->src[i] = create_node(tensor->src[i], ctx, tensor_ptrs, tensor_map); + // If the recursive call failed for a non-zero ID, propagate the error + if (result->src[i] == nullptr) { + GGML_LOG_ERROR("[%s] failed to create source node %d (src_id=%" PRIu64 ") for node id %" PRIu64 "\n", + __func__, i, tensor->src[i], id); + // Must return nullptr to signal failure up the call stack + return nullptr; + } + } + } + + // Handle view_src similarly + if (tensor->view_src == 0) { + result->view_src = nullptr; + } else { + result->view_src = create_node(tensor->view_src, ctx, tensor_ptrs, tensor_map); + // If the recursive call failed for a non-zero ID, propagate the error + if (result->view_src == nullptr) { + GGML_LOG_ERROR("[%s] failed to create view_src node (view_src_id=%" PRIu64 ") for node id %" PRIu64 "\n", + __func__, tensor->view_src, id); + // Must return nullptr to signal failure up the call stack + return nullptr; + } } - result->view_src = create_node(tensor->view_src, ctx, tensor_ptrs, tensor_map); result->view_offs = tensor->view_offs; return result; } @@ -1325,6 +1372,7 @@ bool rpc_server::graph_compute(const std::vector<uint8_t>& input, rpc_msg_graph_ GGML_PRINT_DEBUG("[%s] n_nodes: %u, n_tensors: %u\n", __func__, n_nodes, n_tensors); size_t buf_size = ggml_tensor_overhead() * (n_nodes + n_tensors) + ggml_graph_overhead_custom(n_nodes, false); + struct ggml_init_params params = { /*.mem_size =*/ buf_size, /*.mem_buffer =*/ NULL, @@ -1342,6 +1390,14 @@ bool rpc_server::graph_compute(const std::vector<uint8_t>& input, rpc_msg_graph_ int64_t id; memcpy(&id, &nodes[i], sizeof(id)); graph->nodes[i] = create_node(id, ctx, tensor_ptrs, tensor_map); + + // Check if create_node failed for a *non-zero* ID. + // If id was 0, create_node returning nullptr is expected. + // If id was non-zero and create_node returned nullptr, it indicates a deserialization error.
+ if (graph->nodes[i] == nullptr && id != 0) { + GGML_LOG_ERROR("[%s] failed to create graph node %d (id=%" PRId64 ")\n", __func__, i, id); + return false; + } } ggml_status status = ggml_backend_graph_compute(backend, graph); response.result = status; @@ -1378,12 +1434,19 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, if (!send_msg(sockfd, &response, sizeof(response))) { return; } - break; - } - case RPC_CMD_GET_ALLOC_SIZE: { - rpc_msg_get_alloc_size_req request; - if (!recv_msg(sockfd, &request, sizeof(request))) { - return; + case RPC_CMD_GET_ALLOC_SIZE: { + rpc_msg_get_alloc_size_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; + } + rpc_msg_get_alloc_size_rsp response; + if (!server.get_alloc_size(request, response)) { + return; + } + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } + break; } rpc_msg_get_alloc_size_rsp response; server.get_alloc_size(request, response); From c7910376c0c1d8255d50cedc9b7dee576038e655 Mon Sep 17 00:00:00 2001 From: xiaofei Date: Wed, 30 Apr 2025 14:29:22 +0800 Subject: [PATCH 12/20] rpc : fix cache directory initialization (#13188) Signed-off-by: xiaofei # Conflicts: # examples/rpc/rpc-server.cpp --- examples/rpc/rpc-server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 56db02b58..943c1b1cc 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -306,7 +306,7 @@ int main(int argc, char * argv[]) { else { get_backend_memory(&free_mem, &total_mem); } - const char* cache_dir = nullptr; + const char * cache_dir = nullptr; std::string cache_dir_str; if (params.use_cache) { cache_dir_str = fs_get_cache_directory() + "rpc/"; From fc5a9cd3079dfa2fff16ea9dcc71d23340e0dfbd Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 1 May 2025 17:32:11 -0400 Subject: [PATCH 13/20] rpc : avoid uninitialized memory in serialize_tensor (#13210) Zero out the name and padding buffers. 
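For context on why the zeroing matters: snprintf writes only the string and its terminator, so the tail of a fixed-size char field keeps whatever bytes happened to be on the stack, and a byte-for-byte send() ships them to the peer. A stand-alone illustration with a hypothetical message struct:

    #include <cstdio>
    #include <cstring>

    struct wire_msg {
        char name[64];
        char padding[4];
    };

    static wire_msg make_msg(const char * src) {
        wire_msg m;                               // fields start out indeterminate
        memset(m.name,    0, sizeof(m.name));     // zero the whole field first...
        memset(m.padding, 0, sizeof(m.padding));
        snprintf(m.name, sizeof(m.name), "%s", src); // ...then write the string
        return m;                                 // safe to send byte-for-byte
    }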
--- ggml/src/ggml-rpc.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index e6693a11c..852824ec9 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -510,6 +510,11 @@ static rpc_tensor serialize_tensor(const ggml_tensor * tensor) { result.view_src = reinterpret_cast<uint64_t>(tensor->view_src); result.view_offs = tensor->view_offs; result.data = reinterpret_cast<uint64_t>(tensor->data); + + // Avoid sending uninitialized data over the wire + memset(result.name, 0, sizeof(result.name)); + memset(result.padding, 0, sizeof(result.padding)); + snprintf(result.name, GGML_MAX_NAME, "%s", tensor->name); return result; } From 1356a3f90818337a2d86e174a7ac989ddc713684 Mon Sep 17 00:00:00 2001 From: firecoperana Date: Wed, 7 May 2025 21:22:58 -0500 Subject: [PATCH 14/20] fix merge error --- ggml/src/ggml-rpc.cpp | 39 +++++++++++++++++---------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 852824ec9..09bb1d90f 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -1034,7 +1034,7 @@ bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req& request) { ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor) { // Validate tensor type before using it if (tensor->type >= GGML_TYPE_COUNT) { - GGML_LOG_ERROR("[%s] invalid tensor type received: %u\n", __func__, tensor->type); + GGML_PRINT_DEBUG("[%s] invalid tensor type received: %u\n", __func__, tensor->type); return nullptr; } @@ -1043,7 +1043,7 @@ ggml_tensor * rpc_server::deserialize_tensor(struct ggml_context * ctx, const rp // ggml_new_tensor_4d might fail if dimensions are invalid, although less likely to crash than invalid type if (result == nullptr) { - GGML_LOG_ERROR("[%s] ggml_new_tensor_4d failed for type %u\n", __func__, tensor->type); + GGML_PRINT_DEBUG("[%s] ggml_new_tensor_4d failed for type %u\n", __func__, tensor->type); return nullptr; } @@ -1105,7 +1105,7 @@ bool rpc_server::set_tensor(const std::vector<uint8_t>& input) { const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size >(p1 - in_tensor->data - offset)) { - GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu) out of buffer bounds [0x%zx, 0x%zx)\n", + GGML_PRINT_DEBUG("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu) out of buffer bounds [0x%zx, 0x%zx)\n", __func__, in_tensor->data, offset, size, p0, p1); return false; } @@ -1183,7 +1183,7 @@ bool rpc_server::set_tensor_hash(const std::vector<uint8_t>& input, rpc_msg_set_ const size_t p1 = p0 + ggml_backend_buffer_get_size(tensor->buffer); if (in_tensor->data + offset < p0 || in_tensor->data + offset >= p1 || size >(p1 - in_tensor->data - offset)) { - GGML_LOG_ERROR("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu, hash=0x%" PRIx64 ") out of buffer bounds [0x%zx, 0x%zx)\n", + GGML_PRINT_DEBUG("[%s] tensor data region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%zu, hash=0x%" PRIx64 ") out of buffer bounds [0x%zx, 0x%zx)\n", __func__, in_tensor->data, offset, size, *hash, p0, p1); return false; } @@ -1252,7 +1252,7 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req& request, std::vector<uint8_t>& response) { if (request.tensor.data + request.offset < p0 || request.tensor.data + request.offset >= p1 || request.size > (p1 - request.tensor.data - request.offset)) { - GGML_LOG_ERROR("[%s] requested tensor region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%" PRIu64 ") out of buffer
bounds [0x%zx, 0x%zx)\n", + GGML_PRINT_DEBUG("[%s] requested tensor region (data=0x%" PRIx64 ", offset=%" PRIu64 ", size=%" PRIu64 ") out of buffer bounds [0x%zx, 0x%zx)\n", __func__, request.tensor.data, request.offset, request.size, p0, p1); return false; } @@ -1331,7 +1331,7 @@ ggml_tensor* rpc_server::create_node(uint64_t id, result->src[i] = create_node(tensor->src[i], ctx, tensor_ptrs, tensor_map); // If the recursive call failed for a non-zero ID, propagate the error if (result->src[i] == nullptr) { - GGML_LOG_ERROR("[%s] failed to create source node %d (src_id=%" PRIu64 ") for node id %" PRIu64 "\n", + GGML_PRINT_DEBUG("[%s] failed to create source node %d (src_id=%" PRIu64 ") for node id %" PRIu64 "\n", __func__, i, tensor->src[i], id); // Must return nullptr to signal failure up the call stack return nullptr; @@ -1346,7 +1346,7 @@ ggml_tensor* rpc_server::create_node(uint64_t id, result->view_src = create_node(tensor->view_src, ctx, tensor_ptrs, tensor_map); // If the recursive call failed for a non-zero ID, propagate the error if (result->view_src == nullptr) { - GGML_LOG_ERROR("[%s] failed to create view_src node (view_src_id=%" PRIu64 ") for node id %" PRIu64 "\n", + GGML_PRINT_DEBUG("[%s] failed to create view_src node (view_src_id=%" PRIu64 ") for node id %" PRIu64 "\n", __func__, tensor->view_src, id); // Must return nullptr to signal failure up the call stack return nullptr; @@ -1400,7 +1400,7 @@ bool rpc_server::graph_compute(const std::vector<uint8_t>& input, rpc_msg_graph_ // If id was 0, create_node returning nullptr is expected. // If id was non-zero and create_node returned nullptr, it indicates a deserialization error. if (graph->nodes[i] == nullptr && id != 0) { - GGML_LOG_ERROR("[%s] failed to create graph node %d (id=%" PRId64 ")\n", __func__, i, id); + GGML_PRINT_DEBUG("[%s] failed to create graph node %d (id=%" PRId64 ")\n", __func__, i, id); return false; } } @@ -1439,22 +1439,17 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, if (!send_msg(sockfd, &response, sizeof(response))) { return; } - case RPC_CMD_GET_ALLOC_SIZE: { - rpc_msg_get_alloc_size_req request; - if (!recv_msg(sockfd, &request, sizeof(request))) { - return; - } - rpc_msg_get_alloc_size_rsp response; - if (!server.get_alloc_size(request, response)) { - return; - } - if (!send_msg(sockfd, &response, sizeof(response))) { - return; - } - break; + break; + } + case RPC_CMD_GET_ALLOC_SIZE: { + rpc_msg_get_alloc_size_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; } rpc_msg_get_alloc_size_rsp response; - server.get_alloc_size(request, response); + if (!server.get_alloc_size(request, response)) { + return; + } if (!send_msg(sockfd, &response, sizeof(response))) { return; } From 0b7bbeebcacf965d37f898d41ce395c3d017d5e4 Mon Sep 17 00:00:00 2001 From: firecoperana Date: Wed, 7 May 2025 21:53:25 -0500 Subject: [PATCH 15/20] Add hello command in RPC --- ggml/include/ggml-rpc.h | 2 +- ggml/src/ggml-rpc.cpp | 73 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/ggml/include/ggml-rpc.h b/ggml/include/ggml-rpc.h index 8e388d442..549e8504c 100644 --- a/ggml/include/ggml-rpc.h +++ b/ggml/include/ggml-rpc.h @@ -9,7 +9,7 @@ extern "C" { #define RPC_PROTO_MAJOR_VERSION 2 #define RPC_PROTO_MINOR_VERSION 0 -#define RPC_PROTO_PATCH_VERSION 0 +#define RPC_PROTO_PATCH_VERSION 1 #define GGML_RPC_MAX_SERVERS 16 // backend API diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 09bb1d90f..afa026758 100644
--- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -99,12 +99,19 @@ enum rpc_cmd { RPC_CMD_GET_DEVICE_MEMORY, RPC_CMD_INIT_TENSOR, RPC_CMD_GET_ALLOC_SIZE, + RPC_CMD_HELLO, RPC_CMD_COUNT, }; // Try RPC_CMD_SET_TENSOR_HASH first when data size is larger than this threshold const size_t HASH_THRESHOLD = 10 * 1024 * 1024; +struct rpc_msg_hello_rsp { + uint8_t major; + uint8_t minor; + uint8_t patch; +}; + struct rpc_msg_get_alloc_size_req { rpc_tensor tensor; }; @@ -417,6 +424,19 @@ static bool send_rpc_cmd(const std::shared_ptr<socket_t> & sock, enum rpc_cmd cm } // RPC client-side implementation +static bool check_server_version(const std::shared_ptr<socket_t>& sock) { + rpc_msg_hello_rsp response; + bool status = send_rpc_cmd(sock, RPC_CMD_HELLO, nullptr, 0, &response, sizeof(response)); + GGML_ASSERT(status); + if (response.major != RPC_PROTO_MAJOR_VERSION || response.minor > RPC_PROTO_MINOR_VERSION) { + fprintf(stderr, "RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); + return false; + } + if (response.minor != RPC_PROTO_MINOR_VERSION || response.patch != RPC_PROTO_PATCH_VERSION) { + fprintf(stderr, "WARNING: RPC server version mismatch: %d.%d.%d\n", response.major, response.minor, response.patch); + } + return true; +} static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) { static std::mutex mutex; @@ -451,6 +471,9 @@ static std::shared_ptr<socket_t> get_socket(const std::string & endpoint) { if (sock == nullptr) { return nullptr; } + if (!check_server_version(sock)) { + return nullptr; + } GGML_PRINT_DEBUG("[%s] connected to %s, sockfd=%d\n", __func__, endpoint.c_str(), sock->fd); sockets[endpoint] = sock; return sock; @@ -908,7 +931,7 @@ class rpc_server { : backend(backend), cache_dir(cache_dir) { } ~rpc_server(); - + void hello(rpc_msg_hello_rsp& response); void alloc_buffer(const rpc_msg_alloc_buffer_req& request, rpc_msg_alloc_buffer_rsp& response); void get_alignment(rpc_msg_get_alignment_rsp& response); void get_max_size(rpc_msg_get_max_size_rsp& response); @@ -937,6 +960,13 @@ class rpc_server { std::unordered_set<ggml_backend_buffer_t> buffers; }; +void rpc_server::hello(rpc_msg_hello_rsp& response) { + response.major = RPC_PROTO_MAJOR_VERSION; + response.minor = RPC_PROTO_MINOR_VERSION; + response.patch = RPC_PROTO_PATCH_VERSION; + GGML_PRINT_DEBUG("[%s] version: %d.%d.%d\n", __func__, response.major, response.minor, response.patch); +} + bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req& request, rpc_msg_get_alloc_size_rsp& response) { ggml_backend_buffer_type_t buft; struct ggml_init_params params { @@ -1418,8 +1448,24 @@ rpc_server::~rpc_server() { static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, sockfd_t sockfd, size_t free_mem, size_t total_mem) { rpc_server server(backend, cache_dir); + uint8_t cmd; + if (!recv_data(sockfd, &cmd, 1)) { + return; + } + // the first command sent by the client must be HELLO + if (cmd != RPC_CMD_HELLO) { + fprintf(stderr, "Expected HELLO command, update client\n"); + return; + } + if (!recv_msg(sockfd, nullptr, 0)) { + return; + } + rpc_msg_hello_rsp response; + server.hello(response); + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } while (true) { - uint8_t cmd; if (!recv_data(sockfd, &cmd, 1)) { break; } @@ -1429,6 +1475,10 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, break; } switch (cmd) { + case RPC_CMD_HELLO: { + // HELLO command is handled above + return; + } case RPC_CMD_ALLOC_BUFFER: { rpc_msg_alloc_buffer_req request; if
(!recv_msg(sockfd, &request, sizeof(request))) { @@ -1447,9 +1497,7 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, return; } rpc_msg_get_alloc_size_rsp response; - if (!server.get_alloc_size(request, response)) { - return; - } + server.get_alloc_size(request, response); if (!send_msg(sockfd, &response, sizeof(response))) { return; } @@ -1477,6 +1525,20 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, } break; } + case RPC_CMD_BUFFER_GET_BASE: { + rpc_msg_buffer_get_base_req request; + if (!recv_msg(sockfd, &request, sizeof(request))) { + return; + } + rpc_msg_buffer_get_base_rsp response; + if (!server.buffer_get_base(request, response)) { + return; + } + if (!send_msg(sockfd, &response, sizeof(response))) { + return; + } + break; + } case RPC_CMD_FREE_BUFFER: { rpc_msg_free_buffer_req request; if (!recv_msg(sockfd, &request, sizeof(request))) { @@ -1602,6 +1664,7 @@ static void rpc_serve_client(ggml_backend_t backend, const char* cache_dir, } } + void ggml_backend_rpc_start_server(ggml_backend_t backend, const char* endpoint, const char* cache_dir, size_t free_mem, size_t total_mem) { From fab3bbda68a29805c0f9119234464b8db982c4cb Mon Sep 17 00:00:00 2001 From: firecoperana Date: Wed, 7 May 2025 23:01:44 -0500 Subject: [PATCH 16/20] bug fix --- ggml/src/ggml-backend.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-backend.c b/ggml/src/ggml-backend.c index 44688c36a..e51cf8f98 100644 --- a/ggml/src/ggml-backend.c +++ b/ggml/src/ggml-backend.c @@ -948,7 +948,7 @@ GGML_CALL static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, v } GGML_CALL static ggml_backend_t ggml_backend_reg_rpc_init(const char* params, void* user_data) { - return ggml_backend_rpc_init(); + return ggml_backend_rpc_init((const char*)user_data); GGML_UNUSED(params); GGML_UNUSED(user_data); From 551c6d977a61aded3a8f435b604f5a5f8c1d500c Mon Sep 17 00:00:00 2001 From: firecoperana Date: Thu, 8 May 2025 13:14:03 -0500 Subject: [PATCH 17/20] add rpc header --- ggml/src/ggml-backend.c | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml/src/ggml-backend.c b/ggml/src/ggml-backend.c index e51cf8f98..5903c89c9 100644 --- a/ggml/src/ggml-backend.c +++ b/ggml/src/ggml-backend.c @@ -1,6 +1,7 @@ #include "ggml-backend-impl.h" #include "ggml-alloc.h" #include "ggml-impl.h" +#include "ggml-rpc.h" #include <assert.h> #include <limits.h> From ee1fc6c7217a41f857ff38dba8085052702ca573 Mon Sep 17 00:00:00 2001 From: firecoperana Date: Thu, 8 May 2025 17:30:06 -0500 Subject: [PATCH 18/20] fix bug for missing rpc names --- ggml/src/ggml-rpc.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index afa026758..238f2ef89 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -649,12 +649,13 @@ static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_back rpc_msg_alloc_buffer_req request = { size }; rpc_msg_alloc_buffer_rsp response; auto sock = get_socket(buft_ctx->endpoint); + std::string name = "RPC[" + std::string(buft_ctx->endpoint) + "]"; bool status = send_rpc_cmd(sock, RPC_CMD_ALLOC_BUFFER, &request, sizeof(request), &response, sizeof(response)); GGML_ASSERT(status); if (response.remote_ptr != 0) { ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft, ggml_backend_rpc_buffer_interface, - new ggml_backend_rpc_buffer_context{ sock, nullptr, response.remote_ptr }, + new ggml_backend_rpc_buffer_context{ sock, nullptr, response.remote_ptr, name },
response.remote_size); return buffer; } @@ -858,7 +859,6 @@ GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const /* .context = */ buft_ctx }; buft_map[endpoint] = buft; - //auto str1 = (&(buft->context))->name; return buft; } From f7ef15010420e5fabe7e829f60d6224323daf416 Mon Sep 17 00:00:00 2001 From: firecoperana Date: Thu, 15 May 2025 10:23:58 -0500 Subject: [PATCH 19/20] add tcp no delay for rpc --- examples/server/server.cpp | 2 ++ examples/server/utils.hpp | 1 - ggml/src/ggml-rpc.cpp | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index ca60ebbcb..11bb250eb 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -12,6 +12,8 @@ #endif // increase max payload length to allow use of larger context size #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576 +// disable Nagle's algorithm +#define CPPHTTPLIB_TCP_NODELAY true #include "httplib.h" // Change JSON_ASSERT from assert() to GGML_ASSERT: #define JSON_ASSERT GGML_ASSERT #include "json.hpp" diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index e6a1f0697..54b3d4f11 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -6,7 +6,6 @@ // Change JSON_ASSERT from assert() to GGML_ASSERT: #define JSON_ASSERT GGML_ASSERT #include "json.hpp" - #include <string> #include <vector> #include <sstream> diff --git a/ggml/src/ggml-rpc.cpp b/ggml/src/ggml-rpc.cpp index 238f2ef89..3b5c8f46c 100644 --- a/ggml/src/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc.cpp @@ -79,7 +79,6 @@ struct rpc_tensor { char padding[4]; }; -#pragma pack(pop) static_assert(sizeof(rpc_tensor) % 8 == 0, "rpc_tensor size must be multiple of 8"); static std::unordered_map rpc_server_map; From 277a2893b8978f29b6b2b7efb995dfb182a06ee4 Mon Sep 17 00:00:00 2001 From: firecoperana Date: Sat, 31 May 2025 18:57:15 -0500 Subject: [PATCH 20/20] add back webui --- examples/server/server.cpp | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 11bb250eb..5d86fa2c5 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -20,6 +20,22 @@ #include "json.hpp" // auto generated files (update with ./deps.sh) +#include "colorthemes.css.hpp" +#include "style.css.hpp" +#include "theme-beeninorder.css.hpp" +#include "theme-ketivah.css.hpp" +#include "theme-mangotango.css.hpp" +#include "theme-playground.css.hpp" +#include "theme-polarnight.css.hpp" +#include "theme-snowstorm.css.hpp" +#include "index.html.hpp" +#include "index-new.html.hpp" +#include "index.js.hpp" +#include "completion.js.hpp" +#include "system-prompts.js.hpp" +#include "prompt-formats.js.hpp" +#include "json-schema-to-grammar.mjs.hpp" + #include <atomic> #include <chrono> #include <condition_variable> @@ -3394,6 +3410,25 @@ int main(int argc, char ** argv) { svr->set_base_dir(params.public_path); } + // using embedded static files + svr->Get("/", handle_static_file(index_html, index_html_len, "text/html; charset=utf-8")); + svr->Get("/index.js", handle_static_file(index_js, index_js_len, "text/javascript; charset=utf-8")); + svr->Get("/completion.js", handle_static_file(completion_js, completion_js_len, "text/javascript; charset=utf-8")); + svr->Get("/json-schema-to-grammar.mjs", handle_static_file(json_schema_to_grammar_mjs, json_schema_to_grammar_mjs_len, "text/javascript; charset=utf-8")); + + // add new-ui files + svr->Get("/colorthemes.css", handle_static_file(colorthemes_css, colorthemes_css_len, "text/css; charset=utf-8")); +
svr->Get("/style.css", handle_static_file(style_css, style_css_len, "text/css; charset=utf-8")); + svr->Get("/theme-beeninorder.css", handle_static_file(theme_beeninorder_css, theme_beeninorder_css_len, "text/css; charset=utf-8")); + svr->Get("/theme-ketivah.css", handle_static_file(theme_ketivah_css, theme_ketivah_css_len, "text/css; charset=utf-8")); + svr->Get("/theme-mangotango.css", handle_static_file(theme_mangotango_css, theme_mangotango_css_len, "text/css; charset=utf-8")); + svr->Get("/theme-playground.css", handle_static_file(theme_playground_css, theme_playground_css_len, "text/css; charset=utf-8")); + svr->Get("/theme-polarnight.css", handle_static_file(theme_polarnight_css, theme_polarnight_css_len, "text/css; charset=utf-8")); + svr->Get("/theme-snowstorm.css", handle_static_file(theme_snowstorm_css, theme_snowstorm_css_len, "text/css; charset=utf-8")); + svr->Get("/index-new.html", handle_static_file(index_new_html, index_new_html_len, "text/html; charset=utf-8")); + svr->Get("/system-prompts.js", handle_static_file(system_prompts_js, system_prompts_js_len, "text/javascript; charset=utf-8")); + svr->Get("/prompt-formats.js", handle_static_file(prompt_formats_js, prompt_formats_js_len, "text/javascript; charset=utf-8")); + // register API routes svr->Get ("/health", handle_health); svr->Get ("/metrics", handle_metrics);