Commit 46f1c50

Revert "Add vision support in llama-server (ikawrakow#901)"
This reverts commit 15159a8.
1 parent 320fc60 · commit 46f1c50

26 files changed: +730, -2457 lines

common/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
@@ -57,6 +57,8 @@ add_library(${TARGET} STATIC
     chat-parser.cpp
     chat-parser.h
     common.cpp
+    chat.h
+    chat.cpp
     sampling.h
     sampling.cpp
     console.h

common/common.cpp

Lines changed: 2 additions & 39 deletions
@@ -270,14 +270,6 @@ static std::string parse_device_list(const std::string& value) {
     return value;
 }

-
-std::pair<long, std::vector<char>> common_remote_get_content(const std::string& url, const common_remote_params&) {
-    if (!url.empty()) {
-        throw std::runtime_error("error: built without CURL, cannot download file from the internet");
-    }
-    return {};
-}
-
 //
 // CLI argument parsing
 //
@@ -1735,11 +1727,6 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.n_junk = std::stoi(argv[i]);
         return true;
     }
-    if (arg == "--no-context-shift") {
-        CHECK_ARG
-        params.ctx_shift = false;
-        return true;
-    }
     if (arg == "--pos") {
         CHECK_ARG
         params.i_pos = std::stoi(argv[i]);
@@ -2073,7 +2060,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "multi-modality" });
     options.push_back({ "*", " --mmproj FILE", "path to a multimodal projector file for LLaVA. see examples/llava/README.md" });
     options.push_back({ "*", " --image FILE", "path to an image file. use with multimodal models. Specify multiple times for batching" });
-    options.push_back({ "*", " --no-context-shift", "disable context-shift." });
+
     options.push_back({ "backend" });
     options.push_back({ "*", " --rpc SERVERS", "comma separated list of RPC servers" });

@@ -3324,29 +3311,6 @@ std::vector<llama_token> llama_tokenize(
     return result;
 }

-std::vector<llama_token> llama_tokenize(
-    const struct llama_vocab* vocab,
-    const std::string& text,
-    bool add_special,
-    bool parse_special) {
-    // upper limit for the number of tokens
-    int n_tokens = text.length() + 2 * add_special;
-    std::vector<llama_token> result(n_tokens);
-    n_tokens = llama_vocab_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
-    if (n_tokens == std::numeric_limits<int32_t>::min()) {
-        throw std::runtime_error("Tokenization failed: input text too large, tokenization result exceeds int32_t limit");
-    }
-    if (n_tokens < 0) {
-        result.resize(-n_tokens);
-        int check = llama_vocab_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
-        GGML_ASSERT(check == -n_tokens);
-    }
-    else {
-        result.resize(n_tokens);
-    }
-    return result;
-}
-
 std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
     std::string piece;
     piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\n'
@@ -3379,7 +3343,7 @@ std::string llama_token_to_piece(const struct llama_model* model, llama_token to
     return piece;
 }

-std::string llama_detokenize(const llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
+std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
     std::string text;
     text.resize(std::max(text.capacity(), tokens.size()));
     int32_t n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
@@ -3395,7 +3359,6 @@ std::string llama_detokenize(const llama_context * ctx, const std::vector<llama_
     return text;
 }

-
 bool llama_should_add_bos_token(const llama_model * model) {
     const int add_bos = llama_add_bos_token(model);

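Note: the llama_tokenize overload removed above uses the usual llama.cpp two-pass sizing convention: a first call with a too-small buffer returns the negative of the required token count, the vector is resized, and a second call fills it. The sketch below illustrates only that calling convention with a toy tokenizer; toy_tokenize and tokenize_two_pass are hypothetical names, not part of this codebase.

#include <cstdint>
#include <cstdio>
#include <functional>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Toy stand-in with the same calling convention as the tokenizer used above:
// writes up to out_len ids and returns the count, or returns the negative of
// the required size when the buffer is too small.
static int32_t toy_tokenize(const std::string & text, int32_t * out, int32_t out_len) {
    std::istringstream iss(text);
    std::vector<int32_t> ids;
    for (std::string word; iss >> word; ) {
        ids.push_back((int32_t)(std::hash<std::string>{}(word) & 0x7fff));
    }
    if ((int32_t) ids.size() > out_len) {
        return -(int32_t) ids.size();          // report the required size
    }
    for (size_t i = 0; i < ids.size(); ++i) {
        out[i] = ids[i];
    }
    return (int32_t) ids.size();
}

// Two-pass pattern, mirroring the logic of the removed overload.
static std::vector<int32_t> tokenize_two_pass(const std::string & text) {
    std::vector<int32_t> result(4);            // deliberately small first guess
    int32_t n = toy_tokenize(text, result.data(), (int32_t) result.size());
    if (n < 0) {
        result.resize(-n);                     // first pass reported the true size
        int32_t check = toy_tokenize(text, result.data(), (int32_t) result.size());
        if (check != -n) {
            throw std::runtime_error("tokenization size changed between passes");
        }
    } else {
        result.resize(n);
    }
    return result;
}

int main() {
    std::printf("%zu tokens\n", tokenize_two_pass("a b c d e f").size());
    return 0;
}
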
common/common.h

Lines changed: 58 additions & 24 deletions
@@ -53,8 +53,6 @@ struct llama_lora_adapter_container : llama_lora_adapter_info {
     struct llama_lora_adapter * adapter;
 };

-using llama_tokens = std::vector<llama_token>;
-
 // build info
 extern int LLAMA_BUILD_NUMBER;
 extern char const * LLAMA_COMMIT;
@@ -239,7 +237,7 @@ struct gpt_params {
     bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool prompt_cache_all = false; // save user input and generations to prompt cache
     bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
-    bool ctx_shift = true;
+
     bool escape = true; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
     bool multiline_input = false; // reverse the usage of `\`
     bool simple_io = false; // improves compatibility with subprocesses and limited consoles
@@ -373,9 +371,6 @@ struct gpt_params {
     bool sweep_bench_output_jsonl = false;
 };

-
-
-void gpt_params_handle_hf_token(gpt_params & params);
 void gpt_params_parse_from_env(gpt_params & params);
 void gpt_params_handle_model_default(gpt_params & params);

@@ -386,15 +381,6 @@ void gpt_params_print_usage(int argc, char ** argv, const gpt_params & params);

 std::string gpt_params_get_system_info(const gpt_params & params);

-
-struct common_remote_params {
-    std::vector<std::string> headers;
-    long timeout = 0; // CURLOPT_TIMEOUT, in seconds ; 0 means no timeout
-    long max_size = 0; // max size of the response ; unlimited if 0 ; max is 2GB
-};
-// get remote file content, returns <http_code, raw_response_body>
-std::pair<long, std::vector<char>> common_remote_get_content(const std::string& url, const common_remote_params& params);
-
 //
 // String utils
 //
@@ -511,12 +497,6 @@ std::vector<llama_token> llama_tokenize(
     bool add_special,
     bool parse_special = false);

-std::vector<llama_token> llama_tokenize(
-    const struct llama_vocab* vocab,
-    const std::string& text,
-    bool add_special,
-    bool parse_special = false);
-
 // tokenizes a token into a piece, optionally renders special/control tokens
 // should work similar to Python's `tokenizer.id_to_piece`
 std::string llama_token_to_piece(
@@ -533,16 +513,70 @@ std::string llama_token_to_piece(
 // should work similar to Python's `tokenizer.decode`
 // optionally renders special/control tokens
 std::string llama_detokenize(
-    const llama_context * ctx,
+    llama_context * ctx,
     const std::vector<llama_token> & tokens,
     bool special = true);

-
 // Uses the value from the model metadata if possible, otherwise
 // defaults to true when model type is SPM, otherwise false.
 bool llama_should_add_bos_token(const llama_model * model);

-
+//
+// Chat template utils
+//
+//struct common_tool_call {
+// std::string name;
+// std::string arguments;
+// std::string id;
+//};
+//
+//// same with llama_chat_message, but uses std::string
+//struct common_chat_msg {
+// std::string role;
+// std::string content;
+// std::vector<common_tool_call> tool_calls;
+// std::string reasoning_content = "";
+//};
+
+//// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
+//bool llama_chat_verify_template(const struct llama_model* , const std::string& tmpl, bool use_jinja);
+//
+//namespace minja {
+// class chat_template;
+//}
+//
+//typedef minja::chat_template common_chat_template;
+//
+//struct common_chat_templates {
+// bool has_explicit_template; // Model had builtin template or template overridde was specified.
+// std::unique_ptr<common_chat_template> template_default; // always set (defaults to chatml)
+// std::unique_ptr<common_chat_template> template_tool_use;
+//};
+//
+//
+//// CPP wrapper for llama_chat_apply_template
+//// If the built-in template is not supported, we default to chatml
+//// If the custom "tmpl" is not supported, we throw an error
+//std::string llama_chat_apply_template(
+// const struct llama_model* model,
+// const common_chat_template& tmpl,
+// const std::vector< common_chat_msg>& chat,
+// bool add_ass,
+// bool use_jinja);
+//
+//// Format single message, while taking into account the position of that message in chat history
+//std::string llama_chat_format_single(const struct llama_model* model,
+// const common_chat_template& tmpl,
+// const std::vector< common_chat_msg>& past_msg,
+// const common_chat_msg& new_msg,
+// bool add_ass,
+// bool use_jinja);
+//
+//// Returns an example of formatted chat
+//std::string llama_chat_format_example(const struct llama_model* model,
+// const common_chat_template& tmpl, bool use_jinja);
+//
+//common_chat_templates llama_chat_templates_from_model(const struct llama_model* model, const std::string& chat_template_override);


 //
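
Note: after the revert, the tokenize/detokenize helpers kept in this header can be used roughly as shown below. This is a minimal sketch, assuming the caller already holds a valid llama_context * obtained elsewhere; model loading and error handling are omitted, and the helper name roundtrip is hypothetical.

#include <string>
#include <vector>

#include "common.h"   // declares the llama_tokenize / llama_detokenize helpers used below

// Round-trip sketch: encode a prompt to tokens, then render it back to text.
// `ctx` is assumed to be a valid llama_context * created by the caller.
static std::string roundtrip(llama_context * ctx, const std::string & prompt) {
    // add_special = true lets the model's BOS handling apply;
    // parse_special keeps its default of false.
    std::vector<llama_token> toks = llama_tokenize(ctx, prompt, /*add_special=*/true);

    // special = true renders special/control tokens back into the text.
    return llama_detokenize(ctx, toks, /*special=*/true);
}
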

examples/mtmd/clip.cpp

Lines changed: 1 addition & 1 deletion
@@ -3331,7 +3331,7 @@ struct image_manipulation {
         dst.buf.resize(3 * target_width * target_height);

         float Cc;
-        float C[5] = {};
+        float C[5];
         float d0, d2, d3, a0, a1, a2, a3;
         int i, j, k, jj;
         int x, y;
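
Note on the one-line change above: `float C[5] = {};` value-initializes the array, so all five elements start at 0.0f, while `float C[5];` leaves the elements indeterminate until they are assigned, which is only safe if every element is written before it is read. A standalone illustration of the difference (not part of clip.cpp):

#include <cstdio>

int main() {
    float zeroed[5] = {};   // value-initialized: every element is 0.0f
    float raw[5];           // default-initialized: values are indeterminate
                            // until each element is assigned

    std::printf("zeroed[0] = %f\n", zeroed[0]);   // always prints 0.000000

    for (int i = 0; i < 5; ++i) {
        raw[i] = 1.0f;      // writing before reading makes raw[] safe to use
    }
    std::printf("raw[0]    = %f\n", raw[0]);      // prints 1.000000
    return 0;
}
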

examples/server/CMakeLists.txt

Lines changed: 0 additions & 3 deletions
@@ -70,9 +70,6 @@ endif()
 target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})
 target_link_libraries(${TARGET} PRIVATE common ${CMAKE_THREAD_LIBS_INIT})

-target_include_directories(${TARGET} PRIVATE ../mtmd)
-target_link_libraries(${TARGET} PRIVATE common mtmd ${CMAKE_THREAD_LIBS_INIT})
-
 if (LLAMA_SERVER_SSL)
     find_package(OpenSSL REQUIRED)
     target_link_libraries(${TARGET} PRIVATE OpenSSL::SSL OpenSSL::Crypto)
Binary file changed (-572 KB); contents not shown.
