@@ -39,7 +39,7 @@
 using json = nlohmann::ordered_json;

 std::initializer_list<enum llama_example> mmproj_examples = {
-    LLAMA_EXAMPLE_LLAVA,
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_SERVER,
 };

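Context for readers new to this file: `mmproj_examples` lists the example programs that should expose the multimodal-projector flags, and individual options opt into it via `.set_examples(mmproj_examples)` (as `--no-mmproj-offload` does in the next hunk). Below is a minimal sketch of that builder pattern, assuming a simplified stand-in for the real `common_arg`; only `set_examples`/`set_env` and the handler signature come from the diff, everything else is illustrative:

```cpp
// Minimal sketch, NOT the real common/arg.h: a trimmed-down stand-in that
// illustrates the builder pattern visible in this diff. The member layout
// and the gating rule are assumptions for illustration only.
#include <functional>
#include <initializer_list>
#include <set>
#include <string>
#include <vector>

enum llama_example { LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD };

struct common_params { std::vector<std::string> image; };

struct common_arg {
    std::vector<std::string> names;                      // e.g. {"--image", "--audio"}
    std::function<void(common_params &, const std::string &)> handler;
    std::set<llama_example> examples;                    // examples that expose this flag
    std::string env;                                     // optional env-var override

    common_arg & set_examples(std::initializer_list<llama_example> ex) {
        examples = ex;
        return *this;
    }
    common_arg & set_env(std::string e) {
        env = std::move(e);
        return *this;
    }
    // Assumed gating rule: the flag is registered only for the examples it lists.
    bool exposed_for(llama_example ex) const { return examples.count(ex) > 0; }
};

int main() {
    const std::initializer_list<llama_example> mmproj_examples = {
        LLAMA_EXAMPLE_MTMD,
        LLAMA_EXAMPLE_SERVER,
    };

    common_arg no_offload;
    no_offload.names = {"--no-mmproj-offload"};
    no_offload.set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD");

    // After this patch, mmproj flags are exposed for the merged multimodal
    // example (MTMD) rather than the removed LLAVA one.
    return no_offload.exposed_for(LLAMA_EXAMPLE_MTMD) ? 0 : 1;
}
```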
@@ -2233,12 +2233,12 @@ common_params_context common_params_parser_init(common_params & params, llama_ex |
         }
     ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD"));
     add_opt(common_arg(
-        {"--image"}, "FILE",
-        "path to an image file. use with multimodal models. Specify multiple times for batching",
+        {"--image", "--audio"}, "FILE",
+        "path to an image or audio file. use with multimodal models, can be repeated if you have multiple files\n",
         [](common_params & params, const std::string & value) {
             params.image.emplace_back(value);
         }
-    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
+    ).set_examples({LLAMA_EXAMPLE_MTMD}));
     if (llama_supports_rpc()) {
         add_opt(common_arg(
             {"--rpc"}, "SERVERS",
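The key behavioral point of this hunk: `--image` and `--audio` are aliases bound to a single handler, and each occurrence appends to the same `params.image` vector, so mixed media files accumulate in command-line order. A small self-contained sketch of that accumulation follows (the handler body is copied from the diff; the surrounding harness is an assumption):

```cpp
#include <cstdio>
#include <string>
#include <vector>

struct common_params { std::vector<std::string> image; };

int main() {
    common_params params;
    // Handler body as in the diff: every --image/--audio value is appended.
    auto handler = [](common_params & p, const std::string & value) {
        p.image.emplace_back(value);
    };
    // Equivalent to: --image photo.png --audio clip.wav --image chart.png
    handler(params, "photo.png");
    handler(params, "clip.wav");
    handler(params, "chart.png");
    for (const auto & f : params.image) {
        std::printf("media file: %s\n", f.c_str());
    }
    return 0;
}
```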
@@ -2868,7 +2868,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex |
         [](common_params & params, const std::string & value) {
             params.chat_template = value;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_LLAVA}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
     add_opt(common_arg(
         {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
         string_format(