|
@@ -40,7 +40,7 @@
 using json = nlohmann::ordered_json;

 std::initializer_list<enum llama_example> mmproj_examples = {
-    LLAMA_EXAMPLE_LLAVA,
+    LLAMA_EXAMPLE_MTMD,
     LLAMA_EXAMPLE_SERVER,
 };

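LLAMA_EXAMPLE_MTMD replaces LLAMA_EXAMPLE_LLAVA now that the LLaVA-specific tool is folded into the generic multimodal (mtmd) example. mmproj_examples is the whitelist of tools that expose the mmproj-related flags; below is a hedged sketch of how such a list can gate an option (example_supported is a hypothetical helper, not part of this patch):

// Hedged sketch: gating an option to a whitelist of examples.
// example_supported is a hypothetical helper, not from this patch.
#include <algorithm>
#include <initializer_list>

enum llama_example { LLAMA_EXAMPLE_MTMD, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MAIN };

static bool example_supported(std::initializer_list<enum llama_example> allowed,
                              enum llama_example current) {
    // An option is shown only when the running tool is in its allowed list.
    return std::find(allowed.begin(), allowed.end(), current) != allowed.end();
}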
|
@@ -2234,12 +2234,12 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples(mmproj_examples).set_env("LLAMA_ARG_NO_MMPROJ_OFFLOAD"));
     add_opt(common_arg(
-        {"--image"}, "FILE",
-        "path to an image file. use with multimodal models. Specify multiple times for batching",
+        {"--image", "--audio"}, "FILE",
+        "path to an image or audio file. use with multimodal models, can be repeated if you have multiple files\n",
         [](common_params & params, const std::string & value) {
             params.image.emplace_back(value);
         }
-    ).set_examples({LLAMA_EXAMPLE_LLAVA}));
+    ).set_examples({LLAMA_EXAMPLE_MTMD}));
     if (llama_supports_rpc()) {
         add_opt(common_arg(
             {"--rpc"}, "SERVERS",
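
--image and --audio are aliases for one option: common_arg takes a list of flag names, and the shared handler appends each path to params.image, so the flags can be repeated and mixed on a single command line. A minimal sketch of that accumulation, assuming params.image is a std::vector<std::string> (implied by the emplace_back above):

// Minimal sketch, assuming params.image is a std::vector<std::string>.
#include <cassert>
#include <string>
#include <vector>

struct params_sketch {              // stand-in for common_params
    std::vector<std::string> image; // holds image *and* audio paths
};

int main() {
    params_sketch params;
    // e.g. --image a.png --audio b.wav --image c.jpg
    for (const char * path : {"a.png", "b.wav", "c.jpg"}) {
        params.image.emplace_back(path); // what the handler does per occurrence
    }
    assert(params.image.size() == 3);
    return 0;
}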
@@ -2869,7 +2869,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.chat_template = value;
         }
-    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_LLAVA}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
+    ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_MTMD}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
     add_opt(common_arg(
         {"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
         string_format(
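
--chat-template is now also advertised for the mtmd example. As elsewhere in this file, set_env registers an environment-variable alias for the option (LLAMA_ARG_CHAT_TEMPLATE here); a hedged sketch of one common flag-over-environment precedence scheme, not the real parser:

// Hedged sketch of flag-over-environment precedence; not the real parser.
#include <cstdlib>
#include <string>

static std::string value_or_env(const char * cli_value, const char * env_name) {
    if (cli_value != nullptr) {
        return cli_value;                    // an explicit flag wins
    }
    if (const char * v = std::getenv(env_name)) {
        return v;                            // otherwise fall back to the env var
    }
    return "";                               // unset: keep the default
}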
|