From e77b49cd56313e90c338fd0a6258002655c017a2 Mon Sep 17 00:00:00 2001
From: Wes Higbee
Date: Fri, 3 Oct 2025 00:28:02 -0500
Subject: [PATCH] claude code generated fish completion support for me

---
 common/arg.cpp  | 155 ++++++++++++++++++++++++++++++++++++++++++++++++
 common/common.h |   3 +-
 2 files changed, 157 insertions(+), 1 deletion(-)

diff --git a/common/arg.cpp b/common/arg.cpp
index cbca8b5ac5abb..f25bc11a70a80 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -1585,6 +1585,150 @@ static void common_params_print_completion(common_params_context & ctx_arg) {
     }
 }
 
+// Print a source-able fish-shell completion script covering all llama.cpp
+// executables. Mirrors common_params_print_completion() (the bash variant).
+static void common_params_print_completion_fish(common_params_context & ctx_arg) {
+    std::vector<common_arg *> common_options;
+    std::vector<common_arg *> sparam_options;
+    std::vector<common_arg *> specific_options;
+
+    for (auto & opt : ctx_arg.options) {
+        if (opt.is_sparam) {
+            sparam_options.push_back(&opt);
+        } else if (opt.in_example(ctx_arg.ex)) {
+            specific_options.push_back(&opt);
+        } else {
+            common_options.push_back(&opt);
+        }
+    }
+
+    std::set<std::string> executables = {
+        "llama-batched",
+        "llama-batched-bench",
+        "llama-bench",
+        "llama-cli",
+        "llama-convert-llama2c-to-ggml",
+        "llama-cvector-generator",
+        "llama-embedding",
+        "llama-eval-callback",
+        "llama-export-lora",
+        "llama-gen-docs",
+        "llama-gguf",
+        "llama-gguf-hash",
+        "llama-gguf-split",
+        "llama-gritlm",
+        "llama-imatrix",
+        "llama-infill",
+        "llama-mtmd-cli",
+        "llama-llava-clip-quantize-cli",
+        "llama-lookahead",
+        "llama-lookup",
+        "llama-lookup-create",
+        "llama-lookup-merge",
+        "llama-lookup-stats",
+        "llama-parallel",
+        "llama-passkey",
+        "llama-perplexity",
+        "llama-q8dot",
+        "llama-quantize",
+        "llama-qwen2vl-cli",
+        "llama-retrieval",
+        "llama-run",
+        "llama-save-load-state",
+        "llama-server",
+        "llama-simple",
+        "llama-simple-chat",
+        "llama-speculative",
+        "llama-speculative-simple",
+        "llama-tokenize",
+        "llama-tts",
+        "llama-vdot"
+    };
+
+    // Helper to escape fish single-quoted strings (only '\'' and '\\' are
+    // special inside fish single quotes)
+    auto escape_fish = [](const std::string & str) {
+        std::string result;
+        for (char c : str) {
+            if (c == '\'' || c == '\\' || c == '\n') {
+                result += '\\';
+            }
+            result += c;
+        }
+        return result;
+    };
+
+    // Helper to print fish completions for one option, for every executable
+    auto print_fish_option = [&](const common_arg * opt) {
+        std::string short_opt, long_opt;
+
+        for (const char * arg : opt->args) {
+            std::string arg_str(arg);
+            if (arg_str.size() == 2 && arg_str[0] == '-' && arg_str[1] != '-') {
+                short_opt = arg_str.substr(1);
+            } else if (arg_str.size() > 2 && arg_str[0] == '-' && arg_str[1] == '-') {
+                long_opt = arg_str.substr(2);
+            }
+        }
+
+        // Extract description (first line only, remove env info)
+        std::string desc = opt->help;
+        size_t newline_pos = desc.find('\n');
+        if (newline_pos != std::string::npos) {
+            desc = desc.substr(0, newline_pos);
+        }
+        desc = escape_fish(desc);
+
+        // Determine if option takes an argument
+        bool has_value = opt->value_hint != nullptr || opt->handler_string != nullptr ||
+                         opt->handler_int != nullptr || opt->handler_str_str != nullptr;
+
+        for (const auto & exe : executables) {
+            printf("complete -c %s", exe.c_str());
+
+            if (!short_opt.empty()) {
+                printf(" -s %s", short_opt.c_str());
+            }
+            if (!long_opt.empty()) {
+                printf(" -l %s", long_opt.c_str());
+            }
+            if (!desc.empty()) {
+                printf(" -d '%s'", desc.c_str());
+            }
+            if (has_value) {
+                printf(" -r");
+                if (long_opt == "model" || short_opt == "m") {
+                    printf("F"); // -rF: require an argument AND force file completion for model paths
+                }
+            } else {
+                printf(" -f"); // no file completion for flags
+            }
+            printf("\n");
+        }
+    };
+
+    // Print header comment
+    printf("# Fish shell completions for llama.cpp\n");
+    printf("# Generated by --completion-fish\n\n");
+
+    // Print all options
+    for (const common_arg * opt : common_options) {
+        print_fish_option(opt);
+    }
+    for (const common_arg * opt : sparam_options) {
+        print_fish_option(opt);
+    }
+    for (const common_arg * opt : specific_options) {
+        print_fish_option(opt);
+    }
+
+    // Add custom file type completions (header printed once, not per executable)
+    printf("\n# Custom file completions\n");
+    for (const auto & exe : executables) {
+        printf("complete -c %s -s m -l model -rF -a '(for file in *.gguf; echo $file; end)'\n", exe.c_str());
+        printf("complete -c %s -l grammar-file -rF -a '(for file in *.gbnf; echo $file; end)'\n", exe.c_str());
+        printf("complete -c %s -l chat-template-file -rF -a '(for file in *.jinja; echo $file; end)'\n", exe.c_str());
+    }
+}
+
 static std::vector<ggml_backend_dev_t> parse_device_list(const std::string & value) {
     std::vector<ggml_backend_dev_t> devices;
     auto dev_names = string_split<std::string>(value, ',');
@@ -1650,6 +1794,10 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e
             common_params_print_completion(ctx_arg);
             exit(0);
         }
+        if (ctx_arg.params.completion_fish) {
+            common_params_print_completion_fish(ctx_arg);
+            exit(0);
+        }
         params.lr.init();
     } catch (const std::invalid_argument & ex) {
         fprintf(stderr, "%s\n", ex.what());
@@ -1741,6 +1889,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.completion = true;
         }
     ));
+    add_opt(common_arg(
+        {"--completion-fish"},
+        "print source-able fish completion script for llama.cpp",
+        [](common_params & params) {
+            params.completion_fish = true;
+        }
+    ));
     add_opt(common_arg(
         {"--verbose-prompt"},
         string_format("print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false"),
diff --git a/common/common.h b/common/common.h
index 40c6847f32ddb..221dc549099af 100644
--- a/common/common.h
+++ b/common/common.h
@@ -365,7 +365,8 @@ struct common_params {
     bool kl_divergence = false; // compute KL divergence
 
     bool usage = false; // print usage
-    bool completion = false; // print source-able completion script
+    bool completion = false; // print source-able bash completion script
+    bool completion_fish = false; // print source-able fish completion script
    bool use_color = false; // use color to distinguish generations and inputs
    bool special = false; // enable special token output
    bool interactive = false; // interactive mode