llama.vim: filter server response fields #24
Changes from 4 commits

Commits in this pull request: aa91f6a, c57ac19, 5bce411, 32169e8, fc6f90d, 79ee0d1, 4185ae4
```diff
@@ -308,8 +308,9 @@ function! s:ring_update()
         \ 'cache_prompt': v:true,
         \ 't_max_prompt_ms': 1,
-        \ 't_max_predict_ms': 1
+        \ 't_max_predict_ms': 1,
+        \ 'response_fields': [""]
         \ })

     let l:curl_command = [
         \ "curl",
         \ "--silent",
```
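A note on the `[""]` value: the ring-update requests exist only to warm the server's prompt cache and their reply body is discarded, so the PR asks for a single empty field name. Assuming the llama.cpp server's `response_fields` semantics (requested names that match nothing are silently omitted rather than erroring), this strips the warm-up reply down to an essentially empty object. A rough model of that top-level filtering, for illustration only:

```vim
" Illustration only -- a rough model of server-side response_fields
" filtering for top-level names (slash paths are not modeled here).
function! s:filter_fields(reply, fields) abort
    let l:out = {}
    for l:name in a:fields
        " '' names no real field, so it never matches anything
        if !empty(l:name) && has_key(a:reply, l:name)
            let l:out[l:name] = a:reply[l:name]
        endif
    endfor
    return l:out
endfunction

" s:filter_fields({'content': '', 'tokens_cached': 8}, [''])        => {}
" s:filter_fields({'content': '', 'tokens_cached': 8}, ['content']) => {'content': ''}
```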
```diff
@@ -420,7 +420,21 @@ function! llama#fim(is_auto, cache) abort
         \ 'samplers': ["top_k", "top_p", "infill"],
         \ 'cache_prompt': v:true,
         \ 't_max_prompt_ms': g:llama_config.t_max_prompt_ms,
-        \ 't_max_predict_ms': g:llama_config.t_max_predict_ms
+        \ 't_max_predict_ms': g:llama_config.t_max_predict_ms,
+        \ 'response_fields': [
+        \     "content",
+        \     "timings/prompt_n",
+        \     "timings/prompt_ms",
+        \     "timings/prompt_per_token_ms",
+        \     "timings/prompt_per_second",
+        \     "timings/predicted_n",
+        \     "timings/predicted_ms",
+        \     "timings/predicted_per_token_ms",
+        \     "timings/predicted_per_second",
+        \     "truncated",
+        \     "tokens_cached",
+        \     "generation_settings/n_ctx",
+        \ ],
         \ })

     let l:curl_command = [
```
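For reference, as I understand the llama.cpp server's `response_fields` semantics (this describes the server feature, not code from this PR): the reply contains only the requested fields, and a name containing `/` is un-nested into a single top-level key whose literal name is the full path. So the filtered reply that `llama#fim` now receives looks roughly like the dict below, with made-up values:

```vim
" Made-up values; the shape assumes llama.cpp's response_fields un-nesting,
" where 'timings/prompt_n' becomes a literal top-level key of that name.
let s:reply = {
    \ 'content': 'return x + y;',
    \ 'timings/prompt_n': 128,
    \ 'timings/prompt_ms': 42.0,
    \ 'timings/prompt_per_second': 3047.6,
    \ 'timings/predicted_n': 16,
    \ 'timings/predicted_ms': 180.0,
    \ 'timings/predicted_per_second': 88.9,
    \ 'truncated': v:false,
    \ 'tokens_cached': 512,
    \ 'generation_settings/n_ctx': 4096,
    \ }

" The slash is part of the key string, not a nested lookup:
" s:reply['timings/prompt_n']                     => 128
" get(s:reply, 'timings/prompt_per_token_ms', 0)  => 0 (absent => default)
```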
```diff
@@ -662,24 +676,22 @@ function! s:fim_on_stdout(hash, cache, pos_x, pos_y, is_auto, job_id, data, event)
             call remove(s:content, -1)
         endwhile

-        let l:generation_settings = get(l:response, 'generation_settings', {})
-        let l:n_ctx = get(l:generation_settings, 'n_ctx', 0)
+        let l:n_ctx = get(l:response, 'generation_settings/n_ctx', 0)

         let l:n_cached  = get(l:response, 'tokens_cached', 0)
         let l:truncated = get(l:response, 'truncated', v:false)

-        " if response.timings is available
-        if len(get(l:response, 'timings', {})) > 0
+        if has_key(l:response, 'timings/prompt_n') && has_key(l:response, 'timings/prompt_ms') && has_key(l:response, 'timings/prompt_per_second')
+            \ && has_key(l:response, 'timings/predicted_n') && has_key(l:response, 'timings/predicted_ms') && has_key(l:response, 'timings/predicted_per_second')
             let l:has_info = v:true
-            let l:timings  = get(l:response, 'timings', {})
-
-            let l:n_prompt    = get(l:timings, 'prompt_n', 0)
-            let l:t_prompt_ms = get(l:timings, 'prompt_ms', 1)
-            let l:s_prompt    = get(l:timings, 'prompt_per_second', 0)
+
+            let l:n_prompt    = get(l:response, 'timings/prompt_n', 0)
+            let l:t_prompt_ms = get(l:response, 'timings/prompt_ms', 1)
+            let l:s_prompt    = get(l:response, 'timings/prompt_per_second', 0)

-            let l:n_predict    = get(l:timings, 'predicted_n', 0)
-            let l:t_predict_ms = get(l:timings, 'predicted_ms', 1)
-            let l:s_predict    = get(l:timings, 'predicted_per_second', 0)
+            let l:n_predict    = get(l:response, 'timings/predicted_n', 0)
+            let l:t_predict_ms = get(l:response, 'timings/predicted_ms', 1)
+            let l:s_predict    = get(l:response, 'timings/predicted_per_second', 0)
         endif

         " if response was pulled from cache
```
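The reading side follows from that flattened shape: the `has_key()` guards check that the timing fields actually survived the server-side filter before the info overlay is populated, and the `get()` defaults of 1 for the millisecond values look like a guard against dividing by zero in later speed arithmetic (an assumption; the PR does not say so). A condensed, standalone sketch of the pattern:

```vim
" Condensed sketch of the new read path; a:reply stands in for a decoded
" server response (hypothetical helper, not code from the PR).
function! s:read_timings(reply) abort
    " Only report timings when the fields survived the server-side filter.
    if !has_key(a:reply, 'timings/prompt_n') || !has_key(a:reply, 'timings/predicted_n')
        return {}
    endif
    return {
        \ 'n_prompt':     get(a:reply, 'timings/prompt_n', 0),
        \ 't_prompt_ms':  get(a:reply, 'timings/prompt_ms', 1),
        \ 'n_predict':    get(a:reply, 'timings/predicted_n', 0),
        \ 't_predict_ms': get(a:reply, 'timings/predicted_ms', 1),
        \ }
endfunction

" A cached or filtered-down reply without timings simply yields {}:
" s:read_timings({'content': 'cached'}) => {}
```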
