@@ -445,32 +445,33 @@ function! llama#fim(is_auto, cache) abort
     endif
     let s:job_error = 0
 
-    " Construct hash from prefix, prompt, and suffix
-    let l:request_context = l:prefix . l:prompt . l:suffix
+    " Construct hash from prefix, prompt, and suffix with separators
+    let l:request_context = l:prefix . 'Î' . l:prompt . 'Î' . l:suffix
     let l:hash = sha256(l:request_context)
 
     if a:cache
         " Check if the completion is cached
-        let l:cached_completion = get(g:result_cache, l:hash , v:null)
+        let l:cached_completion = get(g:result_cache, l:hash, v:null)
 
         " ... or if there is a cached completion nearby (10 characters behind)
         " Looks at the previous 10 characters to see if a completion is cached. If one is found at (x,y)
         " then it checks that the characters typed after (x,y) match up with the cached completion result.
         if l:cached_completion == v:null
-            let l:past_text = l:prefix . l:prompt
+            let l:past_text = l:prefix . 'Î' . l:prompt
             for i in range(10)
-                let l:hash_txt = l:past_text[:-(2 + i)] . l:suffix
+                let l:removed_section = l:past_text[-(1 + i):]
+                let l:hash_txt = l:past_text[:-(2 + i)] . 'Î' . l:suffix
                 let l:temp_hash = sha256(l:hash_txt)
                 if has_key(g:result_cache, l:temp_hash)
                     let l:temp_cached_completion = get(g:result_cache, l:temp_hash)
-                    if  l:temp_cached_completion == ""
+                    if l:temp_cached_completion == ""
                         break
                     endif
                     let l:response = json_decode(l:temp_cached_completion)
-                    if l:response['content'][0:len(l:past_text[-(1 + i):]) - 1] !=# l:past_text[-(1 + i):]
+                    if l:response['content'][0:i] !=# l:removed_section
                         break
                     endif
-                    let l:response['content']  = l:response['content'][i + 1:]
+                    let l:response['content'] = l:response['content'][i + 1:]
                     let g:result_cache[l:hash] = json_encode(l:response)
                     let l:cached_completion = g:result_cache[l:hash]
                     break
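
The lookback in this hunk relies on Vim's negative string indices: for each i, l:past_text[:-(2 + i)] is the text with its last (1 + i) characters dropped, and the new l:removed_section (l:past_text[-(1 + i):]) is exactly that dropped tail, which must match the start of the cached completion before the cache entry is reused. A minimal standalone sketch of the slicing (hypothetical demo function and sample text, not plugin code):

" Standalone demo of the slicing used above; the names and sample text are
" made up for illustration only.
function! s:DemoLookback() abort
    let l:past_text = 'return tru'
    for i in range(3)
        " text with the last (1 + i) characters removed
        let l:kept    = l:past_text[:-(2 + i)]
        " the characters that were removed, i.e. what was typed since the
        " cached completion was produced
        let l:removed = l:past_text[-(1 + i):]
        echo printf('i=%d kept=%s removed=%s', i, l:kept, l:removed)
    endfor
endfunction
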
@@ -594,18 +595,8 @@ endfunction
 
 " callback that processes the FIM result from the server and displays the suggestion
 function! s:fim_on_stdout(hash, cache, pos_x, pos_y, is_auto, job_id, data, event = v:null)
-    " make sure cursor position hasn't changed since fim_on_stdout was triggered
-    if a:pos_x != col('.') - 1 || a:pos_y != line('.')
-        return
-    endif
-
-    " show the suggestion only in insert mode
-    if mode() !=# 'i'
-        return
-    endif
-
-    " Retrieve the FIM result from cache
     if a:cache && has_key(g:result_cache, a:hash)
+        " retrieve the FIM result from cache
         let l:raw = get(g:result_cache, a:hash)
         let l:is_cached = v:true
     else
@@ -617,21 +608,33 @@ function! s:fim_on_stdout(hash, cache, pos_x, pos_y, is_auto, job_id, data, even
         let l:is_cached = v:false
     endif
 
-    " TODO: this does not seem to work as expected, so disabling for now
-    "if s:job_error || len(l:raw) == 0
-    "    let l:raw = json_encode({'content': '  llama.vim : cannot reach llama.cpp server. (:help llama)'})
-
-    "    let s:can_accept = v:false
-    "endif
-
+    " ignore empty results
     if len(l:raw) == 0
         return
     endif
 
+    " save the FIM result to the cache
     if !l:is_cached
         call s:insert_cache(a:hash, l:raw)
     endif
 
+    " make sure cursor position hasn't changed since fim_on_stdout was triggered
+    if a:pos_x != col('.') - 1 || a:pos_y != line('.')
+        return
+    endif
+
+    " show the suggestion only in insert mode
+    if mode() !=# 'i'
+        return
+    endif
+
+    " TODO: this does not seem to work as expected, so disabling for now
+    "if s:job_error || len(l:raw) == 0
+    "    let l:raw = json_encode({'content': '  llama.vim : cannot reach llama.cpp server. (:help llama)'})
+
+    "    let s:can_accept = v:false
+    "endif
+
     let s:pos_x = a:pos_x
     let s:pos_y = a:pos_y
 
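
The net effect of the last two hunks is a reordering of the callback: the response is written to the cache before the cursor-position and insert-mode guards run, so a completion that arrives after the cursor has moved is still kept for later reuse instead of being discarded. A simplified, hypothetical sketch of that ordering (demo names and cache dict assumed, not the plugin's actual identifiers):

" Simplified illustration of the new guard ordering; the cache dict and
" function name are stand-ins, not part of llama.vim.
let g:demo_cache = get(g:, 'demo_cache', {})

function! s:DemoOnResult(hash, raw, pos_x, pos_y) abort
    " ignore empty results
    if len(a:raw) == 0
        return
    endif

    " cache first, even if the suggestion can no longer be shown
    let g:demo_cache[a:hash] = a:raw

    " only display if the cursor has not moved and we are still inserting
    if a:pos_x != col('.') - 1 || a:pos_y != line('.')
        return
    endif
    if mode() !=# 'i'
        return
    endif

    " ... display logic would follow here
endfunction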