await new Promise(resolve => setTimeout(resolve, 1500));
const cachedCompletion = completionEngine.complete("Hi there! How");
console.log("Cached completion:", cachedCompletion);
```

## Response Prefix {#response-prefix}
You can force the model's response to start with a specific prefix,
steering the rest of the response in that direction.

```typescript
import {fileURLToPath} from "url";
import path from "path";
import {getLlama, LlamaChatSession, GeneralChatWrapper} from "node-llama-cpp";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

const llama = await getLlama();
const model = await llama.loadModel({
    modelPath: path.join(__dirname, "models", "Meta-Llama-3.1-8B-Instruct.Q4_K_M.gguf")
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence(),
    chatWrapper: new GeneralChatWrapper()
});

const q1 = "Hi there, how are you?";
console.log("User: " + q1);

const a1 = await session.prompt(q1, {
    responsePrefix: "The weather today is"
});
console.log("AI: " + a1);
```
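
Since the prefix is forced onto the response, the returned text should begin with it. If you want to display only the generated continuation, you can strip the prefix yourself. Below is a minimal sketch continuing the example above, assuming the prefix appears verbatim at the start of the returned text; `stripResponsePrefix` is a hypothetical helper, not part of `node-llama-cpp`:

```typescript
// Hypothetical helper (not part of node-llama-cpp): removes a forced
// prefix from a response, assuming the returned text starts with it
function stripResponsePrefix(response: string, prefix: string): string {
    return response.startsWith(prefix)
        ? response.slice(prefix.length).trimStart()
        : response;
}

console.log("Continuation: " + stripResponsePrefix(a1, "The weather today is"));
```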