@@ -100,6 +100,103 @@ describe("NativeOllamaHandler", () => {
 			expect(results.some((r) => r.type === "reasoning")).toBe(true)
 			expect(results.some((r) => r.type === "text")).toBe(true)
 		})
+
+		describe("context window configuration", () => {
+			it("should use custom context window when ollamaContextWindow is provided", async () => {
+				const customContextWindow = 48000
+				const optionsWithCustomContext: ApiHandlerOptions = {
+					apiModelId: "llama2",
+					ollamaModelId: "llama2",
+					ollamaBaseUrl: "http://localhost:11434",
+					ollamaContextWindow: customContextWindow,
+				}
+				handler = new NativeOllamaHandler(optionsWithCustomContext)
+
+				// Mock the chat response
+				mockChat.mockImplementation(async function* () {
+					yield {
+						message: { content: "Test response" },
+						eval_count: 10,
+						prompt_eval_count: 5,
+					}
+				})
+
+				// Create a message to trigger the chat call
+				const generator = handler.createMessage("System prompt", [{ role: "user", content: "Test message" }])
+
+				// Consume the generator
+				const results = []
+				for await (const chunk of generator) {
+					results.push(chunk)
+				}
+
+				// Verify that chat was called with the custom context window
+				expect(mockChat).toHaveBeenCalledWith(
+					expect.objectContaining({
+						options: expect.objectContaining({
+							num_ctx: customContextWindow,
+						}),
+					}),
+				)
+			})
+
+			it("should use model's default context window when ollamaContextWindow is not provided", async () => {
+				// Mock the chat response
+				mockChat.mockImplementation(async function* () {
+					yield {
+						message: { content: "Test response" },
+						eval_count: 10,
+						prompt_eval_count: 5,
+					}
+				})
+
+				// Create a message to trigger the chat call
+				const generator = handler.createMessage("System prompt", [{ role: "user", content: "Test message" }])
+
+				// Consume the generator
+				const results = []
+				for await (const chunk of generator) {
+					results.push(chunk)
+				}
+
+				// Verify that chat was called with the model's default context window (4096)
+				expect(mockChat).toHaveBeenCalledWith(
+					expect.objectContaining({
+						options: expect.objectContaining({
+							num_ctx: 4096,
+						}),
+					}),
+				)
+			})
+
+			it("should use custom context window in completePrompt method", async () => {
+				const customContextWindow = 48000
+				const optionsWithCustomContext: ApiHandlerOptions = {
+					apiModelId: "llama2",
+					ollamaModelId: "llama2",
+					ollamaBaseUrl: "http://localhost:11434",
+					ollamaContextWindow: customContextWindow,
+				}
+				handler = new NativeOllamaHandler(optionsWithCustomContext)
+
+				// Mock the chat response
+				mockChat.mockResolvedValue({
+					message: { content: "Test response" },
+				})
+
+				// Call completePrompt
+				await handler.completePrompt("Test prompt")
+
+				// Verify that chat was called with the custom context window
+				expect(mockChat).toHaveBeenCalledWith(
+					expect.objectContaining({
+						options: expect.objectContaining({
+							num_ctx: customContextWindow,
+						}),
+					}),
+				)
+			})
+		})
 	})

 	describe("completePrompt", () => {
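
For orientation, here is a minimal sketch of the resolution logic these tests pin down, assuming the handler funnels both `createMessage` and `completePrompt` through a single options builder. Only the `ollamaContextWindow` setting and the 4096 fallback are taken from the assertions above; the class and helper names below are hypothetical.

```typescript
// Sketch only: class and method names are assumptions, not the
// handler's real API. What the tests guarantee is the resolution
// rule: num_ctx = ollamaContextWindow ?? 4096.
const DEFAULT_NUM_CTX = 4096

interface HandlerOptions {
	ollamaModelId?: string
	ollamaBaseUrl?: string
	ollamaContextWindow?: number
}

class OllamaOptionsSketch {
	constructor(private readonly options: HandlerOptions) {}

	// Both the streaming chat call and the one-shot completion would
	// build their Ollama `options` payload here, which is why each
	// entry point needs only one custom-value test and one default test.
	buildChatOptions(temperature = 0): { num_ctx: number; temperature: number } {
		return {
			num_ctx: this.options.ollamaContextWindow ?? DEFAULT_NUM_CTX,
			temperature,
		}
	}
}

// new OllamaOptionsSketch({ ollamaContextWindow: 48000 }).buildChatOptions()
//   => { num_ctx: 48000, temperature: 0 }
// new OllamaOptionsSketch({}).buildChatOptions()
//   => { num_ctx: 4096, temperature: 0 }
```
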
@@ -115,6 +212,7 @@ describe("NativeOllamaHandler", () => {
 			messages: [{ role: "user", content: "Tell me a joke" }],
 			stream: false,
 			options: {
+				num_ctx: 4096,
 				temperature: 0,
 			},
 		})
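
Note that the pre-existing `completePrompt` expectation now pins `num_ctx: 4096` explicitly, so the default fallback is exercised on the non-streaming path as well; a regression in the default resolution would fail this test rather than pass unnoticed.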