@@ -114,6 +114,18 @@ type OllamaErrorResponse = {
   error: string;
 };
 
+type N8nChatResponse = {
+  type: string;
+  content?: string;
+  metadata: {
+    nodeId: string;
+    nodeName: string;
+    itemIndex: number;
+    runIndex: number;
+    timestamps: number;
+  };
+};
+
 type OllamaRawResponse =
   | OllamaErrorResponse
   | (OllamaBaseResponse & {
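For the n8n payload shape above, a concrete value might look like the following sketch (all field values are invented for illustration; only the shape comes from the diff):

// Invented sample conforming to N8nChatResponse; values are illustrative.
const sample: N8nChatResponse = {
  type: "item",
  content: "<think>",
  metadata: {
    nodeId: "a1b2c3",
    nodeName: "AI Agent",
    itemIndex: 0,
    runIndex: 0,
    timestamps: 1700000000000, // assumed here to be an epoch-ms value
  },
};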
@@ -124,7 +136,8 @@ type OllamaChatResponse =
   | OllamaErrorResponse
   | (OllamaBaseResponse & {
       message: OllamaChatMessage;
-    });
+    })
+  | N8nChatResponse;
 
 interface OllamaTool {
   type: "function";
@@ -146,6 +159,7 @@ class Ollama extends BaseLLM implements ModelInstaller {
   private static modelsBeingInstalled: Set<string> = new Set();
   private static modelsBeingInstalledMutex = new Mutex();
 
+  private static _isThinking: boolean = false;
   private fimSupported: boolean = false;
   constructor(options: LLMOptions) {
     super(options);
@@ -388,6 +402,15 @@ class Ollama extends BaseLLM implements ModelInstaller {
     }
   }
 
+  static GetIsThinking(): boolean {
+    return this._isThinking;
+  }
+  static SetIsThinking(newValue: boolean): void {
+    if (this._isThinking !== newValue) {
+      this._isThinking = newValue;
+    }
+  }
+
   protected async *_streamChat(
     messages: ChatMessage[],
     signal: AbortSignal,
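Because `_isThinking` is declared `static`, the flag lives on the class itself rather than on an instance, so every `Ollama` object and every in-flight stream shares a single value. A small sketch of that behavior (illustrative only):

// Illustrative only: the accessors read and write shared class-level state.
Ollama.SetIsThinking(true);
console.log(Ollama.GetIsThinking()); // true, observed by all instances

Ollama.SetIsThinking(false);
console.log(Ollama.GetIsThinking()); // back to false everywhere

One consequence worth noting: two `_streamChat` calls running concurrently would toggle the same flag, so thinking state from one stream can bleed into another.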
@@ -433,6 +456,39 @@ class Ollama extends BaseLLM implements ModelInstaller {
       throw new Error(res.error);
     }
 
+    if ("type" in res) {
+      const { content } = res;
+
+      if (content === "<think>") {
+        Ollama.SetIsThinking(true);
+      }
+
+      if (Ollama.GetIsThinking() && content) {
+        const thinkingMessage: ThinkingChatMessage = {
+          role: "thinking",
+          content: content,
+        };
+
+        if (thinkingMessage) {
+          if (content === "</think>") {
+            Ollama.SetIsThinking(false);
+          }
+          // When streaming, a chunk can't carry both thinking and regular content
+          console.log("THINKING TOKEN:", thinkingMessage.content);
+          return [thinkingMessage];
+        }
+      }
+
+      if (content) {
+        const chatMessage: ChatMessage = {
+          role: "assistant",
+          content: content,
+        };
+        return [chatMessage];
+      }
+      return [];
+    }
+
     const { role, content, thinking, tool_calls: toolCalls } = res.message;
 
     if (role === "tool") {
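The branch added above is effectively a small state machine: `<think>` switches the shared flag on, every subsequent token (up to and including `</think>`) is surfaced as a `thinking` message, and anything outside that window falls through as ordinary assistant content. A standalone re-creation for illustration (names and the driver are invented, not from the diff):

type Routed = { role: "thinking" | "assistant"; content: string };

// Mirrors the routing logic above, minus the class-level flag.
function routeTokens(tokens: string[]): Routed[] {
  let isThinking = false;
  const out: Routed[] = [];
  for (const content of tokens) {
    if (content === "<think>") {
      isThinking = true;
    }
    if (isThinking && content) {
      if (content === "</think>") {
        isThinking = false;
      }
      out.push({ role: "thinking", content });
      continue;
    }
    if (content) {
      out.push({ role: "assistant", content });
    }
  }
  return out;
}

// routeTokens(["<think>", "plan steps", "</think>", "Hello"]) yields three
// "thinking" messages followed by one "assistant" message.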