diff --git a/model/openai.go b/model/openai.go
index ce63361..acc3be0 100644
--- a/model/openai.go
+++ b/model/openai.go
@@ -530,7 +530,6 @@ func (m *openAIModel) generateStream(ctx context.Context, openaiReq *openAIReque
 	scanner := bufio.NewScanner(httpResp.Body)
 
 	var textBuffer strings.Builder
-	var reasoningBuffer strings.Builder
 	var toolCalls []toolCall
 	var usage *usage
 
@@ -560,24 +559,6 @@
 				continue
 			}
 
-			if delta.ReasoningContent != nil {
-				if text, ok := delta.ReasoningContent.(string); ok && text != "" {
-					reasoningBuffer.WriteString(text)
-					llmResp := &model.LLMResponse{
-						Content: &genai.Content{
-							Role: "model",
-							Parts: []*genai.Part{
-								{Text: text, Thought: true},
-							},
-						},
-						Partial: true,
-					}
-					if !yield(llmResp, nil) {
-						return
-					}
-				}
-			}
-
 			if delta.Content != nil {
 				if text, ok := delta.Content.(string); ok && text != "" {
 					textBuffer.WriteString(text)
@@ -623,7 +604,7 @@
 			}
 
 			if choice.FinishReason != "" {
-				finalResp := m.buildFinalResponse(textBuffer.String(), reasoningBuffer.String(), toolCalls, usage, choice.FinishReason)
+				finalResp := m.buildFinalResponse(textBuffer.String(), toolCalls, usage, choice.FinishReason)
 				yield(finalResp, nil)
 				return
 			}
@@ -635,7 +616,7 @@
 	}
 
 	if textBuffer.Len() > 0 || len(toolCalls) > 0 {
-		finalResp := m.buildFinalResponse(textBuffer.String(), reasoningBuffer.String(), toolCalls, usage, "stop")
+		finalResp := m.buildFinalResponse(textBuffer.String(), toolCalls, usage, "stop")
 		yield(finalResp, nil)
 	}
 }
@@ -751,16 +732,9 @@
 	return llmResp, nil
 }
 
-func (m *openAIModel) buildFinalResponse(text string, reasoningText string, toolCalls []toolCall, usage *usage, finishReason string) *model.LLMResponse {
+func (m *openAIModel) buildFinalResponse(text string, toolCalls []toolCall, usage *usage, finishReason string) *model.LLMResponse {
 	var parts []*genai.Part
-	if reasoningText != "" {
-		parts = append(parts, &genai.Part{
-			Text:    reasoningText,
-			Thought: true,
-		})
-	}
-
 	if text != "" {
 		parts = append(parts, genai.NewPartFromText(text))
 	}
 
diff --git a/model/openai_test.go b/model/openai_test.go
index 846a9fa..7fb8b5b 100644
--- a/model/openai_test.go
+++ b/model/openai_test.go
@@ -1268,7 +1268,7 @@ func TestBuildFinalResponse_EmptyToolCallFiltering(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			resp := m.buildFinalResponse("", "", tt.toolCalls, nil, "stop")
+			resp := m.buildFinalResponse("", tt.toolCalls, nil, "stop")
 
 			var functionCalls []*genai.FunctionCall
 			for _, part := range resp.Content.Parts {