Merged
32 changes: 3 additions & 29 deletions model/openai.go
@@ -530,7 +530,6 @@ func (m *openAIModel) generateStream(ctx context.Context, openaiReq *openAIReque

scanner := bufio.NewScanner(httpResp.Body)
var textBuffer strings.Builder
-var reasoningBuffer strings.Builder
var toolCalls []toolCall
var usage *usage

@@ -560,24 +559,6 @@ func (m *openAIModel) generateStream(ctx context.Context, openaiReq *openAIReque
continue
}

-if delta.ReasoningContent != nil {
-if text, ok := delta.ReasoningContent.(string); ok && text != "" {
-reasoningBuffer.WriteString(text)
-llmResp := &model.LLMResponse{
-Content: &genai.Content{
-Role: "model",
-Parts: []*genai.Part{
-{Text: text, Thought: true},
-},
-},
-Partial: true,
-}
-if !yield(llmResp, nil) {
-return
-}
-}
-}

if delta.Content != nil {
if text, ok := delta.Content.(string); ok && text != "" {
textBuffer.WriteString(text)
@@ -623,7 +604,7 @@ func (m *openAIModel) generateStream(ctx context.Context, openaiReq *openAIReque
}

if choice.FinishReason != "" {
-finalResp := m.buildFinalResponse(textBuffer.String(), reasoningBuffer.String(), toolCalls, usage, choice.FinishReason)
+finalResp := m.buildFinalResponse(textBuffer.String(), toolCalls, usage, choice.FinishReason)
yield(finalResp, nil)
return
}
@@ -635,7 +616,7 @@ func (m *openAIModel) generateStream(ctx context.Context, openaiReq *openAIReque
}

if textBuffer.Len() > 0 || len(toolCalls) > 0 {
-finalResp := m.buildFinalResponse(textBuffer.String(), reasoningBuffer.String(), toolCalls, usage, "stop")
+finalResp := m.buildFinalResponse(textBuffer.String(), toolCalls, usage, "stop")
yield(finalResp, nil)
}
}
@@ -751,16 +732,9 @@ func (m *openAIModel) convertResponse(resp *response) (*model.LLMResponse, error
return llmResp, nil
}

-func (m *openAIModel) buildFinalResponse(text string, reasoningText string, toolCalls []toolCall, usage *usage, finishReason string) *model.LLMResponse {
+func (m *openAIModel) buildFinalResponse(text string, toolCalls []toolCall, usage *usage, finishReason string) *model.LLMResponse {
var parts []*genai.Part

-if reasoningText != "" {
-parts = append(parts, &genai.Part{
-Text: reasoningText,
-Thought: true,
-})
-}

if text != "" {
parts = append(parts, genai.NewPartFromText(text))
}
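The net effect of the openai.go hunks above, as a minimal standalone sketch: reasoning text no longer produces a `Thought` part, so only visible text contributes to the final content. `buildParts` below is a hypothetical stand-in for the real `buildFinalResponse` (whose tool-call and usage handling is outside the visible diff and assumed unchanged), and the `google.golang.org/genai` import path is an assumption based on the identifiers used in this file.

```go
package main

import (
	"fmt"

	"google.golang.org/genai" // assumed import path for the genai package used in the diff
)

// buildParts mirrors the post-change shape of buildFinalResponse:
// the reasoningText parameter and its Thought part are gone, so only
// visible text yields a part here. Tool-call and usage parts (not
// shown in this diff) are omitted from this sketch.
func buildParts(text string) []*genai.Part {
	var parts []*genai.Part
	if text != "" {
		parts = append(parts, genai.NewPartFromText(text))
	}
	return parts
}

func main() {
	for _, p := range buildParts("final answer") {
		fmt.Printf("thought=%v text=%q\n", p.Thought, p.Text)
	}
}
```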
2 changes: 1 addition & 1 deletion model/openai_test.go
@@ -1268,7 +1268,7 @@ func TestBuildFinalResponse_EmptyToolCallFiltering(t *testing.T) {

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
-resp := m.buildFinalResponse("", "", tt.toolCalls, nil, "stop")
+resp := m.buildFinalResponse("", tt.toolCalls, nil, "stop")

var functionCalls []*genai.FunctionCall
for _, part := range resp.Content.Parts {
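For context, a rough sketch of how the streaming delta handling reads after this change, using local stand-in types (`chunkDelta`, the `yield` callback) rather than the repository's actual ones: reasoning deltas are no longer inspected, only `Content` deltas are buffered and streamed, matching the block removed above.

```go
package main

import (
	"fmt"
	"strings"
)

// chunkDelta is a hypothetical stand-in for the streamed delta:
// ReasoningContent may still arrive on the wire, but after this PR
// it is no longer surfaced as a Thought part.
type chunkDelta struct {
	Content          any
	ReasoningContent any
}

// consume buffers visible text, emits each partial chunk via yield,
// and returns the accumulated text for the final response.
func consume(deltas []chunkDelta, yield func(partial string) bool) string {
	var textBuffer strings.Builder
	for _, d := range deltas {
		// Note: no branch on d.ReasoningContent — reasoning text is dropped.
		if text, ok := d.Content.(string); ok && text != "" {
			textBuffer.WriteString(text)
			if !yield(text) {
				break
			}
		}
	}
	return textBuffer.String()
}

func main() {
	deltas := []chunkDelta{
		{ReasoningContent: "thinking..."},
		{Content: "Hello, "},
		{Content: "world."},
	}
	final := consume(deltas, func(p string) bool { fmt.Println("partial:", p); return true })
	fmt.Println("final:", final)
}
```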