@@ -5,6 +5,7 @@
 	"context"
 	"encoding/json"
 	"fmt"
+	"github.com/EinStack/glide/pkg/providers/clients"
 	"io"
 	"net/http"
 
@@ -36,39 +37,21 @@ func NewChatRequestFromConfig(cfg *Config) *ChatRequest {
 }
 
 // Chat sends a chat request to the specified azure openai model.
-func (c *Client) Chat(ctx context.Context, request *schemas.ChatRequest) (*schemas.ChatResponse, error) {
+func (c *Client) Chat(ctx context.Context, params *schemas.ChatParams) (*schemas.ChatResponse, error) {
 	// Create a new chat request
-	chatRequest := c.createRequestSchema(request)
+	// TODO: consider using objectpool to optimize memory allocation
+	chatReq := *c.chatRequestTemplate // hoping to get a copy of the template
+	chatReq.ApplyParams(params)
+
+	chatResponse, err := c.doChatRequest(ctx, &chatReq)
 
-	chatResponse, err := c.doChatRequest(ctx, chatRequest)
 	if err != nil {
 		return nil, err
 	}
 
-	if len(chatResponse.ModelResponse.Message.Content) == 0 {
-		return nil, ErrEmptyResponse
-	}
-
 	return chatResponse, nil
 }
 
-// createRequestSchema creates a new ChatRequest object based on the given request.
-func (c *Client) createRequestSchema(request *schemas.ChatRequest) *ChatRequest {
-	// TODO: consider using objectpool to optimize memory allocation
-	chatRequest := *c.chatRequestTemplate // hoping to get a copy of the template
-
-	chatRequest.Messages = make([]ChatMessage, 0, len(request.MessageHistory)+1)
-
-	// Add items from messageHistory first and the new chat message last
-	for _, message := range request.MessageHistory {
-		chatRequest.Messages = append(chatRequest.Messages, ChatMessage{Role: message.Role, Content: message.Content})
-	}
-
-	chatRequest.Messages = append(chatRequest.Messages, ChatMessage{Role: request.Message.Role, Content: request.Message.Content})
-
-	return &chatRequest
-}
-
 func (c *Client) doChatRequest(ctx context.Context, payload *ChatRequest) (*schemas.ChatResponse, error) {
 	// Build request payload
 	rawPayload, err := json.Marshal(payload)
@@ -110,35 +93,37 @@ func (c *Client) doChatRequest(ctx context.Context, payload *ChatRequest) (*sche
 	}
 
 	// Parse the response JSON
-	var openAICompletion openai.ChatCompletion
+	var chatCompletion openai.ChatCompletion
 
-	err = json.Unmarshal(bodyBytes, &openAICompletion)
+	err = json.Unmarshal(bodyBytes, &chatCompletion)
 	if err != nil {
 		c.tel.Logger.Error("failed to parse openai chat response", zap.Error(err))
 		return nil, err
 	}
 
-	openAICompletion.SystemFingerprint = "" // Azure OpenAI doesn't return this
+	modelChoice := chatCompletion.Choices[0]
+
+	if len(modelChoice.Message.Content) == 0 {
+		return nil, clients.ErrEmptyResponse
+	}
 
 	// Map response to UnifiedChatResponse schema
 	response := schemas.ChatResponse{
-		ID:        openAICompletion.ID,
-		Created:   openAICompletion.Created,
+		ID:        chatCompletion.ID,
+		Created:   chatCompletion.Created,
 		Provider:  providerName,
-		ModelName: openAICompletion.ModelName,
+		ModelName: chatCompletion.ModelName,
 		Cached:    false,
 		ModelResponse: schemas.ModelResponse{
-			Metadata: map[string]string{
-				"system_fingerprint": openAICompletion.SystemFingerprint,
-			},
+			Metadata: map[string]string{},
 			Message: schemas.ChatMessage{
-				Role:    openAICompletion.Choices[0].Message.Role,
-				Content: openAICompletion.Choices[0].Message.Content,
+				Role:    modelChoice.Message.Role,
+				Content: modelChoice.Message.Content,
 			},
 			TokenUsage: schemas.TokenUsage{
-				PromptTokens:   openAICompletion.Usage.PromptTokens,
-				ResponseTokens: openAICompletion.Usage.CompletionTokens,
-				TotalTokens:    openAICompletion.Usage.TotalTokens,
+				PromptTokens:   chatCompletion.Usage.PromptTokens,
+				ResponseTokens: chatCompletion.Usage.CompletionTokens,
+				TotalTokens:    chatCompletion.Usage.TotalTokens,
 			},
 		},
 	}