|
| 1 | +import { THREE_ZERO_TWO_AI } from '../../globals'; |
| 2 | +import { OpenAIErrorResponseTransform } from '../openai/utils'; |
| 3 | +import { |
| 4 | + ChatCompletionResponse, |
| 5 | + ErrorResponse, |
| 6 | + ProviderConfig, |
| 7 | +} from '../types'; |
| 8 | +import { generateInvalidProviderResponseError } from '../utils'; |
| 9 | + |
/**
 * Parameter mapping for 302.AI chat completions.
 *
 * Each entry maps the gateway's canonical parameter name to the
 * provider's wire-format `param`, with a `default`, optional
 * `required` flag, and numeric bounds (`min`/`max`) where applicable.
 * The shape mirrors the OpenAI chat-completions parameter set.
 */
export const AI302ChatCompleteConfig: ProviderConfig = {
  model: {
    param: 'model',
    required: true,
    default: 'gpt-3.5-turbo',
  },
  messages: {
    param: 'messages',
    default: '',
  },
  max_tokens: {
    param: 'max_tokens',
    default: 100,
    min: 0,
  },
  temperature: {
    param: 'temperature',
    default: 1,
    min: 0,
    max: 2,
  },
  top_p: {
    param: 'top_p',
    default: 1,
    min: 0,
    max: 1,
  },
  stream: {
    param: 'stream',
    default: false,
  },
  frequency_penalty: {
    param: 'frequency_penalty',
    default: 0,
    min: -2,
    max: 2,
  },
  presence_penalty: {
    param: 'presence_penalty',
    default: 0,
    min: -2,
    max: 2,
  },
  // null means "no stop sequences" rather than an empty list
  stop: {
    param: 'stop',
    default: null,
  },
};
| 58 | + |
/**
 * Non-streaming chat-completion response returned by 302.AI.
 * OpenAI-compatible envelope; `usage` may be absent, in which case the
 * response transform substitutes zeroed token counts.
 */
interface AI302ChatCompleteResponse extends ChatCompletionResponse {
  id: string;
  object: string;
  created: number; // unix timestamp (seconds)
  model: string;
  // Token accounting; optional because the provider may omit it.
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
| 70 | + |
/**
 * One parsed SSE chunk of a streaming 302.AI chat completion
 * (OpenAI-compatible `chat.completion.chunk` shape).
 *
 * NOTE(review): this type is applied to `JSON.parse` output without
 * runtime validation, so fields — `choices` in particular — may be
 * missing on the wire despite being declared required here.
 */
interface AI302StreamChunk {
  id: string;
  object: string;
  created: number; // unix timestamp (seconds)
  model: string;
  choices: {
    delta: {
      role?: string | null;
      content?: string;
    };
    index: number;
    finish_reason: string | null; // null until the final chunk of a choice
  }[];
}
| 85 | + |
| 86 | +export const AI302ChatCompleteResponseTransform: ( |
| 87 | + response: AI302ChatCompleteResponse | ErrorResponse, |
| 88 | + responseStatus: number |
| 89 | +) => ChatCompletionResponse | ErrorResponse = (response, responseStatus) => { |
| 90 | + if ('error' in response && responseStatus !== 200) { |
| 91 | + return OpenAIErrorResponseTransform(response, THREE_ZERO_TWO_AI); |
| 92 | + } |
| 93 | + |
| 94 | + if ('choices' in response) { |
| 95 | + return { |
| 96 | + id: response.id, |
| 97 | + object: response.object, |
| 98 | + created: response.created, |
| 99 | + model: response.model, |
| 100 | + provider: THREE_ZERO_TWO_AI, |
| 101 | + choices: response.choices.map((c) => ({ |
| 102 | + index: c.index, |
| 103 | + message: { |
| 104 | + role: c.message.role, |
| 105 | + content: c.message.content, |
| 106 | + }, |
| 107 | + finish_reason: c.finish_reason, |
| 108 | + })), |
| 109 | + usage: { |
| 110 | + prompt_tokens: response.usage?.prompt_tokens || 0, |
| 111 | + completion_tokens: response.usage?.completion_tokens || 0, |
| 112 | + total_tokens: response.usage?.total_tokens || 0, |
| 113 | + }, |
| 114 | + }; |
| 115 | + } |
| 116 | + |
| 117 | + return generateInvalidProviderResponseError(response, THREE_ZERO_TWO_AI); |
| 118 | +}; |
| 119 | + |
| 120 | +export const AI302ChatCompleteStreamChunkTransform: ( |
| 121 | + response: string |
| 122 | +) => string = (responseChunk) => { |
| 123 | + let chunk = responseChunk.trim(); |
| 124 | + chunk = chunk.replace(/^data: /, ''); |
| 125 | + chunk = chunk.trim(); |
| 126 | + |
| 127 | + if (chunk === '[DONE]') { |
| 128 | + return `data: ${chunk}\n\n`; |
| 129 | + } |
| 130 | + |
| 131 | + try { |
| 132 | + const parsedChunk: AI302StreamChunk = JSON.parse(chunk); |
| 133 | + |
| 134 | + return ( |
| 135 | + `data: ${JSON.stringify({ |
| 136 | + id: parsedChunk.id, |
| 137 | + object: parsedChunk.object, |
| 138 | + created: parsedChunk.created, |
| 139 | + model: parsedChunk.model, |
| 140 | + provider: THREE_ZERO_TWO_AI, |
| 141 | + choices: [ |
| 142 | + { |
| 143 | + index: parsedChunk.choices[0]?.index ?? 0, |
| 144 | + delta: parsedChunk.choices[0]?.delta ?? {}, |
| 145 | + finish_reason: parsedChunk.choices[0]?.finish_reason ?? null, |
| 146 | + }, |
| 147 | + ], |
| 148 | + })}` + '\n\n' |
| 149 | + ); |
| 150 | + } catch (error) { |
| 151 | + console.error('Error parsing 302AI stream chunk:', error); |
| 152 | + return `data: ${chunk}\n\n`; |
| 153 | + } |
| 154 | +}; |
0 commit comments