|
// Copyright Envoy AI Gateway Authors
// SPDX-License-Identifier: Apache-2.0
// The full text of the Apache license is available in the LICENSE file at
// the root of the repo.
| 5 | + |
| 6 | +package translator |
| 7 | + |
| 8 | +import ( |
| 9 | + "encoding/json" |
| 10 | + "fmt" |
| 11 | + "strconv" |
| 12 | + |
| 13 | + "github.com/envoyproxy/ai-gateway/internal/apischema/gcp" |
| 14 | + "github.com/envoyproxy/ai-gateway/internal/apischema/openai" |
| 15 | + "github.com/envoyproxy/ai-gateway/internal/internalapi" |
| 16 | +) |
| 17 | + |
| 18 | +// NewEmbeddingOpenAIToAzureOpenAITranslator implements [Factory] for OpenAI to Azure OpenAI translation |
| 19 | +// for embeddings. |
| 20 | +func NewEmbeddingOpenAIToGCPVertexAITranslator(requestModel internalapi.RequestModel, modelNameOverride internalapi.ModelNameOverride) OpenAIEmbeddingTranslator { |
| 21 | + return &openAIToGCPVertexAITranslatorV1Embedding{ |
| 22 | + apiVersion: apiVersion, |
| 23 | + openAIToOpenAITranslatorV1Embedding: openAIToOpenAITranslatorV1Embedding{ |
| 24 | + modelNameOverride: modelNameOverride, |
| 25 | + }, |
| 26 | + } |
| 27 | +} |
| 28 | + |
| 29 | +// openAIToGCPVertexAITranslatorV1Embedding implements [OpenAIEmbeddingTranslator] for /embeddings. |
| 30 | +type openAIToGCPVertexAITranslatorV1Embedding[T openai.EmbeddingRequest] struct { |
| 31 | + requestModel internalapi.RequestModel |
| 32 | + openAIToOpenAITranslatorV1Embedding |
| 33 | +} |
| 34 | + |
| 35 | + |
| 36 | + |
| 37 | +func InputToGeminiConent(input openai.EmbeddingRequestInput){ |
| 38 | + switch v := input.Value.(type) { |
| 39 | + case string: |
| 40 | + |
| 41 | + return v, "string", nil |
| 42 | + case []string: |
| 43 | + // Array of text inputs |
| 44 | + return v, "string_array", nil |
| 45 | + case []int64: |
| 46 | + // Array of token IDs |
| 47 | + return v, "token_array", nil |
| 48 | + case [][]int64: |
| 49 | + // Array of token ID arrays |
| 50 | + return v, "token_array_batch", nil |
| 51 | + default: |
| 52 | + return nil, "unknown", fmt.Errorf("unsupported input type: %T", v) |
| 53 | + } |
| 54 | + |
| 55 | + |
| 56 | +} |
| 57 | + |
| 58 | +// openAIToGCPVertexAITranslatorV1Embedding converts an OpenAI EmbeddingRequest to a GCP Gemini GenerateContentRequest. |
| 59 | +func openAIEmbeddingCompletionToGeminiMessage(openAIReq *openai.EmbeddingCompletionRequest, requestModel internalapi.RequestModel) (*gcp.EmbedContentRequest, error) { |
| 60 | + // Convert OpenAI EmbeddingRequest's input to Gemini Contents |
| 61 | + contents, err := InputToGeminiConent(openAIReq.Input, requestModel) |
| 62 | + if err != nil { |
| 63 | + return nil, err |
| 64 | + } |
| 65 | + |
| 66 | + // Convert generation config. |
| 67 | + embedConfig,, err := openAIReqToGeminiGenerationConfig(openAIReq, requestModel) |
| 68 | + if err != nil { |
| 69 | + return nil, fmt.Errorf("error converting generation config: %w", err) |
| 70 | + } |
| 71 | + |
| 72 | + gcr := gcp.EmbedContentRequest{ |
| 73 | + Contents: contents, |
| 74 | + Config: embedConfig, |
| 75 | + } |
| 76 | + |
| 77 | + return &gcr, nil |
| 78 | +} |
| 79 | + |
| 80 | +// RequestBody implements [OpenAIEmbeddingTranslator.RequestBody]. |
| 81 | +func (o *openAIToGCPVertexAITranslatorV1Embedding[T]) RequestBody(original []byte, req *T, onRetry bool) ( |
| 82 | + newHeaders []internalapi.Header, newBody []byte, err error, |
| 83 | +) { |
| 84 | + |
| 85 | + o.requestModel = openai.GetModelFromEmbeddingRequest(req) |
| 86 | + if o.modelNameOverride != "" { |
| 87 | + // Use modelName override if set. |
| 88 | + o.requestModel = o.modelNameOverride |
| 89 | + } |
| 90 | + |
| 91 | + // Choose the correct endpoint based on streaming. |
| 92 | + var path string |
| 93 | + |
| 94 | + path = buildGCPModelPathSuffix(gcpModelPublisherGoogle, o.requestModel, gcpMethodGenerateContent) |
| 95 | + |
| 96 | + switch any(*req).(type) { |
| 97 | + case openai.EmbeddingCompletionRequest: |
| 98 | + gcpReq, err := openAIEmbeddingCompletionToGeminiMessage(openAIReq, o.requestModel) |
| 99 | + case openai.EmbeddingChatRequest: |
| 100 | + gcpReq, err := openAIEmbeddingChatToGeminiMessage(openAIReq, o.requestModel) |
| 101 | + |
| 102 | + default: |
| 103 | + return nil, nil, fmt.Errorf("request body is wrong: %w", err) |
| 104 | + } |
| 105 | + |
| 106 | + newBody, err = json.Marshal(gcpReq) |
| 107 | + if err != nil { |
| 108 | + return nil, nil, fmt.Errorf("error marshaling Gemini request: %w", err) |
| 109 | + } |
| 110 | + newHeaders = []internalapi.Header{ |
| 111 | + {pathHeaderName, path}, |
| 112 | + {contentLengthHeaderName, strconv.Itoa(len(newBody))}, |
| 113 | + } |
| 114 | + return |
| 115 | +} |