diff --git a/integrations/llms/vertex-ai/files.mdx b/integrations/llms/vertex-ai/files.mdx index cf7ebf7a..a0e14c7c 100644 --- a/integrations/llms/vertex-ai/files.mdx +++ b/integrations/llms/vertex-ai/files.mdx @@ -304,4 +304,132 @@ print(file_content) -Note: The `ListFiles` endpoint is not supported for Vertex AI. \ No newline at end of file +Note: The `ListFiles` endpoint is not supported for Vertex AI. + + +### Uploading Files for Inference + +Vertex AI supports inference with files that are uploaded to GCS. To upload files for inference (i.e., not for `batch` or `fine-tune`), use the code snippets below. + + + +```python +from portkey_ai import Portkey + +# Initialize the Portkey client +portkey = Portkey( + api_key="PORTKEY_API_KEY", # Replace with your Portkey API key + provider="@VERTEX_PROVIDER", + vertex_storage_bucket_name="your_bucket_name", # Specify the GCS bucket name + provider_file_name="your_file_name.png", # Specify the file name in GCS + provider_model="gemini-1.5-flash-001" # Specify the model to use +) + +upload_file_response = portkey.files.create( + purpose="upload", + file=open("your_file_name.png", "rb"), + extra_headers={"x-portkey-file-purpose": "upload"} +) + +print(upload_file_response) +``` + + +```js +import { Portkey } from 'portkey-ai'; +import * as fs from 'fs'; + +// Initialize the Portkey client +const portkey = Portkey({ + apiKey: "PORTKEY_API_KEY", // Replace with your Portkey API key + provider:"@VERTEX_PROVIDER", + vertexStorageBucketName: "your_bucket_name", // Specify the GCS bucket name + providerFileName: "your_file_name.png", // Specify the file name in GCS + providerModel: "gemini-1.5-flash-001" // Specify the model to use +}); + +const uploadFile = async () => { + const file = await portkey.files.create({ + purpose: "upload", + file: fs.createReadStream("your_file_name.png"), + extra_headers: {"x-portkey-file-purpose": "upload"} + }); + + console.log(file); +} + +uploadFile(); +``` + + +```sh +curl -X POST --header 
'x-portkey-api-key: PORTKEY_API_KEY' \ + --header 'x-portkey-provider: @VERTEX_PROVIDER' \ + --header 'x-portkey-vertex-storage-bucket-name: your_bucket_name' \ + --header 'x-portkey-provider-file-name: your_file_name.png' \ + --header 'x-portkey-provider-model: gemini-1.5-flash-001' \ + --header 'x-portkey-file-purpose: upload' \ + --form 'purpose="upload"' \ + --form 'file=@your_file_name.png' \ + 'https://api.portkey.ai/v1/files' +``` + + +```js +import OpenAI from 'openai'; +import { PORTKEY_GATEWAY_URL, createHeaders } from 'portkey-ai'; +import * as fs from 'fs'; + +const openai = new OpenAI({ + apiKey: 'OPENAI_API_KEY', + baseURL: PORTKEY_GATEWAY_URL, + defaultHeaders: createHeaders({ + provider:"@VERTEX_PROVIDER", + apiKey: "PORTKEY_API_KEY", + vertexStorageBucketName: "your_bucket_name", + providerFileName: "your_file_name.png", + providerModel: "gemini-1.5-flash-001" + }) +}); + +const uploadFile = async () => { + const file = await openai.files.create({ + purpose: "upload", + file: fs.createReadStream("your_file_name.png"), + extraHeaders: {"x-portkey-file-purpose": "upload"} + }); + + console.log(file); +} + +uploadFile(); +``` + + +```python +from openai import OpenAI +from portkey_ai import PORTKEY_GATEWAY_URL, createHeaders + +openai = OpenAI( + api_key='OPENAI_API_KEY', + base_url=PORTKEY_GATEWAY_URL, + default_headers=createHeaders( + provider="@VERTEX_PROVIDER", + api_key="PORTKEY_API_KEY", + vertex_storage_bucket_name="your_bucket_name", + provider_file_name="your_file_name.png", + provider_model="gemini-1.5-flash-001" + ) +) + +upload_file_response = openai.files.create( + purpose="upload", + file=open("your_file_name.png", "rb"), + extra_headers={"x-portkey-file-purpose": "upload"} +) + +print(upload_file_response) +``` + + + \ No newline at end of file