"""
Batched Embeddings Example for the Gemini API

Demonstrates how to efficiently process multiple texts in a single API call.
"""

import math
import os

import google.generativeai as genai

def batch_embed_example():
    # Configure the API key. In production, read it from the environment
    # rather than hard-coding it in source.
    genai.configure(api_key=os.environ.get("GOOGLE_API_KEY", "YOUR_API_KEY_HERE"))

    # Sample texts to embed
    texts = [
        "The quick brown fox jumps over the lazy dog.",
        "Gemini's batch embeddings are more efficient than individual calls.",
        "Always prefer batch processing when possible.",
        "This example shows best practices for the Gemini Embeddings API.",
        "Batch processing reduces API calls and improves performance.",
    ]

    print(f"Embedding {len(texts)} texts in a single batch...")

    # Make a single batch request. Passing a list as `content` embeds all
    # texts in one call; `task_type` tunes the vectors for a use case
    # (here, documents to be retrieved later).
    try:
        response = genai.embed_content(
            model="models/embedding-001",
            content=texts,
            task_type="RETRIEVAL_DOCUMENT",
        )

        # For a list input, response["embedding"] is a list of vectors,
        # one per input text, in the same order.
        assert len(response["embedding"]) == len(texts)

        print("\nSuccess! Embeddings generated:")
        for i, (text, embedding) in enumerate(zip(texts, response["embedding"])):
            print(f"\nText {i + 1}: {text[:50]}...")
            print(f"Embedding vector (first 5 dims): {embedding[:5]}")
            print(f"Vector length: {len(embedding)}")

    except Exception as e:
        print(f"Error during batch embedding: {e}")
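

# Batch requests are capped at a fixed number of inputs per call (100 for
# the batch embedding endpoint at the time of writing; treat that figure as
# an assumption and check the current docs for your model). A minimal
# sketch for embedding a corpus larger than one request allows:
def embed_in_chunks(texts, chunk_size=100):
    """Embed a long list of texts by issuing one batch request per chunk."""
    embeddings = []
    for start in range(0, len(texts), chunk_size):
        chunk = texts[start:start + chunk_size]
        response = genai.embed_content(
            model="models/embedding-001",
            content=chunk,
            task_type="RETRIEVAL_DOCUMENT",
        )
        # Each response carries one vector per input; keep them in order.
        embeddings.extend(response["embedding"])
    return embeddings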
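
# A sketch of what the document vectors are for: embed a query with the
# complementary RETRIEVAL_QUERY task type and rank documents by cosine
# similarity. Uses only the stdlib to stay dependency-free; in practice
# you would likely use numpy.
def rank_documents(query, texts, doc_embeddings):
    """Return (text, score) pairs sorted by similarity to the query."""
    query_embedding = genai.embed_content(
        model="models/embedding-001",
        content=query,
        task_type="RETRIEVAL_QUERY",
    )["embedding"]

    def cosine(a, b):
        dot = sum(x * y for x, y in zip(a, b))
        norm_a = math.sqrt(sum(x * x for x in a))
        norm_b = math.sqrt(sum(x * x for x in b))
        return dot / (norm_a * norm_b)

    scored = [(text, cosine(query_embedding, vec)) for text, vec in zip(texts, doc_embeddings)]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)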


if __name__ == "__main__":
    batch_embed_example()