|
| 1 | +# Copyright 2026 Google Inc. All rights reserved. |
| 2 | +# |
| 3 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +# you may not use this file except in compliance with the License. |
| 5 | +# You may obtain a copy of the License at |
| 6 | +# |
| 7 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +# |
| 9 | +# Unless required by applicable law or agreed to in writing, software |
| 10 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +# See the License for the specific language governing permissions and |
| 13 | +# limitations under the License. |
| 14 | +"""Google GenAI LLM provider (supporting both Vertex AI and Gemini API).""" |
| 15 | + |
| 16 | +import json |
| 17 | +import logging |
| 18 | +from typing import Any, Optional |
| 19 | + |
| 20 | +from google import genai |
| 21 | +from google.genai import types |
| 22 | +from timesketch.lib.llms.providers import interface |
| 23 | +from timesketch.lib.llms.providers import manager |
| 24 | + |
| 25 | +logger = logging.getLogger("timesketch.llm.providers.google_genai") |
| 26 | + |
| 27 | + |
class GoogleGenAI(interface.LLMProvider):
    """Google GenAI LLM provider.

    Supports two authentication paths, selected from the configuration:
      * Vertex AI, when 'project_id' is set (optionally with 'location').
      * Gemini API, when 'api_key' is set.
    """

    NAME = "google_genai"

    def __init__(self, config: dict, **kwargs: Any):
        """Initialize the Google GenAI provider.

        Args:
            config: The configuration for the provider. Requires 'model',
                plus either 'project_id' (Vertex AI) or 'api_key'
                (Gemini API). Optional generation keys: 'temperature',
                'top_k', 'top_p', 'max_output_tokens'.
            **kwargs: Additional arguments passed to the base class.

        Raises:
            ValueError: If required configuration keys are missing, or if
                the underlying client fails to initialize.
        """
        super().__init__(config, **kwargs)
        self._api_key = self.config.get("api_key")
        self._project_id = self.config.get("project_id")
        self._location = self.config.get("location")
        self._model_name = self.config.get("model")

        if not self._model_name:
            raise ValueError(
                "Google GenAI provider requires a 'model' in its configuration."
            )

        # Validate credentials BEFORE the try block: raising this inside it
        # would be caught by the broad except below and re-wrapped with a
        # misleading "Failed to initialize" message.
        if not self._project_id and not self._api_key:
            raise ValueError(
                "Google GenAI provider requires either 'api_key' (for Gemini API) "
                "or 'project_id' (for Vertex AI) in its configuration."
            )

        try:
            if self._project_id:
                # Vertex AI path: authenticated via project/location
                # (Application Default Credentials).
                self.client = genai.Client(
                    vertexai=True, project=self._project_id, location=self._location
                )
            else:
                # Gemini API path: authenticated via API key.
                self.client = genai.Client(api_key=self._api_key)
        except Exception as e:
            raise ValueError(f"Failed to initialize Google GenAI client: {e}") from e

    def generate(self, prompt: str, response_schema: Optional[dict] = None) -> Any:
        """Generate text using the Google GenAI service.

        Args:
            prompt: The prompt to use for the generation.
            response_schema: An optional JSON schema to define the expected
                response format.

        Returns:
            The generated text as a string, or the parsed structured data
            if response_schema is provided.

        Raises:
            ValueError: If content generation fails, or if the response
                cannot be parsed as JSON when response_schema is given.
        """
        # Forward only the generation parameters that are actually
        # configured, instead of passing explicit None values through to
        # the SDK config object.
        config_params = {
            key: value
            for key, value in (
                ("temperature", self.config.get("temperature")),
                ("top_k", self.config.get("top_k")),
                ("top_p", self.config.get("top_p")),
                ("max_output_tokens", self.config.get("max_output_tokens")),
            )
            if value is not None
        }

        if response_schema:
            # Ask the service for structured JSON output matching the schema.
            config_params["response_mime_type"] = "application/json"
            config_params["response_schema"] = response_schema

        generate_config = types.GenerateContentConfig(**config_params)

        try:
            response = self.client.models.generate_content(
                model=self._model_name,
                contents=prompt,
                config=generate_config,
            )
        except Exception as e:
            logger.error("Error generating content with Google GenAI: %s", e)
            raise ValueError(f"Error generating content: {e}") from e

        if response_schema:
            # Prefer the SDK's pre-parsed structured output when available;
            # fall back to parsing the raw text ourselves.
            if getattr(response, "parsed", None) is not None:
                return response.parsed
            try:
                return json.loads(response.text)
            except (json.JSONDecodeError, TypeError) as error:
                raise ValueError(
                    f"Error JSON parsing text: {response.text}: {error}"
                ) from error

        return response.text
| 118 | + |
| 119 | + |
# Make the provider discoverable under its NAME ("google_genai") via the
# provider manager's registry.
manager.LLMManager.register_provider(GoogleGenAI)
| 121 | + |
| 122 | + |
| 123 | +# Register aliases for backward compatibility with old configuration names. |
class VertexAI(GoogleGenAI):
    """Backward-compatible alias of GoogleGenAI under the legacy name "vertexai"."""

    NAME = "vertexai"
| 128 | + |
| 129 | + |
class AIStudio(GoogleGenAI):
    """Backward-compatible alias of GoogleGenAI under the legacy name "aistudio"."""

    NAME = "aistudio"
| 134 | + |
| 135 | + |
# Register the aliases so existing configurations that reference the old
# provider names ("vertexai", "aistudio") keep working.
manager.LLMManager.register_provider(VertexAI)
manager.LLMManager.register_provider(AIStudio)