22 | 22 | from botocore.config import Config |
23 | 23 |
24 | 24 | log = logging.getLogger(__name__) |
25 | | -log.level = logging.DEBUG |
26 | 25 |
27 | 26 | bedrock_runtime_exceptions = boto3.client( |
28 | 27 | service_name="bedrock-runtime", |
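For context, the `Config` import above is typically used to tune retries and timeouts when constructing the client, and the module-level `bedrock_runtime_exceptions` client is presumably created so its `.exceptions` attribute can be referenced when catching service errors. A minimal sketch, with retry settings that are illustrative assumptions rather than part of this commit:

    import boto3
    from botocore.config import Config

    # Retry settings here are illustrative assumptions, not from this commit.
    bedrock_runtime = boto3.client(
        service_name="bedrock-runtime",
        config=Config(retries={"max_attempts": 3, "mode": "standard"}),
    )
    # Service-specific errors hang off the client, e.g.
    # bedrock_runtime.exceptions.ThrottlingException.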
@@ -174,7 +173,7 @@ def handle_stream_response(self, stream: dict) -> GeneratorType: |
174 | 173 | yield chunk |
175 | 174 | except Exception as e: |
176 | 175 | print(f"Error in handle_stream_response: {e}") # Debug print |
177 | 176 | raise |
178 | 177 |
179 | 178 | def parse_chat_completion(self, completion: dict) -> "GeneratorOutput": |
180 | 179 | """Parse the completion, and put it into the raw_response.""" |
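For context on what `handle_stream_response` iterates over, here is a minimal sketch of consuming a Bedrock Converse stream directly with boto3; the client construction and model ID are assumptions, not part of this commit. Note that a bare `raise` in the except block above re-raises the active exception with its original traceback intact, so callers still see the real failure.

    import boto3

    client = boto3.client(service_name="bedrock-runtime")
    response = client.converse_stream(
        modelId="anthropic.claude-3-haiku-20240307-v1:0",  # example ID (assumption)
        messages=[{"role": "user", "content": [{"text": "Hello"}]}],
    )
    # converse_stream returns a dict whose "stream" value is an event stream;
    # text deltas arrive inside contentBlockDelta events.
    for event in response["stream"]:
        if "contentBlockDelta" in event:
            print(event["contentBlockDelta"]["delta"].get("text", ""), end="")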
@@ -203,14 +202,15 @@ def list_models(self): |
203 | 202 |
204 | 203 | try: |
205 | 204 | response = self._client.list_foundation_models() |
206 | 205 | models = response.get("modelSummaries", []) |
207 | 206 | for model in models: |
208 | 207 | print(f"Model ID: {model['modelId']}") |
209 | 208 | print(f" Name: {model['modelName']}") |
210 | 209 | print(f" Input Modalities: {model['inputModalities']}") |
211 | 210 | print(f" Output Modalities: {model['outputModalities']}") |
212 | 211 | print(f" Provider: {model['providerName']}") |
213 | 212 | print("") |
| 213 | + |
214 | 214 | except Exception as e: |
215 | 215 | print(f"Error listing models: {e}") |
216 | 216 |
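For reference, `list_foundation_models` lives on the control-plane `bedrock` client rather than `bedrock-runtime`, and its response nests model metadata under `modelSummaries`. A minimal sketch; the region is an assumption:

    import boto3

    bedrock = boto3.client(service_name="bedrock", region_name="us-east-1")  # region assumed
    response = bedrock.list_foundation_models()
    for summary in response.get("modelSummaries", []):
        # Each summary includes modelId, modelName, providerName,
        # inputModalities, and outputModalities, among other fields.
        print(summary["modelId"], "-", summary["providerName"])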
@@ -255,7 +255,6 @@ def call( |
255 | 255 | if model_type == ModelType.LLM: |
256 | 256 | if "stream" in api_kwargs and api_kwargs.get("stream", False): |
257 | 257 | log.debug("Streaming call") |
258 | | - printc("Streaming") |
259 | 258 | api_kwargs.pop("stream") # stream is not a valid parameter for bedrock |
260 | 259 | self.chat_completion_parser = self.handle_stream_response |
261 | 260 | return self.sync_client.converse_stream(**api_kwargs) |
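A rough sketch of the dispatch this hunk implements: boto3's `converse`/`converse_stream` do not accept a `stream` parameter, so the adapter-level flag is popped before the call. The kwargs below are placeholders, not values from this commit:

    # `client` is a bedrock-runtime client as in the sketches above.
    api_kwargs = {
        "modelId": "anthropic.claude-3-haiku-20240307-v1:0",  # placeholder
        "messages": [{"role": "user", "content": [{"text": "Hi"}]}],
        "stream": True,  # adapter-level flag, not a Bedrock parameter
    }
    if api_kwargs.pop("stream", False):
        response = client.converse_stream(**api_kwargs)
    else:
        response = client.converse(**api_kwargs)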