diff --git a/docs-website/scripts/test_python_snippets.py b/docs-website/scripts/test_python_snippets.py
index 3a524f59df..662232523f 100755
--- a/docs-website/scripts/test_python_snippets.py
+++ b/docs-website/scripts/test_python_snippets.py
@@ -331,12 +331,25 @@ def run_snippet(snippet: Snippet, timeout_seconds: int, cwd: str, skip_unsafe: b
             stderr=completed.stderr,
         )
     except subprocess.TimeoutExpired as exc:
+        # Handle stderr which might be bytes or str
+        stderr_text = exc.stderr
+        if stderr_text is None:
+            stderr_text = ""
+        elif isinstance(stderr_text, bytes):
+            stderr_text = stderr_text.decode("utf-8", errors="replace")
+        stderr_text = stderr_text + f"\n[timeout after {timeout_seconds}s]"
+
+        # Handle stdout which might be bytes or str
+        stdout_text = exc.stdout
+        if stdout_text is not None and isinstance(stdout_text, bytes):
+            stdout_text = stdout_text.decode("utf-8", errors="replace")
+
         return ExecutionResult(
             snippet=snippet,
             status=ExecutionStatus.FAILED,
             reason=f"timeout after {timeout_seconds}s",
-            stdout=exc.stdout or None,
-            stderr=(exc.stderr or "") + f"\n[timeout after {timeout_seconds}s]",
+            stdout=stdout_text,
+            stderr=stderr_text,
         )
 
 
@@ -414,7 +427,7 @@ def main(argv: Optional[list[str]] = None) -> int:
             "(defaults to docs and versioned_docs)"
         ),
     )
-    parser.add_argument("--timeout-seconds", type=int, default=30, help="Timeout per snippet execution (seconds)")
+    parser.add_argument("--timeout-seconds", type=int, default=600, help="Timeout per snippet execution (seconds)")
    parser.add_argument(
        "--allow-unsafe", action="store_true", help="Allow execution of snippets with potentially unsafe patterns"
    )
diff --git a/haystack/components/audio/whisper_local.py b/haystack/components/audio/whisper_local.py
index 66bf2a3d0d..bb7a963612 100644
--- a/haystack/components/audio/whisper_local.py
+++ b/haystack/components/audio/whisper_local.py
@@ -46,7 +46,7 @@ class LocalWhisperTranscriber:
     whisper = LocalWhisperTranscriber(model="small")
     whisper.warm_up()
 
-    transcription = whisper.run(sources=["path/to/audio/file"])
+    transcription = whisper.run(sources=["test/test_files/audio/answer.wav"])
     ```
     """
 
diff --git a/haystack/components/audio/whisper_remote.py b/haystack/components/audio/whisper_remote.py
index b8ff4f19fc..83593aebb8 100644
--- a/haystack/components/audio/whisper_remote.py
+++ b/haystack/components/audio/whisper_remote.py
@@ -30,9 +30,10 @@ class RemoteWhisperTranscriber:
 
    ```python
    from haystack.components.audio import RemoteWhisperTranscriber
+    from haystack.utils import Secret
 
-    whisper = RemoteWhisperTranscriber(api_key=Secret.from_token(""), model="tiny")
-    transcription = whisper.run(sources=["path/to/audio/file"])
+    whisper = RemoteWhisperTranscriber(api_key=Secret.from_env_var("OPENAI_API_KEY"), model="whisper-1")
+    transcription = whisper.run(sources=["test/test_files/audio/answer.wav"])
    ```
    """
 
diff --git a/haystack/components/builders/chat_prompt_builder.py b/haystack/components/builders/chat_prompt_builder.py
index aba2780fa7..2d5f6a56f1 100644
--- a/haystack/components/builders/chat_prompt_builder.py
+++ b/haystack/components/builders/chat_prompt_builder.py
@@ -75,7 +75,7 @@ class ChatPromptBuilder:
 
    # no parameter init, we don't use any runtime template variables
    prompt_builder = ChatPromptBuilder()
-    llm = OpenAIChatGenerator(api_key=Secret.from_token(""), model="gpt-4o-mini")
+    llm = OpenAIChatGenerator(api_key=Secret.from_env_var("OPENAI_API_KEY"), model="gpt-5-mini")
 
    pipe = Pipeline()
    pipe.add_component("prompt_builder", prompt_builder)
@@ -90,27 +90,17 @@ class ChatPromptBuilder:
    res = pipe.run(data={"prompt_builder": {"template_variables": {"location": location, "language": language},
                                        "template": messages}})
    print(res)
+    # Output example (truncated):
+    # {'llm': {'replies': [ChatMessage(...)]}}
 
-    >> {'llm': {'replies': [ChatMessage(_role=, _content=[TextContent(text=
-    "Berlin is the capital city of Germany and one of the most vibrant
-    and diverse cities in Europe. Here are some key things to know...Enjoy your time exploring the vibrant and dynamic
-    capital of Germany!")], _name=None, _meta={'model': 'gpt-4o-mini',
-    'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 27, 'completion_tokens': 681, 'total_tokens':
-    708}})]}}
-
-    messages = [system_message, ChatMessage.from_user("What's the weather forecast for {{location}} in the next
-    {{day_count}} days?")]
+    messages = [system_message, ChatMessage.from_user("What's the forecast for {{location}}, next {{day_count}} days?")]
 
    res = pipe.run(data={"prompt_builder": {"template_variables": {"location": location, "day_count": "5"},
                                        "template": messages}})
    print(res)
 
-    >> {'llm': {'replies': [ChatMessage(_role=, _content=[TextContent(text=
-    "Here is the weather forecast for Berlin in the next 5
-    days:\\n\\nDay 1: Mostly cloudy with a high of 22°C (72°F) and...so it's always a good idea to check for updates
-    closer to your visit.")], _name=None, _meta={'model': 'gpt-4o-mini',
-    'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 37, 'completion_tokens': 201,
-    'total_tokens': 238}})]}}
+    # Output example (truncated):
+    # {'llm': {'replies': [ChatMessage(...)]}}
    ```
 
    #### String prompt template
@@ -131,7 +121,8 @@ class ChatPromptBuilder:
    {% endmessage %}
    \"\"\"
 
-    images = [ImageContent.from_file_path("apple.jpg"), ImageContent.from_file_path("orange.jpg")]
+    images = [ImageContent.from_file_path("test/test_files/images/apple.jpg"),
+              ImageContent.from_file_path("test/test_files/images/haystack-logo.png")]
    builder = ChatPromptBuilder(template=template)
    builder.run(user_name="John", images=images)
 