Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 16 additions & 3 deletions docs-website/scripts/test_python_snippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -331,12 +331,25 @@ def run_snippet(snippet: Snippet, timeout_seconds: int, cwd: str, skip_unsafe: b
stderr=completed.stderr,
)
except subprocess.TimeoutExpired as exc:
# Handle stderr which might be bytes or str
stderr_text = exc.stderr
if stderr_text is None:
stderr_text = ""
elif isinstance(stderr_text, bytes):
stderr_text = stderr_text.decode("utf-8", errors="replace")
stderr_text = stderr_text + f"\n[timeout after {timeout_seconds}s]"

# Handle stdout which might be bytes or str
stdout_text = exc.stdout
if stdout_text is not None and isinstance(stdout_text, bytes):
stdout_text = stdout_text.decode("utf-8", errors="replace")

return ExecutionResult(
snippet=snippet,
status=ExecutionStatus.FAILED,
reason=f"timeout after {timeout_seconds}s",
stdout=exc.stdout or None,
stderr=(exc.stderr or "") + f"\n[timeout after {timeout_seconds}s]",
stdout=stdout_text,
stderr=stderr_text,
)


Expand Down Expand Up @@ -414,7 +427,7 @@ def main(argv: Optional[list[str]] = None) -> int:
"(defaults to docs and versioned_docs)"
),
)
parser.add_argument("--timeout-seconds", type=int, default=30, help="Timeout per snippet execution (seconds)")
parser.add_argument("--timeout-seconds", type=int, default=600, help="Timeout per snippet execution (seconds)")
parser.add_argument(
"--allow-unsafe", action="store_true", help="Allow execution of snippets with potentially unsafe patterns"
)
Expand Down
2 changes: 1 addition & 1 deletion haystack/components/audio/whisper_local.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ class LocalWhisperTranscriber:

whisper = LocalWhisperTranscriber(model="small")
whisper.warm_up()
transcription = whisper.run(sources=["path/to/audio/file"])
transcription = whisper.run(sources=["test/test_files/audio/answer.wav"])
```
"""

Expand Down
5 changes: 3 additions & 2 deletions haystack/components/audio/whisper_remote.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,10 @@ class RemoteWhisperTranscriber:

```python
from haystack.components.audio import RemoteWhisperTranscriber
from haystack.utils import Secret

whisper = RemoteWhisperTranscriber(api_key=Secret.from_token("<your-api-key>"), model="tiny")
transcription = whisper.run(sources=["path/to/audio/file"])
whisper = RemoteWhisperTranscriber(api_key=Secret.from_env_var("OPENAI_API_KEY"), model="whisper-1")
Copy link
Member

@anakin87 anakin87 Nov 27, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am not sure... Secret.from_token("<your-api-key>") was a meaningful placeholder.
Secret.from_env_var("OPENAI_API_KEY") is just the default value. (If we want to go this route, we can just remove the api_key parameter from the example)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can just use whisper = RemoteWhisperTranscriber(model="whisper-1")
If you think it's helpful, add a comment about setting the OPENAI_API_KEY env var.

transcription = whisper.run(sources=["test/test_files/audio/answer.wav"])
```
"""

Expand Down
25 changes: 8 additions & 17 deletions haystack/components/builders/chat_prompt_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ class ChatPromptBuilder:

# no parameter init, we don't use any runtime template variables
prompt_builder = ChatPromptBuilder()
llm = OpenAIChatGenerator(api_key=Secret.from_token("<your-api-key>"), model="gpt-4o-mini")
llm = OpenAIChatGenerator(api_key=Secret.from_env_var("OPENAI_API_KEY"), model="gpt-5-mini")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same as above.
We can just use llm = OpenAIChatGenerator(model="gpt-5-mini")
If you think it's helpful, add a comment about setting the OPENAI_API_KEY env var.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, deal!


pipe = Pipeline()
pipe.add_component("prompt_builder", prompt_builder)
Expand All @@ -90,27 +90,17 @@ class ChatPromptBuilder:
res = pipe.run(data={"prompt_builder": {"template_variables": {"location": location, "language": language},
"template": messages}})
print(res)
# Output example (truncated):
# {'llm': {'replies': [ChatMessage(...)]}}

>> {'llm': {'replies': [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=[TextContent(text=
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

the long output can be helpful for users

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ok will put it back in

"Berlin is the capital city of Germany and one of the most vibrant
and diverse cities in Europe. Here are some key things to know...Enjoy your time exploring the vibrant and dynamic
capital of Germany!")], _name=None, _meta={'model': 'gpt-4o-mini',
'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 27, 'completion_tokens': 681, 'total_tokens':
708}})]}}

messages = [system_message, ChatMessage.from_user("What's the weather forecast for {{location}} in the next
{{day_count}} days?")]
messages = [system_message, ChatMessage.from_user("What's the forecast for {{location}}, next {{day_count}} days?")]

res = pipe.run(data={"prompt_builder": {"template_variables": {"location": location, "day_count": "5"},
"template": messages}})

print(res)
>> {'llm': {'replies': [ChatMessage(_role=<ChatRole.ASSISTANT: 'assistant'>, _content=[TextContent(text=
"Here is the weather forecast for Berlin in the next 5
days:\\n\\nDay 1: Mostly cloudy with a high of 22°C (72°F) and...so it's always a good idea to check for updates
closer to your visit.")], _name=None, _meta={'model': 'gpt-4o-mini',
'index': 0, 'finish_reason': 'stop', 'usage': {'prompt_tokens': 37, 'completion_tokens': 201,
'total_tokens': 238}})]}}
# Output example (truncated):
# {'llm': {'replies': [ChatMessage(...)]}}
```

#### String prompt template
Expand All @@ -131,7 +121,8 @@ class ChatPromptBuilder:
{% endmessage %}
\"\"\"

images = [ImageContent.from_file_path("apple.jpg"), ImageContent.from_file_path("orange.jpg")]
images = [ImageContent.from_file_path("test/test_files/images/apple.jpg"),
ImageContent.from_file_path("test/test_files/images/haystack-logo.png")]

builder = ChatPromptBuilder(template=template)
builder.run(user_name="John", images=images)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
fixes:
- |
Fixed the pydoc examples in the audio and builder components so that they are valid,
executable scripts. Added the missing ``Secret`` import to the ``RemoteWhisperTranscriber``
example, fixed unterminated string literals in the ``ChatPromptBuilder`` examples, and
updated file paths to point at real test files instead of placeholders. API key examples
now read environment variables rather than hardcoded placeholder tokens, so all
documentation examples can be tested and executed successfully.
Loading