Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions examples/internal/antonio/agents/.envrc
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
dotenv
1 change: 1 addition & 0 deletions examples/internal/antonio/agents/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
baml_client
1 change: 1 addition & 0 deletions examples/internal/antonio/agents/.python-version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
3.10
29 changes: 29 additions & 0 deletions examples/internal/antonio/agents/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# stock-agent

Python BAML workflow

## Setup

```bash
# Set your Boundary API key in .env
echo BOUNDARY_API_KEY=sk-key-YOUR_KEY > .env
# NOTE: the clients in baml_src/clients.baml read OPENAI_API_KEY and
# ANTHROPIC_API_KEY — add those to .env as well if you call those providers directly.

# Allow direnv (if using)
direnv allow

# Set up everything (install deps, build BAML, generate)
pnpm setup

# Run the workflow
pnpm dev
```

## Available Scripts

- `pnpm dev` - Run the Python workflow
- `pnpm setup` - One-liner setup (sync + build:baml + generate)
- `pnpm sync` - Install/update Python dependencies
- `pnpm build:baml` - Build BAML Python client
- `pnpm generate` - Generate BAML client code
- `pnpm test` - Run tests
- `pnpm typecheck` - Run type checking with mypy
142 changes: 142 additions & 0 deletions examples/internal/antonio/agents/baml_src/clients.baml
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
// Learn more about clients at https://docs.boundaryml.com/docs/snippets/clients/overview

// Using the new OpenAI Responses API for enhanced formatting
// Primary GPT-5 client. Reads the key from the OPENAI_API_KEY env var;
// no retry policy, so failures surface immediately to the caller.
client<llm> CustomGPT5 {
provider openai-responses
options {
model "gpt-5"
api_key env.OPENAI_API_KEY
}
}

// Cheaper/faster GPT-5 tier, also via the Responses API. Wrapped in the
// Exponential retry policy (defined below) since this client is used as
// the workhorse for the game-loop functions in human_loop.baml.
client<llm> CustomGPT5Mini {
provider openai-responses
retry_policy Exponential
options {
model "gpt-5-mini"
api_key env.OPENAI_API_KEY
}
}

// OpenAI GPT-5 via the classic Chat Completions API ("openai" provider),
// in contrast to the Responses-API clients above.
client<llm> CustomGPT5Chat {
provider openai
options {
model "gpt-5"
api_key env.OPENAI_API_KEY
}
}

// Latest Anthropic Claude 4 models
// Highest-capability Anthropic model (pinned snapshot date in the model id).
client<llm> CustomOpus4 {
provider anthropic
options {
model "claude-opus-4-1-20250805"
api_key env.ANTHROPIC_API_KEY
}
}

// Mid-tier Anthropic model (pinned snapshot date in the model id).
client<llm> CustomSonnet4 {
provider anthropic
options {
model "claude-sonnet-4-20250514"
api_key env.ANTHROPIC_API_KEY
}
}

// Fast/cheap Anthropic model with the Constant retry policy (defined
// below); used as the second leg of the CustomFast round-robin.
client<llm> CustomHaiku {
provider anthropic
retry_policy Constant
options {
model "claude-3-5-haiku-20241022"
api_key env.ANTHROPIC_API_KEY
}
}

// Example Google AI client (uncomment to use)
// client<llm> CustomGemini {
// provider google-ai
// options {
// model "gemini-2.5-pro"
// api_key env.GOOGLE_API_KEY
// }
// }

// Example AWS Bedrock client (uncomment to use)
// client<llm> CustomBedrock {
// provider aws-bedrock
// options {
// model "anthropic.claude-sonnet-4-20250514-v1:0"
// region "us-east-1"
// // AWS credentials are auto-detected from env vars
// }
// }

// Example Azure OpenAI client (uncomment to use)
// client<llm> CustomAzure {
// provider azure-openai
// options {
// model "gpt-5"
// api_key env.AZURE_OPENAI_API_KEY
// base_url "https://MY_RESOURCE_NAME.openai.azure.com/openai/deployments/MY_DEPLOYMENT_ID"
// api_version "2024-10-01-preview"
// }
// }

// Example Vertex AI client (uncomment to use)
// client<llm> CustomVertex {
// provider vertex-ai
// options {
// model "gemini-2.5-pro"
// location "us-central1"
// // Uses Google Cloud Application Default Credentials
// }
// }

// Example Ollama client for local models (uncomment to use)
// client<llm> CustomOllama {
// provider openai-generic
// options {
// base_url "http://localhost:11434/v1"
// model "llama4"
// default_role "user" // Most local models prefer the user role
// // No API key needed for local Ollama
// }
// }

// https://docs.boundaryml.com/docs/snippets/clients/round-robin
// Load-spreading composite: alternates requests between the two cheap
// clients (OpenAI mini and Anthropic Haiku).
client<llm> CustomFast {
provider round-robin
options {
// This will alternate between the two clients
strategy [CustomGPT5Mini, CustomHaiku]
}
}

// https://docs.boundaryml.com/docs/snippets/clients/fallback
// Resilience composite: tries the cheap mini model first, escalating to
// full GPT-5 only if the mini call fails.
client<llm> OpenaiFallback {
provider fallback
options {
// This will try the clients in order until one succeeds
strategy [CustomGPT5Mini, CustomGPT5]
}
}

// https://docs.boundaryml.com/docs/snippets/clients/retry
// Fixed-delay retry: up to 3 retries, 200 ms apart.
retry_policy Constant {
max_retries 3
strategy {
type constant_delay
delay_ms 200
}
}

// Exponential backoff: up to 2 retries starting at 300 ms, growing by
// x1.5 per attempt, with each delay capped at 10 s.
retry_policy Exponential {
max_retries 2
strategy {
type exponential_backoff
delay_ms 300
multiplier 1.5
max_delay_ms 10000
}
}
18 changes: 18 additions & 0 deletions examples/internal/antonio/agents/baml_src/generators.baml
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
// This helps us auto-generate client libraries in the language of your
// choice. You can have multiple generators if you use multiple languages;
// just ensure that the output_dir is different for each generator.
generator target {
// Valid values: "python/pydantic", "typescript", "ruby/sorbet", "rest/openapi"
output_type "python/pydantic"

// Where the generated code will be saved (relative to baml_src/)
output_dir "../"

// The version of the BAML package you have installed (e.g. same version as your baml-py or @boundaryml/baml).
// The BAML VSCode extension version should also match this version.
version "0.211.2"

// Valid values: "sync", "async"
// This controls what `b.FunctionName()` will be (sync or async).
default_client_mode sync
}
101 changes: 101 additions & 0 deletions examples/internal/antonio/agents/baml_src/human_loop.baml
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
// Picks a new famous person for a round of the game. previous_names is
// interpolated into the prompt so the model can avoid repeats; pass []
// for a fresh game. Returns just the bare name string.
function GenerateFamousPersonName(previous_names: string[]) -> string {
client CustomGPT5Mini
prompt #"
You are a famous person generator for a "Heads Up" guessing game.

Generate the name of a well-known famous person who:
- Is recognizable to most people
- Has distinctive characteristics that can be described with yes/no questions
- Is appropriate for all audiences
- Has a clear, unambiguous name

IMPORTANT: Check the list of what famous people you've already suggested
and NEVER repeat a person you've already suggested.

Already suggested names:
{{ previous_names }}

Examples: Albert Einstein, Beyoncé, Leonardo da Vinci, Oprah Winfrey, Michael Jordan

Return only the person's name, nothing else.
"#
}

// Structured reply returned by TakeGuess each turn.
class GuessResponse {
// true once the player's guess matches the hidden person (ends the game loop)
game_won bool
// player-facing reply; per the TakeGuess prompt it must never reveal the name
text string
}

// One turn of the game transcript as a role/content pair.
class Message {
// "user" = the (simulated) guesser, "assistant" = the game master
role "user" | "assistant"
content string
}

// Game-master turn: given the player's latest input, the secret name, and
// the transcript so far, answers questions / judges guesses and reports
// via game_won whether the guess was correct. The secret name is only
// visible to the model through famous_person_name interpolation; the
// prompt forbids ever echoing it back.
function TakeGuess(user_guess: string, famous_person_name: string, history: Message[]) -> GuessResponse {
client CustomGPT5Mini
prompt #"
You are a helpful game assistant for a "Heads Up" guessing game.

CRITICAL: You know the famous person's name but you must NEVER reveal it in any response.

When a user asks a question about the famous person:
- Answer truthfully based on the famous person provided
- Keep responses concise and friendly
- NEVER mention the person's name, even if it seems natural
- NEVER reveal gender, nationality, or other characteristics unless specifically asked about them
- Answer yes/no questions with clear "Yes" or "No" responses
- Be consistent - same question asked differently should get the same answer
- Ask for clarification if a question is unclear
- If multiple questions are asked at once, ask them to ask one at a time

When they make a guess:
- If correct: Congratulate them warmly
- If incorrect: Politely correct them and encourage them to try again

Encourage players to make a guess when they seem to have enough information.

{{ ctx.output_format }}

Conversation history:

{{ history }}

Here's the user input:

{{ user_guess }}
"#
}

// Plays the human side: given the transcript so far, produces the next
// question or name guess as free text.
// NOTE(review): unlike GenerateFamousPersonName, this prompt never says
// "return only the question/guess" — the model may wrap its move in extra
// prose, which is then fed verbatim to TakeGuess; confirm that's intended.
function SimulateHumanGuess(history: Message[]) -> string {
client CustomGPT5Mini
prompt #"
You are playing a "Heads Up" guessing game. Given the conversation history,
you must take a guess at the famous person's name or ask a question about them.

Conversation history:

{{ history }}
"#
}

// Orchestrates one full "Heads Up" game entirely between two LLM roles:
// picks a famous person, then alternates simulated-human turns
// (SimulateHumanGuess) and game-master turns (TakeGuess), appending each
// exchange to history, until TakeGuess reports game_won. Returns the
// final (winning) GuessResponse.
function GuessGameAgent() -> GuessResponse {
let history: Message[] = [];

// Fresh game: no previously suggested names to exclude.
let famous_person_name = GenerateFamousPersonName([]);

// First turn is taken before the loop so guess_response is initialized
// for the while condition; the loop body repeats the same two-call shape.
let user_input = SimulateHumanGuess(history);
let guess_response = TakeGuess(user_input, famous_person_name, history);

history.push(Message { role: "user", content: user_input });
history.push(Message { role: "assistant", content: guess_response.text });

// NOTE(review): no upper bound on turns — if the simulated player never
// guesses correctly, this loop (and the LLM spend) runs indefinitely;
// consider adding a max-turn cap.
while (!guess_response.game_won) {
user_input = SimulateHumanGuess(history);
guess_response = TakeGuess(user_input, famous_person_name, history);

history.push(Message { role: "user", content: user_input });
history.push(Message { role: "assistant", content: guess_response.text });
}

guess_response
}
Loading
Loading