Skip to content

Commit 8f19482

Browse files
committed
Add sources
From private repo commit 55315374
1 parent cceb567 commit 8f19482

37 files changed

+3665
-5
lines changed

.gitignore

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
/.idea/
2+
/.venv/
3+
/dist/
4+
__pycache__/
5+
6+
/.env

LICENSE

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
MIT License
2+
3+
Copyright (c) 2024 Rivo Laks
4+
5+
Permission is hereby granted, free of charge, to any person obtaining a copy
6+
of this software and associated documentation files (the "Software"), to deal
7+
in the Software without restriction, including without limitation the rights
8+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9+
copies of the Software, and to permit persons to whom the Software is
10+
furnished to do so, subject to the following conditions:
11+
12+
The above copyright notice and this permission notice shall be included in all
13+
copies or substantial portions of the Software.
14+
15+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21+
SOFTWARE.

README.md

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
# AAF
2+
3+
AAF is a powerful and flexible framework for building and managing conversational AI models.
4+
It provides a unified interface for various language model providers and
5+
implements advanced virtual models for complex conversational scenarios.
6+
7+
8+
## Features
9+
10+
- Support for multiple LLM providers (OpenAI, Anthropic, Ollama)
11+
- Advanced conversation management with Threads and Sessions
12+
- Virtual models for complex scenarios (TwoPhase, Multiphase, Router)
13+
- Tool integration for function calling capabilities
14+
- Cost and token usage tracking
15+
16+
17+
## Installation
18+
19+
(Add installation instructions here)
20+
21+
22+
## Quick Start
23+
24+
```python
25+
from aaf.threads import Session
26+
27+
thread = Session().create_thread("gpt-4o", system="You are a helpful assistant.")
28+
thread.add_message("user", "What is the capital of France?")
29+
30+
async with thread.run() as stream:
31+
async for chunk in stream.text_chunks():
32+
print(chunk.content, end="", flush=True)
33+
print()
34+
35+
print(thread.cost_and_usage().pretty())
36+
```
37+
38+
39+
## LLM Providers
40+
41+
AAF supports the following LLM providers:
42+
- OpenAI
43+
- Anthropic
44+
- Ollama
45+
46+
To use a specific provider, specify the model name when creating a thread:
47+
48+
```python
49+
thread = session.create_thread("gpt-4o") # OpenAI
50+
thread = session.create_thread("claude-3-5-sonnet-20240620") # Anthropic
51+
thread = session.create_thread("llama3:instruct") # Ollama
52+
```
53+
54+
55+
## Virtual Models
56+
57+
AAF implements several virtual models for advanced use cases:
58+
59+
- TwoPhase: Generates a prompt and then uses it to create a response
60+
- Multiphase: Multi-step process for complex questions, including drafting, feedback, and refinement
61+
- Router: Selects the appropriate model based on the user's request
62+
63+
Using a virtual model is the same as using a standard model:
64+
65+
```python
66+
from aaf.virtual_models.two_phase import TwoPhaseModel
67+
from aaf.threads import Session
68+
69+
thread = Session().create_thread(model="two-phase", runner=TwoPhaseModel())
70+
thread.add_message("user", "What is the capital of France?")
71+
72+
async with thread.run() as stream:
73+
async for chunk in stream.text_chunks():
74+
print(chunk.content, end="", flush=True)
75+
print()
76+
77+
print(thread.cost_and_usage().pretty())
78+
```
79+
80+
81+
## Project Structure
82+
83+
- `aaf/`: Main package directory
84+
- `llms/`: LLM provider implementations
85+
- `virtual_models/`: Virtual model implementations
86+
- `threads.py`: Thread and Session management
87+
- `logging.py`: Custom logging implementation
88+
- `utils.py`: Utility functions
89+
90+
91+
## License
92+
93+
This project is licensed under the MIT License — see the [LICENSE](LICENSE) file for details.

pyproject.toml

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,22 @@
11
[project]
22
name = "aaf"
3-
version = "0.1.0"
3+
version = "0.3.0"
44
description = "A framework for building and managing conversational AI models"
55
readme = "README.md"
66
requires-python = ">=3.12"
7-
dependencies = []
7+
dependencies = [
8+
"anthropic~=0.29.0",
9+
"docstring-parser~=0.16",
10+
"duckduckgo-search~=6.2.6",
11+
"fastapi~=0.111.0",
12+
"openai~=1.30.1",
13+
"prompt-toolkit~=3.0.47",
14+
"pydantic==2.7.1",
15+
"rich~=13.7.1",
16+
"sse-starlette~=2.1.2",
17+
"typer~=0.12.3",
18+
"typing-inspect~=0.9.0",
19+
]
820

921
[build-system]
1022
requires = ["hatchling"]

src/aaf/__init__.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +0,0 @@
1-
def hello() -> str:
2-
return "Hello from aaf!"

src/aaf/chat.py

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
from typing import Optional
2+
3+
from prompt_toolkit import PromptSession
4+
from prompt_toolkit.patch_stdout import patch_stdout
5+
6+
from aaf.event_handlers import use_event_handler
7+
from aaf.threads import Thread
8+
from aaf.ui import SimpleStreamingUserInterface
9+
10+
11+
class ChatSession:
    """
    Manages an interactive chat session with an AI model.

    Drives the read-eval-print loop: reads user input, forwards each
    question to the underlying Thread, streams the response through the
    UI, and reports per-question and final cost summaries.

    Attributes:
        thread (Thread): The Thread object managing the conversation with the AI model.
        ui (SimpleStreamingUserInterface): The user interface for displaying output and handling input.
    """

    def __init__(self, thread: Thread, ui: Optional[SimpleStreamingUserInterface] = None):
        self.thread = thread
        self.ui = ui or SimpleStreamingUserInterface()
        self.prompt_session = PromptSession()

    async def run_loop(self, initial_question: Optional[str] = None, *, interactive: bool = True):
        """Run the chat loop, optionally seeded with an initial question.

        When *interactive* is False, only the initial question (if any) is
        processed. The final cost is always reported, even on error.
        """
        if initial_question:
            await self._process_question(initial_question)

        try:
            while interactive:
                user_text = await self._get_user_input()
                if user_text is None:
                    break
                await self._process_question(user_text)
        finally:
            self.ui.info(f"Final cost: {self.thread.cost_and_usage().pretty_root()}")

    async def _get_user_input(self) -> Optional[str]:
        """Prompt for one line of input; return None when the user wants to quit."""
        with patch_stdout():
            try:
                answer = await self.prompt_session.prompt_async("You: ")
            except EOFError:
                # Ctrl+D ends the session.
                print("\nGoodbye!")
                return None
            # "exit"/"quit" (any case) also end the session.
            return None if answer.lower() in ("exit", "quit") else answer

    async def _process_question(self, question: str):
        """Send one user question through the thread and stream the reply to the UI."""
        self.thread.add_message("user", question)
        async with self.thread.run_loop() as stream:
            await use_event_handler(stream, self.ui)

        self.ui.info(f"Current cost: {self.thread.cost_and_usage().pretty_root()}")

src/aaf/event_handlers.py

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
from .llms.types import (
2+
ResponseChunkToolCallFailed,
3+
ResponseChunkToolCallFinished,
4+
ResponseChunkToolCallStarted,
5+
ResponseControlChunkStreamBegin,
6+
ResponseControlChunkStreamEnd,
7+
ResponseStream,
8+
ResponseTextChunk,
9+
ToolCall,
10+
)
11+
12+
13+
class EventHandlerBase:
    """Base class for handling messages and events from the assistant and tools.

    Subclass this to build user interfaces, logging hooks, or other custom
    reactions to a conversation stream. Every callback is an intentional
    no-op by default, so subclasses override only what they need.
    """

    def info(self, message: str) -> None:
        """Show an informational message."""

    def debug(self, message: str) -> None:
        """Show a debug message."""

    def assistant_message_stream_start(self, stream: ResponseStream) -> None:
        """Invoked once when the assistant begins streaming a message."""

    def assistant_message_stream_chunk(self, stream: ResponseStream, chunk: ResponseTextChunk) -> None:
        """Invoked for every text chunk of the assistant's response."""

    def assistant_message_stream_end(self, stream: ResponseStream) -> None:
        """Invoked once when the assistant's message stream ends."""

    def tool_call_start(self, tool_call: ToolCall) -> None:
        """Invoked just before a tool call is executed."""

    def tool_call_result(self, tool_call: ToolCall, result: str) -> None:
        """Invoked after a tool call completes successfully."""

    def tool_call_error(self, tool_call: ToolCall, error: str) -> None:
        """Invoked after a tool call fails."""

    def loop_end(self, cost: float) -> None:
        """Invoked when the run loop finishes, with the accumulated cost."""
53+
54+
55+
async def use_event_handler(stream: ResponseStream, handler: EventHandlerBase):
56+
"""Uses the event handler for the stream's messages.
57+
58+
This is basically an adapter that directs the stream's messages to the event handler.
59+
"""
60+
61+
async for chunk in stream.all_chunks():
62+
if isinstance(chunk, ResponseControlChunkStreamBegin):
63+
handler.assistant_message_stream_start(stream)
64+
elif isinstance(chunk, ResponseControlChunkStreamEnd):
65+
handler.assistant_message_stream_end(stream)
66+
elif isinstance(chunk, ResponseTextChunk):
67+
handler.assistant_message_stream_chunk(stream, chunk)
68+
elif isinstance(chunk, ResponseChunkToolCallStarted):
69+
handler.tool_call_start(chunk.tool_call)
70+
elif isinstance(chunk, ResponseChunkToolCallFinished):
71+
handler.tool_call_result(chunk.tool_call, chunk.result)
72+
elif isinstance(chunk, ResponseChunkToolCallFailed):
73+
handler.tool_call_error(chunk.tool_call, chunk.error)

src/aaf/forwarding.py

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
import asyncio
2+
from asyncio import Queue
3+
from typing import AsyncIterator
4+
5+
from .llms.base import ResponseAdapterBase
6+
from .llms.types import (
7+
ResponseChunk,
8+
ResponseDebugChunk,
9+
ResponseStopReasonChunk,
10+
ResponseTextChunk,
11+
ResponseVerboseChunk,
12+
StopReason,
13+
)
14+
15+
16+
class ResponseQueue(Queue):
    """Write-side companion to ResponseStream.

    A thin asyncio.Queue subclass whose helpers wrap content into the right
    chunk type before enqueueing it; ResponseStream handles the reading side.
    A None sentinel marks the end of the stream.
    """

    async def add(self, content: str) -> None:
        """Enqueue a regular text chunk."""
        await self.put(ResponseTextChunk(content=content))

    async def add_debug(self, content: str) -> None:
        """Enqueue a debug-level chunk."""
        await self.put(ResponseDebugChunk(content=content))

    async def add_verbose(self, content: str) -> None:
        """Enqueue a verbose-level chunk."""
        await self.put(ResponseVerboseChunk(content=content))

    async def mark_finished(self) -> None:
        """Enqueue the None sentinel that signals end-of-stream to readers."""
        await self.put(None)
34+
35+
36+
class VirtualModelAdapter(ResponseAdapterBase):
    """Response stream adapter for virtual models.

    Drains chunks from the given queue and re-yields them on the stream.
    The producer must enqueue a final None to signal end-of-stream.
    """

    def __init__(self, process_task: asyncio.Task[None], queue: ResponseQueue):
        super().__init__()

        self.process_task = process_task
        self.queue = queue

        # Stop reason seen so far, if the producer emitted one.
        self._stop_reason = None

    async def __aiter__(self) -> AsyncIterator[ResponseChunk]:
        """Yield queued chunks until the None sentinel, then await the producer.

        A synthetic END_TURN stop reason is emitted only when the producer
        never sent one, so the stop reason is never yielded twice.
        """

        try:
            while (chunk := await self.queue.get()) is not None:
                if isinstance(chunk, ResponseStopReasonChunk):
                    # Remember that the producer supplied its own stop reason.
                    self._stop_reason = chunk.reason
                yield chunk

            # End-of-stream marker reached without an explicit stop reason.
            if self._stop_reason is None:
                yield ResponseStopReasonChunk(reason=StopReason.END_TURN)
        finally:
            # Ensure the producer task is completed (and surface its errors).
            await self.process_task

src/aaf/llms/__init__.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
from .anthropic import AnthropicRunner
2+
from .base import ModelRunner
3+
from .ollama import OllamaRunner
4+
from .openai import OpenAIRunner
5+
6+
# Registry of supported LLM provider runners; lookup order matters for resolution.
PROVIDERS = [OpenAIRunner, AnthropicRunner, OllamaRunner]


def get_llm_provider_and_model(model_name: str) -> tuple[type["ModelRunner"], str]:
    """Resolve *model_name* to its runner class and provider-specific model id.

    Delegates to ModelRunner.get_provider_and_model over the PROVIDERS registry.
    """
    return ModelRunner.get_provider_and_model(PROVIDERS, model_name)

0 commit comments

Comments
 (0)