Commit b02b307

User authored and committed
chore: release v0.3.0 with v2 prompts and fixes
- Fix Black formatting issues for CI/CD compliance
- Add experimental v2 prompts with confidence scoring
- Include comprehensive documentation and examples
- Update version to 0.3.0 for PyPI release
1 parent f2985be · commit b02b307

File tree: 12 files changed (+173 / -92 lines)


.gitignore

Lines changed: 1 addition & 0 deletions
@@ -70,6 +70,7 @@ venv.bak/
 # Project specific
 *.pdf
 ACE_IMPROVEMENTS.md
+ACE_ROADMAP.md
 *.egg-info/
 reports/
 docs/method_outline.md

CHANGELOG.md

Lines changed: 18 additions & 2 deletions
@@ -5,17 +5,33 @@ All notable changes to ACE Framework will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## [Unreleased]
+## [0.3.0] - 2025-10-16
 
 ### Added
+- **Experimental v2 Prompts** with state-of-the-art prompt engineering
+  - Confidence scoring at bullet and answer levels
+  - Domain-specific variants for math and code generation
+  - Hierarchical structure with identity headers and metadata
+  - Concrete examples and anti-patterns for better guidance
+- PromptManager for version control and A/B testing
+- Comprehensive prompt engineering documentation (`docs/PROMPT_ENGINEERING.md`)
+- Advanced examples demonstrating v2 prompts (`examples/advanced_prompts_v2.py`)
+- Comparison script for v1 vs v2 prompts (`examples/compare_v1_v2_prompts.py`)
 - Playbook persistence with `save_to_file()` and `load_from_file()` methods
 - Example demonstrating playbook save/load functionality (`examples/playbook_persistence.py`)
 - py.typed file for PEP 561 type hint support
-- Documentation for playbook persistence in README
+- Mermaid flowchart visualization in README showing ACE learning loop
+- ACE_ROADMAP.md (untracked) for development planning
+
+### Changed
+- Enhanced docstrings with comprehensive examples throughout codebase
+- Improved README with v2 prompts section and visual diagrams
+- Updated formatting to comply with Black code style
 
 ### Fixed
 - README incorrectly referenced non-existent docs/ directory
 - Test badge URL in README (test.yml → tests.yml)
+- Code formatting issues detected by GitHub Actions
 
 ## [0.2.0] - 2025-10-15
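The changelog entry above names `save_to_file()` and `load_from_file()` for playbook persistence. A minimal sketch of how that might look in use; only the two method names and the `ace.playbook` module come from this commit, while the no-argument constructor, the classmethod form of `load_from_file()`, and the JSON file path are assumptions:

```python
# Hedged sketch of the playbook persistence named in the changelog.
# save_to_file()/load_from_file() and the ace.playbook module are from the commit;
# the constructor, classmethod form, and file name are assumptions.
from ace.playbook import Playbook

playbook = Playbook()
playbook.save_to_file("playbook.json")                # persist learned strategy bullets
restored = Playbook.load_from_file("playbook.json")   # reload them in a later session
```

See `examples/playbook_persistence.py` in the repo for the maintained version of this workflow.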

ace/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -23,6 +23,7 @@
 # Import production LLM clients if available
 try:
     from .llm_providers import LiteLLMClient
+
     LITELLM_AVAILABLE = True
 except ImportError:
     LiteLLMClient = None
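The hunk above shows the optional-import pattern that exposes `LITELLM_AVAILABLE` next to `LiteLLMClient`. A hedged sketch of consuming that flag; the `model=` keyword and the model id are assumptions, only the flag and class name come from the diff:

```python
# Hedged sketch: guard on the LITELLM_AVAILABLE flag shown above.
# The LiteLLMClient constructor signature and model id are assumptions.
import ace

if ace.LITELLM_AVAILABLE:
    client = ace.LiteLLMClient(model="gpt-4o-mini")  # hypothetical model id
else:
    client = None  # litellm extra not installed; fall back to an offline client
```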

ace/adaptation.py

Lines changed: 20 additions & 5 deletions
@@ -8,7 +8,14 @@
 from typing import Dict, Iterable, List, Optional, Sequence
 
 from .playbook import Playbook
-from .roles import Curator, CuratorOutput, Generator, GeneratorOutput, Reflector, ReflectorOutput
+from .roles import (
+    Curator,
+    CuratorOutput,
+    Generator,
+    GeneratorOutput,
+    Reflector,
+    ReflectorOutput,
+)
 
 
 @dataclass
@@ -116,7 +123,9 @@ def _update_recent_reflections(self, reflection: ReflectorOutput) -> None:
         serialized = json.dumps(reflection.raw, ensure_ascii=False)
         self._recent_reflections.append(serialized)
         if len(self._recent_reflections) > self.reflection_window:
-            self._recent_reflections = self._recent_reflections[-self.reflection_window :]
+            self._recent_reflections = self._recent_reflections[
+                -self.reflection_window :
+            ]
 
     def _apply_bullet_tags(self, reflection: ReflectorOutput) -> None:
         for tag in reflection.bullet_tags:
@@ -125,7 +134,9 @@ def _apply_bullet_tags(self, reflection: ReflectorOutput) -> None:
             except ValueError:
                 continue
 
-    def _question_context(self, sample: Sample, environment_result: EnvironmentResult) -> str:
+    def _question_context(
+        self, sample: Sample, environment_result: EnvironmentResult
+    ) -> str:
         parts = [
             f"question: {sample.question}",
             f"context: {sample.context}",
@@ -135,7 +146,9 @@ def _question_context(self, sample: Sample, environment_result: EnvironmentResul
         ]
         return "\n".join(parts)
 
-    def _progress_string(self, epoch: int, total_epochs: int, step: int, total_steps: int) -> str:
+    def _progress_string(
+        self, epoch: int, total_epochs: int, step: int, total_steps: int
+    ) -> str:
         return f"epoch {epoch}/{total_epochs} · sample {step}/{total_steps}"
 
     def _process_sample(
@@ -169,7 +182,9 @@ def _process_sample(
             reflection=reflection,
             playbook=self.playbook,
             question_context=self._question_context(sample, env_result),
-            progress=self._progress_string(epoch, total_epochs, step_index, total_steps),
+            progress=self._progress_string(
+                epoch, total_epochs, step_index, total_steps
+            ),
         )
         self.playbook.apply_delta(curator_output.delta)
         return AdapterStepResult(

ace/llm.py

Lines changed: 3 additions & 1 deletion
@@ -134,7 +134,9 @@ def _extract_text(self, outputs: Any) -> str:
             return str(generated)
 
         # Older versions might return {"generated_text": "..."}
-        if isinstance(candidate, dict) and isinstance(candidate.get("generated_text"), str):
+        if isinstance(candidate, dict) and isinstance(
+            candidate.get("generated_text"), str
+        ):
             return candidate["generated_text"].strip()
 
         # Ultimate fallback: string representation.

ace/llm_providers/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -10,4 +10,4 @@
 __all__ = [
     "LiteLLMClient",
     "LangChainLiteLLMClient",
-]
+]

ace/llm_providers/langchain_client.py

Lines changed: 21 additions & 23 deletions
@@ -6,6 +6,7 @@
 try:
     from langchain_litellm import ChatLiteLLM, ChatLiteLLMRouter
     from litellm import Router
+
     LANGCHAIN_AVAILABLE = True
 except ImportError:
     LANGCHAIN_AVAILABLE = False
@@ -59,7 +60,7 @@ def __init__(
         router: Optional[Any] = None,  # Router from litellm
         temperature: float = 0.0,
         max_tokens: Optional[int] = None,
-        **kwargs
+        **kwargs,
     ):
         if not LANGCHAIN_AVAILABLE:
             raise ImportError(
@@ -74,28 +75,27 @@ def __init__(
 
         # Initialize the appropriate LangChain client
         if router:
-            logger.info(f"Initializing LangChainLiteLLMClient with router for model: {model}")
+            logger.info(
+                f"Initializing LangChainLiteLLMClient with router for model: {model}"
+            )
             self.llm = ChatLiteLLMRouter(
                 router=router,
                 model_name=model,
                 temperature=temperature,
                 max_tokens=max_tokens,
-                **kwargs
+                **kwargs,
             )
             self.is_router = True
         else:
             logger.info(f"Initializing LangChainLiteLLMClient for model: {model}")
             self.llm = ChatLiteLLM(
-                model=model,
-                temperature=temperature,
-                max_tokens=max_tokens,
-                **kwargs
+                model=model, temperature=temperature, max_tokens=max_tokens, **kwargs
             )
             self.is_router = False
 
     def _filter_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
         """Filter out ACE-specific parameters that shouldn't go to LangChain."""
-        ace_specific_params = {'refinement_round', 'max_refinement_rounds'}
+        ace_specific_params = {"refinement_round", "max_refinement_rounds"}
         return {k: v for k, v in kwargs.items() if k not in ace_specific_params}
 
     def complete(self, prompt: str, **kwargs) -> LLMResponse:
@@ -121,7 +121,7 @@ def complete(self, prompt: str, **kwargs) -> LLMResponse:
             }
 
             # Add usage information if available
-            if hasattr(response, 'usage_metadata') and response.usage_metadata:
+            if hasattr(response, "usage_metadata") and response.usage_metadata:
                 metadata["usage"] = {
                     "prompt_tokens": response.usage_metadata.get("input_tokens"),
                     "completion_tokens": response.usage_metadata.get("output_tokens"),
@@ -131,12 +131,11 @@ def complete(self, prompt: str, **kwargs) -> LLMResponse:
             # Add router information if using router
             if self.is_router:
                 metadata["router"] = True
-                metadata["model_used"] = response.response_metadata.get("model_name", self.model)
+                metadata["model_used"] = response.response_metadata.get(
+                    "model_name", self.model
+                )
 
-            return LLMResponse(
-                text=response.content,
-                raw=metadata
-            )
+            return LLMResponse(text=response.content, raw=metadata)
 
         except Exception as e:
             logger.error(f"Error in LangChain completion: {e}")
@@ -165,7 +164,7 @@ async def acomplete(self, prompt: str, **kwargs) -> LLMResponse:
             }
 
             # Add usage information if available
-            if hasattr(response, 'usage_metadata') and response.usage_metadata:
+            if hasattr(response, "usage_metadata") and response.usage_metadata:
                 metadata["usage"] = {
                     "prompt_tokens": response.usage_metadata.get("input_tokens"),
                     "completion_tokens": response.usage_metadata.get("output_tokens"),
@@ -175,12 +174,11 @@ async def acomplete(self, prompt: str, **kwargs) -> LLMResponse:
             # Add router information if using router
             if self.is_router:
                 metadata["router"] = True
-                metadata["model_used"] = response.response_metadata.get("model_name", self.model)
+                metadata["model_used"] = response.response_metadata.get(
+                    "model_name", self.model
+                )
 
-            return LLMResponse(
-                text=response.content,
-                raw=metadata
-            )
+            return LLMResponse(text=response.content, raw=metadata)
 
         except Exception as e:
             logger.error(f"Error in async LangChain completion: {e}")
@@ -201,7 +199,7 @@ def complete_with_stream(self, prompt: str, **kwargs) -> Iterator[str]:
 
         try:
             for chunk in self.llm.stream(prompt, **filtered_kwargs):
-                if hasattr(chunk, 'content') and chunk.content:
+                if hasattr(chunk, "content") and chunk.content:
                     yield chunk.content
         except Exception as e:
             logger.error(f"Error in LangChain streaming: {e}")
@@ -222,8 +220,8 @@ async def acomplete_with_stream(self, prompt: str, **kwargs) -> AsyncIterator[st
 
         try:
             async for chunk in self.llm.astream(prompt, **filtered_kwargs):
-                if hasattr(chunk, 'content') and chunk.content:
+                if hasattr(chunk, "content") and chunk.content:
                     yield chunk.content
         except Exception as e:
             logger.error(f"Error in async LangChain streaming: {e}")
-            raise
+            raise
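For context on the client being reformatted above, a hedged usage sketch of `LangChainLiteLLMClient`: the constructor parameters (`model`, `temperature`, `max_tokens`, optional `router`), the `complete()` method, and the `LLMResponse` return type are visible in the diff and in `ace/llm_providers/__init__.py`, while the model id and the `.text`/`.raw` field usage shown here are assumptions.

```python
# Hedged sketch, not the maintained example: parameter names and complete()
# come from the diff above; the model id and response fields are assumptions.
from ace.llm_providers import LangChainLiteLLMClient

client = LangChainLiteLLMClient(
    model="gpt-4o-mini",   # hypothetical LiteLLM model id
    temperature=0.0,
    max_tokens=512,
)
response = client.complete("Summarize the ACE learning loop in one sentence.")
print(response.text)       # LLMResponse carries the generated text plus raw metadata
```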
