Commit 8a2ed43

devin-ai-integration[bot] and João committed
fix: Resolve remaining lint issues (W291, W293, B904)
- Remove trailing whitespace from examples/prompt_caching_example.py
- Fix exception handling to use 'from e' for proper error chaining
- All lint checks now pass locally

Co-Authored-By: João <joao@crewai.com>
1 parent 9a0b3e8 commit 8a2ed43
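
For context on the B904 part of this fix: raising a new exception inside an except block without "from" hides the link to the original error. A minimal sketch of the difference, with illustrative names that are not taken from this repository:

# Minimal sketch of B904-style exception chaining; the names here are
# illustrative and not code from the CrewAI codebase.
class ConfigError(Exception):
    """Domain-level error raised in place of a lower-level one."""


def parse_port(raw: str) -> int:
    try:
        return int(raw)
    except ValueError as e:
        # "from e" records the original ValueError as __cause__, so the
        # traceback shows both errors instead of hiding the root cause.
        raise ConfigError(f"invalid port value: {raw!r}") from e


try:
    parse_port("eighty")
except ConfigError as err:
    print(type(err.__cause__).__name__)  # ValueError

Without "from e", Python only records the implicit __context__ and prints "During handling of the above exception, another exception occurred"; __cause__ stays unset, which is what B904 flags.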

File tree

2 files changed: +44 −44 lines changed

examples/prompt_caching_example.py

Lines changed: 41 additions & 41 deletions

(All 41 changed lines in this file are whitespace-only fixes: trailing whitespace removed from otherwise blank lines (W293) and from the wrapped lines of the analyst backstory string (W291). The affected region reads as follows after the change.)

@@ -11,60 +11,60 @@

def create_crew_with_caching():
    """Create a crew with prompt caching enabled."""

    llm = LLM(
        model="anthropic/claude-3-5-sonnet-20240620",
        enable_prompt_caching=True,
        temperature=0.1
    )

    analyst = Agent(
        role="Data Analyst",
        goal="Analyze data and provide insights",
        backstory="""You are an experienced data analyst with expertise in
        statistical analysis, data visualization, and business intelligence.
        You have worked with various industries including finance, healthcare,
        and technology. Your approach is methodical and you always provide
        actionable insights based on data patterns.""",
        llm=llm
    )

    analysis_task = Task(
        description="""Analyze the following dataset: {dataset}

        Please provide:
        1. Summary statistics
        2. Key patterns and trends
        3. Actionable recommendations
        4. Potential risks or concerns

        Be thorough in your analysis and provide specific examples.""",
        expected_output="A comprehensive analysis report with statistics, trends, and recommendations",
        agent=analyst
    )

    return Crew(agents=[analyst], tasks=[analysis_task])


def example_kickoff_for_each():
    """Example using kickoff_for_each with prompt caching."""
    print("Running kickoff_for_each example with prompt caching...")

    crew = create_crew_with_caching()

    datasets = [
        {"dataset": "Q1 2024 sales data showing 15% growth in mobile segment"},
        {"dataset": "Q2 2024 customer satisfaction scores with 4.2/5 average rating"},
        {"dataset": "Q3 2024 website traffic data with 25% increase in organic search"},
        {"dataset": "Q4 2024 employee engagement survey with 78% satisfaction rate"}
    ]

    results = crew.kickoff_for_each(datasets)

    for i, result in enumerate(results, 1):
        print(f"\n--- Analysis {i} ---")
        print(result.raw)

    if crew.usage_metrics:
        print(f"\nTotal usage metrics:")
        print(f"Total tokens: {crew.usage_metrics.total_tokens}")

@@ -75,21 +75,21 @@ def example_kickoff_for_each():
async def example_kickoff_for_each_async():
    """Example using kickoff_for_each_async with prompt caching."""
    print("Running kickoff_for_each_async example with prompt caching...")

    crew = create_crew_with_caching()

    datasets = [
        {"dataset": "Marketing campaign A: 12% CTR, 3.5% conversion rate"},
        {"dataset": "Marketing campaign B: 8% CTR, 4.1% conversion rate"},
        {"dataset": "Marketing campaign C: 15% CTR, 2.8% conversion rate"}
    ]

    results = await crew.kickoff_for_each_async(datasets)

    for i, result in enumerate(results, 1):
        print(f"\n--- Async Analysis {i} ---")
        print(result.raw)

    if crew.usage_metrics:
        print(f"\nTotal async usage metrics:")
        print(f"Total tokens: {crew.usage_metrics.total_tokens}")

@@ -98,35 +98,35 @@ async def example_kickoff_for_each_async():
def example_bedrock_caching():
    """Example using AWS Bedrock with prompt caching."""
    print("Running Bedrock example with prompt caching...")

    llm = LLM(
        model="bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        enable_prompt_caching=True
    )

    agent = Agent(
        role="Legal Analyst",
        goal="Review legal documents and identify key clauses",
        backstory="Expert legal analyst with 10+ years experience in contract review",
        llm=llm
    )

    task = Task(
        description="Review this contract section: {contract_section}",
        expected_output="Summary of key legal points and potential issues",
        agent=agent
    )

    crew = Crew(agents=[agent], tasks=[task])

    contract_sections = [
        {"contract_section": "Section 1: Payment terms and conditions"},
        {"contract_section": "Section 2: Intellectual property rights"},
        {"contract_section": "Section 3: Termination clauses"}
    ]

    results = crew.kickoff_for_each(contract_sections)

    for i, result in enumerate(results, 1):
        print(f"\n--- Legal Review {i} ---")
        print(result.raw)

@@ -135,53 +135,53 @@ def example_bedrock_caching():
def example_openai_caching():
    """Example using OpenAI with prompt caching."""
    print("Running OpenAI example with prompt caching...")

    llm = LLM(
        model="gpt-4o",
        enable_prompt_caching=True
    )

    agent = Agent(
        role="Content Writer",
        goal="Create engaging content for different audiences",
        backstory="Professional content writer with expertise in various writing styles and formats",
        llm=llm
    )

    task = Task(
        description="Write a {content_type} about: {topic}",
        expected_output="Well-structured and engaging content piece",
        agent=agent
    )

    crew = Crew(agents=[agent], tasks=[task])

    content_requests = [
        {"content_type": "blog post", "topic": "benefits of renewable energy"},
        {"content_type": "social media post", "topic": "importance of cybersecurity"},
        {"content_type": "newsletter", "topic": "latest AI developments"}
    ]

    results = crew.kickoff_for_each(content_requests)

    for i, result in enumerate(results, 1):
        print(f"\n--- Content Piece {i} ---")
        print(result.raw)


if __name__ == "__main__":
    print("=== CrewAI Prompt Caching Examples ===\n")

    example_kickoff_for_each()

    print("\n" + "="*50 + "\n")

    asyncio.run(example_kickoff_for_each_async())

    print("\n" + "="*50 + "\n")

    example_bedrock_caching()

    print("\n" + "="*50 + "\n")

    example_openai_caching()

src/crewai/llm.py

Lines changed: 3 additions & 3 deletions

@@ -740,7 +740,7 @@ def _handle_streaming_response(
            # Catch context window errors from litellm and convert them to our own exception type.
            # This exception is handled by CrewAgentExecutor._invoke_loop() which can then
            # decide whether to summarize the content or abort based on the respect_context_window flag.
-            raise LLMContextLengthExceededException(str(e))
+            raise LLMContextLengthExceededException(str(e)) from e
        except Exception as e:
            logging.error(f"Error in streaming response: {e!s}")
            if full_response.strip():

@@ -762,7 +762,7 @@ def _handle_streaming_response(
                    error=str(e), from_task=from_task, from_agent=from_agent
                ),
            )
-            raise Exception(f"Failed to get streaming response: {e!s}")
+            raise Exception(f"Failed to get streaming response: {e!s}") from e

    def _handle_streaming_tool_calls(
        self,

@@ -891,7 +891,7 @@ def _handle_non_streaming_response(
        except ContextWindowExceededError as e:
            # Convert litellm's context window error to our own exception type
            # for consistent handling in the rest of the codebase
-            raise LLMContextLengthExceededException(str(e))
+            raise LLMContextLengthExceededException(str(e)) from e
        # --- 2) Extract response message and content
        response_message = cast(Choices, cast(ModelResponse, response).choices)[
            0
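
The inline comments above describe the pattern these changes touch: provider-specific context-window errors from litellm are converted into CrewAI's own LLMContextLengthExceededException so that the agent executor can decide whether to summarize the conversation or abort. A rough, self-contained sketch of that convert-and-handle flow, using simplified stand-in types rather than the real CrewAI classes:

# Rough sketch of the convert-and-handle flow described in the comments above.
# All names below are simplified stand-ins, not CrewAI's actual classes.
class ProviderContextWindowError(Exception):
    """Stands in for the provider/litellm context window error."""


class ContextLengthExceeded(Exception):
    """Stands in for the library's own exception type."""


def call_model(prompt: str) -> str:
    # Pretend the model only accepts prompts up to 50 characters.
    if len(prompt) > 50:
        raise ProviderContextWindowError("prompt exceeds context window")
    return f"response to: {prompt[:20]}..."


def llm_call(prompt: str) -> str:
    try:
        return call_model(prompt)
    except ProviderContextWindowError as e:
        # Convert to the library's exception type, chaining the original
        # error so the root cause stays visible in tracebacks (B904).
        raise ContextLengthExceeded(str(e)) from e


def invoke(prompt: str, respect_context_window: bool) -> str:
    try:
        return llm_call(prompt)
    except ContextLengthExceeded:
        if respect_context_window:
            # Crude stand-in for summarization: shorten the prompt and retry.
            return llm_call(prompt[:50])
        raise


print(invoke("a" * 120, respect_context_window=True))

The point of the sketch is the single conversion boundary: callers only ever catch one exception type, and the "from e" chaining added in this commit keeps the provider error attached as the cause.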

0 commit comments
