Commit ae8903d

crewai docs final
1 parent dfafd02 commit ae8903d

File tree

1 file changed: integrations/agents/crewai.mdx (20 additions, 23 deletions)
@@ -30,7 +30,7 @@ pip install -U crewai portkey-ai
 </Step>

 <Step title="Generate API Key" icon="lock">
-Create a Portkey API key with optional budget/rate limits from the [Portkey dashboard](https://app.portkey.ai/). You can attach configurations for reliability, caching, and more to this key.
+Create a Portkey API key with optional budget/rate limits from the [Portkey dashboard](https://app.portkey.ai/). You can also attach configurations for reliability, caching, and more to this key. More on this later.
 </Step>

 <Step title="Configure CrewAI with Portkey">
@@ -42,20 +42,27 @@ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

 # Create an LLM instance with Portkey integration
 gpt_llm = LLM(
-    model="gpt-3.5-turbo",
-    max_tokens=100,
+    model="gpt-4o",
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",  # We are using a Virtual key, so this is a placeholder
     extra_headers=createHeaders(
         api_key="YOUR_PORTKEY_API_KEY",
         virtual_key="YOUR_LLM_VIRTUAL_KEY",
         trace_id="unique-trace-id",  # Optional, for request tracing
-        metadata={  # Optional, for request segmentation
-            "app_env": "production",
-            "_user": "user_123"  # Special _user field for user analytics
-        }
     )
 )
+
+# Use them in your Crew Agents like this:
+
+@agent
+def lead_market_analyst(self) -> Agent:
+    return Agent(
+        config=self.agents_config['lead_market_analyst'],
+        verbose=True,
+        memory=False,
+        llm=gpt_llm
+    )
+
 ```

 <Info>
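
Note: the `@agent` method added above references `self.agents_config`, which only exists inside a `@CrewBase`-decorated crew class, so the snippet is not runnable on its own. A minimal sketch of the surrounding context it assumes; the class name and the `config/agents.yaml` path are illustrative placeholders, not part of this commit:

```python
# Sketch of the crew class context the @agent method above assumes.
# MarketResearchCrew and the agents.yaml path are illustrative placeholders.
from crewai import Agent, LLM
from crewai.project import CrewBase, agent
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

gpt_llm = LLM(
    model="gpt-4o",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # placeholder; the virtual key carries the real credential
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_LLM_VIRTUAL_KEY",
    ),
)

@CrewBase
class MarketResearchCrew:
    # CrewBase loads this YAML and exposes it as a dict at runtime,
    # which is why self.agents_config[...] works below
    agents_config = "config/agents.yaml"

    @agent
    def lead_market_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['lead_market_analyst'],
            verbose=True,
            memory=False,
            llm=gpt_llm,
        )
```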
@@ -82,7 +89,6 @@ Traces provide a hierarchical view of your crew's execution, showing the sequenc
 # Add trace_id to enable hierarchical tracing in Portkey
 portkey_llm = LLM(
     model="gpt-4o",
-    max_tokens=1000,
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",
     extra_headers=createHeaders(
@@ -134,7 +140,6 @@ Add custom metadata to your CrewAI LLM configuration to enable powerful filterin
 ```python
 portkey_llm = LLM(
     model="gpt-4o",
-    max_tokens=1000,
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",
     extra_headers=createHeaders(
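
The hunk above is truncated inside `createHeaders(...)`. For reference, a sketch of passing custom metadata through `createHeaders`, reusing the metadata fields that appeared in the old version of this file (the field values are illustrative):

```python
# Sketch: attaching custom metadata for filtering/segmentation in Portkey.
# Field names and values mirror the metadata block removed earlier in this commit.
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

portkey_llm = LLM(
    model="gpt-4o",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_LLM_VIRTUAL_KEY",
        metadata={                 # optional, for request segmentation
            "app_env": "production",
            "_user": "user_123",   # special _user field powers user analytics
        },
    ),
)
```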
@@ -246,18 +251,19 @@ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL, Portkey
 portkey_admin = Portkey(api_key="YOUR_PORTKEY_API_KEY")

 # Retrieve prompt using the render API
-prompt_data = portkey_admin.prompts.render(
+prompt_data = portkey_client.prompts.render(
     prompt_id="YOUR_PROMPT_ID",
     variables={
         "agent_role": "Senior Research Scientist",
-        "agent_goal": "Discover groundbreaking insights"
     }
-).data.dict()
+)
+
+backstory_agent_prompt = prompt_data.data.messages[0]["content"]
+

 # Set up LLM with Portkey integration
 portkey_llm = LLM(
     model="gpt-4o",
-    max_tokens=1000,
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",
     extra_headers=createHeaders(
@@ -270,7 +276,7 @@ portkey_llm = LLM(
 researcher = Agent(
     role="Senior Research Scientist",
     goal="Discover groundbreaking insights about the assigned topic",
-    backstory=prompt_data["messages"][0]["content"],  # Use the rendered prompt
+    backstory=backstory_agent,  # Use the rendered prompt
     verbose=True,
     llm=portkey_llm
 )
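
Note two naming slips in the new code above: the render call reads `portkey_client` although the surrounding context defines `portkey_admin`, and the backstory references `backstory_agent` although the rendered text is stored in `backstory_agent_prompt`. A consolidated sketch of the intended flow with consistent names, using the same placeholder IDs and keys as the docs:

```python
# Corrected end-to-end sketch of the render-then-use flow, with consistent
# names (the committed hunks say portkey_client / backstory_agent where
# portkey_admin / backstory_agent_prompt are actually defined).
from crewai import Agent, LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL, Portkey

portkey_admin = Portkey(api_key="YOUR_PORTKEY_API_KEY")

# Retrieve the prompt template with variables substituted
prompt_data = portkey_admin.prompts.render(
    prompt_id="YOUR_PROMPT_ID",
    variables={
        "agent_role": "Senior Research Scientist",
    },
)
backstory_agent_prompt = prompt_data.data.messages[0]["content"]

portkey_llm = LLM(
    model="gpt-4o",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_LLM_VIRTUAL_KEY",
    ),
)

researcher = Agent(
    role="Senior Research Scientist",
    goal="Discover groundbreaking insights about the assigned topic",
    backstory=backstory_agent_prompt,  # the rendered prompt text
    verbose=True,
    llm=portkey_llm,
)
```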
@@ -352,7 +358,6 @@ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
 # Create LLM with guardrails
 portkey_llm = LLM(
     model="gpt-4o",
-    max_tokens=1000,
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",
     extra_headers=createHeaders(
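
The guardrails hunk also truncates inside `createHeaders(...)`. One way to complete it, assuming the guardrail checks are bundled into a config saved in the Portkey dashboard and attached by ID; the config ID below is a placeholder, not from this commit:

```python
# Sketch: attaching a guardrails-enabled Portkey config to the LLM.
# "pc-guardrails-xxx" is a placeholder for a config saved in the Portkey
# dashboard with your input/output guardrail checks attached.
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

portkey_llm = LLM(
    model="gpt-4o",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_LLM_VIRTUAL_KEY",
        config="pc-guardrails-xxx",  # saved config carrying the guardrail checks
    ),
)
```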
@@ -401,7 +406,6 @@ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
 # Configure LLM with user tracking
 portkey_llm = LLM(
     model="gpt-4o",
-    max_tokens=1000,
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",
     extra_headers=createHeaders(
@@ -457,7 +461,6 @@ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
 # Configure LLM with simple caching
 portkey_llm = LLM(
     model="gpt-4o",
-    max_tokens=1000,
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",
     extra_headers=createHeaders(
@@ -492,7 +495,6 @@ from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
 # Configure LLM with semantic caching
 portkey_llm = LLM(
     model="gpt-4o",
-    max_tokens=1000,
     base_url=PORTKEY_GATEWAY_URL,
     api_key="dummy",
     extra_headers=createHeaders(
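
Both caching hunks truncate at the same point. A sketch of enabling caching through an inline config passed to `createHeaders`; the `cache` block follows Portkey's documented config schema, but treat the exact fields as something to verify against the Portkey docs:

```python
# Sketch: enabling response caching through an inline Portkey config.
# mode can be "simple" (exact-match) or "semantic"; max_age is in seconds.
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

portkey_llm = LLM(
    model="gpt-4o",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_LLM_VIRTUAL_KEY",
        config={
            "cache": {"mode": "semantic", "max_age": 3600}
        },
    ),
)
```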
@@ -814,11 +816,6 @@ Here's a basic configuration to route requests to OpenAI, specifically using GPT
 <Card title="CrewAI Docs" icon="book" href="https://docs.crewai.com/">
   <p>Official CrewAI documentation</p>
 </Card>
-
-<Card title="Portkey Docs" icon="book" href="https://portkey.ai/docs">
-  <p>Official Portkey documentation</p>
-</Card>
-
 <Card title="Book a Demo" icon="calendar" href="https://calendly.com/portkey-ai">
   <p>Get personalized guidance on implementing this integration</p>
 </Card>
