
Commit 405878b

Upmerge and fix conflicts

2 parents: 1911ac4 + 0b8defc

File tree: 24 files changed (+1341, -165 lines)

.github/workflows/release.yml

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ jobs:
           hatch build
 
       - name: Publish Python 🐍 distributions 📦 to PyPI
-        uses: pypa/gh-action-pypi-publish@master
+        uses: pypa/gh-action-pypi-publish@release/v1
         with:
           password: ${{ secrets.PYPI_TOKEN }}
 

README.md

Lines changed: 10 additions & 1 deletion
@@ -238,6 +238,14 @@ By default, prompt and completion data are captured. If you would like to opt out
 
 `TRACE_PROMPT_COMPLETION_DATA=false`
 
+### Enable/Disable checkpoint tracing for DSPy
+
+By default, checkpoints are traced for DSPy pipelines. If you would like to disable it, set the following env var,
+
+`TRACE_DSPY_CHECKPOINT=false`
+
+Note: Checkpoint tracing will increase the latency of executions as the state is serialized. Please disable it in production.
+
 ## Supported integrations
 
 Langtrace automatically captures traces from the following vendors:

@@ -253,8 +261,9 @@ Langtrace automatically captures traces from the following vendors:
 | Gemini | LLM | :x: | :white_check_mark: |
 | Mistral | LLM | :x: | :white_check_mark: |
 | Langchain | Framework | :x: | :white_check_mark: |
-| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
 | Langgraph | Framework | :x: | :white_check_mark: |
+| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
+| LiteLLM | Framework | :x: | :white_check_mark: |
 | DSPy | Framework | :x: | :white_check_mark: |
 | CrewAI | Framework | :x: | :white_check_mark: |
 | Ollama | Framework | :x: | :white_check_mark: |
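
Side note: the two tracing toggles documented in this README hunk are plain environment variables, so they can also be set in code before the SDK starts. A minimal sketch (assuming, as the README wording implies, the flags are read when `langtrace.init()` runs):

import os

# Env var names come from the README section above; values other than "false"
# are assumed to leave the default behavior in place.
os.environ["TRACE_PROMPT_COMPLETION_DATA"] = "false"  # opt out of prompt/completion capture
os.environ["TRACE_DSPY_CHECKPOINT"] = "false"  # skip DSPy checkpoint serialization

from langtrace_python_sdk import langtrace

langtrace.init()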

pyproject.toml

Lines changed: 4 additions & 2 deletions
@@ -31,13 +31,14 @@ dependencies = [
   'fsspec>=2024.6.0',
   "transformers>=4.11.3",
   "sentry-sdk>=2.14.0",
+  "ujson>=5.10.0",
 ]
 
 requires-python = ">=3.9"
 
 [project.optional-dependencies]
 dev = [
-  "openai==1.30.1",
+  "openai==1.45.0",
   "anthropic",
   "chromadb",
   "qdrant-client",

@@ -46,7 +47,7 @@ dev = [
   "langchain",
   "langchain-community",
   "langchain-openai",
-  "langchain-openai",
+  "litellm==1.48.7",
   "chromadb",
   "cohere",
   "qdrant_client",

@@ -59,6 +60,7 @@ dev = [
   "embedchain",
   "psycopg",
   "pgvector"
+  "embedchain"
 ]
 
 test = ["pytest", "pytest-vcr", "pytest-asyncio"]
Lines changed: 89 additions & 0 deletions
@@ -0,0 +1,89 @@
+import dspy
+from dotenv import find_dotenv, load_dotenv
+from dspy.datasets import HotPotQA
+from dspy.teleprompt import BootstrapFewShot
+
+from langtrace_python_sdk import inject_additional_attributes, langtrace
+
+_ = load_dotenv(find_dotenv())
+
+langtrace.init()
+
+turbo = dspy.LM('openai/gpt-4o-mini')
+colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
+
+dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
+
+
+# Load the dataset.
+dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
+
+# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.
+trainset = [x.with_inputs('question') for x in dataset.train]
+devset = [x.with_inputs('question') for x in dataset.dev]
+
+
+class GenerateAnswer(dspy.Signature):
+    """Answer questions with short factoid answers."""
+
+    context = dspy.InputField(desc="may contain relevant facts")
+    question = dspy.InputField()
+    answer = dspy.OutputField(desc="often between 1 and 5 words")
+
+
+class RAG(dspy.Module):
+    def __init__(self, num_passages=3):
+        super().__init__()
+
+        self.retrieve = dspy.Retrieve(k=num_passages)
+        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
+
+    def forward(self, question):
+        context = self.retrieve(question).passages
+        prediction = self.generate_answer(context=context, question=question)
+        return dspy.Prediction(context=context, answer=prediction.answer)
+
+
+# Validation logic: check that the predicted answer is correct.
+# Also check that the retrieved context does actually contain that answer.
+def validate_context_and_answer(example, prediction, trace=None):
+    answer_em = dspy.evaluate.answer_exact_match(example, prediction)
+    answer_pm = dspy.evaluate.answer_passage_match(example, prediction)
+    return answer_em and answer_pm
+
+
+# Set up a basic optimizer, which will compile our RAG program.
+optimizer = BootstrapFewShot(metric=validate_context_and_answer)
+
+# Compile!
+compiled_rag = optimizer.compile(RAG(), trainset=trainset)
+
+# Ask any question you like to this simple RAG program.
+my_question = "Who was the hero of the movie peraanmai?"
+
+# Get the prediction. This contains `pred.context` and `pred.answer`.
+# pred = compiled_rag(my_question)
+pred = inject_additional_attributes(lambda: compiled_rag(my_question), {'experiment': 'experiment 6', 'description': 'trying additional stuff', 'run_id': 'run_1'})
+# compiled_rag.save('compiled_rag_v1.json')
+
+# Print the contexts and the answer.
+print(f"Question: {my_question}")
+print(f"Predicted Answer: {pred.answer}")
+print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}")
+
+# print("Inspecting the history of the optimizer:")
+# turbo.inspect_history(n=1)
+
+from dspy.evaluate import Evaluate
+
+
+def validate_answer(example, pred, trace=None):
+    return True
+
+
+# Set up the evaluator, which can be used multiple times.
+evaluate = Evaluate(devset=devset, metric=validate_answer, num_threads=4, display_progress=True, display_table=0)
+
+
+# Evaluate our `optimized_cot` program.
+evaluate(compiled_rag)
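
A note on the `inject_additional_attributes` call in this new example: as used above, it takes a zero-argument callable plus a dict of custom attributes and returns whatever the callable returns, so it can wrap any traced call without changing the surrounding code. A reduced, illustrative sketch of the same pattern (`run_pipeline` is a hypothetical stand-in for any traced callable):

from langtrace_python_sdk import inject_additional_attributes

result = inject_additional_attributes(
    lambda: run_pipeline(),  # deferred call; presumably executed inside an instrumented scope
    {"experiment": "experiment 6", "run_id": "run_1"},  # extra attributes attached to the trace
)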

src/examples/litellm_example/basic.py

Lines changed: 1 addition & 5 deletions
@@ -1,4 +1,3 @@
-from langtrace_python_sdk import with_langtrace_root_span, langtrace
 from dotenv import load_dotenv
 from litellm import completion, acompletion
 import litellm

@@ -8,11 +7,9 @@
 
 
 litellm.success_callback = ["langtrace"]
-langtrace.init()
 litellm.set_verbose = False
 
 
-@with_langtrace_root_span("Litellm Example OpenAI")
 def openAI(streaming=False):
     response = completion(
         model="gpt-3.5-turbo",

@@ -56,7 +53,6 @@ def anthropic(streaming=False):
         print("ERORRRR", e)
 
 
-# @with_langtrace_root_span("Litellm Example OpenAI Async Streaming")
 async def async_anthropic(streaming=False):
     response = await acompletion(
         model="claude-2.1",

@@ -93,6 +89,6 @@ def cohere(streaming=False):
 
 if __name__ == "__main__":
     # openAI()
-    anthropic(streaming=False)
+    # anthropic(streaming=False)
     cohere(streaming=True)
     # asyncio.run(async_anthropic(streaming=True))
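
The net effect of this diff: the example now leans on LiteLLM's own callback hook rather than explicit Langtrace setup. A minimal sketch of that integration path (model and prompt are illustrative; `LANGTRACE_API_KEY` is assumed to be set in the environment):

import litellm
from litellm import completion

# Registering "langtrace" as a success callback hands completed calls to Langtrace.
litellm.success_callback = ["langtrace"]

response = completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hello"}],
)
print(response)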
Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+model_list:
+  - model_name: "gpt-4" # all requests where model not in your config go to this deployment
+    litellm_params:
+      model: openai/gpt-4 # set `openai/` to use the openai route
+
+litellm_settings:
+  success_callback: ["langtrace"]
+
+environment_variables:
+  LANGTRACE_API_KEY: "fake-api-key"
Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+import openai
+from dotenv import load_dotenv
+
+load_dotenv()
+
+client = openai.OpenAI(base_url="http://0.0.0.0:4000")
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(
+    model="gpt-4",
+    messages=[
+        {"role": "user", "content": "this is a test request, write a short poem"}
+    ],
+)
+
+print(response)
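
This client assumes a LiteLLM proxy is already listening on 0.0.0.0:4000. With the YAML above saved as, say, `config.yaml` (a hypothetical filename), the proxy would typically be started with `litellm --config config.yaml`; any OpenAI-compatible client pointed at the proxy's base URL then has its calls reported to Langtrace via the `langtrace` success callback configured above.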

src/examples/openai_example/chat_completion.py

Lines changed: 19 additions & 16 deletions
@@ -9,19 +9,19 @@
 
 _ = load_dotenv(find_dotenv())
 
-langtrace.init(write_spans_to_console=True)
+langtrace.init()
 client = OpenAI()
 
 
 def api():
     response = client.chat.completions.create(
-        model="gpt-4",
+        model="o1-mini",
         messages=[
-            {"role": "system", "content": "Talk like a pirate"},
-            {"role": "user", "content": "Tell me a story in 3 sentences or less."},
+            # {"role": "system", "content": "Talk like a pirate"},
+            {"role": "user", "content": "How many r's are in strawberry?"},
         ],
-        stream=True,
-        # stream=False,
+        # stream=True,
+        stream=False,
     )
     return response
 

@@ -31,14 +31,17 @@ def chat_completion():
     response = api()
     # print(response)
     # Uncomment this for streaming
-    result = []
-    for chunk in response:
-        if chunk.choices[0].delta.content is not None:
-            content = [
-                choice.delta.content if choice.delta and choice.delta.content else ""
-                for choice in chunk.choices
-            ]
-            result.append(content[0] if len(content) > 0 else "")
-
-    # print("".join(result))
+    # result = []
+    # for chunk in response:
+    #     if chunk.choices[0].delta.content is not None:
+    #         content = [
+    #             choice.delta.content if choice.delta and choice.delta.content else ""
+    #             for choice in chunk.choices
+    #         ]
+    #         result.append(content[0] if len(content) > 0 else "")
+
+    # # print("".join(result))
+    print(response)
     return response
+
+chat_completion()
Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-LANGTRACE_REMOTE_URL = "https://langtrace.ai"
+LANGTRACE_REMOTE_URL = "https://app.langtrace.ai"

src/langtrace_python_sdk/constants/instrumentation/common.py

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@
     "LANGCHAIN_COMMUNITY": "Langchain Community",
     "LANGCHAIN_CORE": "Langchain Core",
     "LANGGRAPH": "Langgraph",
+    "LITELLM": "Litellm",
     "LLAMAINDEX": "LlamaIndex",
     "OPENAI": "OpenAI",
     "PINECONE": "Pinecone",
