Skip to content

Commit ad36f37

Browse files
committed
add graphlit instrumentation
1 parent 6ebc681 commit ad36f37

File tree

10 files changed

+289
-1
lines changed

10 files changed

+289
-1
lines changed

src/.DS_Store

6 KB
Binary file not shown.
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
import asyncio
2+
from examples.graphlit_example.conversation import complete
3+
from langtrace_python_sdk import with_langtrace_root_span
4+
5+
6+
class GraphlitRunner:
    """Example runner that executes the Graphlit conversation demo under a
    Langtrace root span."""

    @with_langtrace_root_span("GraphlitRun")
    def run(self):
        """Drive the async `complete` example to completion."""
        demo_coroutine = complete()
        asyncio.run(demo_coroutine)
Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
import asyncio

from dotenv import find_dotenv, load_dotenv
from langtrace_python_sdk import langtrace
from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
from openai import OpenAI
from graphlit import Graphlit
from graphlit_api import input_types, enums, exceptions

# Load environment variables (API keys for Graphlit / OpenAI / Langtrace).
_ = load_dotenv(find_dotenv())

# BUG FIX: langtrace.init() was called twice; initialize tracing exactly once,
# before any instrumented clients are constructed.
langtrace.init()

graphlit = Graphlit()
client = OpenAI()
18+
19+
20+
async def complete():
    """Ingest a URI into Graphlit, create a specification and conversation,
    stream a completion from OpenAI for the formatted prompt, and store the
    completion back into the conversation.

    Each step is guarded: if any intermediate id/message is missing the
    remaining steps are skipped. Graphlit API errors are caught and printed
    rather than raised.
    """
    uri = "https://themixchief.com"
    try:
        # Synchronous ingest so the content is fully processed before prompting.
        ingest_response = await graphlit.client.ingest_uri(uri=uri, is_synchronous=True)
        content_id = ingest_response.ingest_uri.id if ingest_response.ingest_uri is not None else None

        if content_id is not None:
            print(f'Ingested content [{content_id}]:')

            prompt = "In 1 sentence, what does mixchief do."

            model = "gpt-4o"

            specification_input = input_types.SpecificationInput(
                name=f"OpenAI [{str(enums.OpenAIModels.GPT4O_128K)}]",
                type=enums.SpecificationTypes.COMPLETION,
                serviceType=enums.ModelServiceTypes.OPEN_AI,
                openAI=input_types.OpenAIModelPropertiesInput(
                    model=enums.OpenAIModels.GPT4O_128K,
                )
            )

            specification_response = await graphlit.client.create_specification(specification_input)
            specification_id = specification_response.create_specification.id if specification_response.create_specification is not None else None

            if specification_id is not None:
                print(f'Created specification [{specification_id}].')

                conversation_input = input_types.ConversationInput(
                    name="Conversation",
                    specification=input_types.EntityReferenceInput(
                        id=specification_id
                    ),
                )

                conversation_response = await graphlit.client.create_conversation(conversation_input)
                conversation_id = conversation_response.create_conversation.id if conversation_response.create_conversation is not None else None

                if conversation_id is not None:
                    print(f'Created conversation [{conversation_id}].')

                    # Let Graphlit build the RAG-formatted prompt for the conversation.
                    format_response = await graphlit.client.format_conversation(prompt, conversation_id)
                    formatted_message = format_response.format_conversation.message.message if format_response.format_conversation is not None and format_response.format_conversation.message is not None else None

                    if formatted_message is not None:
                        stream_response = client.chat.completions.create(
                            model=model,
                            messages=[{"role": "user", "content": formatted_message}],
                            temperature=0.1,
                            top_p=0.2,
                            stream=True
                        )

                        completion = ""

                        # Accumulate the streamed deltas into a single string.
                        for chunk in stream_response:
                            delta = chunk.choices[0].delta.content

                            if delta is not None:
                                completion += delta

                        # BUG FIX: `completion` starts as "" and is never None, so the
                        # original `is not None` check was always true; only store a
                        # non-empty completion back into the conversation.
                        if completion:
                            # NOTE: stores completion back into conversation
                            complete_response = await graphlit.client.complete_conversation(completion, conversation_id)

                            print(complete_response.complete_conversation.message.message if complete_response.complete_conversation is not None and complete_response.complete_conversation.message is not None else "None")
    except exceptions.GraphQLClientError as e:
        print(f"Graphlit API error: {e}")

src/langtrace_python_sdk/constants/instrumentation/common.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@
4040
"AWS_BEDROCK": "AWS Bedrock",
4141
"CEREBRAS": "Cerebras",
4242
"MILVUS": "Milvus",
43+
"GRAPHLIT": "Graphlit",
4344
}
4445

4546
LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY = "langtrace_additional_attributes"

src/langtrace_python_sdk/instrumentation/__init__.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,10 @@
2626
from .qdrant import QdrantInstrumentation
2727
from .vertexai import VertexAIInstrumentation
2828
from .weaviate import WeaviateInstrumentation
29+
from .cerebras import CerebrasInstrumentation
30+
from .milvus import MilvusInstrumentation
31+
from .google_genai import GoogleGenaiInstrumentation
32+
from .graphlit import GraphlitInstrumentation
2933

3034
__all__ = [
3135
"AnthropicInstrumentation",
@@ -56,4 +60,5 @@
5660
"MilvusInstrumentation",
5761
"GoogleGenaiInstrumentation",
5862
"CrewaiToolsInstrumentation",
63+
"GraphlitInstrumentation",
5964
]
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from .instrumentation import GraphlitInstrumentation
2+
3+
__all__ = ["GraphlitInstrumentation"]
Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
2+
from opentelemetry.trace import get_tracer
3+
from wrapt import wrap_function_wrapper as _W
4+
from typing import Collection
5+
from importlib_metadata import version as v
6+
from .patch import patch_graphlit_operation
7+
8+
class GraphlitInstrumentation(BaseInstrumentor):
    """Instrumentor that wraps Graphlit client operations with tracing spans."""

    # Graphlit Client methods to wrap with tracing.
    _OPERATIONS = (
        "ingest_uri",
        "create_feed",
        "create_specification",
        "create_conversation",
        "format_conversation",
        "complete_conversation",
        "prompt_conversation",
    )

    def instrumentation_dependencies(self) -> Collection[str]:
        """Declare the package this instrumentation targets."""
        return ["graphlit-client >= 1.0.0"]

    def _instrument(self, **kwargs):
        """Wrap each Graphlit Client operation with a tracing patch.

        BUG FIX: the original wrapped all operations inside a single
        try/except, so one failed wrap silently skipped every remaining
        operation. Wrapping is now best-effort per operation, so operations
        missing in older client versions are skipped individually.
        """
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, "", tracer_provider)
        version = v("graphlit-client")
        for operation in self._OPERATIONS:
            try:
                _W(
                    "graphlit.graphlit",
                    f"Client.{operation}",
                    patch_graphlit_operation(operation, version, tracer),
                )
            except Exception:
                # Best-effort: skip operations that cannot be wrapped
                # instead of aborting instrumentation entirely.
                pass

    def _uninstrument(self, **kwargs):
        # No unpatching implemented; wrappers remain for process lifetime.
        pass
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
import json
2+
from importlib_metadata import version as v
3+
from langtrace.trace_attributes import FrameworkSpanAttributes
4+
from opentelemetry import baggage
5+
from opentelemetry.trace import Span, SpanKind, Tracer
6+
from opentelemetry.trace.status import Status, StatusCode
7+
8+
from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
9+
from langtrace_python_sdk.constants.instrumentation.common import (
10+
LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
11+
SERVICE_PROVIDERS,
12+
)
13+
from langtrace_python_sdk.utils.llm import set_span_attributes
14+
from langtrace_python_sdk.utils.misc import serialize_args, serialize_kwargs
15+
16+
17+
def patch_graphlit_operation(operation_name, version, tracer: Tracer):
    """Return an async wrapper that traces a single Graphlit client operation.

    The wrapper opens a CLIENT span named ``graphlit.<operation_name>``,
    records SDK/service attributes plus serialized call arguments, awaits the
    wrapped coroutine, and attaches operation-specific result attributes.
    Exceptions are recorded on the span and re-raised.
    """
    # Conversation-style operations get dedicated attribute extraction.
    conversation_ops = (
        "complete_conversation",
        "prompt_conversation",
        "format_conversation",
    )

    async def traced_method(wrapped, instance, args, kwargs):
        provider = SERVICE_PROVIDERS["GRAPHLIT"]
        extra = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)

        base_attributes = {
            "langtrace.sdk.name": "langtrace-python-sdk",
            "langtrace.service.name": provider,
            "langtrace.service.type": "framework",
            "langtrace.service.version": version,
            "langtrace.version": v(LANGTRACE_SDK_NAME),
        }
        if extra is not None:
            base_attributes.update(extra)

        # NOTE(review): keys say "langchain" but this is the Graphlit patch —
        # preserved verbatim; confirm whether these key names are intentional.
        base_attributes["langchain.metadata"] = serialize_kwargs(**kwargs)
        base_attributes["langchain.inputs"] = serialize_args(*args)

        attributes = FrameworkSpanAttributes(**base_attributes)

        with tracer.start_as_current_span(
            name=f"graphlit.{operation_name}", kind=SpanKind.CLIENT
        ) as span:
            try:
                set_span_attributes(span, attributes)
                result = await wrapped(*args, **kwargs)

                if result:
                    operation_result = json.loads(result.model_dump_json())[operation_name]
                    if operation_name in conversation_ops:
                        set_graphlit_conversation_attributes(span, operation_result)
                    else:
                        for key, value in operation_result.items():
                            span.set_attribute(f"graphlit.{operation_name}.{key}", str(value))

                span.set_status(Status(StatusCode.OK))
                return result

            except Exception as err:
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method
60+
61+
62+
def set_graphlit_conversation_attributes(span: Span, response):
    """Record conversation id and message fields from a Graphlit conversation
    response onto the span.

    Returns silently when the response lacks the expected structure.
    """
    if not response or "message" not in response:
        return

    # BUG FIX: guard the 'conversation' lookup — the original indexed
    # response['conversation']['id'] unconditionally and raised KeyError on a
    # partial response. Also dropped the needless f-prefix on the constant key.
    conversation = response.get("conversation")
    if conversation and "id" in conversation:
        span.set_attribute("graphlit.conversation.id", conversation["id"])

    for key, value in response["message"].items():
        span.set_attribute(f"graphlit.conversation.{key}", str(value))

src/langtrace_python_sdk/langtrace.py

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,58 @@
2929
from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
3030
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
3131
from opentelemetry.sdk.trace import TracerProvider
32+
# BUG FIX: this section was committed with unresolved git merge-conflict
# markers (<<<<<<< HEAD / ======= / >>>>>>>), which makes the module
# unimportable. Resolved by taking the incoming side, which is a superset of
# HEAD (same export imports, reformatted, plus the exporter/instrumentation
# imports the new code requires).
from opentelemetry.sdk.trace.export import (
    BatchSpanProcessor,
    ConsoleSpanExporter,
    SimpleSpanProcessor,
)

from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
    OTLPSpanExporter as GRPCExporter,
)
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
    OTLPSpanExporter as HTTPExporter,
)
from langtrace_python_sdk.constants.exporter.langtrace_exporter import (
    LANGTRACE_REMOTE_URL,
    LANGTRACE_SESSION_ID_HEADER,
)
from langtrace_python_sdk.instrumentation import (
    AnthropicInstrumentation,
    ChromaInstrumentation,
    CohereInstrumentation,
    CrewAIInstrumentation,
    DspyInstrumentation,
    EmbedchainInstrumentation,
    GeminiInstrumentation,
    GroqInstrumentation,
    LangchainCommunityInstrumentation,
    LangchainCoreInstrumentation,
    LangchainInstrumentation,
    LanggraphInstrumentation,
    LiteLLMInstrumentation,
    LlamaindexInstrumentation,
    MistralInstrumentation,
    AWSBedrockInstrumentation,
    OllamaInstrumentor,
    OpenAIInstrumentation,
    PineconeInstrumentation,
    QdrantInstrumentation,
    AutogenInstrumentation,
    VertexAIInstrumentation,
    WeaviateInstrumentation,
    PyMongoInstrumentation,
    CerebrasInstrumentation,
    MilvusInstrumentation,
    GoogleGenaiInstrumentation,
    GraphlitInstrumentation,
)
from opentelemetry.util.re import parse_env_headers
from sentry_sdk.types import Event, Hint
3786

@@ -279,6 +328,7 @@ def init(
279328
"google-cloud-aiplatform": VertexAIInstrumentation(),
280329
"google-generativeai": GeminiInstrumentation(),
281330
"google-genai": GoogleGenaiInstrumentation(),
331+
"graphlit-client": GraphlitInstrumentation(),
282332
"mistralai": MistralInstrumentation(),
283333
"boto3": AWSBedrockInstrumentation(),
284334
"autogen": AutogenInstrumentation(),

src/run_example.py

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,10 @@
2020
"vertexai": False,
2121
"gemini": False,
2222
"mistral": False,
23-
"awsbedrock": True,
23+
"awsbedrock": False,
2424
"cerebras": False,
2525
"google_genai": False,
26+
"graphlit": True,
2627
}
2728

2829
if ENABLED_EXAMPLES["anthropic"]:
@@ -144,3 +145,9 @@
144145

145146
print(Fore.BLUE + "Running Google Genai example" + Fore.RESET)
146147
GoogleGenaiRunner().run()
148+
149+
if ENABLED_EXAMPLES["graphlit"]:
    # Imported lazily so the graphlit example's dependencies are only
    # required when this example is enabled.
    from examples.graphlit_example import GraphlitRunner

    print(Fore.BLUE + "Running Graphlit example" + Fore.RESET)
    GraphlitRunner().run()

0 commit comments

Comments
 (0)