Skip to content

Commit 0784f03

Browse files
committed
Add OpenTelemetry pipeline
1 parent f60117e commit 0784f03

File tree

8 files changed

+839
-8
lines changed

8 files changed

+839
-8
lines changed

ai_server/server.py

Lines changed: 28 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,12 @@
1111
import requests
1212
from dotenv import load_dotenv
1313
from flask import Flask, abort, jsonify, request
14-
from opentelemetry import trace
14+
from opentelemetry import metrics, trace
15+
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
1516
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
1617
from opentelemetry.instrumentation.flask import FlaskInstrumentor
18+
from opentelemetry.sdk.metrics import MeterProvider
19+
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
1720
from opentelemetry.sdk.resources import Resource
1821
from opentelemetry.sdk.trace import TracerProvider
1922
from opentelemetry.sdk.trace.export import BatchSpanProcessor
# Load environment variables from .env file
load_dotenv()

# Configure OpenTelemetry - Shared Resource (identifies this service)
resource = Resource.create({"service.name": "ai-server"})

# Single collector endpoint shared by both signal pipelines (traces + metrics);
# hoisted so the address is defined in exactly one place.
# NOTE(review): the OTLP exporters also honor OTEL_EXPORTER_OTLP_ENDPOINT when
# no endpoint is passed — confirm whether env-driven config is preferred here.
OTLP_ENDPOINT = "http://localhost:4317"

# ========== TRACES CONFIGURATION ==========
# TracerProvider: Factory for creating tracers (for distributed tracing)
tracer_provider = TracerProvider(resource=resource)

# OTLP Trace Exporter: Sends traces to the collector over gRPC.
# insecure=True: plaintext channel — acceptable for a localhost collector only.
otlp_trace_exporter = OTLPSpanExporter(endpoint=OTLP_ENDPOINT, insecure=True)
span_processor = BatchSpanProcessor(otlp_trace_exporter)
tracer_provider.add_span_processor(span_processor)

# Set the global tracer provider (FlaskInstrumentor will use this)
trace.set_tracer_provider(tracer_provider)
tracer = trace.get_tracer("ai-server.tracer")

# ========== METRICS CONFIGURATION ==========
# OTLP Metric Exporter: Sends metrics to the same collector endpoint.
otlp_metric_exporter = OTLPMetricExporter(endpoint=OTLP_ENDPOINT, insecure=True)

# PeriodicExportingMetricReader: Collects and exports metrics on a fixed cadence.
metric_reader = PeriodicExportingMetricReader(
    exporter=otlp_metric_exporter,
    export_interval_millis=10000,  # Export every 10 seconds
)

# MeterProvider: Factory for creating meters (for metrics collection)
meter_provider = MeterProvider(resource=resource, metric_readers=[metric_reader])

# Set the global meter provider (FlaskInstrumentor will use this for HTTP metrics)
metrics.set_meter_provider(meter_provider)

# Create the Flask app AFTER both providers are registered, so the
# auto-instrumentation picks up the configured tracer/meter providers.
app = Flask('AI server')
FlaskInstrumentor().instrument_app(app)

@@ -262,7 +282,7 @@ def authenticate() -> str:
262282
@app.route('/chat', methods=['POST'])
263283
def chat():
264284
"""Handle chat request with optional llama_mode and system prompt parameters."""
265-
authenticate()
285+
# authenticate()
266286
model = request.form.get('model', DEFAULT_MODEL)
267287
content = request.form.get('content', '')
268288
llama_mode = request.form.get('llama_mode', 'cli')

0 commit comments

Comments
 (0)