|
import os

import requests
from dotenv import load_dotenv
from flask import Flask, abort, jsonify, request
from opentelemetry import metrics, trace
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
# Load environment variables from .env file (must run before any os.getenv below)
load_dotenv()

# Configure OpenTelemetry - Shared Resource (identifies this service)
resource = Resource.create({"service.name": "ai-server"})

# Collector endpoint is shared by the trace and metric pipelines. It was
# previously hard-coded in two places; use the standard OTLP env var with the
# same default so behavior is unchanged unless the operator overrides it.
OTLP_ENDPOINT = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317")

# ========== TRACES CONFIGURATION ==========
# TracerProvider: Factory for creating tracers (for distributed tracing)
tracer_provider = TracerProvider(resource=resource)

# OTLP Trace Exporter: Sends traces to the collector over gRPC
otlp_trace_exporter = OTLPSpanExporter(endpoint=OTLP_ENDPOINT, insecure=True)
span_processor = BatchSpanProcessor(otlp_trace_exporter)
tracer_provider.add_span_processor(span_processor)

# Set the global tracer provider (FlaskInstrumentor will use this)
trace.set_tracer_provider(tracer_provider)
tracer = trace.get_tracer("ai-server.tracer")

# ========== METRICS CONFIGURATION ==========
# OTLP Metric Exporter: Sends metrics to the collector over gRPC
otlp_metric_exporter = OTLPMetricExporter(endpoint=OTLP_ENDPOINT, insecure=True)

# PeriodicExportingMetricReader: Collects and exports metrics every 10 seconds
metric_reader = PeriodicExportingMetricReader(
    exporter=otlp_metric_exporter, export_interval_millis=10_000
)

# MeterProvider: Factory for creating meters (for metrics collection)
meter_provider = MeterProvider(resource=resource, metric_readers=[metric_reader])

# Set the global meter provider (FlaskInstrumentor will use this for HTTP metrics)
metrics.set_meter_provider(meter_provider)

# Flask app, auto-instrumented so every request produces a span (and, with the
# meter provider above, HTTP metrics).
app = Flask('AI server')
FlaskInstrumentor().instrument_app(app)
42 | 62 |
|
@@ -262,7 +282,7 @@ def authenticate() -> str: |
262 | 282 | @app.route('/chat', methods=['POST']) |
263 | 283 | def chat(): |
264 | 284 | """Handle chat request with optional llama_mode and system prompt parameters.""" |
265 | | - authenticate() |
| 285 | + # authenticate() |
266 | 286 | model = request.form.get('model', DEFAULT_MODEL) |
267 | 287 | content = request.form.get('content', '') |
268 | 288 | llama_mode = request.form.get('llama_mode', 'cli') |
|
0 commit comments