|
# Load environment variables from .env file so the os.getenv call below
# can see values defined there (must run before any getenv lookups).
load_dotenv()

# OpenTelemetry endpoint configuration.
# Falls back to the default OTLP/gRPC port (4317) on a local collector
# when OTEL_EXPORTER_OTLP_ENDPOINT is not set in the environment/.env.
OTEL_EXPORTER_OTLP_ENDPOINT = os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT', 'http://localhost:4317')

# Configure OpenTelemetry - Shared Resource (identifies this service).
# Both signals configured below (traces and metrics) report under
# service.name "ai-server".
resource = Resource.create({"service.name": "ai-server"})
31 | 34 |
|
# ========== TRACES CONFIGURATION ==========
# TracerProvider: Factory for creating tracers (for distributed tracing)
tracer_provider = TracerProvider(resource=resource)

# OTLP Trace Exporter: sends traces to the collector at
# OTEL_EXPORTER_OTLP_ENDPOINT. Only force plaintext gRPC for non-TLS
# endpoints: an https:// endpoint implies TLS, and hardcoding
# insecure=True against it would be wrong. For the default
# http://localhost:4317 this behaves exactly as before (insecure=True).
otlp_trace_exporter = OTLPSpanExporter(
    endpoint=OTEL_EXPORTER_OTLP_ENDPOINT,
    insecure=not OTEL_EXPORTER_OTLP_ENDPOINT.startswith("https://"),
)
# BatchSpanProcessor: buffers finished spans and exports them in batches
# off the hot path, instead of one network call per span.
span_processor = BatchSpanProcessor(otlp_trace_exporter)
tracer_provider.add_span_processor(span_processor)
40 | 43 |
|
|
# Module-level tracer used by handlers to open spans; named after the
# service. NOTE(review): get_tracer is called on the global `trace` API,
# which assumes tracer_provider was registered via
# trace.set_tracer_provider in the lines elided from this view — confirm.
tracer = trace.get_tracer("ai-server.tracer")
44 | 47 |
|
# ========== METRICS CONFIGURATION ==========
# OTLP Metric Exporter: sends metrics to the collector at
# OTEL_EXPORTER_OTLP_ENDPOINT. Mirrors the trace exporter: force
# plaintext gRPC only for non-TLS endpoints, so an https:// collector
# endpoint is not broken by a hardcoded insecure=True. Unchanged
# behavior for the default http://localhost:4317.
otlp_metric_exporter = OTLPMetricExporter(
    endpoint=OTEL_EXPORTER_OTLP_ENDPOINT,
    insecure=not OTEL_EXPORTER_OTLP_ENDPOINT.startswith("https://"),
)
48 | 51 |
|
49 | 52 | # PeriodicExportingMetricReader: Collects and exports metrics every 10 seconds |
50 | 53 | metric_reader = PeriodicExportingMetricReader( |
@@ -282,7 +285,7 @@ def authenticate() -> str: |
282 | 285 | @app.route('/chat', methods=['POST']) |
283 | 286 | def chat(): |
284 | 287 | """Handle chat request with optional llama_mode and system prompt parameters.""" |
285 | | - # authenticate() |
| 288 | + authenticate() |
286 | 289 | model = request.form.get('model', DEFAULT_MODEL) |
287 | 290 | content = request.form.get('content', '') |
288 | 291 | llama_mode = request.form.get('llama_mode', 'cli') |
|
0 commit comments