
Commit 5ef9036

Use environment variables
1 parent 0784f03 commit 5ef9036

2 files changed: 22 additions & 5 deletions

.env.example

Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
+# Redis Configuration
+REDIS_URL=redis://localhost:6379
+
+# Ollama/Llama Server Configuration
+LLAMA_SERVER_URL=http://localhost:11434
+OLLAMA_HOST=http://localhost:11434  # Used by ollama Python library
+DEFAULT_MODEL=deepseek-coder-v2:latest
+
+# OpenTelemetry Configuration
+OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317
+
+# Optional: Llama.cpp CLI configuration (for local llama.cpp usage)
+# LLAMA_CPP_CLI=/data1/llama.cpp/bin/llama-cli
+# GGUF_DIR=/data1/GGUF
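
For context, a minimal sketch (not part of this commit) of how a .env file copied from this template is typically consumed with python-dotenv. The variable names and fallback defaults mirror .env.example; the exact consumers of REDIS_URL and LLAMA_SERVER_URL are assumptions, since this diff only shows the OpenTelemetry variable being read.

# Sketch only: reading the .env.example variables with python-dotenv.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory, if present

REDIS_URL = os.getenv('REDIS_URL', 'redis://localhost:6379')
LLAMA_SERVER_URL = os.getenv('LLAMA_SERVER_URL', 'http://localhost:11434')
DEFAULT_MODEL = os.getenv('DEFAULT_MODEL', 'deepseek-coder-v2:latest')
OTEL_EXPORTER_OTLP_ENDPOINT = os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT', 'http://localhost:4317')
# OLLAMA_HOST is picked up directly by the ollama Python library (per the
# comment in .env.example), so it does not need to be read here.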

ai_server/server.py

Lines changed: 8 additions & 5 deletions
@@ -26,15 +26,18 @@
 # Load environment variables from .env file
 load_dotenv()

+# OpenTelemetry endpoint configuration
+OTEL_EXPORTER_OTLP_ENDPOINT = os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT', 'http://localhost:4317')
+
 # Configure OpenTelemetry - Shared Resource (identifies this service)
 resource = Resource.create({"service.name": "ai-server"})

 # ========== TRACES CONFIGURATION ==========
 # TracerProvider: Factory for creating tracers (for distributed tracing)
 tracer_provider = TracerProvider(resource=resource)

-# OTLP Trace Exporter: Sends traces to collector at localhost:4317
-otlp_trace_exporter = OTLPSpanExporter(endpoint="http://localhost:4317", insecure=True)
+# OTLP Trace Exporter: Sends traces to collector
+otlp_trace_exporter = OTLPSpanExporter(endpoint=OTEL_EXPORTER_OTLP_ENDPOINT, insecure=True)
 span_processor = BatchSpanProcessor(otlp_trace_exporter)
 tracer_provider.add_span_processor(span_processor)

@@ -43,8 +46,8 @@
 tracer = trace.get_tracer("ai-server.tracer")

 # ========== METRICS CONFIGURATION ==========
-# OTLP Metric Exporter: Sends metrics to collector at localhost:4317
-otlp_metric_exporter = OTLPMetricExporter(endpoint="http://localhost:4317", insecure=True)
+# OTLP Metric Exporter: Sends metrics to collector
+otlp_metric_exporter = OTLPMetricExporter(endpoint=OTEL_EXPORTER_OTLP_ENDPOINT, insecure=True)

 # PeriodicExportingMetricReader: Collects and exports metrics every 10 seconds
 metric_reader = PeriodicExportingMetricReader(
@@ -282,7 +285,7 @@ def authenticate() -> str:
 @app.route('/chat', methods=['POST'])
 def chat():
     """Handle chat request with optional llama_mode and system prompt parameters."""
-    # authenticate()
+    authenticate()
     model = request.form.get('model', DEFAULT_MODEL)
     content = request.form.get('content', '')
     llama_mode = request.form.get('llama_mode', 'cli')
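
With the endpoint read from the environment, the OTLP exporters can be pointed at a non-local collector without touching server.py. A small sketch follows; the 'otel-collector' hostname is illustrative. Any value already present in the process environment, or supplied via .env, replaces the localhost fallback (by default, load_dotenv() does not overwrite variables that are already set).

# Sketch only: overriding the collector endpoint before the server module
# is imported; 'otel-collector' is a hypothetical hostname.
import os

os.environ['OTEL_EXPORTER_OTLP_ENDPOINT'] = 'http://otel-collector:4317'

# Importing/starting the server after this point configures both the
# OTLPSpanExporter and the OTLPMetricExporter against the endpoint above
# instead of the localhost:4317 fallback.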
