# Production Configuration Example with OTLP Tracing
# This configuration enables distributed tracing with the OpenTelemetry OTLP exporter
# for production deployment with Jaeger or other OTLP-compatible backends.

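# Sentence-embedding model used for semantic similarity matching
# (backs the semantic cache and tool selection below)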
bert_model:
  model_id: sentence-transformers/all-MiniLM-L12-v2
  threshold: 0.6
  use_cpu: true

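# Semantic cache: serves a cached response when a prompt's similarity to a
# prior entry exceeds similarity_threshold; entries expire after ttl_seconds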
semantic_cache:
  enabled: true
  backend_type: "memory"
  similarity_threshold: 0.8
  max_entries: 1000
  ttl_seconds: 3600
  eviction_policy: "fifo"

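# Automatic tool selection: attaches the top_k most similar tools from tools_db_path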
tools:
  enabled: true
  top_k: 3
  similarity_threshold: 0.2
  tools_db_path: "config/tools_db.json"
  fallback_to_empty: true

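# Jailbreak / prompt-injection detection applied to incoming requests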
prompt_guard:
  enabled: true
  use_modernbert: true
  model_id: "models/jailbreak_classifier_modernbert-base_model"
  threshold: 0.7
  use_cpu: true
  jailbreak_mapping_path: "models/jailbreak_classifier_modernbert-base_model/jailbreak_type_mapping.json"

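# Backend vLLM servers the router can dispatch requests to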
vllm_endpoints:
  - name: "endpoint1"
    address: "127.0.0.1"
    port: 8000
    models:
      - "openai/gpt-oss-20b"
    weight: 1

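# Per-model settings: reasoning family, preferred endpoints, and PII policy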
model_config:
  "openai/gpt-oss-20b":
    reasoning_family: "gpt-oss"
    preferred_endpoints: ["endpoint1"]
    pii_policy:
      allow_by_default: true

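# Classifiers for category routing and PII detection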
classifier:
  category_model:
    model_id: "models/category_classifier_modernbert-base_model"
    use_modernbert: true
    threshold: 0.6
    use_cpu: true
    category_mapping_path: "models/category_classifier_modernbert-base_model/category_mapping.json"
  pii_model:
    model_id: "models/pii_classifier_modernbert-base_presidio_token_model"
    use_modernbert: true
    threshold: 0.7
    use_cpu: true
    pii_mapping_path: "models/pii_classifier_modernbert-base_presidio_token_model/pii_type_mapping.json"

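# Routing categories: queries classified into a category are routed to the
# highest-scoring model; use_reasoning toggles reasoning mode per category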
categories:
  - name: math
    system_prompt: "You are a mathematics expert. Provide step-by-step solutions."
    model_scores:
      - model: openai/gpt-oss-20b
        score: 1.0
        use_reasoning: true
  - name: other
    system_prompt: "You are a helpful assistant."
    model_scores:
      - model: openai/gpt-oss-20b
        score: 0.7
        use_reasoning: false

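# Fallback model when no category-specific route applies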
default_model: openai/gpt-oss-20b

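# How reasoning mode is expressed for each model family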
reasoning_families:
  gpt-oss:
    type: "reasoning_effort"
    parameter: "reasoning_effort"

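# Effort level applied when a category enables reasoning (low/medium/high)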
default_reasoning_effort: high

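# Batch classification API limits and metrics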
api:
  batch_classification:
    max_batch_size: 100
    concurrency_threshold: 5
    max_concurrency: 8
    metrics:
      enabled: true

# Observability Configuration - Production with OTLP
observability:
  tracing:
    # Enable distributed tracing for production monitoring
    enabled: true

    # OpenTelemetry provider (standard implementation)
    provider: "opentelemetry"

    exporter:
      # OTLP exporter for Jaeger, Tempo, or other OTLP backends
      type: "otlp"

      # OTLP gRPC endpoint (default port: 4317)
      # For Jaeger: localhost:4317
      # For Grafana Tempo: tempo:4317
      # For Datadog Agent: trace-agent:4317
      endpoint: "jaeger:4317"

      # Use insecure connection (set to false in production with TLS)
      insecure: true
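
      # Local-debugging alternative (assumption; verify against the project's
      # observability docs): a console exporter that prints spans instead of
      # shipping them over OTLP.
      # type: "stdout"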

    sampling:
      # Probabilistic sampling for production (reduces overhead)
      type: "probabilistic"

      # Sample 10% of requests (adjust based on traffic volume)
      # Higher rates (0.5-1.0) for low traffic
      # Lower rates (0.01-0.1) for high traffic
      rate: 0.1

    resource:
      # Service name for trace identification
      service_name: "vllm-semantic-router"

      # Version for tracking deployments
      service_version: "v0.1.0"

      # Environment identifier
      deployment_environment: "production"
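
# To receive these traces locally, a Jaeger all-in-one container can expose the
# OTLP gRPC port and the query UI (generic example, not project-specific):
#   docker run -d --name jaeger \
#     -e COLLECTOR_OTLP_ENABLED=true \
#     -p 16686:16686 -p 4317:4317 \
#     jaegertracing/all-in-one:latest
# The Jaeger UI is then available at http://localhost:16686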