package main

import (
	"fmt"
	"log"

	"github.com/vllm-project/semantic-router/src/semantic-router/pkg/cache"
)

func main() {
	// Example: setting up a Redis cache backend
	fmt.Println("Redis Cache Backend Example")
	fmt.Println("===========================")

	// Configuration for the Redis cache
	config := cache.CacheConfig{
		BackendType:         cache.RedisCacheType,
		Enabled:             true,
		SimilarityThreshold: 0.85,
		TTLSeconds:          3600, // Entries expire after 1 hour
		BackendConfigPath:   "config/semantic-cache/redis.yaml",
	}
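
	// NOTE: BackendConfigPath points to a YAML file holding the Redis
	// connection details. Its exact schema is owned by the cache package;
	// the keys below are illustrative only, not authoritative:
	//
	//	# config/semantic-cache/redis.yaml (hypothetical layout)
	//	address: "localhost:6379"
	//	password: ""
	//	database: 0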

	// Create the cache backend
	fmt.Println("\n1. Creating Redis cache backend...")
	cacheBackend, err := cache.NewCacheBackend(config)
	if err != nil {
		log.Fatalf("Failed to create cache backend: %v", err)
	}
	defer cacheBackend.Close()

	fmt.Println("✓ Redis cache backend created successfully")

	// Example cache operations
	model := "gpt-4"
	query := "What is the capital of France?"
	requestID := "req-12345"
	requestBody := []byte(`{"model":"gpt-4","messages":[{"role":"user","content":"What is the capital of France?"}]}`)
	responseBody := []byte(`{"choices":[{"message":{"content":"The capital of France is Paris."}}]}`)

	// Add an entry to the cache
	fmt.Println("\n2. Adding entry to cache...")
	err = cacheBackend.AddEntry(requestID, model, query, requestBody, responseBody)
	if err != nil {
		log.Fatalf("Failed to add entry: %v", err)
	}
	fmt.Println("✓ Entry added to cache")

	// Search for a similar entry
	fmt.Println("\n3. Searching for similar query...")
	similarQuery := "What's the capital city of France?"
	cachedResponse, found, err := cacheBackend.FindSimilar(model, similarQuery)
	if err != nil {
		log.Fatalf("Failed to search cache: %v", err)
	}

	if found {
		fmt.Println("✓ Cache HIT! Found similar query")
		fmt.Printf("  Cached response: %s\n", string(cachedResponse))
	} else {
		fmt.Println("✗ Cache MISS - no similar query found")
	}
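
	// FindSimilar matches on semantic similarity rather than exact text: the
	// query above is a paraphrase of the cached one, so it should score at
	// or above the configured SimilarityThreshold (0.85) and produce a hit.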

	// Get cache statistics
	fmt.Println("\n4. Cache Statistics:")
	stats := cacheBackend.GetStats()
	fmt.Printf("  Total Entries: %d\n", stats.TotalEntries)
	fmt.Printf("  Hits: %d\n", stats.HitCount)
	fmt.Printf("  Misses: %d\n", stats.MissCount)
	fmt.Printf("  Hit Ratio: %.2f%%\n", stats.HitRatio*100)

	// Example with a custom threshold
	fmt.Println("\n5. Searching with custom threshold...")
	strictQuery := "Paris is the capital of which country?"
	cachedResponse, found, err = cacheBackend.FindSimilarWithThreshold(model, strictQuery, 0.75)
	if err != nil {
		log.Fatalf("Failed to search cache: %v", err)
	}

	if found {
		fmt.Println("✓ Cache HIT with threshold 0.75")
		fmt.Printf("  Cached response: %s\n", string(cachedResponse))
	} else {
		fmt.Println("✗ Cache MISS with threshold 0.75")
	}
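
	// A lower threshold (0.75 here vs. the configured 0.85) is more
	// permissive: it will match looser paraphrases, at the risk of serving a
	// response that was cached for a genuinely different question.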

	// Example: pending request workflow
	fmt.Println("\n6. Pending Request Workflow:")
	newRequestID := "req-67890"
	newQuery := "What is machine learning?"
	newRequestBody := []byte(`{"model":"gpt-4","messages":[{"role":"user","content":"What is machine learning?"}]}`)

	fmt.Println("  Adding pending request...")
	err = cacheBackend.AddPendingRequest(newRequestID, model, newQuery, newRequestBody)
	if err != nil {
		log.Fatalf("Failed to add pending request: %v", err)
	}
	fmt.Println("  ✓ Pending request added")
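
	// Pending entries let the cache track in-flight requests: the query is
	// registered before the upstream call and the response is attached
	// afterwards by request ID. (Whether the backend also de-duplicates
	// concurrent identical requests depends on the cache implementation.)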

	// Simulate receiving a response from the LLM
	newResponseBody := []byte(`{"choices":[{"message":{"content":"Machine learning is a subset of AI..."}}]}`)

	fmt.Println("  Updating with response...")
	err = cacheBackend.UpdateWithResponse(newRequestID, newResponseBody)
	if err != nil {
		log.Fatalf("Failed to update response: %v", err)
	}
	fmt.Println("  ✓ Response updated")

	// Verify the entry is now cached
	cachedResponse, found, err = cacheBackend.FindSimilar(model, newQuery)
	if err != nil {
		log.Fatalf("Failed to search cache: %v", err)
	}

	if found {
		fmt.Println("  ✓ Entry is now in cache and searchable")
	}

	// Final statistics
	fmt.Println("\n7. Final Statistics:")
	stats = cacheBackend.GetStats()
	fmt.Printf("  Total Entries: %d\n", stats.TotalEntries)
	fmt.Printf("  Hits: %d\n", stats.HitCount)
	fmt.Printf("  Misses: %d\n", stats.MissCount)
	fmt.Printf("  Hit Ratio: %.2f%%\n", stats.HitRatio*100)

	fmt.Println("\n✓ Example completed successfully!")
}
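
// To try this example against a local Redis instance (assuming the address
// in redis.yaml points at it; the Redis image and example path below are
// illustrative, not confirmed by this repository):
//
//	docker run -d -p 6379:6379 redis
//	go run ./examples/semantic-cache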