@@ -543,6 +543,162 @@ async def get_with_fallback(key):
543543 )
544544```
545545
546+ ## Advanced Caching Patterns *(v0.3.0)*
547+
548+ YokedCache 0.3.0 introduces powerful advanced caching patterns designed for high-performance, production-ready applications.
549+
550+ ### HTTP Response Middleware
551+
552+ Add HTTP caching middleware to FastAPI applications for automatic ETag and Cache-Control header management:
553+
554+ ```python
555+ from fastapi import FastAPI
556+ from yokedcache import YokedCache
557+ from yokedcache.middleware import HTTPCacheMiddleware
558+
559+ app = FastAPI()
560+ cache = YokedCache()
561+
562+ # Add HTTP caching middleware
563+ app.add_middleware(
564+     HTTPCacheMiddleware,
565+     cache=cache,
566+     default_ttl=300,
567+     include_query=False,
568+     cache_control="public, max-age=300",
569+ )
570+
571+ @app.get("/api/users/{user_id}")
572+ async def get_user(user_id: int):
573+     # Response automatically cached with ETag headers
574+     # Returns 304 Not Modified for unchanged data
575+     return {"id": user_id, "name": "John Doe"}
576+ ```
577+
578+ ### Single-Flight Protection
579+
580+ Prevent cache stampede by ensuring only one request computes a value while others wait:
581+
582+ ```python
583+ from yokedcache import YokedCache, CacheConfig
584+
585+ config = CacheConfig(
586+     redis_url="redis://localhost:6379",
587+     enable_single_flight=True,
588+ )
589+ cache = YokedCache(config)
590+
591+ async def expensive_computation():
592+     # This will only run once, even with concurrent requests
593+     await asyncio.sleep(5)
594+     return compute_expensive_data()
595+
596+ # Multiple concurrent requests - only one computation runs
597+ results = await asyncio.gather(
598+     cache.fetch_or_set("expensive_key", expensive_computation, ttl=300),
599+     cache.fetch_or_set("expensive_key", expensive_computation, ttl=300),
600+     cache.fetch_or_set("expensive_key", expensive_computation, ttl=300),
601+ )
602+ # All results are identical, but computation only ran once
603+ ```
604+
605+ ### Stale-While-Revalidate (SWR)
606+
607+ Serve stale cached data immediately while refreshing in the background:
608+
609+ ```python
610+ from yokedcache import YokedCache, CacheConfig
611+
612+ config = CacheConfig(
613+     redis_url="redis://localhost:6379",
614+     enable_stale_while_revalidate=True,
615+ )
616+ cache = YokedCache(config)
617+
618+ # Function that returns stale data immediately and refreshes in background
619+ async def get_user_data(user_id: int):
620+     return await cache.fetch_or_set(
621+         f"user:{user_id}",
622+         lambda: fetch_user_from_db(user_id),
623+         ttl=60,
624+     )
625+ ```
626+
627+ ### Stale-If-Error
628+
629+ Fallback to cached data when the primary data source fails:
630+
631+ ```python
632+ from yokedcache import YokedCache, CacheConfig
633+
634+ config = CacheConfig(
635+     redis_url="redis://localhost:6379",
636+     enable_stale_if_error=True,
637+     stale_if_error_ttl=120,  # Serve stale for up to 2 minutes after TTL expires
638+ )
639+ cache = YokedCache(config)
640+
641+ async def get_data_with_fallback(key: str):
642+     try:
643+         return await fetch_fresh_data(key)
644+     except Exception:
645+         # Returns stale cached data if available
646+         return await cache.get(key, default=None)
647+ ```
648+
649+ ### Per-Prefix Backend Routing
650+
651+ Route different cache keys to different backends based on key prefixes:
652+
653+ ```python
654+ from yokedcache import YokedCache
655+ from yokedcache.backends import DiskCacheBackend, RedisBackend
656+
657+ cache = YokedCache()
658+
659+ # Setup prefix-based routing
660+ cache.setup_prefix_routing()
661+
662+ # Route different data types to different backends
663+ cache.add_backend_route("user:", RedisBackend("redis://localhost:6379/0"))
664+ cache.add_backend_route("temp:", DiskCacheBackend("/tmp/cache"))
665+ cache.add_backend_route("session:", RedisBackend("redis://localhost:6379/1"))
666+
667+ # Data automatically routed based on key prefix
668+ await cache.set("user:123", user_data)  # -> Redis DB 0
669+ await cache.set("temp:abc", temp_data)  # -> Disk cache
670+ await cache.set("session:xyz", session)  # -> Redis DB 1
671+ ```
672+
673+ ### OpenTelemetry Distributed Tracing
674+
675+ Enable distributed tracing for cache operations:
676+
677+ ```python
678+ from yokedcache import YokedCache, CacheConfig
679+ from yokedcache.tracing import initialize_tracing
680+
681+ # Initialize global tracing
682+ initialize_tracing(
683+     service_name="my-api",
684+     enabled=True,
685+     sample_rate=1.0,
686+ )
687+
688+ config = CacheConfig(
689+     redis_url="redis://localhost:6379",
690+     enable_tracing=True,
691+ )
692+ cache = YokedCache(config)
693+
694+ # All cache operations automatically traced
695+ async with cache._tracer.trace_operation("get_user", "user:123"):
696+     user = await cache.get("user:123")
697+     # Span includes timing, hit/miss, backend info
698+ ```
699+
700+ These advanced patterns enable sophisticated caching strategies for high-performance, production applications.
701+
546702## Performance Optimization Patterns
547703
548704### Connection Reuse
0 commit comments