@@ -1,6 +1,7 @@
 use crate::SieveCache;
 use std::borrow::Borrow;
 use std::collections::hash_map::DefaultHasher;
+use std::fmt;
 use std::hash::{Hash, Hasher};
 use std::sync::{Arc, Mutex, MutexGuard, PoisonError};

@@ -60,7 +61,114 @@ unsafe impl<K, V> Sync for ShardedSieveCache<K, V> |
 where
     K: Eq + Hash + Clone + Send + Sync,
     V: Send + Sync,
-{}
+{
+}
+
+impl<K, V> Default for ShardedSieveCache<K, V>
+where
+    K: Eq + Hash + Clone + Send + Sync,
+    V: Send + Sync,
+{
+    /// Creates a new sharded cache with a default capacity of 100 entries and the default number of shards.
+    ///
+    /// # Panics
+    ///
+    /// Panics if `ShardedSieveCache::new()` returns an error, which should never
+    /// happen for a non-zero capacity.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use sieve_cache::ShardedSieveCache;
+    /// # use std::default::Default;
+    /// let cache: ShardedSieveCache<String, u32> = Default::default();
+    /// assert!(cache.capacity() >= 100); // Due to shard distribution, might be slightly larger
+    /// assert_eq!(cache.num_shards(), 16); // Default shard count
+    /// ```
+    fn default() -> Self {
+        Self::new(100).expect("Failed to create cache with default capacity")
+    }
+}
+
+impl<K, V> fmt::Debug for ShardedSieveCache<K, V>
+where
+    K: Eq + Hash + Clone + Send + Sync + fmt::Debug,
+    V: Send + Sync + fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("ShardedSieveCache")
+            .field("capacity", &self.capacity())
+            .field("len", &self.len())
+            .field("num_shards", &self.num_shards)
+            .finish()
+    }
+}
+
+impl<K, V> IntoIterator for ShardedSieveCache<K, V>
+where
+    K: Eq + Hash + Clone + Send + Sync,
+    V: Clone + Send + Sync,
+{
+    type Item = (K, V);
+    type IntoIter = std::vec::IntoIter<(K, V)>;
+
+    /// Converts the cache into an iterator over its key-value pairs.
+    ///
+    /// This collects all entries into a `Vec` and returns an iterator over that `Vec`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use sieve_cache::ShardedSieveCache;
+    /// # use std::collections::HashMap;
+    /// let cache = ShardedSieveCache::new(100).unwrap();
+    /// cache.insert("key1".to_string(), "value1".to_string());
+    /// cache.insert("key2".to_string(), "value2".to_string());
+    ///
+    /// // Collect into a HashMap
+    /// let map: HashMap<_, _> = cache.into_iter().collect();
+    /// assert_eq!(map.len(), 2);
+    /// assert_eq!(map.get("key1"), Some(&"value1".to_string()));
+    /// ```
+    fn into_iter(self) -> Self::IntoIter {
+        self.entries().into_iter()
+    }
+}
+
+#[cfg(feature = "sync")]
+impl<K, V> From<crate::SyncSieveCache<K, V>> for ShardedSieveCache<K, V>
+where
+    K: Eq + Hash + Clone + Send + Sync,
+    V: Clone + Send + Sync,
+{
+    /// Creates a new sharded cache from an existing `SyncSieveCache`.
+    ///
+    /// This allows for upgrading a standard thread-safe cache to a more scalable sharded version.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # use sieve_cache::{SyncSieveCache, ShardedSieveCache};
+    /// let sync_cache = SyncSieveCache::new(100).unwrap();
+    /// sync_cache.insert("key".to_string(), "value".to_string());
+    ///
+    /// // Convert to sharded version with default sharding
+    /// let sharded_cache = ShardedSieveCache::from(sync_cache);
+    /// assert_eq!(sharded_cache.get(&"key".to_string()), Some("value".to_string()));
+    /// ```
+    fn from(sync_cache: crate::SyncSieveCache<K, V>) -> Self {
+        // Create a new sharded cache with the same capacity
+        let capacity = sync_cache.capacity();
+        let sharded = Self::new(capacity).expect("Failed to create sharded cache");
+
+        // Transfer all entries
+        for (key, value) in sync_cache.entries() {
+            sharded.insert(key, value);
+        }
+
+        sharded
+    }
+}

 impl<K, V> ShardedSieveCache<K, V>
 where
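A minimal sketch (not part of the commit) of how the trait impls added in the hunk above fit together; it assumes only the `insert` call shown in the doc examples and prints the `Debug` output rather than asserting on its exact shape:

```rust
use sieve_cache::ShardedSieveCache;

fn main() {
    // `Default` builds a cache with a capacity of 100 spread across the default 16 shards.
    let cache: ShardedSieveCache<String, u32> = Default::default();
    cache.insert("a".to_string(), 1);
    cache.insert("b".to_string(), 2);

    // `Debug` reports capacity, length, and shard count without dumping the entries.
    println!("{cache:?}");

    // `IntoIterator` snapshots the entries into a `Vec` and consumes the cache.
    let mut pairs: Vec<(String, u32)> = cache.into_iter().collect();
    pairs.sort();
    assert_eq!(pairs, vec![("a".to_string(), 1), ("b".to_string(), 2)]);
}
```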
@@ -605,7 +713,7 @@ where |
     {
         for shard in &self.shards {
             let mut guard = shard.lock().unwrap_or_else(PoisonError::into_inner);
-            guard.iter_mut().for_each(|entry| f(entry));
+            guard.iter_mut().for_each(&mut f);
         }
     }

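The `&mut f` form relies on the standard library's blanket impl that makes `&mut F` itself a `FnMut` whenever `F` is, so the named closure can be re-borrowed for each shard instead of being wrapped in a redundant `|entry| f(entry)` closure. A standalone sketch of the same pattern, using plain `Vec`s rather than the crate's shards:

```rust
// Standalone sketch (not from the crate): why `for_each(&mut f)` compiles.
// The standard library provides `impl<A, F: FnMut<A>> FnMut<A> for &mut F`,
// so a named closure can be handed to several `for_each` calls in turn.
fn visit_all<T>(groups: &mut [Vec<T>], mut f: impl FnMut(&mut T)) {
    for group in groups.iter_mut() {
        // `&mut f` re-borrows the closure for this group only, mirroring
        // the per-shard loop in the hunk above.
        group.iter_mut().for_each(&mut f);
    }
}

fn main() {
    let mut shards = vec![vec![1, 2], vec![3]];
    visit_all(&mut shards, |v| *v *= 10);
    assert_eq!(shards, vec![vec![10, 20], vec![30]]);
}
```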