|
11 | 11 | /*
|
12 | 12 | * Mbcache is a simple key-value store. Keys need not be unique, however
|
13 | 13 | * key-value pairs are expected to be unique (we use this fact in
|
14 |
| - * mb_cache_entry_delete()). |
| 14 | + * mb_cache_entry_delete_or_get()). |
15 | 15 | *
|
16 | 16 | * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
|
17 | 17 | * Ext4 also uses it for deduplication of xattr values stored in inodes.
|
@@ -125,6 +125,19 @@ void __mb_cache_entry_free(struct mb_cache_entry *entry)
|
125 | 125 | }
|
126 | 126 | EXPORT_SYMBOL(__mb_cache_entry_free);
|
127 | 127 |
|
/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 *
 * @entry - entry to work on
 *
 * Block until the caller is the last user of @entry, i.e. until e_refcnt
 * drops to <= 3.  NOTE(review): the bound 3 presumably covers the hash-list
 * and LRU-list references plus the caller's own reference — confirm against
 * the refcount scheme used at entry creation.  Relies on whoever drops a
 * reference issuing a matching wake-up on &entry->e_refcnt.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 3);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);
| 140 | + |
128 | 141 | static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
|
129 | 142 | struct mb_cache_entry *entry,
|
130 | 143 | u32 key)
|
@@ -217,7 +230,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
|
217 | 230 | }
|
218 | 231 | EXPORT_SYMBOL(mb_cache_entry_get);
|
219 | 232 |
|
220 |
| -/* mb_cache_entry_delete - remove a cache entry |
| 233 | +/* mb_cache_entry_delete - try to remove a cache entry |
221 | 234 | * @cache - cache we work with
|
222 | 235 | * @key - key
|
223 | 236 | * @value - value
|
@@ -254,6 +267,55 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
|
254 | 267 | }
|
255 | 268 | EXPORT_SYMBOL(mb_cache_entry_delete);
|
256 | 269 |
|
/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value. The removal
 * happens only if the entry is unused. The function returns NULL in case the
 * entry was successfully removed or there's no such entry in the cache.
 * Otherwise the function grabs a reference to the entry that we failed to
 * delete because it still has users, and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value) {
			/*
			 * e_refcnt > 2 means someone besides the hash list
			 * and the LRU list (which each hold a reference, as
			 * the decrements below show) still uses the entry:
			 * take an extra reference and hand it to the caller
			 * instead of deleting.
			 */
			if (atomic_read(&entry->e_refcnt) > 2) {
				atomic_inc(&entry->e_refcnt);
				hlist_bl_unlock(head);
				return entry;
			}
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				/* Unlink from LRU and drop its reference */
				list_del_init(&entry->e_list);
				if (!WARN_ONCE(cache->c_entry_count == 0,
		"mbcache: attempt to decrement c_entry_count past zero"))
					cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			/* Drop the hash-list reference we kept above */
			mb_cache_entry_put(cache, entry);
			return NULL;
		}
	}
	hlist_bl_unlock(head);

	return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
| 318 | + |
257 | 319 | /* mb_cache_entry_touch - cache entry got used
|
258 | 320 | * @cache - cache the entry belongs to
|
259 | 321 | * @entry - entry that got used
|
|
0 commit comments