Skip to content

Commit 67bd654

Browse files
authored
Merge pull request #84 from Suf42/lruCache
Add LRU Cache
2 parents 19dabf2 + ba44980 commit 67bd654

File tree

3 files changed

+117
-95
lines changed

3 files changed

+117
-95
lines changed

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ Gradle is used for development.
2828
- [Heap](src/main/java/dataStructures/heap)
2929
* Max heap implementation
3030
- [Linked List](src/main/java/dataStructures/linkedList)
31-
- LRU Cache
31+
- [LRU Cache](src/main/java/dataStructures/lruCache)
3232
- Minimum Spanning Tree
3333
* Kruskal
3434
* Prim's
Lines changed: 66 additions & 94 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
package dataStructures.lruCache;
22

33
import java.util.HashMap;
4-
import java.util.Map;
54

65
/**
76
* Implementation of Least Recently Used (LRU) Cache
@@ -13,126 +12,99 @@
1312
* Client methods:
1413
* get(K key)
1514
* put(K key, V value)
16-
* Both methods above run in O(1) average time complexity
15+
* Both methods above run in expected O(1) time complexity
1716
*/
18-
public class LRU<K, V> {
19-
private final int cap;
20-
private final Map<K, Node<K, V>> map;
21-
private final Node<K, V> left; // dummy left node to point to the left end
22-
private final Node<K, V> right; // dummy right node to point to the right end
23-
17+
class LRU<K, V> {
2418
/**
25-
* Helper node class that encapsulates key-value pair and act as linked list to neighbour nodes.
19+
* Helper node class that implements doubly linked list
2620
*/
27-
private class Node<K, V> {
28-
private final K key;
21+
private class doublyLinkedListNode<K, V> {
22+
private K key;
2923
private V val;
30-
private Node<K, V> next;
31-
private Node<K, V> prev;
32-
33-
Node(K key, V value) {
34-
this.key = key;
35-
this.val = value;
36-
this.next = null;
37-
this.prev = null;
38-
}
24+
private doublyLinkedListNode<K, V> next;
25+
private doublyLinkedListNode<K, V> prev;
3926
}
4027

28+
private doublyLinkedListNode<K, V> dllHead;
29+
private doublyLinkedListNode<K, V> dllTail;
30+
private HashMap<K, doublyLinkedListNode<K, V>> keyToNode = new HashMap<>();
31+
private int capacity;
32+
private int lengthOfList = 0;
33+
4134
/**
42-
* Constructs an instance of Least Recently Used Cache.
35+
* Constructs an instance of Least Recently Used Cache
4336
*
44-
* @param capacity the maximum capacity of the cache.
37+
* @param capacity the maximum capacity of the cache
4538
*/
4639
public LRU(int capacity) {
47-
this.cap = capacity;
48-
this.map = new HashMap<>();
49-
this.left = new Node<>(null, null);
50-
this.right = new Node<>(null, null);
51-
this.left.next = this.right;
52-
this.right.prev = this.left;
53-
}
40+
this.capacity = capacity;
5441

55-
/**
56-
* Helper method to remove the specified node from the doubly linked list
57-
*
58-
* @param node to be removed from the linked list
59-
*/
60-
private void remove(Node<K, V> node) {
61-
Node<K, V> prev = node.prev;
62-
Node<K, V> nxt = node.next;
63-
prev.next = nxt;
64-
nxt.prev = prev;
42+
dllHead = new doublyLinkedListNode<>();
43+
dllTail = new doublyLinkedListNode<>();
44+
dllHead.next = dllTail;
45+
dllTail.prev = dllHead;
6546
}
6647

6748
/**
68-
* Helper method to insert a node to the right end of the double linked list (Most Recently Used)
49+
* Return the value of the key if it exists or return null
6950
*
70-
* @param node to be inserted
71-
*/
72-
private void insert(Node<K, V> node) {
73-
Node<K, V> prev = this.right.prev;
74-
prev.next = node;
75-
node.prev = prev;
76-
node.next = this.right;
77-
this.right.prev = node;
78-
}
79-
80-
/**
81-
* return the value of the key if it exists; otherwise null
82-
*
83-
* @param key whose value, if exists, to be obtained
51+
* @param key key of the value to be obtained from LRU cache
8452
*/
8553
public V get(K key) {
86-
if (this.map.containsKey(key)) {
87-
Node<K, V> node = this.map.get(key);
88-
this.remove(node);
89-
this.insert(node);
90-
return node.val;
54+
if (!keyToNode.containsKey(key)) {
55+
return null;
9156
}
92-
return null;
57+
58+
doublyLinkedListNode<K, V> temp = keyToNode.get(key);
59+
temp.prev.next = temp.next;
60+
temp.next.prev = temp.prev;
61+
62+
temp.next = dllHead.next;
63+
dllHead.next.prev = temp;
64+
temp.prev = dllHead;
65+
dllHead.next = temp;
66+
67+
return keyToNode.get(key).val;
9368
}
9469

9570
/**
96-
* Update the value of the key if the key exists.
97-
* Otherwise, add the key-value pair to the cache.
98-
* If the number of keys exceeds the capacity from this operation, evict the least recently used key
71+
* Insert key-value pair to LRU cache
9972
*
100-
* @param key the key
101-
* @param val the associated value
73+
* @param key key of the value to be inserted to LRU cache
74+
* @param value value to be inserted to LRU cache
10275
*/
103-
public void update(K key, V val) {
104-
if (this.map.containsKey(key)) {
105-
Node<K, V> node = this.map.get(key);
106-
this.remove(node);
107-
node.val = val;
108-
this.insert(node); // make most recently used
76+
public void put(K key, V value) {
77+
boolean addingNewNode = true;
78+
79+
doublyLinkedListNode<K, V> newlyCached;
80+
81+
if (!keyToNode.containsKey(key)) {
82+
newlyCached = new doublyLinkedListNode<>();
83+
newlyCached.key = key;
84+
newlyCached.val = value;
85+
keyToNode.put(key, newlyCached);
10986
} else {
110-
Node<K, V> node = new Node<>(key, val);
111-
this.map.put(node.key, node);
112-
this.insert(node);
113-
}
87+
newlyCached = keyToNode.get(key);
88+
newlyCached.val = value;
89+
addingNewNode = false;
11490

115-
if (this.map.size() > this.cap) { // evict LRU since capacity exceeded
116-
Node<K, V> toRemove = this.left.next;
117-
this.map.remove(toRemove.key);
118-
this.remove(toRemove);
91+
newlyCached.prev.next = newlyCached.next;
92+
newlyCached.next.prev = newlyCached.prev;
11993
}
120-
}
12194

122-
/**
123-
* Custom print for testing
124-
* prints from LRU to MRU (Most recently used)
125-
*/
126-
public void print() {
127-
Node<K, V> trav = this.left.next;
128-
System.out.print("Dummy");
129-
while (trav != this.right) {
130-
System.out.print(" ->");
131-
System.out.print(trav.key);
132-
System.out.print(",");
133-
System.out.print(trav.val);
134-
trav = trav.next;
95+
newlyCached.next = dllHead.next;
96+
dllHead.next.prev = newlyCached;
97+
newlyCached.prev = dllHead;
98+
dllHead.next = newlyCached;
99+
100+
if (addingNewNode) {
101+
if (lengthOfList == capacity) {
102+
keyToNode.remove(dllTail.prev.key);
103+
dllTail.prev.prev.next = dllTail;
104+
dllTail.prev = dllTail.prev.prev;
105+
} else {
106+
lengthOfList++;
107+
}
135108
}
136-
System.out.println();
137109
}
138110
}
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
# LRU Cache
2+
3+
## Background
4+
5+
Assuming that software engineers develop their applications using well-structured design patterns, programs tend to reuse data and instructions they've recently accessed (temporal locality) or access data elements that are close together in memory (spatial locality).
6+
7+
### Temporal Locality
8+
9+
The Least Recently Used (LRU) Cache operates on the principle that the data most recently accessed is likely to be accessed again in the near future (temporal locality). By evicting the least recently accessed items first, LRU cache ensures that the most relevant data remains available in the cache.
10+
11+
### Applications
12+
13+
<ol>
14+
<li>Operating systems: Operating systems use LRU cache for memory management in page replacement algorithms. When a program requires more memory pages than are available in physical memory, the operating system decides which pages to evict to disc based on LRU caching, ensuring that the most recently accessed pages remain in memory.</li>
15+
<li>Web browsers: Web browsers use LRU cache to store frequently accessed web pages. This allows users to quickly revisit pages without the need to fetch the entire content from the server.</li>
16+
<li>Databases: Databases use LRU cache to store frequent query results. This reduces the need to access the underlying storage system for repeated queries.</li>
17+
</ol>
18+
19+
### Data Structures
20+
21+
Implementing an LRU cache typically involves using a combination of data structures. A common approach is to use a doubly-linked list to maintain the order of items based on access recency and a hash map to achieve constant-time access to any item in the cache. This combination effectively creates a data structure that supports the operations required for LRU cache. As nodes are connected in a doubly-linked list fashion, updating neighbours when rearranging recently cached items is as simple as redirecting the next and previous pointers of affected nodes.
22+
23+
<img src = "https://cdn.hashnode.com/res/hashnode/image/upload/v1655812960691/pqAZ20NyS.png?auto=compress,format&format=webp" alt = "Hash Map">
24+
25+
### Cache Key
26+
27+
The hash map values are accessed through cache keys, which are unique references to the cached items in an LRU cache. Moreover, storing key-to-node pairs in a hash map, where each node encapsulates a cached item, allows us to avoid O(n) sequential access of cached items.
28+
29+
### Eviction
30+
31+
When the cache is full and a new item needs to be added, the eviction process is triggered. The item at the back of the list, which represents the least recently used data, is removed from both the list and the hash map. The new item is then added to the front of the list, and the cache key is stored in the hash map along with its corresponding cache value.
32+
33+
However, if a cached item is accessed through a read-only operation, we still move the cached item to the front of the list without any eviction. Therefore, any form of interaction with a key will move its corresponding node to the front of the doubly-linked list without eviction being triggered. Eviction is only applicable to write operations when a cache is considered full.
34+
35+
## Complexity Analysis
36+
37+
**Time**: **expected** O(1) complexity
38+
39+
As we rely on basic hash map operations to insert, access, and delete cache nodes, the get and put operations supported by LRU cache are influenced by the time complexity of these hash map operations. Insertion, lookup, and deletion operations in a well-designed hash map take O(1) time on average. Therefore, the hash map provides expected O(1) time on operations, and the doubly-linked list provides insertion and removal of nodes in O(1) time.
40+
41+
**Space**: O(cache capacity)
42+
43+
## Notes
44+
45+
<ol>
46+
<li>Cache hit/miss ratio: A simple metric for measuring the effectiveness of the cache is the cache hit ratio. It is represented by the percentage of requests that are served from the cache without needing to access the original data store. Generally speaking, for most applications, a hit ratio of 95 - 99% is ideal.</li>
47+
<li>Outdated cached data: A cached item that is constantly accessed and remains in cache for too long may become outdated.</li>
48+
<li>Thread safety: When working with parallel computation, careful considerations have to be made when multiple threads try to access the cache at the same time. Thread-safe caching mechanisms may involve the proper use of mutex locks.</li>
49+
<li>Other caching algorithms: First-In-First-Out (FIFO) cache, Least Frequently Used (LFU) cache, Most Recently Used (MRU) cache, and Random Replacement (RR) cache. The performance of different caching algorithms depends entirely on the application. LRU caching provides a good balance between performance and memory usage, making it suitable for a wide range of applications as most applications obey recency of data access (we often do reuse the same data in many applications). However, in the event that access patterns are random or even anti-recent, random replacement may perform better as it has less overhead when compared to LRU due to lack of bookkeeping.</li>
50+
</ol>

0 commit comments

Comments
 (0)