LRU Cache

LRU-最近最少使用缓存算法

实现方法:
1.双向链表(头尾节点不含元素)
2.哈希

基于上述两个数据结构,我们可以实现以下方法:
1.put-O(1): 添加一个元素到cache中,需要将这个元素放到队头,如果超过容量,需要剔除最后一个元素
2.get-O(1): 获取一个元素,先通过哈希表判断cache中是否存在这个元素,如果存在将其调整到队头

首先看第一版:

#include <cassert>
#include <cstddef>
#include <iostream>
#include <unordered_map>
#include <utility>
//LRU cache
//get/put O(1)
//刚加入队列的需要放在头部,刚被访问的需要放在头部
//加入队列时,如果队列满了,就删除尾部的元素

//使用双向链表+哈希表

template<typename T>
struct Node{
int key;
T value;
Node* prev{nullptr};
Node* next{nullptr};

Node() = default;
Node(int k, T v)
: key(k), value(std::move(v)){}
};

template<typename T>
class LRUCache{
public:
LRUCache(int cap = 3)
: capacity(cap) {
assert(cap > 1);
head = new Node<T>();
tail = new Node<T>();
head->next = tail;
tail->prev = head;
}

~LRUCache() {
auto cur = head;
while(cur) {
head = cur->next;
delete cur;
cur = head;
}
}

std::pair<bool, T> get(int key) {
auto it = cache.find(key);
if(it == cache.end()) {
return {false, T{}};
}

Node<T>* node = it->second;
moveToHead(node);

return {true, node->value};
}

void put(int key, const T& val) {
auto it = cache.find(key);
if(it != cache.end()) {
//update the value and move to head
Node<T>* node = it->second;
node->value = val;
moveToHead(node);
return ;
}
if(cache.size() >= capacity) {
//Remove the least recently used item
Node<T>* lru = tail->prev;
cache.erase(lru->key);
tail->prev = lru->prev;
lru->prev->next = tail;
delete lru;
lru = nullptr;
}
Node<T>* new_node = new Node<T>(key, val);
cache[key] = new_node;
insertToHead(new_node);
return;
}

void moveToHead(Node<T>*& node){
// Move the accessed node to the head
node->prev->next = node->next;
node->next->prev = node->prev;

head->next->prev = node;
node->next = head->next;
head->next = node;
node->prev = head;
}

void insertToHead(Node<T>*& node) {
//Insert a new node to the head;
head->next->prev = node;
node->next = head->next;
head->next = node;
node->prev = head;
}
private:
int capacity{0};
Node<T>* head{nullptr};
Node<T>* tail{nullptr};
std::unordered_map<int, Node<T>*> cache;
};

int main() {
LRUCache<int> cache(2);

cache.put(1, 1);
cache.put(2, 2);
auto res = cache.get(1);
std::cout << "get(1) : " << res.first << " " << res.second << std::endl; // 1
cache.put(3, 3);
cache.put(4, 4);
res = cache.get(2);
std::cout << "get(2) : " << res.first << " " << res.second << std::endl; // 0
cache.put(5, 5);
res = cache.get(3);
std::cout << "get(3) : " << res.first << " " << res.second << std::endl; // 0

res = cache.get(4);
std::cout << "get(4) : " << res.first << " " << res.second << std::endl; // 4
return 0;
}

/*
TODO:
1.容量的单位应该是字节,而不是元素个数
2.线程安全

*/

  • Copyright: Copyright is owned by the author. For commercial reprints, please contact the author for authorization. For non-commercial reprints, please indicate the source.
  • Copyrights © 2015-2025 Xudong0722
  • Visitors: | Views:

请我喝杯咖啡吧~

支付宝
微信