 * Authors: Venkatesh Pallipadi <[email protected]>
 *          Suresh B Siddha <[email protected]>
 *
 * Interval tree used to store the PAT memory type reservations.
 */
11
10
12
11
#include <linux/seq_file.h>
13
12
#include <linux/debugfs.h>
14
13
#include <linux/kernel.h>
15
- #include <linux/rbtree_augmented .h>
14
+ #include <linux/interval_tree_generic .h>
16
15
#include <linux/sched.h>
17
16
#include <linux/gfp.h>
18
17
 *
 * memtype_lock protects the rbtree.
 */
36
-
37
- static struct rb_root memtype_rbroot = RB_ROOT ;
38
-
39
- static int is_node_overlap (struct memtype * node , u64 start , u64 end )
35
+ static inline u64 memtype_interval_start (struct memtype * memtype )
40
36
{
41
- if (node -> start >= end || node -> end <= start )
42
- return 0 ;
43
-
44
- return 1 ;
37
+ return memtype -> start ;
45
38
}
46
39
47
- static u64 get_subtree_max_end (struct rb_node * node )
40
+ static inline u64 memtype_interval_end (struct memtype * memtype )
48
41
{
49
- u64 ret = 0 ;
50
- if (node ) {
51
- struct memtype * data = rb_entry (node , struct memtype , rb );
52
- ret = data -> subtree_max_end ;
53
- }
54
- return ret ;
42
+ return memtype -> end - 1 ;
55
43
}
44
+ INTERVAL_TREE_DEFINE (struct memtype , rb , u64 , subtree_max_end ,
45
+ memtype_interval_start , memtype_interval_end ,
46
+ static , memtype_interval )
56
47
57
- #define NODE_END (node ) ((node)->end)
58
-
59
- RB_DECLARE_CALLBACKS_MAX (static , memtype_rb_augment_cb ,
60
- struct memtype , rb , u64 , subtree_max_end , NODE_END )
61
-
62
- /* Find the first (lowest start addr) overlapping range from rb tree */
63
- static struct memtype * memtype_rb_lowest_match (struct rb_root * root ,
64
- u64 start , u64 end )
65
- {
66
- struct rb_node * node = root -> rb_node ;
67
- struct memtype * last_lower = NULL ;
68
-
69
- while (node ) {
70
- struct memtype * data = rb_entry (node , struct memtype , rb );
71
-
72
- if (get_subtree_max_end (node -> rb_left ) > start ) {
73
- /* Lowest overlap if any must be on left side */
74
- node = node -> rb_left ;
75
- } else if (is_node_overlap (data , start , end )) {
76
- last_lower = data ;
77
- break ;
78
- } else if (start >= data -> start ) {
79
- /* Lowest overlap if any must be on right side */
80
- node = node -> rb_right ;
81
- } else {
82
- break ;
83
- }
84
- }
85
- return last_lower ; /* Returns NULL if there is no overlap */
86
- }
48
+ static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED ;
/* Match policies for memtype_match(), see rbt_memtype_erase(). */
enum {
	MEMTYPE_EXACT_MATCH	= 0,	/* [start, end) equals the node exactly */
	MEMTYPE_END_MATCH	= 1	/* node ends at 'end' but starts earlier */
};
92
54
93
- static struct memtype * memtype_rb_match (struct rb_root * root ,
94
- u64 start , u64 end , int match_type )
55
+ static struct memtype * memtype_match (struct rb_root_cached * root ,
56
+ u64 start , u64 end , int match_type )
95
57
{
96
58
struct memtype * match ;
97
59
98
- match = memtype_rb_lowest_match (root , start , end );
60
+ match = memtype_interval_iter_first (root , start , end );
99
61
while (match != NULL && match -> start < end ) {
100
- struct rb_node * node ;
101
-
102
62
if ((match_type == MEMTYPE_EXACT_MATCH ) &&
103
63
(match -> start == start ) && (match -> end == end ))
104
64
return match ;
@@ -107,26 +67,21 @@ static struct memtype *memtype_rb_match(struct rb_root *root,
107
67
(match -> start < start ) && (match -> end == end ))
108
68
return match ;
109
69
110
- node = rb_next (& match -> rb );
111
- if (node )
112
- match = rb_entry (node , struct memtype , rb );
113
- else
114
- match = NULL ;
70
+ match = memtype_interval_iter_next (match , start , end );
115
71
}
116
72
117
73
return NULL ; /* Returns NULL if there is no match */
118
74
}
119
75
120
- static int memtype_rb_check_conflict (struct rb_root * root ,
76
+ static int memtype_rb_check_conflict (struct rb_root_cached * root ,
121
77
u64 start , u64 end ,
122
78
enum page_cache_mode reqtype ,
123
79
enum page_cache_mode * newtype )
124
80
{
125
- struct rb_node * node ;
126
81
struct memtype * match ;
127
82
enum page_cache_mode found_type = reqtype ;
128
83
129
- match = memtype_rb_lowest_match (& memtype_rbroot , start , end );
84
+ match = memtype_interval_iter_first (& memtype_rbroot , start , end );
130
85
if (match == NULL )
131
86
goto success ;
132
87
@@ -136,19 +91,12 @@ static int memtype_rb_check_conflict(struct rb_root *root,
136
91
dprintk ("Overlap at 0x%Lx-0x%Lx\n" , match -> start , match -> end );
137
92
found_type = match -> type ;
138
93
139
- node = rb_next (& match -> rb );
140
- while (node ) {
141
- match = rb_entry (node , struct memtype , rb );
142
-
143
- if (match -> start >= end ) /* Checked all possible matches */
144
- goto success ;
145
-
146
- if (is_node_overlap (match , start , end ) &&
147
- match -> type != found_type ) {
94
+ match = memtype_interval_iter_next (match , start , end );
95
+ while (match ) {
96
+ if (match -> type != found_type )
148
97
goto failure ;
149
- }
150
98
151
- node = rb_next ( & match -> rb );
99
+ match = memtype_interval_iter_next ( match , start , end );
152
100
}
153
101
success :
154
102
if (newtype )
@@ -163,44 +111,21 @@ static int memtype_rb_check_conflict(struct rb_root *root,
163
111
return - EBUSY ;
164
112
}
165
113
166
- static void memtype_rb_insert (struct rb_root * root , struct memtype * newdata )
167
- {
168
- struct rb_node * * node = & (root -> rb_node );
169
- struct rb_node * parent = NULL ;
170
-
171
- while (* node ) {
172
- struct memtype * data = rb_entry (* node , struct memtype , rb );
173
-
174
- parent = * node ;
175
- if (data -> subtree_max_end < newdata -> end )
176
- data -> subtree_max_end = newdata -> end ;
177
- if (newdata -> start <= data -> start )
178
- node = & ((* node )-> rb_left );
179
- else if (newdata -> start > data -> start )
180
- node = & ((* node )-> rb_right );
181
- }
182
-
183
- newdata -> subtree_max_end = newdata -> end ;
184
- rb_link_node (& newdata -> rb , parent , node );
185
- rb_insert_augmented (& newdata -> rb , root , & memtype_rb_augment_cb );
186
- }
187
-
188
114
int rbt_memtype_check_insert (struct memtype * new ,
189
115
enum page_cache_mode * ret_type )
190
116
{
191
117
int err = 0 ;
192
118
193
119
err = memtype_rb_check_conflict (& memtype_rbroot , new -> start , new -> end ,
194
- new -> type , ret_type );
120
+ new -> type , ret_type );
121
+ if (err )
122
+ return err ;
195
123
196
- if (!err ) {
197
- if (ret_type )
198
- new -> type = * ret_type ;
124
+ if (ret_type )
125
+ new -> type = * ret_type ;
199
126
200
- new -> subtree_max_end = new -> end ;
201
- memtype_rb_insert (& memtype_rbroot , new );
202
- }
203
- return err ;
127
+ memtype_interval_insert (new , & memtype_rbroot );
128
+ return 0 ;
204
129
}
205
130
206
131
struct memtype * rbt_memtype_erase (u64 start , u64 end )
@@ -214,26 +139,23 @@ struct memtype *rbt_memtype_erase(u64 start, u64 end)
214
139
* it then checks with END_MATCH, i.e. shrink the size of a node
215
140
* from the end for the mremap case.
216
141
*/
217
- data = memtype_rb_match (& memtype_rbroot , start , end ,
218
- MEMTYPE_EXACT_MATCH );
142
+ data = memtype_match (& memtype_rbroot , start , end ,
143
+ MEMTYPE_EXACT_MATCH );
219
144
if (!data ) {
220
- data = memtype_rb_match (& memtype_rbroot , start , end ,
221
- MEMTYPE_END_MATCH );
145
+ data = memtype_match (& memtype_rbroot , start , end ,
146
+ MEMTYPE_END_MATCH );
222
147
if (!data )
223
148
return ERR_PTR (- EINVAL );
224
149
}
225
150
226
151
if (data -> start == start ) {
227
152
/* munmap: erase this node */
228
- rb_erase_augmented (& data -> rb , & memtype_rbroot ,
229
- & memtype_rb_augment_cb );
153
+ memtype_interval_remove (data , & memtype_rbroot );
230
154
} else {
231
155
/* mremap: update the end value of this node */
232
- rb_erase_augmented (& data -> rb , & memtype_rbroot ,
233
- & memtype_rb_augment_cb );
156
+ memtype_interval_remove (data , & memtype_rbroot );
234
157
data -> end = start ;
235
- data -> subtree_max_end = data -> end ;
236
- memtype_rb_insert (& memtype_rbroot , data );
158
+ memtype_interval_insert (data , & memtype_rbroot );
237
159
return NULL ;
238
160
}
239
161
@@ -242,24 +164,24 @@ struct memtype *rbt_memtype_erase(u64 start, u64 end)
242
164
243
165
struct memtype * rbt_memtype_lookup (u64 addr )
244
166
{
245
- return memtype_rb_lowest_match (& memtype_rbroot , addr , addr + PAGE_SIZE );
167
+ return memtype_interval_iter_first (& memtype_rbroot , addr ,
168
+ addr + PAGE_SIZE );
246
169
}
#if defined(CONFIG_DEBUG_FS)
/*
 * Copy the pos'th (1-based) reservation into *out for the debugfs
 * seq_file dump.  Iterates over the full u64 range so every node is
 * visited in start order.
 *
 * Returns 0 when the element exists, 1 past the end of the tree.
 */
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
	struct memtype *match;
	int i = 1;

	match = memtype_interval_iter_first(&memtype_rbroot, 0, ULONG_MAX);
	while (match && pos != i) {
		match = memtype_interval_iter_next(match, 0, ULONG_MAX);
		i++;
	}

	if (match) { /* pos == i */
		*out = *match;
		return 0;
	} else {
		return 1;
	}
}
#endif