@@ -35,7 +35,7 @@ static struct landlock_ruleset *create_ruleset(const u32 num_layers)
 		return ERR_PTR(-ENOMEM);
 	refcount_set(&new_ruleset->usage, 1);
 	mutex_init(&new_ruleset->lock);
-	new_ruleset->root = RB_ROOT;
+	new_ruleset->root_inode = RB_ROOT;
 	new_ruleset->num_layers = num_layers;
 	/*
 	 * hierarchy = NULL
@@ -68,8 +68,20 @@ static void build_check_rule(void)
 	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
 }
 
+static bool is_object_pointer(const enum landlock_key_type key_type)
+{
+	switch (key_type) {
+	case LANDLOCK_KEY_INODE:
+		return true;
+
+	default:
+		WARN_ON_ONCE(1);
+		return false;
+	}
+}
+
 static struct landlock_rule *
-create_rule(struct landlock_object *const object,
+create_rule(const struct landlock_id id,
 	    const struct landlock_layer (*const layers)[], const u32 num_layers,
 	    const struct landlock_layer *const new_layer)
 {
@@ -90,8 +102,13 @@ create_rule(struct landlock_object *const object,
 	if (!new_rule)
 		return ERR_PTR(-ENOMEM);
 	RB_CLEAR_NODE(&new_rule->node);
-	landlock_get_object(object);
-	new_rule->object = object;
+	if (is_object_pointer(id.type)) {
+		/* This should be caught by insert_rule(). */
+		WARN_ON_ONCE(!id.key.object);
+		landlock_get_object(id.key.object);
+	}
+
+	new_rule->key = id.key;
 	new_rule->num_layers = new_num_layers;
 	/* Copies the original layer stack. */
 	memcpy(new_rule->layers, layers,
@@ -102,12 +119,27 @@ create_rule(struct landlock_object *const object,
 	return new_rule;
 }
 
-static void free_rule(struct landlock_rule *const rule)
+static struct rb_root *get_root(struct landlock_ruleset *const ruleset,
+				const enum landlock_key_type key_type)
+{
+	switch (key_type) {
+	case LANDLOCK_KEY_INODE:
+		return &ruleset->root_inode;
+
+	default:
+		WARN_ON_ONCE(1);
+		return ERR_PTR(-EINVAL);
+	}
+}
+
+static void free_rule(struct landlock_rule *const rule,
+		      const enum landlock_key_type key_type)
 {
 	might_sleep();
 	if (!rule)
 		return;
-	landlock_put_object(rule->object);
+	if (is_object_pointer(key_type))
+		landlock_put_object(rule->key.object);
 	kfree(rule);
 }
 
@@ -129,8 +161,8 @@ static void build_check_ruleset(void)
  * insert_rule - Create and insert a rule in a ruleset
  *
  * @ruleset: The ruleset to be updated.
- * @object: The object to build the new rule with. The underlying kernel
- *          object must be held by the caller.
+ * @id: The ID to build the new rule with. The underlying kernel object, if
+ *      any, must be held by the caller.
  * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
@@ -144,26 +176,35 @@ static void build_check_ruleset(void)
 * access rights.
 */
 static int insert_rule(struct landlock_ruleset *const ruleset,
-		       struct landlock_object *const object,
+		       const struct landlock_id id,
 		       const struct landlock_layer (*const layers)[],
-		       size_t num_layers)
+		       const size_t num_layers)
 {
 	struct rb_node **walker_node;
 	struct rb_node *parent_node = NULL;
 	struct landlock_rule *new_rule;
+	struct rb_root *root;
 
 	might_sleep();
 	lockdep_assert_held(&ruleset->lock);
-	if (WARN_ON_ONCE(!object || !layers))
+	if (WARN_ON_ONCE(!layers))
+		return -ENOENT;
+
+	if (is_object_pointer(id.type) && WARN_ON_ONCE(!id.key.object))
 		return -ENOENT;
-	walker_node = &(ruleset->root.rb_node);
+
+	root = get_root(ruleset, id.type);
+	if (IS_ERR(root))
+		return PTR_ERR(root);
+
+	walker_node = &root->rb_node;
 	while (*walker_node) {
 		struct landlock_rule *const this =
 			rb_entry(*walker_node, struct landlock_rule, node);
 
-		if (this->object != object) {
+		if (this->key.data != id.key.data) {
 			parent_node = *walker_node;
-			if (this->object < object)
+			if (this->key.data < id.key.data)
 				walker_node = &((*walker_node)->rb_right);
 			else
 				walker_node = &((*walker_node)->rb_left);
@@ -195,24 +236,24 @@ static int insert_rule(struct landlock_ruleset *const ruleset,
 		 * Intersects access rights when it is a merge between a
 		 * ruleset and a domain.
 		 */
-		new_rule = create_rule(object, &this->layers, this->num_layers,
+		new_rule = create_rule(id, &this->layers, this->num_layers,
 				       &(*layers)[0]);
 		if (IS_ERR(new_rule))
 			return PTR_ERR(new_rule);
-		rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
-		free_rule(this);
+		rb_replace_node(&this->node, &new_rule->node, root);
+		free_rule(this, id.type);
 		return 0;
 	}
 
-	/* There is no match for @object. */
+	/* There is no match for @id. */
 	build_check_ruleset();
 	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
 		return -E2BIG;
-	new_rule = create_rule(object, layers, num_layers, NULL);
+	new_rule = create_rule(id, layers, num_layers, NULL);
 	if (IS_ERR(new_rule))
 		return PTR_ERR(new_rule);
 	rb_link_node(&new_rule->node, parent_node, walker_node);
-	rb_insert_color(&new_rule->node, &ruleset->root);
+	rb_insert_color(&new_rule->node, root);
 	ruleset->num_rules++;
 	return 0;
 }
@@ -230,7 +271,7 @@ static void build_check_layer(void)
 
 /* @ruleset must be locked by the caller. */
 int landlock_insert_rule(struct landlock_ruleset *const ruleset,
-			 struct landlock_object *const object,
+			 const struct landlock_id id,
 			 const access_mask_t access)
 {
 	struct landlock_layer layers[] = { {
@@ -240,7 +281,7 @@ int landlock_insert_rule(struct landlock_ruleset *const ruleset,
 	} };
 
 	build_check_layer();
-	return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
+	return insert_rule(ruleset, id, &layers, ARRAY_SIZE(layers));
 }
 
 static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
@@ -263,6 +304,7 @@ static int merge_ruleset(struct landlock_ruleset *const dst,
 			 struct landlock_ruleset *const src)
 {
 	struct landlock_rule *walker_rule, *next_rule;
+	struct rb_root *src_root;
 	int err = 0;
 
 	might_sleep();
@@ -273,6 +315,10 @@ static int merge_ruleset(struct landlock_ruleset *const dst,
 	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
 		return -EINVAL;
 
+	src_root = get_root(src, LANDLOCK_KEY_INODE);
+	if (IS_ERR(src_root))
+		return PTR_ERR(src_root);
+
 	/* Locks @dst first because we are its only owner. */
 	mutex_lock(&dst->lock);
 	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);
@@ -285,11 +331,15 @@ static int merge_ruleset(struct landlock_ruleset *const dst,
 	dst->access_masks[dst->num_layers - 1] = src->access_masks[0];
 
 	/* Merges the @src tree. */
-	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
+	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, src_root,
 					     node) {
 		struct landlock_layer layers[] = { {
 			.level = dst->num_layers,
 		} };
+		const struct landlock_id id = {
+			.key = walker_rule->key,
+			.type = LANDLOCK_KEY_INODE,
+		};
 
 		if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
 			err = -EINVAL;
@@ -300,8 +350,8 @@ static int merge_ruleset(struct landlock_ruleset *const dst,
 			goto out_unlock;
 		}
 		layers[0].access = walker_rule->layers[0].access;
-		err = insert_rule(dst, walker_rule->object, &layers,
-				  ARRAY_SIZE(layers));
+
+		err = insert_rule(dst, id, &layers, ARRAY_SIZE(layers));
 		if (err)
 			goto out_unlock;
 	}
@@ -316,21 +366,30 @@ static int inherit_ruleset(struct landlock_ruleset *const parent,
 			   struct landlock_ruleset *const child)
 {
 	struct landlock_rule *walker_rule, *next_rule;
+	struct rb_root *parent_root;
 	int err = 0;
 
 	might_sleep();
 	if (!parent)
 		return 0;
 
+	parent_root = get_root(parent, LANDLOCK_KEY_INODE);
+	if (IS_ERR(parent_root))
+		return PTR_ERR(parent_root);
+
 	/* Locks @child first because we are its only owner. */
 	mutex_lock(&child->lock);
 	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
 
 	/* Copies the @parent tree. */
 	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
-					     &parent->root, node) {
-		err = insert_rule(child, walker_rule->object,
-				  &walker_rule->layers,
+					     parent_root, node) {
+		const struct landlock_id id = {
+			.key = walker_rule->key,
+			.type = LANDLOCK_KEY_INODE,
+		};
+
+		err = insert_rule(child, id, &walker_rule->layers,
 				  walker_rule->num_layers);
 		if (err)
 			goto out_unlock;
@@ -362,8 +421,9 @@ static void free_ruleset(struct landlock_ruleset *const ruleset)
 	struct landlock_rule *freeme, *next;
 
 	might_sleep();
-	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node)
-		free_rule(freeme);
+	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root_inode,
+					     node)
+		free_rule(freeme, LANDLOCK_KEY_INODE);
 	put_hierarchy(ruleset->hierarchy);
 	kfree(ruleset);
 }
@@ -454,20 +514,23 @@ landlock_merge_ruleset(struct landlock_ruleset *const parent,
 */
 const struct landlock_rule *
 landlock_find_rule(const struct landlock_ruleset *const ruleset,
-		   const struct landlock_object *const object)
+		   const struct landlock_id id)
 {
+	const struct rb_root *root;
 	const struct rb_node *node;
 
-	if (!object)
+	root = get_root((struct landlock_ruleset *)ruleset, id.type);
+	if (IS_ERR(root))
 		return NULL;
-	node = ruleset->root.rb_node;
+	node = root->rb_node;
+
 	while (node) {
 		struct landlock_rule *this =
 			rb_entry(node, struct landlock_rule, node);
 
-		if (this->object == object)
+		if (this->key.data == id.key.data)
 			return this;
-		if (this->object < object)
+		if (this->key.data < id.key.data)
 			node = node->rb_right;
 		else
 			node = node->rb_left;
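
For reference, a minimal sketch of how a caller might drive the refactored helpers: build a struct landlock_id with LANDLOCK_KEY_INODE and pass it to landlock_insert_rule(), which per the diff expects the ruleset to be locked and the underlying object reference to be held by the caller. The append_inode_rule() wrapper name is hypothetical and not part of this commit.

/*
 * Illustrative sketch only: wraps the refactored landlock_insert_rule()
 * for an inode-keyed rule. The caller must already hold a reference on
 * @object, as required by the insert_rule() kernel-doc above.
 */
static int append_inode_rule(struct landlock_ruleset *const ruleset,
			     struct landlock_object *const object,
			     const access_mask_t access)
{
	int err;
	struct landlock_id id = {
		.type = LANDLOCK_KEY_INODE,
	};

	id.key.object = object;

	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, id, access);
	mutex_unlock(&ruleset->lock);
	return err;
}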