Skip to content

Commit 116f21b

Browse files
WOnder93 authored and pcmoore committed
selinux: avoid atomic_t usage in sidtab
As noted in Documentation/atomic_t.txt, if we don't need the RMW atomic operations, we should only use READ_ONCE()/WRITE_ONCE() + smp_rmb()/smp_wmb() where necessary (or the combined variants smp_load_acquire()/smp_store_release()). This patch converts the sidtab code to use regular u32 for the counter and reverse lookup cache and use the appropriate operations instead of atomic_get()/atomic_set(). Note that when reading/updating the reverse lookup cache we don't need memory barriers as it doesn't need to be consistent or accurate. We can now also replace some atomic ops with regular loads (when under spinlock) and stores (for conversion target fields that are always accessed under the master table's spinlock). We can now also bump SIDTAB_MAX to U32_MAX as we can use the full u32 range again. Suggested-by: Jann Horn <[email protected]> Signed-off-by: Ondrej Mosnacek <[email protected]> Reviewed-by: Jann Horn <[email protected]> Signed-off-by: Paul Moore <[email protected]>
1 parent ac5656d commit 116f21b

File tree

2 files changed

+35
-32
lines changed

2 files changed

+35
-32
lines changed

security/selinux/ss/sidtab.c

Lines changed: 21 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
#include <linux/slab.h>
1313
#include <linux/sched.h>
1414
#include <linux/spinlock.h>
15-
#include <linux/atomic.h>
15+
#include <asm/barrier.h>
1616
#include "flask.h"
1717
#include "security.h"
1818
#include "sidtab.h"
@@ -23,14 +23,14 @@ int sidtab_init(struct sidtab *s)
2323

2424
memset(s->roots, 0, sizeof(s->roots));
2525

26+
/* max count is SIDTAB_MAX so valid index is always < SIDTAB_MAX */
2627
for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
27-
atomic_set(&s->rcache[i], -1);
28+
s->rcache[i] = SIDTAB_MAX;
2829

2930
for (i = 0; i < SECINITSID_NUM; i++)
3031
s->isids[i].set = 0;
3132

32-
atomic_set(&s->count, 0);
33-
33+
s->count = 0;
3434
s->convert = NULL;
3535

3636
spin_lock_init(&s->lock);
@@ -130,14 +130,12 @@ static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
130130

131131
static struct context *sidtab_lookup(struct sidtab *s, u32 index)
132132
{
133-
u32 count = (u32)atomic_read(&s->count);
133+
/* read entries only after reading count */
134+
u32 count = smp_load_acquire(&s->count);
134135

135136
if (index >= count)
136137
return NULL;
137138

138-
/* read entries after reading count */
139-
smp_rmb();
140-
141139
return sidtab_do_lookup(s, index, 0);
142140
}
143141

@@ -210,10 +208,10 @@ static int sidtab_find_context(union sidtab_entry_inner entry,
210208
static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
211209
{
212210
while (pos > 0) {
213-
atomic_set(&s->rcache[pos], atomic_read(&s->rcache[pos - 1]));
211+
WRITE_ONCE(s->rcache[pos], READ_ONCE(s->rcache[pos - 1]));
214212
--pos;
215213
}
216-
atomic_set(&s->rcache[0], (int)index);
214+
WRITE_ONCE(s->rcache[0], index);
217215
}
218216

219217
static void sidtab_rcache_push(struct sidtab *s, u32 index)
@@ -227,14 +225,14 @@ static int sidtab_rcache_search(struct sidtab *s, struct context *context,
227225
u32 i;
228226

229227
for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
230-
int v = atomic_read(&s->rcache[i]);
228+
u32 v = READ_ONCE(s->rcache[i]);
231229

232-
if (v < 0)
230+
if (v >= SIDTAB_MAX)
233231
continue;
234232

235-
if (context_cmp(sidtab_do_lookup(s, (u32)v, 0), context)) {
236-
sidtab_rcache_update(s, (u32)v, i);
237-
*index = (u32)v;
233+
if (context_cmp(sidtab_do_lookup(s, v, 0), context)) {
234+
sidtab_rcache_update(s, v, i);
235+
*index = v;
238236
return 0;
239237
}
240238
}
@@ -245,8 +243,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
245243
u32 *index)
246244
{
247245
unsigned long flags;
248-
u32 count = (u32)atomic_read(&s->count);
249-
u32 count_locked, level, pos;
246+
u32 count, count_locked, level, pos;
250247
struct sidtab_convert_params *convert;
251248
struct context *dst, *dst_convert;
252249
int rc;
@@ -255,11 +252,10 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
255252
if (rc == 0)
256253
return 0;
257254

255+
/* read entries only after reading count */
256+
count = smp_load_acquire(&s->count);
258257
level = sidtab_level_from_count(count);
259258

260-
/* read entries after reading count */
261-
smp_rmb();
262-
263259
pos = 0;
264260
rc = sidtab_find_context(s->roots[level], &pos, count, level,
265261
context, index);
@@ -272,7 +268,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
272268
spin_lock_irqsave(&s->lock, flags);
273269

274270
convert = s->convert;
275-
count_locked = (u32)atomic_read(&s->count);
271+
count_locked = s->count;
276272
level = sidtab_level_from_count(count_locked);
277273

278274
/* if count has changed before we acquired the lock, then catch up */
@@ -320,7 +316,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
320316
}
321317

322318
/* at this point we know the insert won't fail */
323-
atomic_set(&convert->target->count, count + 1);
319+
convert->target->count = count + 1;
324320
}
325321

326322
if (context->len)
@@ -331,9 +327,7 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
331327
*index = count;
332328

333329
/* write entries before writing new count */
334-
smp_wmb();
335-
336-
atomic_set(&s->count, count + 1);
330+
smp_store_release(&s->count, count + 1);
337331

338332
rc = 0;
339333
out_unlock:
@@ -423,7 +417,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
423417
return -EBUSY;
424418
}
425419

426-
count = (u32)atomic_read(&s->count);
420+
count = s->count;
427421
level = sidtab_level_from_count(count);
428422

429423
/* allocate last leaf in the new sidtab (to avoid race with
@@ -436,7 +430,7 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
436430
}
437431

438432
/* set count in case no new entries are added during conversion */
439-
atomic_set(&params->target->count, count);
433+
params->target->count = count;
440434

441435
/* enable live convert of new entries */
442436
s->convert = params;

security/selinux/ss/sidtab.h

Lines changed: 14 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,8 @@ union sidtab_entry_inner {
4040
#define SIDTAB_LEAF_ENTRIES \
4141
(SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf))
4242

43-
#define SIDTAB_MAX_BITS 31 /* limited to INT_MAX due to atomic_t range */
44-
#define SIDTAB_MAX (((u32)1 << SIDTAB_MAX_BITS) - 1)
43+
#define SIDTAB_MAX_BITS 32
44+
#define SIDTAB_MAX U32_MAX
4545
/* ensure enough tree levels for SIDTAB_MAX entries */
4646
#define SIDTAB_MAX_LEVEL \
4747
DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
@@ -69,13 +69,22 @@ struct sidtab_convert_params {
6969
#define SIDTAB_RCACHE_SIZE 3
7070

7171
struct sidtab {
72+
/*
73+
* lock-free read access only for as many items as a prior read of
74+
* 'count'
75+
*/
7276
union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
73-
atomic_t count;
77+
/*
78+
* access atomically via {READ|WRITE}_ONCE(); only increment under
79+
* spinlock
80+
*/
81+
u32 count;
82+
/* access only under spinlock */
7483
struct sidtab_convert_params *convert;
7584
spinlock_t lock;
7685

77-
/* reverse lookup cache */
78-
atomic_t rcache[SIDTAB_RCACHE_SIZE];
86+
/* reverse lookup cache - access atomically via {READ|WRITE}_ONCE() */
87+
u32 rcache[SIDTAB_RCACHE_SIZE];
7988

8089
/* index == SID - 1 (no entry for SECSID_NULL) */
8190
struct sidtab_isid_entry isids[SECINITSID_NUM];

0 commit comments

Comments (0)