@@ -19,7 +19,6 @@ static struct workqueue_struct *fsverity_read_workqueue;
 static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
 				   unsigned long hblock_idx)
 {
-	bool verified;
 	unsigned int blocks_per_page;
 	unsigned int i;
 
@@ -43,12 +42,20 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
 	 * re-instantiated from the backing storage are re-verified.  To do
 	 * this, we use PG_checked again, but now it doesn't really mean
 	 * "checked".  Instead, now it just serves as an indicator for whether
-	 * the hash page is newly instantiated or not.
+	 * the hash page is newly instantiated or not.  If the page is new, as
+	 * indicated by PG_checked=0, we clear the bitmap bits for the page's
+	 * blocks since they are untrustworthy, then set PG_checked=1.
+	 * Otherwise we return the bitmap bit for the requested block.
 	 *
-	 * The first thread that sees PG_checked=0 must clear the corresponding
-	 * bitmap bits, then set PG_checked=1.  This requires a spinlock.  To
-	 * avoid having to take this spinlock in the common case of
-	 * PG_checked=1, we start with an opportunistic lockless read.
+	 * Multiple threads may execute this code concurrently on the same page.
+	 * This is safe because we use memory barriers to ensure that if a
+	 * thread sees PG_checked=1, then it also sees the associated bitmap
+	 * clearing to have occurred.  Also, all writes and their corresponding
+	 * reads are atomic, and all writes are safe to repeat in the event that
+	 * multiple threads get into the PG_checked=0 section.  (Clearing a
+	 * bitmap bit again at worst causes a hash block to be verified
+	 * redundantly.  That event should be very rare, so it's not worth using
+	 * a lock to avoid.  Setting PG_checked again has no effect.)
 	 */
 	if (PageChecked(hpage)) {
 		/*
@@ -58,24 +65,17 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
 		smp_rmb();
 		return test_bit(hblock_idx, vi->hash_block_verified);
 	}
-	spin_lock(&vi->hash_page_init_lock);
-	if (PageChecked(hpage)) {
-		verified = test_bit(hblock_idx, vi->hash_block_verified);
-	} else {
-		blocks_per_page = vi->tree_params.blocks_per_page;
-		hblock_idx = round_down(hblock_idx, blocks_per_page);
-		for (i = 0; i < blocks_per_page; i++)
-			clear_bit(hblock_idx + i, vi->hash_block_verified);
-		/*
-		 * A write memory barrier is needed here to give RELEASE
-		 * semantics to the below SetPageChecked() operation.
-		 */
-		smp_wmb();
-		SetPageChecked(hpage);
-		verified = false;
-	}
-	spin_unlock(&vi->hash_page_init_lock);
-	return verified;
+	blocks_per_page = vi->tree_params.blocks_per_page;
+	hblock_idx = round_down(hblock_idx, blocks_per_page);
+	for (i = 0; i < blocks_per_page; i++)
+		clear_bit(hblock_idx + i, vi->hash_block_verified);
+	/*
+	 * A write memory barrier is needed here to give RELEASE semantics to
+	 * the below SetPageChecked() operation.
+	 */
+	smp_wmb();
+	SetPageChecked(hpage);
+	return false;
 }
 
 /*
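For context on the barrier pairing the new comment describes, here is a minimal userspace sketch of the same publish/consume pattern, written with C11 atomics instead of the kernel's smp_wmb()/smp_rmb(). The names used here (hash_page_state, checked, verified_bitmap, is_block_verified) are illustrative stand-ins for PG_checked and vi->hash_block_verified, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Illustrative analogue of the lockless scheme above (not kernel code):
 * 'checked' stands in for PG_checked and 'verified_bitmap' for
 * vi->hash_block_verified.  The release store pairs with the acquire
 * load, just as SetPageChecked() after smp_wmb() pairs with
 * PageChecked() followed by smp_rmb().
 */
struct hash_page_state {
	atomic_bool checked;          /* "page has been initialized" flag */
	atomic_ulong verified_bitmap; /* one bit per hash block in the page */
};

static bool is_block_verified(struct hash_page_state *s, unsigned int bit)
{
	/*
	 * Fast path: if the acquire load sees checked == true, it is also
	 * guaranteed to see the bitmap writes made before the release store.
	 */
	if (atomic_load_explicit(&s->checked, memory_order_acquire))
		return (atomic_load_explicit(&s->verified_bitmap,
					     memory_order_relaxed) >> bit) & 1;

	/*
	 * Slow path: freshly instantiated page.  Clear the bits, then
	 * publish.  Racing threads may repeat these writes harmlessly:
	 * re-clearing only forces a redundant verification, and storing
	 * 'true' twice is a no-op, mirroring the commit's argument for
	 * why no lock is needed.
	 */
	atomic_store_explicit(&s->verified_bitmap, 0, memory_order_relaxed);
	atomic_store_explicit(&s->checked, true, memory_order_release);
	return false;
}

The kernel variant gets the same ordering from explicit barriers rather than acquire/release accessors: smp_rmb() after PageChecked() acts as the acquire side, and smp_wmb() before SetPageChecked() acts as the release side.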