Skip to content

Commit fb7275a

Browse files
Waiman-Long authored and Peter Zijlstra committed
locking/lockdep: Iterate lock_classes directly when reading lockdep files
When dumping lock_classes information via /proc/lockdep, we can't take the lockdep lock as the lock hold time is indeterminate. Iterating over all_lock_classes without holding lock can be dangerous as there is a slight chance that it may branch off to other lists leading to infinite loop or even access invalid memory if changes are made to all_lock_classes list in parallel. To avoid this problem, iteration of lock classes is now done directly on the lock_classes array itself. The lock_classes_in_use bitmap is checked to see if the lock class is being used. To avoid iterating the full array all the times, a new max_lock_class_idx value is added to track the maximum lock_class index that is currently being used. We can theoretically take the lockdep lock for iterating all_lock_classes when other lockdep files (lockdep_stats and lock_stat) are accessed as the lock hold time will be shorter for them. For consistency, they are also modified to iterate the lock_classes array directly. Signed-off-by: Waiman Long <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent b008893 commit fb7275a

File tree

3 files changed

+56
-15
lines changed

3 files changed

+56
-15
lines changed

kernel/locking/lockdep.c

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -183,11 +183,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
183183
static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
184184
unsigned long nr_lock_classes;
185185
unsigned long nr_zapped_classes;
186-
#ifndef CONFIG_DEBUG_LOCKDEP
187-
static
188-
#endif
186+
unsigned long max_lock_class_idx;
189187
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
190-
static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
188+
DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS);
191189

192190
static inline struct lock_class *hlock_class(struct held_lock *hlock)
193191
{
@@ -338,7 +336,7 @@ static inline void lock_release_holdtime(struct held_lock *hlock)
338336
* elements. These elements are linked together by the lock_entry member in
339337
* struct lock_class.
340338
*/
341-
LIST_HEAD(all_lock_classes);
339+
static LIST_HEAD(all_lock_classes);
342340
static LIST_HEAD(free_lock_classes);
343341

344342
/**
@@ -1252,6 +1250,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
12521250
struct lockdep_subclass_key *key;
12531251
struct hlist_head *hash_head;
12541252
struct lock_class *class;
1253+
int idx;
12551254

12561255
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
12571256

@@ -1317,6 +1316,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
13171316
* of classes.
13181317
*/
13191318
list_move_tail(&class->lock_entry, &all_lock_classes);
1319+
idx = class - lock_classes;
1320+
if (idx > max_lock_class_idx)
1321+
max_lock_class_idx = idx;
13201322

13211323
if (verbose(class)) {
13221324
graph_unlock();
@@ -6000,6 +6002,8 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
60006002
WRITE_ONCE(class->name, NULL);
60016003
nr_lock_classes--;
60026004
__clear_bit(class - lock_classes, lock_classes_in_use);
6005+
if (class - lock_classes == max_lock_class_idx)
6006+
max_lock_class_idx--;
60036007
} else {
60046008
WARN_ONCE(true, "%s() failed for class %s\n", __func__,
60056009
class->name);

kernel/locking/lockdep_internals.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,6 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
121121

122122
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
123123

124-
extern struct list_head all_lock_classes;
125124
extern struct lock_chain lock_chains[];
126125

127126
#define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1)
@@ -151,6 +150,10 @@ extern unsigned int nr_large_chain_blocks;
151150

152151
extern unsigned int max_lockdep_depth;
153152
extern unsigned int max_bfs_queue_depth;
153+
extern unsigned long max_lock_class_idx;
154+
155+
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
156+
extern unsigned long lock_classes_in_use[];
154157

155158
#ifdef CONFIG_PROVE_LOCKING
156159
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
@@ -205,7 +208,6 @@ struct lockdep_stats {
205208
};
206209

207210
DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
208-
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
209211

210212
#define __debug_atomic_inc(ptr) \
211213
this_cpu_inc(lockdep_stats.ptr);

kernel/locking/lockdep_proc.c

Lines changed: 43 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -24,14 +24,33 @@
2424

2525
#include "lockdep_internals.h"
2626

27+
/*
28+
* Since iteration of lock_classes is done without holding the lockdep lock,
29+
* it is not safe to iterate all_lock_classes list directly as the iteration
30+
* may branch off to free_lock_classes or the zapped list. Iteration is done
31+
* directly on the lock_classes array by checking the lock_classes_in_use
32+
* bitmap and max_lock_class_idx.
33+
*/
34+
#define iterate_lock_classes(idx, class) \
35+
for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \
36+
idx++, class++)
37+
2738
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
2839
{
29-
return seq_list_next(v, &all_lock_classes, pos);
40+
struct lock_class *class = v;
41+
42+
++class;
43+
*pos = class - lock_classes;
44+
return (*pos > max_lock_class_idx) ? NULL : class;
3045
}
3146

3247
static void *l_start(struct seq_file *m, loff_t *pos)
3348
{
34-
return seq_list_start_head(&all_lock_classes, *pos);
49+
unsigned long idx = *pos;
50+
51+
if (idx > max_lock_class_idx)
52+
return NULL;
53+
return lock_classes + idx;
3554
}
3655

3756
static void l_stop(struct seq_file *m, void *v)
@@ -57,14 +76,16 @@ static void print_name(struct seq_file *m, struct lock_class *class)
5776

5877
static int l_show(struct seq_file *m, void *v)
5978
{
60-
struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
79+
struct lock_class *class = v;
6180
struct lock_list *entry;
6281
char usage[LOCK_USAGE_CHARS];
82+
int idx = class - lock_classes;
6383

64-
if (v == &all_lock_classes) {
84+
if (v == lock_classes)
6585
seq_printf(m, "all lock classes:\n");
86+
87+
if (!test_bit(idx, lock_classes_in_use))
6688
return 0;
67-
}
6889

6990
seq_printf(m, "%p", class->key);
7091
#ifdef CONFIG_DEBUG_LOCKDEP
@@ -220,8 +241,11 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
220241

221242
#ifdef CONFIG_PROVE_LOCKING
222243
struct lock_class *class;
244+
unsigned long idx;
223245

224-
list_for_each_entry(class, &all_lock_classes, lock_entry) {
246+
iterate_lock_classes(idx, class) {
247+
if (!test_bit(idx, lock_classes_in_use))
248+
continue;
225249

226250
if (class->usage_mask == 0)
227251
nr_unused++;
@@ -254,6 +278,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
254278

255279
sum_forward_deps += lockdep_count_forward_deps(class);
256280
}
281+
257282
#ifdef CONFIG_DEBUG_LOCKDEP
258283
DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
259284
#endif
@@ -345,6 +370,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
345370
seq_printf(m, " max bfs queue depth: %11u\n",
346371
max_bfs_queue_depth);
347372
#endif
373+
seq_printf(m, " max lock class index: %11lu\n",
374+
max_lock_class_idx);
348375
lockdep_stats_debug_show(m);
349376
seq_printf(m, " debug_locks: %11u\n",
350377
debug_locks);
@@ -622,12 +649,16 @@ static int lock_stat_open(struct inode *inode, struct file *file)
622649
if (!res) {
623650
struct lock_stat_data *iter = data->stats;
624651
struct seq_file *m = file->private_data;
652+
unsigned long idx;
625653

626-
list_for_each_entry(class, &all_lock_classes, lock_entry) {
654+
iterate_lock_classes(idx, class) {
655+
if (!test_bit(idx, lock_classes_in_use))
656+
continue;
627657
iter->class = class;
628658
iter->stats = lock_stats(class);
629659
iter++;
630660
}
661+
631662
data->iter_end = iter;
632663

633664
sort(data->stats, data->iter_end - data->stats,
@@ -645,6 +676,7 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
645676
size_t count, loff_t *ppos)
646677
{
647678
struct lock_class *class;
679+
unsigned long idx;
648680
char c;
649681

650682
if (count) {
@@ -654,8 +686,11 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf,
654686
if (c != '0')
655687
return count;
656688

657-
list_for_each_entry(class, &all_lock_classes, lock_entry)
689+
iterate_lock_classes(idx, class) {
690+
if (!test_bit(idx, lock_classes_in_use))
691+
continue;
658692
clear_lock_stats(class);
693+
}
659694
}
660695
return count;
661696
}

0 commit comments

Comments
 (0)