Skip to content

Commit f0dbf6f

Browse files
beaubelgrave authored and rostedt committed
tracing/user_events: Track refcount consistently via put/get
Various parts of the code today track user_event's refcnt field directly via a refcount_add/dec. This makes it hard to modify the behavior of the last reference decrement in all code paths consistently. For example, in the future we will auto-delete events upon the last reference going away. This last reference could happen in many places, but we want it to be consistently handled. Add user_event_get() and user_event_put() for the add/dec. Update all places where direct refcounts are being used to utilize these new functions. In each location pass if event_mutex is locked or not. This allows us to drop events automatically in future patches clearly. Ensure when caller states the lock is held, it really is (or is not) held. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Beau Belgrave <[email protected]> Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent b08d725 commit f0dbf6f

File tree

1 file changed

+41
-28
lines changed

1 file changed

+41
-28
lines changed

kernel/trace/trace_events_user.c

Lines changed: 41 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -177,6 +177,28 @@ static u32 user_event_key(char *name)
177177
return jhash(name, strlen(name), 0);
178178
}
179179

180+
static struct user_event *user_event_get(struct user_event *user)
181+
{
182+
refcount_inc(&user->refcnt);
183+
184+
return user;
185+
}
186+
187+
static void user_event_put(struct user_event *user, bool locked)
188+
{
189+
#ifdef CONFIG_LOCKDEP
190+
if (locked)
191+
lockdep_assert_held(&event_mutex);
192+
else
193+
lockdep_assert_not_held(&event_mutex);
194+
#endif
195+
196+
if (unlikely(!user))
197+
return;
198+
199+
refcount_dec(&user->refcnt);
200+
}
201+
180202
static void user_event_group_destroy(struct user_event_group *group)
181203
{
182204
kfree(group->system_name);
@@ -228,12 +250,13 @@ static struct user_event_group *user_event_group_create(void)
228250
return NULL;
229251
};
230252

231-
static void user_event_enabler_destroy(struct user_event_enabler *enabler)
253+
static void user_event_enabler_destroy(struct user_event_enabler *enabler,
254+
bool locked)
232255
{
233256
list_del_rcu(&enabler->mm_enablers_link);
234257

235258
/* No longer tracking the event via the enabler */
236-
refcount_dec(&enabler->event->refcnt);
259+
user_event_put(enabler->event, locked);
237260

238261
kfree(enabler);
239262
}
@@ -295,7 +318,7 @@ static void user_event_enabler_fault_fixup(struct work_struct *work)
295318

296319
/* User asked for enabler to be removed during fault */
297320
if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
298-
user_event_enabler_destroy(enabler);
321+
user_event_enabler_destroy(enabler, true);
299322
goto out;
300323
}
301324

@@ -470,14 +493,12 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig,
470493
if (!enabler)
471494
return false;
472495

473-
enabler->event = orig->event;
496+
enabler->event = user_event_get(orig->event);
474497
enabler->addr = orig->addr;
475498

476499
/* Only dup part of value (ignore future flags, etc) */
477500
enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
478501

479-
refcount_inc(&enabler->event->refcnt);
480-
481502
/* Enablers not exposed yet, RCU not required */
482503
list_add(&enabler->mm_enablers_link, &mm->enablers);
483504

@@ -594,7 +615,7 @@ static void user_event_mm_destroy(struct user_event_mm *mm)
594615
struct user_event_enabler *enabler, *next;
595616

596617
list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
597-
user_event_enabler_destroy(enabler);
618+
user_event_enabler_destroy(enabler, false);
598619

599620
mmdrop(mm->mm);
600621
kfree(mm);
@@ -749,7 +770,7 @@ static struct user_event_enabler
749770
* exit or run exec(), which includes forks and clones.
750771
*/
751772
if (!*write_result) {
752-
refcount_inc(&enabler->event->refcnt);
773+
user_event_get(user);
753774
list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
754775
}
755776

@@ -1337,10 +1358,8 @@ static struct user_event *find_user_event(struct user_event_group *group,
13371358
*outkey = key;
13381359

13391360
hash_for_each_possible(group->register_table, user, node, key)
1340-
if (!strcmp(EVENT_NAME(user), name)) {
1341-
refcount_inc(&user->refcnt);
1342-
return user;
1343-
}
1361+
if (!strcmp(EVENT_NAME(user), name))
1362+
return user_event_get(user);
13441363

13451364
return NULL;
13461365
}
@@ -1554,12 +1573,12 @@ static int user_event_reg(struct trace_event_call *call,
15541573

15551574
return ret;
15561575
inc:
1557-
refcount_inc(&user->refcnt);
1576+
user_event_get(user);
15581577
update_enable_bit_for(user);
15591578
return 0;
15601579
dec:
15611580
update_enable_bit_for(user);
1562-
refcount_dec(&user->refcnt);
1581+
user_event_put(user, true);
15631582
return 0;
15641583
}
15651584

@@ -1593,7 +1612,7 @@ static int user_event_create(const char *raw_command)
15931612
ret = user_event_parse_cmd(group, name, &user, 0);
15941613

15951614
if (!ret)
1596-
refcount_dec(&user->refcnt);
1615+
user_event_put(user, false);
15971616

15981617
mutex_unlock(&group->reg_mutex);
15991618

@@ -1794,7 +1813,7 @@ static int user_event_parse(struct user_event_group *group, char *name,
17941813

17951814
return 0;
17961815
error:
1797-
refcount_dec(&user->refcnt);
1816+
user_event_put(user, false);
17981817
return ret;
17991818
}
18001819

@@ -1883,7 +1902,7 @@ static int delete_user_event(struct user_event_group *group, char *name)
18831902
if (!user)
18841903
return -ENOENT;
18851904

1886-
refcount_dec(&user->refcnt);
1905+
user_event_put(user, true);
18871906

18881907
if (!user_event_last_ref(user))
18891908
return -EBUSY;
@@ -2042,9 +2061,7 @@ static int user_events_ref_add(struct user_event_file_info *info,
20422061
for (i = 0; i < count; ++i)
20432062
new_refs->events[i] = refs->events[i];
20442063

2045-
new_refs->events[i] = user;
2046-
2047-
refcount_inc(&user->refcnt);
2064+
new_refs->events[i] = user_event_get(user);
20482065

20492066
rcu_assign_pointer(info->refs, new_refs);
20502067

@@ -2158,7 +2175,7 @@ static long user_events_ioctl_reg(struct user_event_file_info *info,
21582175
ret = user_events_ref_add(info, user);
21592176

21602177
/* No longer need parse ref, ref_add either worked or not */
2161-
refcount_dec(&user->refcnt);
2178+
user_event_put(user, false);
21622179

21632180
/* Positive number is index and valid */
21642181
if (ret < 0)
@@ -2307,7 +2324,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
23072324
set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
23082325

23092326
if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2310-
user_event_enabler_destroy(enabler);
2327+
user_event_enabler_destroy(enabler, true);
23112328

23122329
/* Removed at least one */
23132330
ret = 0;
@@ -2365,7 +2382,6 @@ static int user_events_release(struct inode *node, struct file *file)
23652382
struct user_event_file_info *info = file->private_data;
23662383
struct user_event_group *group;
23672384
struct user_event_refs *refs;
2368-
struct user_event *user;
23692385
int i;
23702386

23712387
if (!info)
@@ -2389,12 +2405,9 @@ static int user_events_release(struct inode *node, struct file *file)
23892405
* The underlying user_events are ref counted, and cannot be freed.
23902406
* After this decrement, the user_events may be freed elsewhere.
23912407
*/
2392-
for (i = 0; i < refs->count; ++i) {
2393-
user = refs->events[i];
2408+
for (i = 0; i < refs->count; ++i)
2409+
user_event_put(refs->events[i], false);
23942410

2395-
if (user)
2396-
refcount_dec(&user->refcnt);
2397-
}
23982411
out:
23992412
file->private_data = NULL;
24002413

0 commit comments

Comments
 (0)