Skip to content

Commit d401b72

Browse files
beaubelgrave authored and rostedt committed
tracing/user_events: Use refcount instead of atomic for ref tracking
User processes could open up enough event references to cause rollovers. These could cause use-after-free scenarios, which we do not want. Switching to refcount APIs prevents this, but will leak memory once saturated. Once saturated, user processes can still use the events. This prevents a bad user process from stopping existing telemetry from being emitted. Link: https://lkml.kernel.org/r/[email protected] Link: https://lore.kernel.org/all/[email protected]/ Reported-by: Mathieu Desnoyers <[email protected]> Signed-off-by: Beau Belgrave <[email protected]> Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent e6f89a1 commit d401b72

File tree

1 file changed

+24
-29
lines changed

1 file changed

+24
-29
lines changed

kernel/trace/trace_events_user.c

Lines changed: 24 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#include <linux/uio.h>
1515
#include <linux/ioctl.h>
1616
#include <linux/jhash.h>
17+
#include <linux/refcount.h>
1718
#include <linux/trace_events.h>
1819
#include <linux/tracefs.h>
1920
#include <linux/types.h>
@@ -57,7 +58,7 @@ static DECLARE_BITMAP(page_bitmap, MAX_EVENTS);
5758
* within a file a user_event might be created if it does not
5859
* already exist. These are globally used and their lifetime
5960
* is tied to the refcnt member. These cannot go away until the
60-
* refcnt reaches zero.
61+
* refcnt reaches one.
6162
*/
6263
struct user_event {
6364
struct tracepoint tracepoint;
@@ -67,7 +68,7 @@ struct user_event {
6768
struct hlist_node node;
6869
struct list_head fields;
6970
struct list_head validators;
70-
atomic_t refcnt;
71+
refcount_t refcnt;
7172
int index;
7273
int flags;
7374
int min_size;
@@ -105,6 +106,12 @@ static u32 user_event_key(char *name)
105106
return jhash(name, strlen(name), 0);
106107
}
107108

109+
static __always_inline __must_check
110+
bool user_event_last_ref(struct user_event *user)
111+
{
112+
return refcount_read(&user->refcnt) == 1;
113+
}
114+
108115
static __always_inline __must_check
109116
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
110117
{
@@ -662,7 +669,7 @@ static struct user_event *find_user_event(char *name, u32 *outkey)
662669

663670
hash_for_each_possible(register_table, user, node, key)
664671
if (!strcmp(EVENT_NAME(user), name)) {
665-
atomic_inc(&user->refcnt);
672+
refcount_inc(&user->refcnt);
666673
return user;
667674
}
668675

@@ -876,12 +883,12 @@ static int user_event_reg(struct trace_event_call *call,
876883

877884
return ret;
878885
inc:
879-
atomic_inc(&user->refcnt);
886+
refcount_inc(&user->refcnt);
880887
update_reg_page_for(user);
881888
return 0;
882889
dec:
883890
update_reg_page_for(user);
884-
atomic_dec(&user->refcnt);
891+
refcount_dec(&user->refcnt);
885892
return 0;
886893
}
887894

@@ -907,7 +914,7 @@ static int user_event_create(const char *raw_command)
907914
ret = user_event_parse_cmd(name, &user);
908915

909916
if (!ret)
910-
atomic_dec(&user->refcnt);
917+
refcount_dec(&user->refcnt);
911918

912919
mutex_unlock(&reg_mutex);
913920

@@ -951,14 +958,14 @@ static bool user_event_is_busy(struct dyn_event *ev)
951958
{
952959
struct user_event *user = container_of(ev, struct user_event, devent);
953960

954-
return atomic_read(&user->refcnt) != 0;
961+
return !user_event_last_ref(user);
955962
}
956963

957964
static int user_event_free(struct dyn_event *ev)
958965
{
959966
struct user_event *user = container_of(ev, struct user_event, devent);
960967

961-
if (atomic_read(&user->refcnt) != 0)
968+
if (!user_event_last_ref(user))
962969
return -EBUSY;
963970

964971
return destroy_user_event(user);
@@ -1137,8 +1144,8 @@ static int user_event_parse(char *name, char *args, char *flags,
11371144

11381145
user->index = index;
11391146

1140-
/* Ensure we track ref */
1141-
atomic_inc(&user->refcnt);
1147+
/* Ensure we track self ref and caller ref (2) */
1148+
refcount_set(&user->refcnt, 2);
11421149

11431150
dyn_event_init(&user->devent, &user_event_dops);
11441151
dyn_event_add(&user->devent, &user->call);
@@ -1164,29 +1171,17 @@ static int user_event_parse(char *name, char *args, char *flags,
11641171
static int delete_user_event(char *name)
11651172
{
11661173
u32 key;
1167-
int ret;
11681174
struct user_event *user = find_user_event(name, &key);
11691175

11701176
if (!user)
11711177
return -ENOENT;
11721178

1173-
/* Ensure we are the last ref */
1174-
if (atomic_read(&user->refcnt) != 1) {
1175-
ret = -EBUSY;
1176-
goto put_ref;
1177-
}
1178-
1179-
ret = destroy_user_event(user);
1179+
refcount_dec(&user->refcnt);
11801180

1181-
if (ret)
1182-
goto put_ref;
1183-
1184-
return ret;
1185-
put_ref:
1186-
/* No longer have this ref */
1187-
atomic_dec(&user->refcnt);
1181+
if (!user_event_last_ref(user))
1182+
return -EBUSY;
11881183

1189-
return ret;
1184+
return destroy_user_event(user);
11901185
}
11911186

11921187
/*
@@ -1314,7 +1309,7 @@ static int user_events_ref_add(struct file *file, struct user_event *user)
13141309

13151310
new_refs->events[i] = user;
13161311

1317-
atomic_inc(&user->refcnt);
1312+
refcount_inc(&user->refcnt);
13181313

13191314
rcu_assign_pointer(file->private_data, new_refs);
13201315

@@ -1374,7 +1369,7 @@ static long user_events_ioctl_reg(struct file *file, unsigned long uarg)
13741369
ret = user_events_ref_add(file, user);
13751370

13761371
/* No longer need parse ref, ref_add either worked or not */
1377-
atomic_dec(&user->refcnt);
1372+
refcount_dec(&user->refcnt);
13781373

13791374
/* Positive number is index and valid */
13801375
if (ret < 0)
@@ -1464,7 +1459,7 @@ static int user_events_release(struct inode *node, struct file *file)
14641459
user = refs->events[i];
14651460

14661461
if (user)
1467-
atomic_dec(&user->refcnt);
1462+
refcount_dec(&user->refcnt);
14681463
}
14691464
out:
14701465
file->private_data = NULL;

0 commit comments

Comments (0)