Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions arch/x86/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -332,6 +332,7 @@ config X86
select SCHED_SMT if SMP
select ARCH_SUPPORTS_SCHED_CLUSTER if SMP
select ARCH_SUPPORTS_SCHED_MC if SMP
select HAVE_SINGLE_FTRACE_DIRECT_OPS if X86_64 && DYNAMIC_FTRACE_WITH_DIRECT_CALLS

config INSTRUCTION_DECODER
def_bool y
Expand Down
7 changes: 5 additions & 2 deletions include/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -1288,14 +1288,17 @@ struct bpf_tramp_image {
};

struct bpf_trampoline {
/* hlist for trampoline_table */
struct hlist_node hlist;
/* hlist for trampoline_key_table */
struct hlist_node hlist_key;
/* hlist for trampoline_ip_table */
struct hlist_node hlist_ip;
struct ftrace_ops *fops;
/* serializes access to fields of this trampoline */
struct mutex mutex;
refcount_t refcnt;
u32 flags;
u64 key;
unsigned long ip;
struct {
struct btf_func_model model;
void *addr;
Expand Down
48 changes: 39 additions & 9 deletions include/linux/ftrace.h
Original file line number Diff line number Diff line change
Expand Up @@ -394,9 +394,25 @@ enum ftrace_ops_cmd {
* Negative on failure. The return value is dependent on the
* callback.
*/
typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, unsigned long ip, enum ftrace_ops_cmd cmd);

#ifdef CONFIG_DYNAMIC_FTRACE

#define FTRACE_HASH_DEFAULT_BITS 10

struct ftrace_hash {
unsigned long size_bits;
struct hlist_head *buckets;
unsigned long count;
unsigned long flags;
struct rcu_head rcu;
};

struct ftrace_hash *alloc_ftrace_hash(int size_bits);
void free_ftrace_hash(struct ftrace_hash *hash);
struct ftrace_func_entry *add_hash_entry_direct(struct ftrace_hash *hash,
unsigned long ip, unsigned long direct);

/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
struct ftrace_hash __rcu *notrace_hash;
Expand Down Expand Up @@ -520,11 +536,14 @@ struct ftrace_func_entry {

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
unsigned long ftrace_find_rec_direct(unsigned long ip);
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
int register_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr,
bool free_filters);
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, bool lock_direct_mutex);

int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash);
int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash);
int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock);

void ftrace_stub_direct_tramp(void);

Expand All @@ -534,20 +553,31 @@ static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
return 0;
}
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr)
{
return -ENODEV;
}
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr,
bool free_filters)
{
return -ENODEV;
}
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, bool lock_direct_mutex)
{
return -ENODEV;
}

/*
 * Stub for !CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS: direct calls are not
 * available, so report -ENODEV like the sibling stubs above.
 * Must be static inline — a plain definition in this header would be
 * emitted in every translation unit that includes it and fail to link.
 */
static inline int update_ftrace_direct_add(struct ftrace_ops *ops, struct ftrace_hash *hash)
{
	return -ENODEV;
}

/*
 * Stub for !CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS: direct calls are not
 * available, so report -ENODEV like the sibling stubs above.
 * Must be static inline to avoid multiple-definition link errors when this
 * header is included from more than one translation unit.
 */
static inline int update_ftrace_direct_del(struct ftrace_ops *ops, struct ftrace_hash *hash)
{
	return -ENODEV;
}
/*
 * Stub for !CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS. The enabled branch
 * declares update_ftrace_direct_mod(), so the stub must use the same name
 * (the previous modify_ftrace_direct_hash name would leave callers of
 * update_ftrace_direct_mod() unresolved in !DIRECT_CALLS configs). The
 * bodiless leftover modify_ftrace_direct_nolock() prototype is dropped —
 * that API was replaced by the lock_direct_mutex parameter of
 * modify_ftrace_direct(). Must be static inline for header inclusion.
 */
static inline int update_ftrace_direct_mod(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock)
{
	return -ENODEV;
}
Expand Down
128 changes: 98 additions & 30 deletions kernel/bpf/trampoline.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,19 +24,49 @@ const struct bpf_prog_ops bpf_extension_prog_ops = {
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
static struct hlist_head trampoline_key_table[TRAMPOLINE_TABLE_SIZE];
static struct hlist_head trampoline_ip_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
/* serializes access to trampoline tables */
static DEFINE_MUTEX(trampoline_mutex);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);

static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
#ifdef CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS
static struct bpf_trampoline *direct_ops_ip_lookup(struct ftrace_ops *ops, unsigned long ip)
{
struct bpf_trampoline *tr = ops->private;
struct hlist_head *head_ip;
struct bpf_trampoline *tr;

mutex_lock(&trampoline_mutex);
head_ip = &trampoline_ip_table[hash_64(ip, TRAMPOLINE_HASH_BITS)];
hlist_for_each_entry(tr, head_ip, hlist_ip) {
if (tr->func.addr == (void *) ip)
goto out;
}
tr = NULL;
out:
mutex_unlock(&trampoline_mutex);
return tr;
}
#else
/*
 * Without HAVE_SINGLE_FTRACE_DIRECT_OPS each trampoline owns a private
 * ftrace_ops whose ->private is wired back to the trampoline (see
 * direct_ops_get()), so no ip-based table walk is needed; @ip is unused.
 */
static struct bpf_trampoline *direct_ops_ip_lookup(struct ftrace_ops *ops, unsigned long ip)
{
	return ops->private;
}
#endif /* CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS */

static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, unsigned long ip,
enum ftrace_ops_cmd cmd)
{
struct bpf_trampoline *tr;
int ret = 0;

tr = direct_ops_ip_lookup(ops, ip);
if (!tr)
return -EINVAL;

if (cmd == FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF) {
/* This is called inside register_ftrace_direct_multi(), so
* tr->mutex is already locked.
Expand Down Expand Up @@ -135,15 +165,57 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym)
PAGE_SIZE, true, ksym->name);
}

static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#ifdef CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS
/*
 * Single ftrace_ops shared by every BPF trampoline when the arch selects
 * HAVE_SINGLE_FTRACE_DIRECT_OPS. Because the ops is shared, ops->private
 * cannot point at one trampoline; bpf_tramp_ftrace_ops_func() resolves
 * the trampoline from the callback ip instead (direct_ops_ip_lookup()).
 * File-local only — nothing outside this file references it, so make it
 * static to keep it out of the global namespace.
 */
static struct ftrace_ops direct_ops = {
	.ops_func = bpf_tramp_ftrace_ops_func,
};

/* Point the trampoline at the shared ops; cannot fail. */
static int direct_ops_get(struct bpf_trampoline *tr)
{
	tr->fops = &direct_ops;
	return 0;
}

/* The shared ops carries no per-trampoline state to reset. */
static void direct_ops_clear(struct bpf_trampoline *tr) { }

/* The shared ops outlives individual trampolines; nothing to free. */
static void direct_ops_free(struct bpf_trampoline *tr) { }
#else
static int direct_ops_get(struct bpf_trampoline *tr)
{
tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
if (!tr->fops)
return -1;
tr->fops->private = tr;
tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
return 0;
}

static void direct_ops_clear(struct bpf_trampoline *tr)
{
tr->fops->func = NULL;
tr->fops->trampoline = 0;
}

static void direct_ops_free(struct bpf_trampoline *tr)
{
if (tr->fops) {
ftrace_free_filter(tr->fops);
kfree(tr->fops);
}
}
#endif /* CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS */
#else
static void direct_ops_free(struct bpf_trampoline *tr) { }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

static struct bpf_trampoline *bpf_trampoline_lookup(u64 key, unsigned long ip)
{
struct bpf_trampoline *tr;
struct hlist_head *head;
int i;

mutex_lock(&trampoline_mutex);
head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
hlist_for_each_entry(tr, head, hlist) {
head = &trampoline_key_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
hlist_for_each_entry(tr, head, hlist_key) {
if (tr->key == key) {
refcount_inc(&tr->refcnt);
goto out;
Expand All @@ -153,19 +225,20 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
if (!tr)
goto out;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
if (!tr->fops) {
if (direct_ops_get(tr)) {
kfree(tr);
tr = NULL;
goto out;
}
tr->fops->private = tr;
tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
#endif

tr->key = key;
INIT_HLIST_NODE(&tr->hlist);
hlist_add_head(&tr->hlist, head);
tr->ip = ip;
INIT_HLIST_NODE(&tr->hlist_key);
INIT_HLIST_NODE(&tr->hlist_ip);
hlist_add_head(&tr->hlist_key, head);
head = &trampoline_ip_table[hash_64(ip, TRAMPOLINE_HASH_BITS)];
hlist_add_head(&tr->hlist_ip, head);
refcount_set(&tr->refcnt, 1);
mutex_init(&tr->mutex);
for (i = 0; i < BPF_TRAMP_MAX; i++)
Expand All @@ -181,7 +254,7 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
int ret;

if (tr->func.ftrace_managed)
ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
ret = unregister_ftrace_direct(tr->fops, (unsigned long) ip, (long)old_addr, false);
else
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

Expand All @@ -195,10 +268,7 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad
int ret;

if (tr->func.ftrace_managed) {
if (lock_direct_mutex)
ret = modify_ftrace_direct(tr->fops, (long)new_addr);
else
ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
ret = modify_ftrace_direct(tr->fops, (unsigned long) ip, (long)new_addr, lock_direct_mutex);
} else {
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
}
Expand All @@ -220,8 +290,7 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
}

if (tr->func.ftrace_managed) {
ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
ret = register_ftrace_direct(tr->fops, (long)new_addr);
ret = register_ftrace_direct(tr->fops, (unsigned long)ip, (long)new_addr);
} else {
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
}
Expand Down Expand Up @@ -480,8 +549,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
* trampoline again, and retry register.
*/
/* reset fops->func and fops->trampoline for re-register */
tr->fops->func = NULL;
tr->fops->trampoline = 0;
direct_ops_clear(tr);

/* free im memory and reallocate later */
bpf_tramp_image_free(im);
Expand Down Expand Up @@ -804,7 +872,7 @@ void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
prog->aux->attach_btf_id);

bpf_lsm_find_cgroup_shim(prog, &bpf_func);
tr = bpf_trampoline_lookup(key);
tr = bpf_trampoline_lookup(key, 0);
if (WARN_ON_ONCE(!tr))
return;

Expand All @@ -824,7 +892,7 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
{
struct bpf_trampoline *tr;

tr = bpf_trampoline_lookup(key);
tr = bpf_trampoline_lookup(key, tgt_info->tgt_addr);
if (!tr)
return NULL;

Expand Down Expand Up @@ -860,11 +928,9 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
* fexit progs. The fentry-only trampoline will be freed via
* multiple rcu callbacks.
*/
hlist_del(&tr->hlist);
if (tr->fops) {
ftrace_free_filter(tr->fops);
kfree(tr->fops);
}
hlist_del(&tr->hlist_key);
hlist_del(&tr->hlist_ip);
direct_ops_free(tr);
kfree(tr);
out:
mutex_unlock(&trampoline_mutex);
Expand Down Expand Up @@ -1133,7 +1199,9 @@ static int __init init_trampolines(void)
int i;

for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&trampoline_table[i]);
INIT_HLIST_HEAD(&trampoline_key_table[i]);
for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&trampoline_ip_table[i]);
return 0;
}
late_initcall(init_trampolines);
3 changes: 3 additions & 0 deletions kernel/trace/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,9 @@ config HAVE_DYNAMIC_FTRACE_WITH_REGS
config HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
bool

config HAVE_SINGLE_FTRACE_DIRECT_OPS
bool

config HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
bool

Expand Down
Loading
Loading