diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f9cd2164ed238..bad29fe38a128 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1253,14 +1253,16 @@ struct bpf_tramp_image { }; struct bpf_trampoline { - /* hlist for trampoline_table */ - struct hlist_node hlist; - struct ftrace_ops *fops; + /* hlist for trampoline_key_table */ + struct hlist_node hlist_key; + /* hlist for trampoline_ip_table */ + struct hlist_node hlist_ip; /* serializes access to fields of this trampoline */ struct mutex mutex; refcount_t refcnt; u32 flags; u64 key; + unsigned long ip; struct { struct btf_func_model model; void *addr; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index b672ca15f2655..27b26a87231c2 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -394,9 +394,25 @@ enum ftrace_ops_cmd { * Negative on failure. The return value is dependent on the * callback. */ -typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd); +typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, unsigned long ip, enum ftrace_ops_cmd cmd); #ifdef CONFIG_DYNAMIC_FTRACE + +#define FTRACE_HASH_DEFAULT_BITS 10 + +struct ftrace_hash { + unsigned long size_bits; + struct hlist_head *buckets; + unsigned long count; + unsigned long flags; + struct rcu_head rcu; +}; + +struct ftrace_hash *alloc_ftrace_hash(int size_bits); +void free_ftrace_hash(struct ftrace_hash *hash); +struct ftrace_func_entry *add_hash_entry_direct(struct ftrace_hash *hash, + unsigned long ip, unsigned long direct); + /* The hash used to know what functions callbacks trace */ struct ftrace_ops_hash { struct ftrace_hash __rcu *notrace_hash; @@ -441,9 +457,6 @@ struct ftrace_ops { struct list_head subop_list; ftrace_ops_func_t ops_func; struct ftrace_ops *managed; -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS - unsigned long direct_call; -#endif #endif }; @@ -520,11 +533,14 @@ struct ftrace_func_entry { #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS unsigned long 
ftrace_find_rec_direct(unsigned long ip); -int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr); -int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, +int register_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr); +int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, bool free_filters); -int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr); -int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr); +int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, bool lock_direct_mutex); + +int register_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash); +int unregister_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash); +int modify_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock); void ftrace_stub_direct_tramp(void); @@ -534,20 +550,31 @@ static inline unsigned long ftrace_find_rec_direct(unsigned long ip) { return 0; } -static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) +static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr) { return -ENODEV; } -static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, +static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, bool free_filters) { return -ENODEV; } -static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) +static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, bool lock_direct_mutex) { return -ENODEV; } -static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr) + +static inline int register_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash) +{ + return -ENODEV; +} + +static inline int unregister_ftrace_direct_hash(struct ftrace_ops 
*ops, struct ftrace_hash *hash) +{ + return -ENODEV; +} + +static inline int modify_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock) { return -ENODEV; } diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 0e364614c3a29..e6a0e7b20bb62 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -24,19 +24,42 @@ const struct bpf_prog_ops bpf_extension_prog_ops = { #define TRAMPOLINE_HASH_BITS 10 #define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS) -static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE]; +static struct hlist_head trampoline_key_table[TRAMPOLINE_TABLE_SIZE]; +static struct hlist_head trampoline_ip_table[TRAMPOLINE_TABLE_SIZE]; -/* serializes access to trampoline_table */ +/* serializes access to trampoline tables */ static DEFINE_MUTEX(trampoline_mutex); #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex); -static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd) +static struct bpf_trampoline *bpf_trampoline_ip_lookup(unsigned long ip) { - struct bpf_trampoline *tr = ops->private; + struct hlist_head *head_ip; + struct bpf_trampoline *tr; + + mutex_lock(&trampoline_mutex); + head_ip = &trampoline_ip_table[hash_64(ip, TRAMPOLINE_HASH_BITS)]; + hlist_for_each_entry(tr, head_ip, hlist_ip) { + if (tr->func.addr == (void *) ip) + goto out; + } + tr = NULL; +out: + mutex_unlock(&trampoline_mutex); + return tr; +} + +static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, unsigned long ip, + enum ftrace_ops_cmd cmd) +{ + struct bpf_trampoline *tr; int ret = 0; + tr = bpf_trampoline_ip_lookup(ip); + if (!tr) + return -EINVAL; + if (cmd == FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF) { /* This is called inside register_ftrace_direct_multi(), so * tr->mutex is already locked. 
@@ -135,15 +158,15 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym) PAGE_SIZE, true, ksym->name); } -static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) +static struct bpf_trampoline *bpf_trampoline_lookup(u64 key, unsigned long ip) { struct bpf_trampoline *tr; struct hlist_head *head; int i; mutex_lock(&trampoline_mutex); - head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)]; - hlist_for_each_entry(tr, head, hlist) { + head = &trampoline_key_table[hash_64(key, TRAMPOLINE_HASH_BITS)]; + hlist_for_each_entry(tr, head, hlist_key) { if (tr->key == key) { refcount_inc(&tr->refcnt); goto out; @@ -152,20 +175,14 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) goto out; -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS - tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL); - if (!tr->fops) { - kfree(tr); - tr = NULL; - goto out; - } - tr->fops->private = tr; - tr->fops->ops_func = bpf_tramp_ftrace_ops_func; -#endif tr->key = key; - INIT_HLIST_NODE(&tr->hlist); - hlist_add_head(&tr->hlist, head); + tr->ip = ip; + INIT_HLIST_NODE(&tr->hlist_key); + INIT_HLIST_NODE(&tr->hlist_ip); + hlist_add_head(&tr->hlist_key, head); + head = &trampoline_ip_table[hash_64(ip, TRAMPOLINE_HASH_BITS)]; + hlist_add_head(&tr->hlist_ip, head); refcount_set(&tr->refcnt, 1); mutex_init(&tr->mutex); for (i = 0; i < BPF_TRAMP_MAX; i++) @@ -175,13 +192,19 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key) return tr; } +static struct ftrace_ops direct_ops = { +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS + .ops_func = bpf_tramp_ftrace_ops_func, +#endif +}; + static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr) { void *ip = tr->func.addr; int ret; if (tr->func.ftrace_managed) - ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false); + ret = unregister_ftrace_direct(&direct_ops, (unsigned long) ip, (long)old_addr, false); else ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, 
NULL); @@ -195,10 +218,7 @@ static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad int ret; if (tr->func.ftrace_managed) { - if (lock_direct_mutex) - ret = modify_ftrace_direct(tr->fops, (long)new_addr); - else - ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr); + ret = modify_ftrace_direct(&direct_ops, (unsigned long) ip, (long)new_addr, lock_direct_mutex); } else { ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr); } @@ -213,15 +233,11 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr) int ret; faddr = ftrace_location((unsigned long)ip); - if (faddr) { - if (!tr->fops) - return -ENOTSUPP; + if (faddr) tr->func.ftrace_managed = true; - } if (tr->func.ftrace_managed) { - ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1); - ret = register_ftrace_direct(tr->fops, (long)new_addr); + ret = register_ftrace_direct(&direct_ops, (unsigned long)ip, (long)new_addr); } else { ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr); } @@ -479,9 +495,6 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the * trampoline again, and retry register. */ - /* reset fops->func and fops->trampoline for re-register */ - tr->fops->func = NULL; - tr->fops->trampoline = 0; /* free im memory and reallocate later */ bpf_tramp_image_free(im); @@ -804,7 +817,7 @@ void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog) prog->aux->attach_btf_id); bpf_lsm_find_cgroup_shim(prog, &bpf_func); - tr = bpf_trampoline_lookup(key); + tr = bpf_trampoline_lookup(key, 0); if (WARN_ON_ONCE(!tr)) return; @@ -824,7 +837,7 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key, { struct bpf_trampoline *tr; - tr = bpf_trampoline_lookup(key); + tr = bpf_trampoline_lookup(key, tgt_info->tgt_addr); if (!tr) return NULL; @@ -860,11 +873,8 @@ void bpf_trampoline_put(struct bpf_trampoline *tr) * fexit progs. 
The fentry-only trampoline will be freed via * multiple rcu callbacks. */ - hlist_del(&tr->hlist); - if (tr->fops) { - ftrace_free_filter(tr->fops); - kfree(tr->fops); - } + hlist_del(&tr->hlist_key); + hlist_del(&tr->hlist_ip); kfree(tr); out: mutex_unlock(&trampoline_mutex); @@ -1139,7 +1149,9 @@ static int __init init_trampolines(void) int i; for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++) - INIT_HLIST_HEAD(&trampoline_table[i]); + INIT_HLIST_HEAD(&trampoline_key_table[i]); + for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++) + INIT_HLIST_HEAD(&trampoline_ip_table[i]); return 0; } late_initcall(init_trampolines); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4203fad56b6c5..943feabdd5e63 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -68,7 +68,6 @@ }) /* hash bits for specific function selection */ -#define FTRACE_HASH_DEFAULT_BITS 10 #define FTRACE_HASH_MAX_BITS 12 #ifdef CONFIG_DYNAMIC_FTRACE @@ -1189,8 +1188,8 @@ static void __add_hash_entry(struct ftrace_hash *hash, hash->count++; } -static struct ftrace_func_entry * -add_hash_entry(struct ftrace_hash *hash, unsigned long ip) +struct ftrace_func_entry * +add_hash_entry_direct(struct ftrace_hash *hash, unsigned long ip, unsigned long direct) { struct ftrace_func_entry *entry; @@ -1199,11 +1198,18 @@ add_hash_entry(struct ftrace_hash *hash, unsigned long ip) return NULL; entry->ip = ip; + entry->direct = direct; __add_hash_entry(hash, entry); return entry; } +static struct ftrace_func_entry * +add_hash_entry(struct ftrace_hash *hash, unsigned long ip) +{ + return add_hash_entry_direct(hash, ip, 0); +} + static void free_hash_entry(struct ftrace_hash *hash, struct ftrace_func_entry *entry) @@ -1262,7 +1268,7 @@ static void clear_ftrace_mod_list(struct list_head *head) mutex_unlock(&ftrace_lock); } -static void free_ftrace_hash(struct ftrace_hash *hash) +void free_ftrace_hash(struct ftrace_hash *hash) { if (!hash || hash == EMPTY_HASH) return; @@ -1302,7 +1308,7 @@ void 
ftrace_free_filter(struct ftrace_ops *ops) } EXPORT_SYMBOL_GPL(ftrace_free_filter); -static struct ftrace_hash *alloc_ftrace_hash(int size_bits) +struct ftrace_hash *alloc_ftrace_hash(int size_bits) { struct ftrace_hash *hash; int size; @@ -1376,7 +1382,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) size = 1 << hash->size_bits; for (i = 0; i < size; i++) { hlist_for_each_entry(entry, &hash->buckets[i], hlist) { - if (add_hash_entry(new_hash, entry->ip) == NULL) + if (add_hash_entry_direct(new_hash, entry->ip, entry->direct) == NULL) goto free_hash; } } @@ -2034,7 +2040,7 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, */ if (!ops->ops_func) return -EBUSY; - ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); + ret = ops->ops_func(ops, rec->ip, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF); if (ret) return ret; } else if (is_ipmodify) { @@ -2587,16 +2593,6 @@ unsigned long ftrace_find_rec_direct(unsigned long ip) return entry->direct; } -static void call_direct_funcs(unsigned long ip, unsigned long pip, - struct ftrace_ops *ops, struct ftrace_regs *fregs) -{ - unsigned long addr = READ_ONCE(ops->direct_call); - - if (!addr) - return; - - arch_ftrace_set_direct_caller(fregs, addr); -} #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ /** @@ -5929,28 +5925,24 @@ static int check_direct_multi(struct ftrace_ops *ops) return 0; } -static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr) +static void register_ftrace_direct_cb(struct rcu_head *rhp) { - struct ftrace_func_entry *entry, *del; - int size, i; + struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu); - size = 1 << hash->size_bits; - for (i = 0; i < size; i++) { - hlist_for_each_entry(entry, &hash->buckets[i], hlist) { - del = __ftrace_lookup_ip(direct_functions, entry->ip); - if (del && del->direct == addr) { - remove_hash_entry(direct_functions, del); - kfree(del); - } - } - } + free_ftrace_hash(fhp); } 
-static void register_ftrace_direct_cb(struct rcu_head *rhp) +static struct ftrace_hash *hash_from_ip(unsigned long ip, unsigned long addr) { - struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu); + struct ftrace_hash *hash; - free_ftrace_hash(fhp); + ip = ftrace_location(ip); + if (!ip) + return NULL; + hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); + if (!hash || !add_hash_entry_direct(hash, ip, addr)) + return NULL; + return hash; } /** @@ -5975,79 +5967,192 @@ static void register_ftrace_direct_cb(struct rcu_head *rhp) * -ENODEV - @ip does not point to a ftrace nop location (or not supported) * -ENOMEM - There was an allocation failure. */ -int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) +int register_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr) { - struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL; - struct ftrace_func_entry *entry, *new; - int err = -EBUSY, size, i; + struct ftrace_hash *hash; + int err; - if (ops->func || ops->trampoline) - return -EINVAL; - if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) - return -EINVAL; - if (ops->flags & FTRACE_OPS_FL_ENABLED) - return -EINVAL; + hash = hash_from_ip(ip, addr); + if (!hash) + return -ENOMEM; - hash = ops->func_hash->filter_hash; - if (ftrace_hash_empty(hash)) - return -EINVAL; + err = register_ftrace_direct_hash(ops, hash); + free_ftrace_hash(hash); + return err; +} +EXPORT_SYMBOL_GPL(register_ftrace_direct); - mutex_lock(&direct_mutex); +/** + * unregister_ftrace_direct - Remove calls to custom trampoline + * previously registered by register_ftrace_direct for @ops object. 
+ * @ops: The address of the struct ftrace_ops object + * @addr: The address of the direct function that is called by the @ops functions + * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise + * + * This is used to remove a direct calls to @addr from the nop locations + * of the functions registered in @ops (with by ftrace_set_filter_ip + * function). + * + * Returns: + * 0 on success + * -EINVAL - The @ops object was not properly registered. + */ +int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, + bool free_filters) +{ + struct ftrace_hash *hash; + int err; - /* Make sure requested entries are not already registered.. */ - size = 1 << hash->size_bits; - for (i = 0; i < size; i++) { - hlist_for_each_entry(entry, &hash->buckets[i], hlist) { - if (ftrace_find_rec_direct(entry->ip)) - goto out_unlock; - } - } + hash = hash_from_ip(ip, addr); + if (!hash) + return -ENOMEM; - err = -ENOMEM; + err = unregister_ftrace_direct_hash(ops, hash); + free_ftrace_hash(hash); + if (free_filters) + ftrace_free_filter(ops); + return err; +} +EXPORT_SYMBOL_GPL(unregister_ftrace_direct); - /* Make a copy hash to place the new and the old entries in */ - size = hash->count + direct_functions->count; - size = fls(size); - if (size > FTRACE_HASH_MAX_BITS) - size = FTRACE_HASH_MAX_BITS; - new_hash = alloc_ftrace_hash(size); - if (!new_hash) - goto out_unlock; +/** + * modify_ftrace_direct - Modify an existing direct 'multi' call + * to call something else + * @ops: The address of the struct ftrace_ops object + * @addr: The address of the new trampoline to call at @ops functions + * + * This is used to unregister currently registered direct caller and + * register new one @addr on functions registered in @ops object. + * + * Note there's window between ftrace_shutdown and ftrace_startup calls + * where there will be no callbacks called. + * + * Returns: zero on success. 
Non zero on error, which includes: + * -EINVAL - The @ops object was not properly registered. + */ +int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long ip, unsigned long addr, bool lock_direct_mutex) +{ + struct ftrace_hash *hash; + int err; + + hash = hash_from_ip(ip, addr); + if (!hash) + return -ENOMEM; + + err = modify_ftrace_direct_hash(ops, hash, lock_direct_mutex); + free_ftrace_hash(hash); + return err; +} +EXPORT_SYMBOL_GPL(modify_ftrace_direct); - /* Now copy over the existing direct entries */ - size = 1 << direct_functions->size_bits; +static unsigned long hash_count(struct ftrace_hash *hash) +{ + return hash ? hash->count : 0; +} + +/** + * hash_add - adds two struct ftrace_hash and returns the result + * @a: struct ftrace_hash object + * @b: struct ftrace_hash object + * + * Returns struct ftrace_hash object on success, NULL on error. + */ +static struct ftrace_hash *hash_add(struct ftrace_hash *a, struct ftrace_hash *b) +{ + struct ftrace_func_entry *entry; + struct ftrace_hash *add; + int size, i; + + size = hash_count(a) + hash_count(b); + if (size > 32) + size = 32; + + add = alloc_and_copy_ftrace_hash(fls(size), a); + if (!add) + goto error; + + size = 1 << b->size_bits; for (i = 0; i < size; i++) { - hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) { - new = add_hash_entry(new_hash, entry->ip); - if (!new) - goto out_unlock; - new->direct = entry->direct; + hlist_for_each_entry(entry, &b->buckets[i], hlist) { + if (add_hash_entry_direct(add, entry->ip, entry->direct) == NULL) + goto error; } } + return add; + + error: + free_ftrace_hash(add); + return NULL; +} + +static void call_direct_funcs_hash(unsigned long ip, unsigned long pip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + unsigned long addr; + + addr = ftrace_find_rec_direct(ip); + if (!addr) + return; + + arch_ftrace_set_direct_caller(fregs, addr); +} + +int register_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash) +{ + struct 
ftrace_hash *filter_hash = NULL, *new_hash = NULL, *free_hash = NULL; + struct ftrace_func_entry *entry; + int i, size, err = -EBUSY; + bool reg; - /* ... and add the new entries */ + if (!hash_count(hash)) + return 0; + + mutex_lock(&direct_mutex); + + /* Make sure requested entry is not already registered. */ size = 1 << hash->size_bits; for (i = 0; i < size; i++) { hlist_for_each_entry(entry, &hash->buckets[i], hlist) { - new = add_hash_entry(new_hash, entry->ip); - if (!new) + if (__ftrace_lookup_ip(direct_functions, entry->ip)) goto out_unlock; - /* Update both the copy and the hash entry */ - new->direct = addr; - entry->direct = addr; } } + filter_hash = ops->func_hash ? ops->func_hash->filter_hash : NULL; + + /* If there's nothing in filter_hash we need to register the ops. */ + reg = hash_count(filter_hash) == 0; + if (reg) { + if (ops->func || ops->trampoline) + goto out_unlock; + if (ops->flags & FTRACE_OPS_FL_ENABLED) + goto out_unlock; + } + + filter_hash = hash_add(filter_hash, hash); + if (!filter_hash) + goto out_unlock; + + new_hash = hash_add(direct_functions, hash); + if (!new_hash) + goto out_unlock; + free_hash = direct_functions; rcu_assign_pointer(direct_functions, new_hash); new_hash = NULL; - ops->func = call_direct_funcs; - ops->flags = MULTI_FLAGS; - ops->trampoline = FTRACE_REGS_ADDR; - ops->direct_call = addr; + if (reg) { + ops->func = call_direct_funcs_hash; + ops->flags = MULTI_FLAGS; + ops->trampoline = FTRACE_REGS_ADDR; + ops->local_hash.filter_hash = filter_hash; - err = register_ftrace_function_nolock(ops); + err = register_ftrace_function_nolock(ops); + if (!err) + filter_hash = NULL; + } else { + err = ftrace_update_ops(ops, filter_hash, EMPTY_HASH); + } out_unlock: mutex_unlock(&direct_mutex); @@ -6055,76 +6160,140 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) if (free_hash && free_hash != EMPTY_HASH) call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb); - if (new_hash) - free_ftrace_hash(new_hash); + 
if (filter_hash) + free_ftrace_hash(filter_hash); return err; } -EXPORT_SYMBOL_GPL(register_ftrace_direct); +EXPORT_SYMBOL_GPL(register_ftrace_direct_hash); /** - * unregister_ftrace_direct - Remove calls to custom trampoline - * previously registered by register_ftrace_direct for @ops object. - * @ops: The address of the struct ftrace_ops object - * @addr: The address of the direct function that is called by the @ops functions - * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise - * - * This is used to remove a direct calls to @addr from the nop locations - * of the functions registered in @ops (with by ftrace_set_filter_ip - * function). + * hash_sub - substracts @b from @a and returns the result + * @a: struct ftrace_hash object + * @b: struct ftrace_hash object * - * Returns: - * 0 on success - * -EINVAL - The @ops object was not properly registered. + * Returns struct ftrace_hash object on success, NULL on error. */ -int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr, - bool free_filters) +static struct ftrace_hash *hash_sub(struct ftrace_hash *a, struct ftrace_hash *b) { - struct ftrace_hash *hash = ops->func_hash->filter_hash; - int err; + struct ftrace_func_entry *entry, *del; + struct ftrace_hash *sub; + int size, i; + + sub = alloc_and_copy_ftrace_hash(a->size_bits, a); + if (!sub) + goto error; + + size = 1 << b->size_bits; + for (i = 0; i < size; i++) { + hlist_for_each_entry(entry, &b->buckets[i], hlist) { + del = __ftrace_lookup_ip(sub, entry->ip); + if (WARN_ON_ONCE(!del)) + goto error; + remove_hash_entry(sub, del); + kfree(del); + } + } + return sub; + + error: + free_ftrace_hash(sub); + return NULL; +} +int unregister_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash) +{ + struct ftrace_hash *new_hash = NULL, *filter_hash = NULL, *free_hash = NULL; + struct ftrace_func_entry *del, *entry; + unsigned long size, i; + int err = -EINVAL; + + if (!hash_count(hash)) + return 0; 
if (check_direct_multi(ops)) return -EINVAL; if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) return -EINVAL; + if (direct_functions == EMPTY_HASH) + return -EINVAL; mutex_lock(&direct_mutex); - err = unregister_ftrace_function(ops); - remove_direct_functions_hash(hash, addr); - mutex_unlock(&direct_mutex); - /* cleanup for possible another register call */ - ops->func = NULL; - ops->trampoline = 0; + /* Make sure requested entries are already registered. */ + size = 1 << hash->size_bits; + for (i = 0; i < size; i++) { + hlist_for_each_entry(entry, &hash->buckets[i], hlist) { + del = __ftrace_lookup_ip(direct_functions, entry->ip); + if (!del || del->direct != entry->direct) + goto out_unlock; + } + } - if (free_filters) + filter_hash = hash_sub(ops->func_hash->filter_hash, hash); + if (!filter_hash) + goto out_unlock; + + new_hash = hash_sub(direct_functions, hash); + if (!new_hash) + goto out_unlock; + + /* If there's nothing left, we need to unregister the ops. */ + if (ftrace_hash_empty(filter_hash)) { + err = unregister_ftrace_function(ops); + /* cleanup for possible another register call */ + ops->func = NULL; + ops->trampoline = 0; ftrace_free_filter(ops); + ops->func_hash->filter_hash = NULL; + } else { + err = ftrace_update_ops(ops, filter_hash, EMPTY_HASH); + } + + free_hash = direct_functions; + rcu_assign_pointer(direct_functions, new_hash); + + out_unlock: + mutex_unlock(&direct_mutex); + + if (free_hash && free_hash != EMPTY_HASH) + call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb); + + if (filter_hash) + free_ftrace_hash(filter_hash); + return err; } -EXPORT_SYMBOL_GPL(unregister_ftrace_direct); +EXPORT_SYMBOL_GPL(unregister_ftrace_direct_hash); -static int -__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) +int modify_ftrace_direct_hash(struct ftrace_ops *ops, struct ftrace_hash *hash, bool do_direct_lock) { - struct ftrace_hash *hash; - struct ftrace_func_entry *entry, *iter; + struct ftrace_func_entry *entry, *tmp; static 
struct ftrace_ops tmp_ops = { .func = ftrace_stub, .flags = FTRACE_OPS_FL_STUB, }; - int i, size; + unsigned long size, i; int err; - lockdep_assert_held_once(&direct_mutex); + if (!hash_count(hash)) + return 0; + if (check_direct_multi(ops)) + return -EINVAL; + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) + return -EINVAL; + if (direct_functions == EMPTY_HASH) + return -EINVAL; + + if (do_direct_lock) + mutex_lock(&direct_mutex); /* Enable the tmp_ops to have the same functions as the direct ops */ ftrace_ops_init(&tmp_ops); tmp_ops.func_hash = ops->func_hash; - tmp_ops.direct_call = addr; err = register_ftrace_function_nolock(&tmp_ops); if (err) - return err; + goto unlock; /* * Now the ftrace_ops_list_func() is called to do the direct callers. @@ -6132,86 +6301,28 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) */ mutex_lock(&ftrace_lock); - hash = ops->func_hash->filter_hash; size = 1 << hash->size_bits; for (i = 0; i < size; i++) { - hlist_for_each_entry(iter, &hash->buckets[i], hlist) { - entry = __ftrace_lookup_ip(direct_functions, iter->ip); - if (!entry) + hlist_for_each_entry(entry, &hash->buckets[i], hlist) { + tmp = __ftrace_lookup_ip(direct_functions, entry->ip); + if (!tmp) continue; - entry->direct = addr; + tmp->direct = entry->direct; } } - /* Prevent store tearing if a trampoline concurrently accesses the value */ - WRITE_ONCE(ops->direct_call, addr); mutex_unlock(&ftrace_lock); /* Removing the tmp_ops will add the updated direct callers to the functions */ unregister_ftrace_function(&tmp_ops); +unlock: + if (do_direct_lock) + mutex_unlock(&direct_mutex); return err; } +EXPORT_SYMBOL_GPL(modify_ftrace_direct_hash); -/** - * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call - * to call something else - * @ops: The address of the struct ftrace_ops object - * @addr: The address of the new trampoline to call at @ops functions - * - * This is used to unregister currently registered direct caller and - * register 
new one @addr on functions registered in @ops object. - * - * Note there's window between ftrace_shutdown and ftrace_startup calls - * where there will be no callbacks called. - * - * Caller should already have direct_mutex locked, so we don't lock - * direct_mutex here. - * - * Returns: zero on success. Non zero on error, which includes: - * -EINVAL - The @ops object was not properly registered. - */ -int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr) -{ - if (check_direct_multi(ops)) - return -EINVAL; - if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) - return -EINVAL; - - return __modify_ftrace_direct(ops, addr); -} -EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock); - -/** - * modify_ftrace_direct - Modify an existing direct 'multi' call - * to call something else - * @ops: The address of the struct ftrace_ops object - * @addr: The address of the new trampoline to call at @ops functions - * - * This is used to unregister currently registered direct caller and - * register new one @addr on functions registered in @ops object. - * - * Note there's window between ftrace_shutdown and ftrace_startup calls - * where there will be no callbacks called. - * - * Returns: zero on success. Non zero on error, which includes: - * -EINVAL - The @ops object was not properly registered. 
- */ -int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr) -{ - int err; - - if (check_direct_multi(ops)) - return -EINVAL; - if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) - return -EINVAL; - - mutex_lock(&direct_mutex); - err = __modify_ftrace_direct(ops, addr); - mutex_unlock(&direct_mutex); - return err; -} -EXPORT_SYMBOL_GPL(modify_ftrace_direct); #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ /** @@ -8635,7 +8746,7 @@ static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops) if (!op->ops_func) return -EBUSY; - ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); + ret = op->ops_func(op, ip, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER); if (ret) return ret; } @@ -8682,7 +8793,7 @@ static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops) /* The cleanup is optional, ignore any errors */ if (found_op && op->ops_func) - op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); + op->ops_func(op, ip, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER); } } mutex_unlock(&direct_mutex); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index bd084953a98be..74ef7755f361a 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -899,14 +899,6 @@ enum { FTRACE_HASH_FL_MOD = (1 << 0), }; -struct ftrace_hash { - unsigned long size_bits; - struct hlist_head *buckets; - unsigned long count; - unsigned long flags; - struct rcu_head rcu; -}; - struct ftrace_func_entry * ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip); diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index d88c44f1dfa55..37f5eb1f252b9 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -1135,8 +1135,7 @@ trace_selftest_startup_function_graph(struct tracer *trace, * Register direct function together with graph tracer * and make sure we get graph trace. 
*/ - ftrace_set_filter_ip(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, 0, 0); - ret = register_ftrace_direct(&direct, + ret = register_ftrace_direct(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, (unsigned long)ftrace_stub_direct_tramp); if (ret) goto out; @@ -1159,7 +1158,7 @@ trace_selftest_startup_function_graph(struct tracer *trace, unregister_ftrace_graph(&fgraph_ops); - ret = unregister_ftrace_direct(&direct, + ret = unregister_ftrace_direct(&direct, (unsigned long)DYN_FTRACE_TEST_NAME, (unsigned long)ftrace_stub_direct_tramp, true); if (ret)