@@ -547,6 +547,65 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src
 	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
 }
 
+#ifdef CONFIG_BPF_SYSCALL
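+/* Copy a per-CPU map value out to the user buffer @value. With BPF_F_CPU,
+ * the target CPU is taken from the upper 32 bits of @flags and only that
+ * CPU's value is copied; otherwise one value per possible CPU is written,
+ * advancing @size bytes through the buffer per CPU.
+ */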
+static inline void bpf_percpu_copy_to_user(struct bpf_map *map, void __percpu *pptr, void *value,
+					   u32 size, u64 flags)
+{
+	int current_cpu = raw_smp_processor_id();
+	int cpu, off = 0;
+
+	if (flags & BPF_F_CPU) {
+		cpu = flags >> 32;
+		copy_map_value_long(map, value, cpu != current_cpu ? per_cpu_ptr(pptr, cpu) :
+								     this_cpu_ptr(pptr));
+		check_and_init_map_value(map, value);
+	} else {
+		for_each_possible_cpu(cpu) {
+			copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
+			check_and_init_map_value(map, value + off);
+			off += size;
+		}
+	}
+}
+
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+
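+/* Copy the user-provided @value into a per-CPU map slot. With BPF_F_CPU,
+ * only the CPU named in the upper 32 bits of @flags is updated; with
+ * BPF_F_ALL_CPUS, the same value is replicated to every possible CPU.
+ */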
+static inline void bpf_percpu_copy_from_user(struct bpf_map *map, void __percpu *pptr, void *value,
+					     u32 size, u64 flags)
+{
+	int current_cpu = raw_smp_processor_id();
+	int cpu, off = 0;
+	void *ptr;
+
+	if (flags & BPF_F_CPU) {
+		cpu = flags >> 32;
+		ptr = cpu == current_cpu ? this_cpu_ptr(pptr) : per_cpu_ptr(pptr, cpu);
+		copy_map_value_long(map, ptr, value);
+		bpf_obj_free_fields(map->record, ptr);
+	} else {
+		for_each_possible_cpu(cpu) {
+			copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
+			/* The same user-provided value is used for every CPU
+			 * when BPF_F_ALL_CPUS is specified; otherwise @value
+			 * is an array of per-CPU values.
+			 */
+			if (!(flags & BPF_F_ALL_CPUS))
+				off += size;
+			bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
+		}
+	}
+}
+#endif
+
 static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
 {
 	unsigned long *src_uptr, *dst_uptr;
@@ -2417,7 +2467,6 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
 void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
 void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
-void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
 
 struct bpf_map *bpf_map_get(u32 ufd);
@@ -3709,14 +3758,29 @@ int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char *
 			   const char **linep, int *nump);
 struct bpf_prog *bpf_prog_find_from_stack(void);
 
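+/* Per-map-type gate for BPF_F_CPU/BPF_F_ALL_CPUS; no map type opts in yet. */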
+static inline bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
+{
+	return false;
+}
+
 static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 extra_flags_mask)
 {
-	if (extra_flags_mask && (flags & extra_flags_mask))
+	if (extra_flags_mask && ((u32)flags & extra_flags_mask))
 		return -EINVAL;
 
 	if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
 		return -EINVAL;
 
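+	/* The upper 32 bits of @flags carry a CPU number and are only
+	 * meaningful together with BPF_F_CPU.
+	 */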
+	if (!(flags & BPF_F_CPU) && flags >> 32)
+		return -EINVAL;
+
+	if ((flags & (BPF_F_CPU | BPF_F_ALL_CPUS)) && !bpf_map_supports_cpu_flags(map->map_type))
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -3725,7 +3785,7 @@ static inline int bpf_map_check_update_flags(struct bpf_map *map, u64 flags)
 	return bpf_map_check_op_flags(map, flags, 0);
 }
 
-#define BPF_MAP_LOOKUP_ELEM_EXTRA_FLAGS_MASK (~BPF_F_LOCK)
+#define BPF_MAP_LOOKUP_ELEM_EXTRA_FLAGS_MASK (~(BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS))
 
 static inline int bpf_map_check_lookup_flags(struct bpf_map *map, u64 flags)
 {
@@ -3737,4 +3797,32 @@ static inline int bpf_map_check_batch_flags(struct bpf_map *map, u64 flags)
 	return bpf_map_check_op_flags(map, flags, BPF_MAP_LOOKUP_ELEM_EXTRA_FLAGS_MASK);
 }
 
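+/* Validate the BPF_F_CPU/BPF_F_ALL_CPUS bits of @flags: the two flags are
+ * mutually exclusive, BPF_F_ALL_CPUS is accepted only when
+ * @check_all_cpus_flag is true, and with BPF_F_CPU the CPU number in the
+ * upper 32 bits of @flags must be below num_possible_cpus().
+ */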
+static inline int bpf_map_check_cpu_flags(u64 flags, bool check_all_cpus_flag)
+{
+	const u64 cpu_flags = BPF_F_CPU | BPF_F_ALL_CPUS;
+	u32 cpu;
+
+	if (check_all_cpus_flag) {
+		if (unlikely((u32)flags > BPF_F_ALL_CPUS))
+			/* unknown flags */
+			return -EINVAL;
+		if (unlikely((flags & cpu_flags) == cpu_flags))
+			return -EINVAL;
+	} else {
+		if (unlikely((u32)flags & ~BPF_F_CPU))
+			return -EINVAL;
+	}
+
+	cpu = flags >> 32;
+	if (unlikely((flags & BPF_F_CPU) && cpu >= num_possible_cpus()))
+		return -ERANGE;
+
+	return 0;
+}
+
 #endif /* _LINUX_BPF_H */
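
A minimal user-space sketch of the flag encoding these checks expect, assuming libbpf's existing bpf_map_update_elem() and bpf_map_lookup_elem_flags() wrappers and a per-CPU map type that bpf_map_supports_cpu_flags() has been taught to accept. The cpu-in-upper-32-bits layout follows directly from the checks above; the helper names here are illustrative, not part of this commit.

#include <bpf/bpf.h>	/* bpf_map_update_elem(), bpf_map_lookup_elem_flags() */
#include <linux/bpf.h>	/* BPF_F_CPU, BPF_F_ALL_CPUS */

/* Update only @cpu's slot of a per-CPU map element: BPF_F_CPU in the low
 * bits, the CPU number in the upper 32 bits of the flags word.
 */
static int update_one_cpu(int map_fd, __u32 key, const __u64 *value, __u32 cpu)
{
	return bpf_map_update_elem(map_fd, &key, value,
				   BPF_F_CPU | ((__u64)cpu << 32));
}

/* Replicate a single value to every possible CPU rather than passing an
 * array of num_possible_cpus() values.
 */
static int update_all_cpus(int map_fd, __u32 key, const __u64 *value)
{
	return bpf_map_update_elem(map_fd, &key, value, BPF_F_ALL_CPUS);
}

/* Read back one CPU's value; without BPF_F_CPU the kernel fills the buffer
 * with one value per possible CPU instead.
 */
static int lookup_one_cpu(int map_fd, __u32 key, __u64 *value, __u32 cpu)
{
	return bpf_map_lookup_elem_flags(map_fd, &key, value,
					 BPF_F_CPU | ((__u64)cpu << 32));
}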