@@ -2940,7 +2940,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog,
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_tramp_link *l, int stack_size,
-			   int run_ctx_off, bool save_ret,
+			   int run_ctx_off, bool save_ret, int ret_off,
 			   void *image, void *rw_image)
 {
 	u8 *prog = *pprog;
@@ -3005,7 +3005,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
 	 */
 	if (save_ret)
-		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
 
 	/* replace 2 nops with JE insn, since jmp target is known */
 	jmp_insn[0] = X86_JE;
@@ -3055,15 +3055,16 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 		      struct bpf_tramp_links *tl, int stack_size,
-		      int run_ctx_off, bool save_ret,
+		      int run_ctx_off, bool save_ret, int ret_off,
 		      void *image, void *rw_image)
 {
 	int i;
 	u8 *prog = *pprog;
 
 	for (i = 0; i < tl->nr_links; i++) {
 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
-				    run_ctx_off, save_ret, image, rw_image))
+				    run_ctx_off, save_ret, ret_off, image,
+				    rw_image))
 			return -EINVAL;
 	}
 	*pprog = prog;
@@ -3072,7 +3073,7 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 
 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 			      struct bpf_tramp_links *tl, int stack_size,
-			      int run_ctx_off, u8 **branches,
+			      int run_ctx_off, int ret_off, u8 **branches,
 			      void *image, void *rw_image)
 {
 	u8 *prog = *pprog;
@@ -3082,18 +3083,18 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 	 * Set this to 0 to avoid confusing the program.
 	 */
 	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
-	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
 	for (i = 0; i < tl->nr_links; i++) {
 		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
-				    image, rw_image))
+				    ret_off, image, rw_image))
 			return -EINVAL;
 
-		/* mod_ret prog stored return value into [rbp - 8]. Emit:
-		 * if (*(u64 *)(rbp - 8) != 0)
+		/* mod_ret prog stored return value into [rbp - ret_off]. Emit:
+		 * if (*(u64 *)(rbp - ret_off) != 0)
 		 *	goto do_fexit;
 		 */
-		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
-		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
+		/* cmp QWORD PTR [rbp - ret_off], 0x0 */
+		EMIT4(0x48, 0x83, 0x7d, -ret_off); EMIT1(0x00);
 
 		/* Save the location of the branch and Generate 6 nops
 		 * (4 bytes for an offset and 2 bytes for the jump) These nops
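Editor's note on the encoding change in the hunk above: this is a minimal standalone sketch (plain userspace C, not part of the patch) of why passing -ret_off as the fourth byte reproduces the original 0xf8 literal. The ModRM byte 0x7d selects CMP with a [rbp + disp8] operand, so only the low 8 bits of -ret_off end up in the instruction; that stays correct only while ret_off fits a signed 8-bit displacement (ret_off <= 128), which holds here since ret_off is the first 8-byte slot reserved below RBP (see the @@ -3239 hunk further down).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int ret_off = 8;			/* first slot below RBP, as in the patch */
	uint8_t insn[5] = {
		0x48,				/* REX.W prefix */
		0x83,				/* group-1 opcode: CMP r/m64, imm8 (/7) */
		0x7d,				/* ModRM: mod=01, reg=/7 (CMP), rm=rbp + disp8 */
		(uint8_t)-ret_off,		/* disp8: -8 truncates to 0xf8 */
		0x00,				/* imm8: 0 */
	};

	for (int i = 0; i < 5; i++)
		printf("%02x ", insn[i]);	/* prints: 48 83 7d f8 00 */
	printf("\n");
	return 0;
}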
@@ -3179,7 +3180,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 					 void *func_addr)
 {
 	int i, ret, nr_regs = m->nr_args, stack_size = 0;
-	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
+	int ret_off, regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off,
+	    rbx_off;
 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
@@ -3213,7 +3215,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	 * RBP + 8         [ return address  ]
 	 * RBP + 0         [ RBP             ]
 	 *
-	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
+	 * RBP - ret_off   [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
 	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
 	 *
 	 *                 [ reg_argN        ]  always
@@ -3239,6 +3241,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
 	if (save_ret)
 		stack_size += 8;
+	ret_off = stack_size;
 
 	stack_size += nr_regs * 8;
 	regs_off = stack_size;
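For readers tracking the offset bookkeeping in this hunk: each slot's offset is recorded as the running stack_size right after the slot is reserved, and the slot is later addressed as RBP minus that offset. Below is a small illustrative sketch (standalone C, hypothetical two-argument function with save_ret set); it mirrors only the two slots visible above, while the real function goes on to reserve nregs_off, ip_off, arg_stack_off, run_ctx_off and rbx_off the same way.

#include <stdio.h>

int main(void)
{
	int nr_regs = 2, stack_size = 0;
	int ret_off, regs_off;

	stack_size += 8;		/* 8-byte return-value slot (save_ret) */
	ret_off = stack_size;		/* return value lives at RBP - ret_off (8) */

	stack_size += nr_regs * 8;	/* saved register arguments */
	regs_off = stack_size;		/* bottom of the argument area: RBP - regs_off (24) */

	printf("ret_off=%d regs_off=%d stack_size=%d\n",
	       ret_off, regs_off, stack_size);
	return 0;
}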
@@ -3341,7 +3344,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
 	if (fentry->nr_links) {
 		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
-			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
+			       flags & BPF_TRAMP_F_RET_FENTRY_RET, ret_off,
+			       image, rw_image))
 			return -EINVAL;
 	}
 
@@ -3352,7 +3356,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 			return -ENOMEM;
 
 		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
-				       run_ctx_off, branches, image, rw_image)) {
+				       run_ctx_off, ret_off, branches,
+				       image, rw_image)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -3380,7 +3385,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 			}
 		}
 		/* remember return value in a stack for bpf prog to access */
-		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
 		im->ip_after_call = image + (prog - (u8 *)rw_image);
 		emit_nops(&prog, X86_PATCH_SIZE);
 	}
@@ -3403,7 +3408,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
 	if (fexit->nr_links) {
 		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
-			       false, image, rw_image)) {
+			       false, ret_off, image, rw_image)) {
 			ret = -EINVAL;
 			goto cleanup;
 		}
@@ -3433,7 +3438,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
 	/* restore return value of orig_call or fentry prog back into RAX */
 	if (save_ret)
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -ret_off);
 
 	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
 	EMIT1(0xC9); /* leave */
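A closing sketch (standalone C, not kernel code) of the round trip that the store in the @@ -3380 hunk and the load above implement, assuming the usual x86-64 JIT register mapping where BPF_REG_0 is RAX and BPF_REG_FP is RBP: the return value of orig_call (or of the fentry program) is stored at RBP - ret_off before the patched nops, fexit programs see that same slot as the return-value entry of their ctx, and it is reloaded into RAX just before leave/ret. Both sides must therefore agree on ret_off.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t frame[64];			/* stand-in for the trampoline stack frame */
	uint8_t *rbp = frame + sizeof(frame);	/* "RBP" points at the top of the frame */
	int ret_off = 8;
	uint64_t rax = 42;			/* pretend return value of orig_call */
	uint64_t restored;

	/* emit_stx(..., BPF_REG_FP, BPF_REG_0, -ret_off):
	 * mov QWORD PTR [rbp - ret_off], rax
	 */
	memcpy(rbp - ret_off, &rax, sizeof(rax));

	/* ... fexit programs read this slot through their ctx ... */

	/* emit_ldx(..., BPF_REG_0, BPF_REG_FP, -ret_off):
	 * mov rax, QWORD PTR [rbp - ret_off]
	 */
	memcpy(&restored, rbp - ret_off, sizeof(restored));
	printf("restored=%llu\n", (unsigned long long)restored);
	return 0;
}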