@@ -695,6 +695,323 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
	return ret;
}

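+ /* Spill the traced function's register arguments (a0, a1, ...) into the
+  * trampoline stack frame, so attached BPF programs can read them and
+  * restore_args() can reload them before the traced function is called.
+  */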
+ static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < nregs; i++) {
+ 		emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
+ 		args_off -= 8;
+ 	}
+ }
+ 
+ static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < nregs; i++) {
+ 		emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
+ 		args_off -= 8;
+ 	}
+ }
+ 
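+ /* Emit the call sequence for one BPF program attached to the trampoline:
+  * write the bpf_cookie into the run context, call the enter handler
+  * selected by bpf_trampoline_enter(), run the program unless the handler
+  * returned 0, optionally save its return value, then call the exit handler.
+  */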
+ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
+ 			   int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
+ {
+ 	int ret, branch_off;
+ 	struct bpf_prog *p = l->link.prog;
+ 	int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+ 
+ 	if (l->cookie) {
+ 		emit_imm(RV_REG_T1, l->cookie, ctx);
+ 		emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
+ 	} else {
+ 		emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
+ 	}
+ 
+ 	/* arg1: prog */
+ 	emit_imm(RV_REG_A0, (const s64)p, ctx);
+ 	/* arg2: &run_ctx */
+ 	emit_addi(RV_REG_A1, RV_REG_FP, -run_ctx_off, ctx);
+ 	ret = emit_call((const u64)bpf_trampoline_enter(p), true, ctx);
+ 	if (ret)
+ 		return ret;
+ 
+ 	/* if (__bpf_prog_enter(prog) == 0)
+ 	 *	goto skip_exec_of_prog;
+ 	 */
+ 	branch_off = ctx->ninsns;
+ 	/* nop reserved for conditional jump */
+ 	emit(rv_nop(), ctx);
+ 
+ 	/* store prog start time */
+ 	emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+ 
+ 	/* arg1: &args_off */
+ 	emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
+ 	if (!p->jited)
+ 		/* arg2: progs[i]->insnsi for interpreter */
+ 		emit_imm(RV_REG_A1, (const s64)p->insnsi, ctx);
+ 	ret = emit_call((const u64)p->bpf_func, true, ctx);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (save_ret)
+ 		emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
+ 
+ 	/* update branch with beqz */
+ 	if (ctx->insns) {
+ 		int offset = ninsns_rvoff(ctx->ninsns - branch_off);
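+ 		/* RISC-V branch offsets are encoded in 2-byte units, hence >> 1 */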
+ 		u32 insn = rv_beq(RV_REG_A0, RV_REG_ZERO, offset >> 1);
+ 		*(u32 *)(ctx->insns + branch_off) = insn;
+ 	}
+ 
+ 	/* arg1: prog */
+ 	emit_imm(RV_REG_A0, (const s64)p, ctx);
+ 	/* arg2: prog start time */
+ 	emit_mv(RV_REG_A1, RV_REG_S1, ctx);
+ 	/* arg3: &run_ctx */
+ 	emit_addi(RV_REG_A2, RV_REG_FP, -run_ctx_off, ctx);
+ 	ret = emit_call((const u64)bpf_trampoline_exit(p), true, ctx);
+ 
+ 	return ret;
+ }
+ 
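+ /* Worker shared by both JIT passes: when ctx->insns is NULL only
+  * instruction counting is done; otherwise instructions are emitted into
+  * the trampoline image. Returns the instruction count on success.
+  */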
+ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ 					 const struct btf_func_model *m,
+ 					 struct bpf_tramp_links *tlinks,
+ 					 void *func_addr, u32 flags,
+ 					 struct rv_jit_context *ctx)
+ {
+ 	int i, ret, offset;
+ 	int *branches_off = NULL;
+ 	int stack_size = 0, nregs = m->nr_args;
+ 	int retaddr_off, fp_off, retval_off, args_off;
+ 	int nregs_off, ip_off, run_ctx_off, sreg_off;
+ 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ 	void *orig_call = func_addr;
+ 	bool save_ret;
+ 	u32 insn;
+ 
+ 	/* Generated trampoline stack layout:
+ 	 *
+ 	 * FP - 8            [ RA of parent func ] return address of parent
+ 	 *                                          function
+ 	 * FP - retaddr_off  [ RA of traced func ] return address of traced
+ 	 *                                          function
+ 	 * FP - fp_off       [ FP of parent func ]
+ 	 *
+ 	 * FP - retval_off   [ return value      ] BPF_TRAMP_F_CALL_ORIG or
+ 	 *                                         BPF_TRAMP_F_RET_FENTRY_RET
+ 	 *                   [ argN              ]
+ 	 *                   [ ...               ]
+ 	 * FP - args_off     [ arg1              ]
+ 	 *
+ 	 * FP - nregs_off    [ regs count        ]
+ 	 *
+ 	 * FP - ip_off       [ traced func       ] BPF_TRAMP_F_IP_ARG
+ 	 *
+ 	 * FP - run_ctx_off  [ bpf_tramp_run_ctx ]
+ 	 *
+ 	 * FP - sreg_off     [ callee saved reg  ]
+ 	 *
+ 	 *                   [ pads              ] padding for 16-byte alignment
+ 	 */
+ 
+ 	if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+ 		return -ENOTSUPP;
+ 
+ 	/* extra registers for struct arguments */
+ 	for (i = 0; i < m->nr_args; i++)
+ 		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ 			nregs += round_up(m->arg_size[i], 8) / 8 - 1;
+ 
+ 	/* at most 8 arguments are passed by registers */
+ 	if (nregs > 8)
+ 		return -ENOTSUPP;
+ 
+ 	/* room for parent function return address */
+ 	stack_size += 8;
+ 
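+ 	/* room for traced function return address */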
+ 	stack_size += 8;
+ 	retaddr_off = stack_size;
+ 
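+ 	/* room for frame pointer of parent function */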
+ 	stack_size += 8;
+ 	fp_off = stack_size;
+ 
+ 	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ 	if (save_ret) {
+ 		stack_size += 8;
+ 		retval_off = stack_size;
+ 	}
+ 
+ 	stack_size += nregs * 8;
+ 	args_off = stack_size;
+ 
+ 	stack_size += 8;
+ 	nregs_off = stack_size;
+ 
+ 	if (flags & BPF_TRAMP_F_IP_ARG) {
+ 		stack_size += 8;
+ 		ip_off = stack_size;
+ 	}
+ 
+ 	stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
+ 	run_ctx_off = stack_size;
+ 
+ 	stack_size += 8;
+ 	sreg_off = stack_size;
+ 
+ 	stack_size = round_up(stack_size, 16);
+ 
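+ 	/* allocate the trampoline frame, save the traced function's ra and fp,
+ 	 * then point fp at the top of the new frame
+ 	 */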
+ 	emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
+ 
+ 	emit_sd(RV_REG_SP, stack_size - retaddr_off, RV_REG_RA, ctx);
+ 	emit_sd(RV_REG_SP, stack_size - fp_off, RV_REG_FP, ctx);
+ 
+ 	emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
+ 
+ 	/* callee saved register S1 to pass start time */
+ 	emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);
+ 
+ 	/* store ip address of the traced function */
+ 	if (flags & BPF_TRAMP_F_IP_ARG) {
+ 		emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
+ 		emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
+ 	}
+ 
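+ 	/* record the number of saved argument registers; helpers such as
+ 	 * bpf_get_func_arg_cnt() read this slot back at run time
+ 	 */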
+ 	emit_li(RV_REG_T1, nregs, ctx);
+ 	emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
+ 
+ 	store_args(nregs, args_off, ctx);
+ 
+ 	/* skip to actual body of traced function */
+ 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ 		orig_call += 16;
+ 
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ 		emit_imm(RV_REG_A0, (const s64)im, ctx);
+ 		ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
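+ 	/* run fentry programs before the traced function */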
+ 	for (i = 0; i < fentry->nr_links; i++) {
+ 		ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
+ 				      flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
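+ 	/* fmod_ret: a program returning non-zero makes the trampoline skip
+ 	 * the traced function and use that value as its return value
+ 	 */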
+ 	if (fmod_ret->nr_links) {
+ 		branches_off = kcalloc(fmod_ret->nr_links, sizeof(int), GFP_KERNEL);
+ 		if (!branches_off)
+ 			return -ENOMEM;
+ 
+ 		/* cleanup to avoid garbage return value confusion */
+ 		emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
+ 		for (i = 0; i < fmod_ret->nr_links; i++) {
+ 			ret = invoke_bpf_prog(fmod_ret->links[i], args_off, retval_off,
+ 					      run_ctx_off, true, ctx);
+ 			if (ret)
+ 				goto out;
+ 			emit_ld(RV_REG_T1, -retval_off, RV_REG_FP, ctx);
+ 			branches_off[i] = ctx->ninsns;
+ 			/* nop reserved for conditional jump */
+ 			emit(rv_nop(), ctx);
+ 		}
+ 	}
+ 
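+ 	/* call the traced function; the nops at im->ip_after_call can later be
+ 	 * patched into a jump to im->ip_epilogue when the image is torn down
+ 	 */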
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ 		restore_args(nregs, args_off, ctx);
+ 		ret = emit_call((const u64)orig_call, true, ctx);
+ 		if (ret)
+ 			goto out;
+ 		emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+ 		im->ip_after_call = ctx->insns + ctx->ninsns;
+ 		/* 2 nops reserved for auipc+jalr pair */
+ 		emit(rv_nop(), ctx);
+ 		emit(rv_nop(), ctx);
+ 	}
+ 
+ 	/* update the nops reserved in the fmod_ret loop with bnez */
+ 	for (i = 0; ctx->insns && i < fmod_ret->nr_links; i++) {
+ 		offset = ninsns_rvoff(ctx->ninsns - branches_off[i]);
+ 		insn = rv_bne(RV_REG_T1, RV_REG_ZERO, offset >> 1);
+ 		*(u32 *)(ctx->insns + branches_off[i]) = insn;
+ 	}
+ 
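+ 	/* run fexit programs after the traced function has returned */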
+ 	for (i = 0; i < fexit->nr_links; i++) {
+ 		ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
+ 				      run_ctx_off, false, ctx);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 
+ 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ 		im->ip_epilogue = ctx->insns + ctx->ninsns;
+ 		emit_imm(RV_REG_A0, (const s64)im, ctx);
+ 		ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
+ 		if (ret)
+ 			goto out;
+ 	}
+ 
+ 	if (flags & BPF_TRAMP_F_RESTORE_REGS)
+ 		restore_args(nregs, args_off, ctx);
+ 
+ 	if (save_ret)
+ 		emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+ 
+ 	emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
+ 
+ 	if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ 		/* return address of parent function */
+ 		emit_ld(RV_REG_RA, stack_size - 8, RV_REG_SP, ctx);
+ 	else
+ 		/* return address of traced function */
+ 		emit_ld(RV_REG_RA, stack_size - retaddr_off, RV_REG_SP, ctx);
+ 
+ 	emit_ld(RV_REG_FP, stack_size - fp_off, RV_REG_SP, ctx);
+ 	emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);
+ 
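+ 	/* jump back: to the parent function if the traced function's frame was
+ 	 * skipped, otherwise into the body of the traced function
+ 	 */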
+ 	emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
+ 
+ 	ret = ctx->ninsns;
+ out:
+ 	kfree(branches_off);
+ 	return ret;
+ }
+ 
+ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+ 				void *image_end, const struct btf_func_model *m,
+ 				u32 flags, struct bpf_tramp_links *tlinks,
+ 				void *func_addr)
+ {
+ 	int ret;
+ 	struct rv_jit_context ctx;
+ 
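+ 	/* first pass: dry run with ctx.insns == NULL to size the trampoline */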
+ 	ctx.ninsns = 0;
+ 	ctx.insns = NULL;
+ 	ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	if (ninsns_rvoff(ret) > (long)image_end - (long)image)
+ 		return -EFBIG;
+ 
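+ 	/* second pass: emit instructions into the trampoline image */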
+ 	ctx.ninsns = 0;
+ 	ctx.insns = image;
+ 	ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
+ 	if (ret < 0)
+ 		return ret;
+ 
+ 	bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);
+ 
+ 	return ninsns_rvoff(ret);
+ }
+ 
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
		      bool extra_pass)
{