@@ -392,6 +392,71 @@ asm (
 "		blr				;"
 );
 
+static int emit_atomic_ld_st(const struct bpf_insn insn, struct codegen_context *ctx, u32 *image)
+{
+	u32 code = insn.code;
+	u32 dst_reg = bpf_to_ppc(insn.dst_reg);
+	u32 src_reg = bpf_to_ppc(insn.src_reg);
+	u32 size = BPF_SIZE(code);
+	u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+	u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+	s16 off = insn.off;
+	s32 imm = insn.imm;
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		switch (size) {
+		case BPF_B:
+			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+			break;
+		case BPF_H:
+			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+			break;
+		case BPF_W:
+			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+			break;
+		case BPF_DW:
+			if (off % 4) {
+				EMIT(PPC_RAW_LI(tmp1_reg, off));
+				EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
+			} else {
+				EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
+			}
+			break;
+		}
+		EMIT(PPC_RAW_LWSYNC());
+		break;
+	case BPF_STORE_REL:
+		EMIT(PPC_RAW_LWSYNC());
+		switch (size) {
+		case BPF_B:
+			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
+			break;
+		case BPF_H:
+			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
+			break;
+		case BPF_W:
+			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
+			break;
+		case BPF_DW:
+			if (off % 4) {
+				EMIT(PPC_RAW_LI(tmp2_reg, off));
+				EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
+			} else {
+				EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
+			}
+			break;
+		}
+		break;
+	default:
+		pr_err_ratelimited("unexpected atomic load/store op code %02x\n",
+				   imm);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /* Assemble the body code between the prologue & epilogue */
 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
 		       u32 *addrs, int pass, bool extra_pass)
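For background on the barrier placement: on Power, lwsync orders load-load, load-store, and store-store pairs, so a plain load followed by lwsync yields acquire semantics, and lwsync followed by a plain store yields release semantics. The off % 4 checks exist because ld/std are DS-form instructions whose displacement must be a multiple of 4; other offsets fall back to the X-form ldx/stdx through a temporary register. A minimal userspace sketch of the same acquire/release pattern the JIT emits, assuming a ppc64 target with GCC-style inline asm (the helper names are invented for illustration, not taken from the patch):

#include <stdint.h>

/* Load-acquire: plain load, then lwsync keeps later loads and stores
 * from being reordered before the load. */
static inline uint64_t load_acquire_u64(const uint64_t *p)
{
	uint64_t v = *(const volatile uint64_t *)p;
	__asm__ __volatile__("lwsync" ::: "memory");
	return v;
}

/* Store-release: lwsync keeps earlier loads and stores from being
 * reordered past the store. */
static inline void store_release_u64(uint64_t *p, uint64_t v)
{
	__asm__ __volatile__("lwsync" ::: "memory");
	*(volatile uint64_t *)p = v;
}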
@@ -859,8 +924,25 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
 		/*
 		 * BPF_STX ATOMIC (atomic ops)
 		 */
+		case BPF_STX | BPF_ATOMIC | BPF_B:
+		case BPF_STX | BPF_ATOMIC | BPF_H:
 		case BPF_STX | BPF_ATOMIC | BPF_W:
 		case BPF_STX | BPF_ATOMIC | BPF_DW:
+			if (bpf_atomic_is_load_store(&insn[i])) {
+				ret = emit_atomic_ld_st(insn[i], ctx, image);
+				if (ret)
+					return ret;
+
+				if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
+					addrs[++i] = ctx->idx * 4;
+				break;
+			} else if (size == BPF_B || size == BPF_H) {
+				pr_err_ratelimited(
+					"eBPF filter atomic op code %02x (@%d) unsupported\n",
+					code, i);
+				return -EOPNOTSUPP;
+			}
+
 			save_reg = tmp2_reg;
 			ret_reg = src_reg;
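On the dispatch above: bpf_atomic_is_load_store() routes the BPF_LOAD_ACQ/BPF_STORE_REL immediates to the new emit_atomic_ld_st() helper, while byte and half-word sizes for the remaining read-modify-write atomics stay unsupported and return -EOPNOTSUPP. The insn_is_zext() skip works because lbz/lhz/lwz already zero-extend their result, making a following verifier-inserted zero-extension redundant. As a sketch of an instruction that would reach this path, a load-acquire could be encoded with the kernel's BPF_ATOMIC_OP() macro from include/linux/filter.h (register choices and offset here are arbitrary examples, not from the patch):

/* Sketch: 32-bit load-acquire from [R1 + 0] into R0, then exit.
 * The imm field carries the operation (BPF_LOAD_ACQ); BPF_W sets the size. */
struct bpf_insn prog[] = {
	BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0),
	BPF_EXIT_INSN(),
};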