@@ -17402,9 +17402,8 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 static int check_cfg(struct bpf_verifier_env *env)
 {
         int insn_cnt = env->prog->len;
-        int *insn_stack, *insn_state;
+        int *insn_stack, *insn_state, *insn_postorder;
         int ex_insn_beg, i, ret = 0;
-        bool ex_done = false;
 
         insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
         if (!insn_state)
@@ -17416,6 +17415,17 @@ static int check_cfg(struct bpf_verifier_env *env)
                 return -ENOMEM;
         }
 
+        insn_postorder = env->cfg.insn_postorder = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
+        if (!insn_postorder) {
+                kvfree(insn_state);
+                kvfree(insn_stack);
+                return -ENOMEM;
+        }
+
+        ex_insn_beg = env->exception_callback_subprog
+                      ? env->subprog_info[env->exception_callback_subprog].start
+                      : 0;
+
         insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
         insn_stack[0] = 0; /* 0 is the first instruction */
         env->cfg.cur_stack = 1;
@@ -17429,6 +17439,7 @@ static int check_cfg(struct bpf_verifier_env *env)
                 case DONE_EXPLORING:
                         insn_state[t] = EXPLORED;
                         env->cfg.cur_stack--;
+                        insn_postorder[env->cfg.cur_postorder++] = t;
                         break;
                 case KEEP_EXPLORING:
                         break;
@@ -17447,13 +17458,10 @@ static int check_cfg(struct bpf_verifier_env *env)
                         goto err_free;
                 }
 
-        if (env->exception_callback_subprog && !ex_done) {
-                ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start;
-
+        if (ex_insn_beg && insn_state[ex_insn_beg] != EXPLORED) {
                 insn_state[ex_insn_beg] = DISCOVERED;
                 insn_stack[0] = ex_insn_beg;
                 env->cfg.cur_stack = 1;
-                ex_done = true;
                 goto walk_cfg;
         }
 
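The hunks above make check_cfg() record a depth-first postorder of the instruction CFG: an instruction index is appended to env->cfg.insn_postorder at the moment it becomes EXPLORED, i.e. once all of its outgoing edges have been walked. Postorder is the natural visit order for the backward dataflow pass added further down: when an instruction is processed, the in-sets of its successors are usually already up to date, so the fixed point is typically reached in one sweep plus one confirming pass. Below is a minimal user-space sketch of the same collection scheme on a hypothetical four-node diamond CFG; the graph, array sizes, and names are made up for illustration:

/* Iterative DFS postorder collection, mirroring the explicit-stack
 * walk in check_cfg(). Toy CFG: 0 -> {1, 2}, 1 -> 3, 2 -> 3.
 */
#include <stdio.h>

#define N 4

/* succ[i] lists up to two successors of node i, -1 = none */
static const int succ[N][2] = { {1, 2}, {3, -1}, {3, -1}, {-1, -1} };

enum { UNSEEN, DISCOVERED, EXPLORED };

int main(void)
{
        int state[N] = {0}, next_edge[N] = {0};
        int stack[N], postorder[N];
        int top = 0, cur_postorder = 0;

        state[0] = DISCOVERED;
        stack[top++] = 0;
        while (top > 0) {
                int t = stack[top - 1];
                int e = next_edge[t];

                if (e < 2 && succ[t][e] >= 0) {
                        int s = succ[t][e];

                        next_edge[t]++;
                        if (state[s] == UNSEEN) {
                                /* unexplored successor: keep exploring */
                                state[s] = DISCOVERED;
                                stack[top++] = s;
                        }
                } else {
                        /* all edges walked: the DONE_EXPLORING case */
                        state[t] = EXPLORED;
                        top--;
                        postorder[cur_postorder++] = t;
                }
        }
        for (int i = 0; i < cur_postorder; i++)
                printf("%d ", postorder[i]);    /* prints: 3 1 2 0 */
        printf("\n");
        return 0;
}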
@@ -23379,6 +23387,301 @@ static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr,
         return 0;
 }
 
+static bool can_fallthrough(struct bpf_insn *insn)
+{
+        u8 class = BPF_CLASS(insn->code);
+        u8 opcode = BPF_OP(insn->code);
+
+        if (class != BPF_JMP && class != BPF_JMP32)
+                return true;
+
+        if (opcode == BPF_EXIT || opcode == BPF_JA)
+                return false;
+
+        return true;
+}
+
+static bool can_jump(struct bpf_insn *insn)
+{
+        u8 class = BPF_CLASS(insn->code);
+        u8 opcode = BPF_OP(insn->code);
+
+        if (class != BPF_JMP && class != BPF_JMP32)
+                return false;
+
+        switch (opcode) {
+        case BPF_JA:
+        case BPF_JEQ:
+        case BPF_JNE:
+        case BPF_JLT:
+        case BPF_JLE:
+        case BPF_JGT:
+        case BPF_JGE:
+        case BPF_JSGT:
+        case BPF_JSGE:
+        case BPF_JSLT:
+        case BPF_JSLE:
+        case BPF_JCOND:
+                return true;
+        }
+
+        return false;
+}
+
+static int insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2])
+{
+        struct bpf_insn *insn = &prog->insnsi[idx];
+        int i = 0, insn_sz;
+        u32 dst;
+
+        insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+        if (can_fallthrough(insn) && idx + 1 < prog->len)
+                succ[i++] = idx + insn_sz;
+
+        if (can_jump(insn)) {
+                dst = idx + jmp_offset(insn) + 1;
+                if (i == 0 || succ[0] != dst)
+                        succ[i++] = dst;
+        }
+
+        return i;
+}
+
+/* Each field is a register bitmask */
+struct insn_live_regs {
+        u16 use;        /* registers read by instruction */
+        u16 def;        /* registers written by instruction */
+        u16 in;         /* registers that may be alive before instruction */
+        u16 out;        /* registers that may be alive after instruction */
+};
+
+/* Bitmask with 1s for all caller saved registers */
+#define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1)
+
+/* Compute info->{use,def} fields for the instruction */
+static void compute_insn_live_regs(struct bpf_verifier_env *env,
+                                   struct bpf_insn *insn,
+                                   struct insn_live_regs *info)
+{
+        struct call_summary cs;
+        u8 class = BPF_CLASS(insn->code);
+        u8 code = BPF_OP(insn->code);
+        u8 mode = BPF_MODE(insn->code);
+        u16 src = BIT(insn->src_reg);
+        u16 dst = BIT(insn->dst_reg);
+        u16 r0 = BIT(0);
+        u16 def = 0;
+        u16 use = 0xffff;
+
+        switch (class) {
+        case BPF_LD:
+                switch (mode) {
+                case BPF_IMM:
+                        if (BPF_SIZE(insn->code) == BPF_DW) {
+                                def = dst;
+                                use = 0;
+                        }
+                        break;
+                case BPF_LD | BPF_ABS:
+                case BPF_LD | BPF_IND:
+                        /* stick with defaults */
+                        break;
+                }
+                break;
+        case BPF_LDX:
+                switch (mode) {
+                case BPF_MEM:
+                case BPF_MEMSX:
+                        def = dst;
+                        use = src;
+                        break;
+                }
+                break;
+        case BPF_ST:
+                switch (mode) {
+                case BPF_MEM:
+                        def = 0;
+                        use = dst;
+                        break;
+                }
+                break;
+        case BPF_STX:
+                switch (mode) {
+                case BPF_MEM:
+                        def = 0;
+                        use = dst | src;
+                        break;
+                case BPF_ATOMIC:
+                        switch (insn->imm) {
+                        case BPF_CMPXCHG:
+                                use = r0 | dst | src;
+                                def = r0;
+                                break;
+                        case BPF_LOAD_ACQ:
+                                def = dst;
+                                use = src;
+                                break;
+                        case BPF_STORE_REL:
+                                def = 0;
+                                use = dst | src;
+                                break;
+                        default:
+                                use = dst | src;
+                                if (insn->imm & BPF_FETCH)
+                                        def = src;
+                                else
+                                        def = 0;
+                        }
+                        break;
+                }
+                break;
+        case BPF_ALU:
+        case BPF_ALU64:
+                switch (code) {
+                case BPF_END:
+                        use = dst;
+                        def = dst;
+                        break;
+                case BPF_MOV:
+                        def = dst;
+                        if (BPF_SRC(insn->code) == BPF_K)
+                                use = 0;
+                        else
+                                use = src;
+                        break;
+                default:
+                        def = dst;
+                        if (BPF_SRC(insn->code) == BPF_K)
+                                use = dst;
+                        else
+                                use = dst | src;
+                }
+                break;
+        case BPF_JMP:
+        case BPF_JMP32:
+                switch (code) {
+                case BPF_JA:
+                        def = 0;
+                        use = 0;
+                        break;
+                case BPF_EXIT:
+                        def = 0;
+                        use = r0;
+                        break;
+                case BPF_CALL:
+                        def = ALL_CALLER_SAVED_REGS;
+                        use = def & ~BIT(BPF_REG_0);
+                        if (get_call_summary(env, insn, &cs))
+                                use = GENMASK(cs.num_params, 1);
+                        break;
+                default:
+                        def = 0;
+                        if (BPF_SRC(insn->code) == BPF_K)
+                                use = dst;
+                        else
+                                use = dst | src;
+                }
+                break;
+        }
+
+        info->def = def;
+        info->use = use;
+}
+
+/* Compute may-live registers after each instruction in the program.
+ * The register is live after the instruction I if it is read by some
+ * instruction S following I during program execution and is not
+ * overwritten between I and S.
+ *
+ * Store result in env->insn_aux_data[i].live_regs_before.
+ */
+static int compute_live_registers(struct bpf_verifier_env *env)
+{
+        struct bpf_insn_aux_data *insn_aux = env->insn_aux_data;
+        struct bpf_insn *insns = env->prog->insnsi;
+        struct insn_live_regs *state;
+        int insn_cnt = env->prog->len;
+        int err = 0, i, j;
+        bool changed;
+
+        /* Use the following algorithm:
+         * - define the following:
+         *   - I.use : a set of all registers read by instruction I;
+         *   - I.def : a set of all registers written by instruction I;
+         *   - I.in  : a set of all registers that may be alive before I execution;
+         *   - I.out : a set of all registers that may be alive after I execution;
+         *   - insn_successors(I): a set of instructions S that might immediately
+         *                         follow I for some program execution;
+         * - associate separate empty sets 'I.in' and 'I.out' with each instruction;
+         * - visit each instruction in a postorder and update
+         *   state[i].in, state[i].out as follows:
+         *
+         *       state[i].out = U [state[s].in for S in insn_successors(i)]
+         *       state[i].in  = (state[i].out / state[i].def) U state[i].use
+         *
+         *   (where U stands for set union, / stands for set difference)
+         * - repeat the computation while {in,out} fields change for
+         *   any instruction.
+         */
+        state = kvcalloc(insn_cnt, sizeof(*state), GFP_KERNEL);
+        if (!state) {
+                err = -ENOMEM;
+                goto out;
+        }
+
+        for (i = 0; i < insn_cnt; ++i)
+                compute_insn_live_regs(env, &insns[i], &state[i]);
+
+        changed = true;
+        while (changed) {
+                changed = false;
+                for (i = 0; i < env->cfg.cur_postorder; ++i) {
+                        int insn_idx = env->cfg.insn_postorder[i];
+                        struct insn_live_regs *live = &state[insn_idx];
+                        int succ_num;
+                        u32 succ[2];
+                        u16 new_out = 0;
+                        u16 new_in = 0;
+
+                        succ_num = insn_successors(env->prog, insn_idx, succ);
+                        for (int s = 0; s < succ_num; ++s)
+                                new_out |= state[succ[s]].in;
+                        new_in = (new_out & ~live->def) | live->use;
+                        if (new_out != live->out || new_in != live->in) {
+                                live->in = new_in;
+                                live->out = new_out;
+                                changed = true;
+                        }
+                }
+        }
+
+        for (i = 0; i < insn_cnt; ++i)
+                insn_aux[i].live_regs_before = state[i].in;
+
+        if (env->log.level & BPF_LOG_LEVEL2) {
+                verbose(env, "Live regs before insn:\n");
+                for (i = 0; i < insn_cnt; ++i) {
+                        verbose(env, "%3d: ", i);
+                        for (j = BPF_REG_0; j < BPF_REG_10; ++j)
+                                if (insn_aux[i].live_regs_before & BIT(j))
+                                        verbose(env, "%d", j);
+                                else
+                                        verbose(env, ".");
+                        verbose(env, " ");
+                        verbose_insn(env, &insns[i]);
+                        if (bpf_is_ldimm64(&insns[i]))
+                                i++;
+                }
+        }
+
+out:
+        kvfree(state);
+        kvfree(env->cfg.insn_postorder);
+        env->cfg.insn_postorder = NULL;
+        env->cfg.cur_postorder = 0;
+        return err;
+}
+
 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
 {
         u64 start_time = ktime_get_ns();
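The comment block in compute_live_registers() above states the classic liveness equations. To see them in action, here is a stand-alone sketch that runs the same round-robin iteration over a hypothetical three-instruction straight-line program with hand-written use/def masks, where bit i stands for register ri; the program and its masks are illustrative assumptions, not taken from the patch:

/* Toy program:
 *   insn 0: r1 = 7        def = {r1}, use = {}
 *   insn 1: r2 = r1 + r1  def = {r2}, use = {r1}
 *   insn 2: exit          def = {},   use = {r0}
 */
#include <stdio.h>

#define N 3

struct live { unsigned short use, def, in, out; };

int main(void)
{
        struct live s[N] = {
                { .use = 0x0000, .def = 0x0002 },       /* insn 0 */
                { .use = 0x0002, .def = 0x0004 },       /* insn 1 */
                { .use = 0x0001, .def = 0x0000 },       /* insn 2 */
        };
        /* postorder of a straight-line program: last insn first */
        int postorder[N] = { 2, 1, 0 };
        int changed = 1;

        while (changed) {
                changed = 0;
                for (int i = 0; i < N; i++) {
                        int idx = postorder[i];
                        /* only successor is the fall-through, if any */
                        unsigned short new_out = (idx + 1 < N) ? s[idx + 1].in : 0;
                        unsigned short new_in = (new_out & ~s[idx].def) | s[idx].use;

                        if (new_out != s[idx].out || new_in != s[idx].in) {
                                s[idx].out = new_out;
                                s[idx].in = new_in;
                                changed = 1;
                        }
                }
        }
        /* Converges after one sweep plus one confirming pass:
         *   insn 0: in=0x0001 out=0x0003
         *   insn 1: in=0x0003 out=0x0001
         *   insn 2: in=0x0001 out=0x0000
         * r0 propagates up to the entry because insn 2 reads it and nothing
         * defines it here; r2 is never live because nothing reads it.
         */
        for (int i = 0; i < N; i++)
                printf("insn %d: in=%#06x out=%#06x\n",
                       i, (unsigned)s[i].in, (unsigned)s[i].out);
        return 0;
}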
@@ -23500,6 +23803,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
         if (ret)
                 goto skip_full_check;
 
+        ret = compute_live_registers(env);
+        if (ret < 0)
+                goto skip_full_check;
+
         ret = mark_fastcall_patterns(env);
         if (ret < 0)
                 goto skip_full_check;
@@ -23638,6 +23945,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
         vfree(env->insn_aux_data);
         kvfree(env->insn_hist);
 err_free_env:
+        kvfree(env->cfg.insn_postorder);
         kvfree(env);
         return ret;
 }
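Under BPF_LOG_LEVEL2 the new dump prints one row per instruction: a ten-column mask, one column per register r0..r9 (the register number if it may be live before the instruction, a dot otherwise), followed by the disassembled instruction. A tiny stand-alone helper that reproduces just the mask rendering, e.g. for post-processing such logs, is sketched below; it is a hypothetical aid, not part of the patch:

#include <stdio.h>

/* Render a live_regs_before bitmask the way the BPF_LOG_LEVEL2 dump
 * does: one column per register r0..r9.
 */
static void render_live_mask(unsigned short mask, char buf[11])
{
        for (int j = 0; j < 10; j++)
                buf[j] = (mask & (1u << j)) ? '0' + j : '.';
        buf[10] = '\0';
}

int main(void)
{
        char buf[11];

        render_live_mask(0x0006, buf);  /* bits 1 and 2 set: r1, r2 */
        printf("%s\n", buf);            /* prints: .12....... */
        return 0;
}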