@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 	return prog_adj;
 }
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+	int i;
+
+	for (i = 0; i < fp->aux->func_cnt; i++)
+		bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+	bpf_prog_kallsyms_del_subprogs(fp);
+	bpf_prog_kallsyms_del(fp);
+}
+
 #ifdef CONFIG_BPF_JIT
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
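The two helpers above give teardown paths a single entry point for dropping the kallsyms entries of a program together with all of its subprograms. A minimal sketch of how a free path could use them; the caller name and surrounding logic are illustrative assumptions, not part of this hunk (in-tree the callers would sit in the prog put/free paths of kernel/bpf/syscall.c):

/* Illustrative only: a hypothetical teardown path using the new helper. */
static void example_prog_teardown(struct bpf_prog *prog)
{
	/* Remove ksym entries for every subprog, then for the main prog. */
	bpf_prog_kallsyms_del_all(prog);
	bpf_prog_free(prog);
}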
@@ -584,6 +598,8 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	bpf_fill_ill_insns(hdr, size);
 
 	hdr->pages = size / PAGE_SIZE;
+	hdr->locked = 0;
+
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
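Initializing hdr->locked to 0 records that a freshly allocated JIT image has not yet been set read-only. The companion lock helper is not part of this hunk; a hedged sketch of its plausible shape, assuming it flips the flag only when set_memory_ro() actually succeeds:

/* Sketch under the assumption that bpf_jit_binary_lock_ro() tracks
 * set_memory_ro() success in hdr->locked; illustrative, not shown in
 * this diff.
 */
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	hdr->locked = 1;
	if (set_memory_ro((unsigned long)hdr, hdr->pages))
		hdr->locked = 0;
#endif
}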
@@ -1434,6 +1450,33 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 	return 0;
 }
 
+static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
+{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	int i, err;
+
+	for (i = 0; i < fp->aux->func_cnt; i++) {
+		err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
+		if (err)
+			return err;
+	}
+
+	return bpf_prog_check_pages_ro_single(fp);
+#endif
+	return 0;
+}
+
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+	fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  *	bpf_prog_select_runtime - select exec runtime for BPF program
  *	@fp: bpf_prog populated with internal BPF program
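bpf_prog_check_pages_ro_locked() walks every subprogram before checking the main program, so a single subprog whose pages could not be made read-only fails the whole program. The per-prog check bpf_prog_check_pages_ro_single() is not shown in this hunk; one plausible shape, assuming bpf_prog_lock_ro() records its success in an fp->locked flag (the -ENOLCK error code is likewise an assumption):

/* Hedged sketch, not part of this hunk: report a lock failure that
 * bpf_prog_lock_ro() recorded earlier in fp->locked.
 */
static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	if (!fp->locked)
		return -ENOLCK;
#endif
	return 0;
}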
@@ -1444,13 +1487,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+	/* In case of BPF to BPF calls, verifier did all the prep
+	 * work with regards to JITing, etc.
+	 */
+	if (fp->bpf_func)
+		goto finalize;
 
-	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-	fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+	bpf_prog_select_func(fp);
 
 	/* eBPF JITs can rewrite the program in case constant
 	 * blinding is active. However, in case of error during
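For programs with BPF-to-BPF calls the verifier has already JITed all subprograms and set fp->bpf_func, so the early goto skips interpreter selection entirely. For the interpreter path, the bucket arithmetic factored out into bpf_prog_select_func() works out as illustrated below (comment only; the bucket count assumes the 512-byte BPF stack limit):

/* Illustration of the index math in bpf_prog_select_func():
 * stack_depth is clamped to at least 1 and rounded up to the next
 * multiple of 32, selecting one interpreter per 32-byte bucket:
 *
 *   stack_depth   1..32  -> interpreters[0]
 *   stack_depth  33..64  -> interpreters[1]
 *   stack_depth 481..512 -> interpreters[15]
 */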
@@ -1471,6 +1514,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 		if (*err)
 			return fp;
 	}
+
+finalize:
 	bpf_prog_lock_ro(fp);
 
 	/* The tail call compatibility check can only be done at
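Both entry paths now converge on the finalize label, so programs prepared by the verifier and freshly selected interpreter/JIT programs alike pass through bpf_prog_lock_ro(). That helper lives outside this diff; presumably it mirrors the image-header sketch shown earlier, operating on the prog struct itself:

/* Sketch only: bpf_prog_lock_ro() as it plausibly looks once lock
 * failures are recorded in fp->locked instead of merely warned about.
 */
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	fp->locked = 1;
	if (set_memory_ro((unsigned long)fp, fp->pages))
		fp->locked = 0;
#endif
}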
@@ -1479,7 +1524,17 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	 * all eBPF JITs might immediately support all features.
 	 */
 	*err = bpf_check_tail_call(fp);
-
+	if (*err)
+		return fp;
+
+	/* Checkpoint: at this point onwards any cBPF -> eBPF or
+	 * native eBPF program is read-only. If we failed to change
+	 * the page attributes (e.g. allocation failure from
+	 * splitting large pages), then reject the whole program
+	 * in order to guarantee not ending up with any W+X pages
+	 * from BPF side in kernel.
+	 */
+	*err = bpf_prog_check_pages_ro_locked(fp);
 	return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
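With the checkpoint in place, a failed set_memory_ro() surfaces as a load error rather than a silent W+X mapping. A sketch of the caller-side contract (the caller name is hypothetical; in-tree this would be the prog-load path in kernel/bpf/syscall.c):

/* Hypothetical caller showing the contract: *err now also covers a
 * failure to lock program pages read-only.
 */
static int example_finalize(struct bpf_prog *prog)
{
	int err = 0;

	prog = bpf_prog_select_runtime(prog, &err);
	return err;	/* non-zero also when the R/O lock failed */
}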