@@ -5,6 +5,9 @@
  * more details.
  */
 #include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/once.h>
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/hwprobe.h>
@@ -454,28 +457,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
         return 0;
 }
 
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
-                            size_t pair_count, size_t cpusetsize,
-                            unsigned long __user *cpus_user,
-                            unsigned int flags)
-{
-        if (flags & RISCV_HWPROBE_WHICH_CPUS)
-                return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
-                                        cpus_user, flags);
+#ifdef CONFIG_MMU
 
-        return hwprobe_get_values(pairs, pair_count, cpusetsize,
-                                  cpus_user, flags);
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+        atomic_inc(&pending_boot_probes);
 }
 
-#ifdef CONFIG_MMU
+void riscv_hwprobe_complete_async_probe(void)
+{
+        if (atomic_dec_and_test(&pending_boot_probes))
+                complete(&boot_probes_done);
+}
 
-static int __init init_hwprobe_vdso_data(void)
+static int complete_hwprobe_vdso_data(void)
 {
         struct vdso_arch_data *avd = vdso_k_arch_data;
         u64 id_bitsmash = 0;
         struct riscv_hwprobe pair;
         int key;
 
+        if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+                wait_for_completion(&boot_probes_done);
+
         /*
          * Initialize vDSO data with the answers for the "all CPUs" case, to
          * save a syscall in the common case.
@@ -503,13 +510,52 @@ static int __init init_hwprobe_vdso_data(void)
          * vDSO should defer to the kernel for exotic cpu masks.
          */
         avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+        /*
+         * Make sure all the VDSO values are visible before we look at them.
+         * This pairs with the implicit "no speculativly visible accesses"
+         * barrier in the VDSO hwprobe code.
+         */
+        smp_wmb();
+        avd->ready = true;
+        return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+        struct vdso_arch_data *avd = vdso_k_arch_data;
+
+        /*
+         * Prevent the vDSO cached values from being used, as they're not ready
+         * yet.
+         */
+        avd->ready = false;
         return 0;
 }
 
 arch_initcall_sync(init_hwprobe_vdso_data);
 
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
 #endif /* CONFIG_MMU */
 
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+                            size_t pair_count, size_t cpusetsize,
+                            unsigned long __user *cpus_user,
+                            unsigned int flags)
+{
+        DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+        if (flags & RISCV_HWPROBE_WHICH_CPUS)
+                return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+                                        cpus_user, flags);
+
+        return hwprobe_get_values(pairs, pair_count, cpusetsize,
+                                  cpus_user, flags);
+}
+
 SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
                 size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
                 cpus, unsigned int, flags)
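
The two helpers added above form a simple biased reference count: pending_boot_probes starts at 1, every asynchronous boot-time probe takes an extra reference with riscv_hwprobe_register_async_probe() and drops it with riscv_hwprobe_complete_async_probe(), and the final drop signals boot_probes_done. A minimal sketch of a caller follows; everything except the two riscv_hwprobe_*_async_probe() calls is hypothetical, and their prototypes are assumed to be visible through <asm/hwprobe.h>.

/* Illustrative only -- not part of this patch. Assumes the prototypes for
 * the riscv_hwprobe_*_async_probe() helpers are exposed by <asm/hwprobe.h>.
 */
#include <linux/init.h>
#include <linux/workqueue.h>
#include <asm/hwprobe.h>

static void example_probe_workfn(struct work_struct *work)
{
        /* ... expensive detection or measurement runs here ... */

        /*
         * Drop the reference taken at registration time. The last probe
         * to finish completes boot_probes_done and wakes anyone sleeping
         * in complete_hwprobe_vdso_data().
         */
        riscv_hwprobe_complete_async_probe();
}

static DECLARE_WORK(example_probe_work, example_probe_workfn);

static int __init example_async_probe_init(void)
{
        /* Take a reference before the asynchronous work is kicked off. */
        riscv_hwprobe_register_async_probe();
        schedule_work(&example_probe_work);
        return 0;
}
arch_initcall(example_async_probe_init);

Registration has to happen during boot, before the first riscv_hwprobe() syscall can drop the initial reference in complete_hwprobe_vdso_data().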
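
On the publishing side, complete_hwprobe_vdso_data() orders all cached writes before avd->ready with smp_wmb(), so a reader that observes ready == true also sees the values written earlier. The sketch below spells out the matching consumer-side ordering explicitly; it is not the in-tree vDSO source (which, per the comment above the smp_wmb(), relies on an implicit barrier), and the helper name is made up.

/* Consumer-side sketch only -- not the actual vDSO code. Assumes the
 * struct vdso_arch_data definition and smp_rmb() are in scope.
 */
static bool hwprobe_cache_usable(const struct vdso_arch_data *avd)
{
        /* Probing not finished yet: callers should fall back to the syscall. */
        if (!avd->ready)
                return false;

        /*
         * Order the ready check before any read of the cached hwprobe
         * values; this is the explicit counterpart of the smp_wmb() in
         * complete_hwprobe_vdso_data().
         */
        smp_rmb();
        return true;
}

Until ready is set, the cached fast path stays disabled and every request goes through the syscall.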
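
do_riscv_hwprobe() wraps the completion step in DO_ONCE_SLEEPABLE() rather than DO_ONCE() because complete_hwprobe_vdso_data() may sleep in wait_for_completion(). Whichever riscv_hwprobe() syscall arrives first after boot pays that cost, and only if an asynchronous probe is still outstanding; with no registrations the biased counter drops straight to zero and the wait is skipped. Below is a userspace sketch that exercises this kernel path through the raw syscall (assuming kernel headers recent enough to provide __NR_riscv_hwprobe; the key choice is arbitrary).

/* Userspace sketch: the raw syscall (not the vDSO) is used on purpose so the
 * request reaches do_riscv_hwprobe() in the kernel.
 */
#include <asm/hwprobe.h>        /* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR };

        /* Arguments: pairs, pair_count, cpusetsize, cpus, flags */
        if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0)
                printf("key %lld -> value %llu\n",
                       (long long)pair.key, (unsigned long long)pair.value);
        return 0;
}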