Skip to content

Commit 0772b9a

Browse files
KAGA-KOKObp3tk0v
authored and committed
x86/microcode: Sanitize __wait_for_cpus()
The code is too complicated for no reason:

  - The return value is pointless as this is a strict boolean.

  - It's way simpler to count down from num_online_cpus() and check for
    zero.

  - The timeout argument is pointless as this is always one second.

  - Touching the NMI watchdog every 100ns does not make any sense,
    neither does checking every 100ns. This is really not a hotpath
    operation.

Preload the atomic counter with the number of online CPUs and simplify
the whole timeout logic. Delay for one microsecond and touch the NMI
watchdog once per millisecond.

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 6f059e6 commit 0772b9a

File tree

1 file changed

+17
-22
lines changed
  • arch/x86/kernel/cpu/microcode

1 file changed

+17
-22
lines changed

arch/x86/kernel/cpu/microcode/core.c

Lines changed: 17 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -252,31 +252,26 @@ static struct platform_device *microcode_pdev;
252252
* requirement can be relaxed in the future. Right now, this is conservative
253253
* and good.
254254
*/
255-
#define SPINUNIT 100 /* 100 nsec */
255+
static atomic_t late_cpus_in, late_cpus_out;
256256

257-
258-
static atomic_t late_cpus_in;
259-
static atomic_t late_cpus_out;
260-
261-
static int __wait_for_cpus(atomic_t *t, long long timeout)
257+
static bool wait_for_cpus(atomic_t *cnt)
262258
{
263-
int all_cpus = num_online_cpus();
259+
unsigned int timeout;
264260

265-
atomic_inc(t);
261+
WARN_ON_ONCE(atomic_dec_return(cnt) < 0);
266262

267-
while (atomic_read(t) < all_cpus) {
268-
if (timeout < SPINUNIT) {
269-
pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
270-
all_cpus - atomic_read(t));
271-
return 1;
272-
}
263+
for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
264+
if (!atomic_read(cnt))
265+
return true;
273266

274-
ndelay(SPINUNIT);
275-
timeout -= SPINUNIT;
267+
udelay(1);
276268

277-
touch_nmi_watchdog();
269+
if (!(timeout % USEC_PER_MSEC))
270+
touch_nmi_watchdog();
278271
}
279-
return 0;
272+
/* Prevent the late comers from making progress and let them time out */
273+
atomic_inc(cnt);
274+
return false;
280275
}
281276

282277
/*
@@ -294,7 +289,7 @@ static int __reload_late(void *info)
294289
* Wait for all CPUs to arrive. A load will not be attempted unless all
295290
* CPUs show up.
296291
* */
297-
if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
292+
if (!wait_for_cpus(&late_cpus_in))
298293
return -1;
299294

300295
/*
@@ -317,7 +312,7 @@ static int __reload_late(void *info)
317312
}
318313

319314
wait_for_siblings:
320-
if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
315+
if (!wait_for_cpus(&late_cpus_out))
321316
panic("Timeout during microcode update!\n");
322317

323318
/*
@@ -344,8 +339,8 @@ static int microcode_reload_late(void)
344339
pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
345340
pr_err("You should switch to early loading, if possible.\n");
346341

347-
atomic_set(&late_cpus_in, 0);
348-
atomic_set(&late_cpus_out, 0);
342+
atomic_set(&late_cpus_in, num_online_cpus());
343+
atomic_set(&late_cpus_out, num_online_cpus());
349344

350345
/*
351346
* Take a snapshot before the microcode update in order to compare and

0 commit comments

Comments
 (0)