@@ -1329,33 +1329,33 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
 
 u64 x86_read_arch_cap_msr(void)
 {
-	u64 ia32_cap = 0;
+	u64 x86_arch_cap_msr = 0;
 
 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
-	return ia32_cap;
+	return x86_arch_cap_msr;
 }
 
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-		ia32_cap & ARCH_CAP_PSDP_NO &&
-		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
 	/* The "immunity" bit trumps everything else: */
-	if (ia32_cap & ARCH_CAP_RFDS_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
 		return false;
 
 	/*
 	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
 	 * indicate that mitigation is needed because guest is running on a
 	 * vulnerable hardware or may migrate to such hardware:
 	 */
-	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
		return true;
 
 	/* Only consult the blacklist when there is no enumeration: */
@@ -1364,11 +1364,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-	u64 ia32_cap = x86_read_arch_cap_msr();
+	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1380,23 +1380,23 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	/*
 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
 	 * flag and protect from vendor-specific bugs via the whitelist.
 	 */
-	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
+		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
 	}
 
 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
+	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
 		setup_force_cpu_bug(X86_BUG_MDS);
 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1415,9 +1415,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
 	 * update is not present or running as guest that don't get TSX_CTRL.
 	 */
-	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
 	    (cpu_has(c, X86_FEATURE_RTM) ||
-	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
 		setup_force_cpu_bug(X86_BUG_TAA);
 
 	/*
@@ -1443,15 +1443,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
 	 */
-	if (!arch_cap_mmio_immune(ia32_cap)) {
+	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
 			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
 	}
 
 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
 			setup_force_cpu_bug(X86_BUG_RETBLEED);
 	}
 
@@ -1469,15 +1469,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
 	 * which means that AVX will be disabled.
 	 */
-	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
 	    boot_cpu_has(X86_FEATURE_AVX))
 		setup_force_cpu_bug(X86_BUG_GDS);
 
-	if (vulnerable_to_rfds(ia32_cap))
+	if (vulnerable_to_rfds(x86_arch_cap_msr))
 		setup_force_cpu_bug(X86_BUG_RFDS);
 
 	/* When virtualized, eIBRS could be hidden, assume vulnerable */
-	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
 	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1487,7 +1487,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		return;
 
 	/* Rogue Data Cache Load? No! */
-	if (ia32_cap & ARCH_CAP_RDCL_NO)
+	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
 		return;
 
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);