@@ -1227,6 +1227,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define GDS			BIT(6)
 	/* CPU is affected by Register File Data Sampling */
 #define RFDS			BIT(7)
+	/* CPU is affected by Indirect Target Selection */
+#define ITS			BIT(8)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	X86_STEP_MAX,	SRBDS),
@@ -1238,22 +1240,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	X86_STEP_MAX,	SRBDS),
 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	X86_STEP_MAX,	MMIO),
 	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	X86_STEP_MAX,	SRBDS),
-	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	X86_STEP_MAX,	MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	0x5,		MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS),
 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	0xb,		MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	0xc,		MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
 	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	X86_STEP_MAX,	RETBLEED),
-	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	X86_STEP_MAX,	MMIO | GDS),
-	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	X86_STEP_MAX,	MMIO | GDS),
-	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	0x0,		MMIO | RETBLEED),
-	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	X86_STEP_MAX,	GDS),
-	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	X86_STEP_MAX,	GDS),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	X86_STEP_MAX,	MMIO | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	X86_STEP_MAX,	MMIO | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	0x0,		MMIO | RETBLEED | ITS),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	X86_STEP_MAX,	GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	X86_STEP_MAX,	GDS | ITS),
 	VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED),
-	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS),
 	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,	ATOM,		RFDS),
 	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	X86_STEP_MAX,	RFDS),
 	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,	ATOM,		RFDS),
@@ -1318,6 +1323,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 	return cpu_matches(cpu_vuln_blacklist, RFDS);
 }
 
+static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
+{
+	/* The "immunity" bit trumps everything else: */
+	if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
+		return false;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return false;
+
+	/* None of the affected CPUs have BHI_CTRL */
+	if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
+		return false;
+
+	/*
+	 * If a VMM did not expose ITS_NO, assume that a guest could
+	 * be running on a vulnerable hardware or may migrate to such
+	 * hardware.
+	 */
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		return true;
+
+	if (cpu_matches(cpu_vuln_blacklist, ITS))
+		return true;
+
+	return false;
+}
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
@@ -1449,6 +1480,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
 		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
 
+	if (vulnerable_to_its(x86_arch_cap_msr))
+		setup_force_cpu_bug(X86_BUG_ITS);
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
 
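Side note (not part of the patch): the split table rows above, such as the two INTEL_SKYLAKE_X entries, work because the stepping argument of VULNBL_INTEL_STEPS is treated as an upper bound and the first matching row wins, so older steppings stop at the pre-ITS entry while newer steppings fall through to the row that also carries ITS. The sketch below mimics that lookup in plain, standalone C; the names vuln_entry, match_flags and STEP_MAX are invented for illustration and stand in for the kernel's x86_cpu_id tables and x86_match_cpu() machinery.

```c
/*
 * Illustrative sketch only: a first-match, stepping-bounded blacklist
 * lookup, mirroring the INTEL_SKYLAKE_X split in the patch above.
 */
#include <stddef.h>
#include <stdio.h>

#define MMIO     (1u << 0)
#define RETBLEED (1u << 1)
#define GDS      (1u << 2)
#define ITS      (1u << 3)
#define STEP_MAX 0xff

struct vuln_entry {
	unsigned int model;
	unsigned int max_step;	/* entry applies to steppings <= max_step */
	unsigned int flags;
};

/* Ordered like the kernel table: the first matching row decides. */
static const struct vuln_entry blacklist[] = {
	{ .model = 0x55, .max_step = 0x5,      .flags = MMIO | RETBLEED | GDS },
	{ .model = 0x55, .max_step = STEP_MAX, .flags = MMIO | RETBLEED | GDS | ITS },
};

static unsigned int match_flags(unsigned int model, unsigned int stepping)
{
	for (size_t i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++) {
		if (blacklist[i].model == model && stepping <= blacklist[i].max_step)
			return blacklist[i].flags;
	}
	return 0;
}

int main(void)
{
	/* Skylake-X-like model 0x55, stepping 4: hits the pre-split row, no ITS. */
	printf("step 4: ITS=%d\n", !!(match_flags(0x55, 4) & ITS));
	/* Stepping 7: falls through to the STEP_MAX row, ITS is set. */
	printf("step 7: ITS=%d\n", !!(match_flags(0x55, 7) & ITS));
	return 0;
}
```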