
Commit 99c13b8

Mikulas Patocka authored and KAGA-KOKO committed
x86/mm/pat: Don't report PAT on CPUs that don't support it
The pat_enabled() logic is broken on CPUs which do not support PAT and
where the initialization code fails to call pat_init(). Due to that the
enabled flag stays true and pat_enabled() returns true wrongfully. As a
consequence the mappings, e.g. for Xorg, are set up with the wrong caching
mode and the required MTRR setups are omitted.

To cure this the following changes are required:

  1) Make pat_enabled() return true only if PAT initialization was
     invoked and successful.

  2) Invoke init_cache_modes() unconditionally in setup_arch() and
     remove the extra callsites in pat_disable() and the pat disabled
     code path in pat_init().

Also rename __pat_enabled to pat_disabled to reflect the real purpose
of this variable.

Fixes: 9cd25aa ("x86/mm/pat: Emulate PAT when it is disabled")
Signed-off-by: Mikulas Patocka <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Bernhard Held <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: "Luis R. Rodriguez" <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/alpine.LRH.2.02.1707041749300.3456@file01.intranet.prod.int.rdu2.redhat.com
1 parent 5122daa
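For illustration only, the flag logic this patch introduces can be sketched as a
stand-alone user-space program. This is a simplification, not kernel code:
cpu_has_pat and the call sequence in main() are stand-ins for the real
mtrr_bp_init()/setup_arch() boot flow, and the MSR write is elided.

/*
 * Simplified, stand-alone illustration of the new flag logic.
 * Not kernel code: cpu_has_pat and the calls in main() stand in for
 * the real mtrr_bp_init()/setup_arch() boot flow.
 */
#include <stdbool.h>
#include <stdio.h>

static bool pat_disabled;     /* set by "nopat" or pat_disable()            */
static bool pat_initialized;  /* set only after the PAT MSR was programmed  */
static bool init_cm_done;     /* cache-mode translation table populated     */

static bool pat_enabled(void)
{
	/*
	 * The old code returned a flag that defaulted to true; PAT is now
	 * reported only if initialization actually happened.
	 */
	return pat_initialized;
}

static void init_cache_modes(void)
{
	if (init_cm_done)
		return;
	/* ... populate the cache-mode translation table ... */
	init_cm_done = true;
}

static void pat_init(bool cpu_has_pat)
{
	if (pat_disabled)
		return;
	if (!cpu_has_pat)
		return;	/* MSR never written, pat_initialized stays false */
	/* wrmsrl(MSR_IA32_CR_PAT, pat) would happen here */
	pat_initialized = true;
	init_cache_modes();
}

int main(void)
{
	bool cpu_has_pat = false;	/* a CPU without PAT support */

	pat_init(cpu_has_pat);	/* what mtrr_bp_init() would trigger */
	init_cache_modes();	/* the new unconditional call in setup_arch() */

	/* Prints 0: PAT is no longer reported on CPUs that lack it. */
	printf("pat_enabled() = %d\n", pat_enabled());
	return 0;
}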

File tree

3 files changed: 20 additions & 16 deletions


arch/x86/include/asm/pat.h

Lines changed: 1 addition & 0 deletions
@@ -7,6 +7,7 @@
 bool pat_enabled(void);
 void pat_disable(const char *reason);
 extern void pat_init(void);
+extern void init_cache_modes(void);
 
 extern int reserve_memtype(u64 start, u64 end,
 		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);

arch/x86/kernel/setup.c

Lines changed: 7 additions & 0 deletions
@@ -1075,6 +1075,13 @@ void __init setup_arch(char **cmdline_p)
 
 	max_possible_pfn = max_pfn;
 
+	/*
+	 * This call is required when the CPU does not support PAT. If
+	 * mtrr_bp_init() invoked it already via pat_init() the call has no
+	 * effect.
+	 */
+	init_cache_modes();
+
 	/*
 	 * Define random base addresses for memory sections after max_pfn is
 	 * defined and before each memory section base is used.

arch/x86/mm/pat.c

Lines changed: 12 additions & 16 deletions
@@ -37,25 +37,23 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
 
 void pat_disable(const char *reason)
 {
-	if (!__pat_enabled)
+	if (pat_disabled)
 		return;
 
 	if (boot_cpu_done) {
 		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
 		return;
 	}
 
-	__pat_enabled = 0;
+	pat_disabled = true;
 	pr_info("x86/PAT: %s\n", reason);
-
-	init_cache_modes();
 }
 
 static int __init nopat(char *str)
@@ -67,7 +65,7 @@ early_param("nopat", nopat);
 
 bool pat_enabled(void)
 {
-	return !!__pat_enabled;
+	return pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -205,6 +203,8 @@ static void __init_cache_modes(u64 pat)
 		update_cache_mode_entry(i, cache);
 	}
 	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+	init_cm_done = true;
 }
 
 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -225,6 +225,7 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
+	pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
@@ -242,10 +243,9 @@ static void pat_ap_init(u64 pat)
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-static void init_cache_modes(void)
+void init_cache_modes(void)
 {
 	u64 pat = 0;
-	static int init_cm_done;
 
 	if (init_cm_done)
 		return;
@@ -287,8 +287,6 @@ static void init_cache_modes(void)
 	}
 
 	__init_cache_modes(pat);
-
-	init_cm_done = 1;
 }
 
 /**
@@ -306,10 +304,8 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!pat_enabled()) {
-		init_cache_modes();
+	if (pat_disabled)
 		return;
-	}
 
 	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
 	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
