Commit c7864ee

Merge tag 'x86_urgent_for_v6.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Reset the why-the-system-rebooted register on AMD to avoid stale bits
   remaining from previous boots

 - Add a missing barrier in the TLB flushing code to prevent erroneously
   not flushing a TLB generation

 - Make sure cpa_flush() does not overshoot when computing the end range
   of a flush region

 - Fix resctrl bandwidth counting on AMD systems when the amount of
   monitoring groups created exceeds the number the hardware can track

* tag 'x86_urgent_for_v6.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/CPU/AMD: Prevent reset reasons from being retained across reboot
  x86/mm: Fix SMP ordering in switch_mm_irqs_off()
  x86/mm: Fix overflow in __cpa_addr()
  x86/resctrl: Fix miscount of bandwidth event when reactivating previously unavailable RMID
2 parents 1c64efc + e6416c2

File tree

4 files changed: 47 additions & 9 deletions

arch/x86/kernel/cpu/amd.c

Lines changed: 14 additions & 2 deletions
@@ -1355,11 +1355,23 @@ static __init int print_s5_reset_status_mmio(void)
 		return 0;
 
 	value = ioread32(addr);
-	iounmap(addr);
 
 	/* Value with "all bits set" is an error response and should be ignored. */
-	if (value == U32_MAX)
+	if (value == U32_MAX) {
+		iounmap(addr);
 		return 0;
+	}
+
+	/*
+	 * Clear all reason bits so they won't be retained if the next reset
+	 * does not update the register. Besides, some bits are never cleared by
+	 * hardware so it's software's responsibility to clear them.
+	 *
+	 * Writing the value back effectively clears all reason bits as they are
+	 * write-1-to-clear.
+	 */
+	iowrite32(value, addr);
+	iounmap(addr);
 
 	for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) {
 		if (!(value & BIT(i)))
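
The write-back works because these reason bits are write-1-to-clear (W1C): writing back exactly the value just read asserts a 1 in every latched position and clears it. Below is a minimal, self-contained sketch of that idiom; fake_reg, reg_read() and reg_write() model a hypothetical W1C register, not the actual AMD S5 MMIO interface.

/* Sketch of the write-1-to-clear idiom; the register model is hypothetical. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x00000005;	/* two latched "reason" bits */

static uint32_t reg_read(void)
{
	return fake_reg;
}

static void reg_write(uint32_t val)
{
	fake_reg &= ~val;	/* W1C: every 1 written clears that bit */
}

int main(void)
{
	uint32_t value = reg_read();

	printf("before: %#x\n", (unsigned)fake_reg);	/* 0x5 */
	reg_write(value);				/* write back what was read */
	printf("after:  %#x\n", (unsigned)fake_reg);	/* 0: all latched bits cleared */
	return 0;
}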

arch/x86/kernel/cpu/resctrl/monitor.c

Lines changed: 10 additions & 4 deletions
@@ -242,7 +242,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
 		       u32 unused, u32 rmid, enum resctrl_event_id eventid,
 		       u64 *val, void *ignored)
 {
+	struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d);
 	int cpu = cpumask_any(&d->hdr.cpu_mask);
+	struct arch_mbm_state *am;
 	u64 msr_val;
 	u32 prmid;
 	int ret;
@@ -251,12 +253,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
 
 	prmid = logical_rmid_to_physical_rmid(cpu, rmid);
 	ret = __rmid_read_phys(prmid, eventid, &msr_val);
-	if (ret)
-		return ret;
 
-	*val = get_corrected_val(r, d, rmid, eventid, msr_val);
+	if (!ret) {
+		*val = get_corrected_val(r, d, rmid, eventid, msr_val);
+	} else if (ret == -EINVAL) {
+		am = get_arch_mbm_state(hw_dom, rmid, eventid);
+		if (am)
+			am->prev_msr = 0;
+	}
 
-	return 0;
+	return ret;
 }
 
 static int __cntr_id_read(u32 cntr_id, u64 *val)
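
The miscount comes from MBM bandwidth being accounted as a delta against the last raw counter value cached in prev_msr: if the RMID was unavailable and the hardware restarted its count, a stale baseline makes the first delta after reactivation garbage. Below is a simplified sketch of that failure and the fix; struct mbm_state and mbm_account() only model the kernel's delta accounting, they are not its actual implementation.

/* Simplified model of MBM delta accounting; not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>

struct mbm_state {
	uint64_t prev_msr;	/* last raw counter value seen */
	uint64_t chunks;	/* accumulated bandwidth */
};

/* Account the bandwidth used since the previous read. */
static void mbm_account(struct mbm_state *st, uint64_t msr_val)
{
	st->chunks += msr_val - st->prev_msr;
	st->prev_msr = msr_val;
}

int main(void)
{
	/* Baseline cached before the RMID became unavailable. */
	struct mbm_state st = { .prev_msr = 900, .chunks = 0 };

	/*
	 * The RMID reactivates and hardware restarts counting near zero.
	 * Without resetting prev_msr, 50 - 900 wraps around to a huge
	 * uint64_t and gets accounted as bandwidth. Zeroing the baseline,
	 * as the -EINVAL branch above does, avoids that.
	 */
	st.prev_msr = 0;
	mbm_account(&st, 50);

	printf("chunks = %llu\n", (unsigned long long)st.chunks);	/* 50 */
	return 0;
}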

arch/x86/mm/pat/set_memory.c

Lines changed: 1 addition & 1 deletion
@@ -446,7 +446,7 @@ static void cpa_flush(struct cpa_data *cpa, int cache)
 	}
 
 	start = fix_addr(__cpa_addr(cpa, 0));
-	end = fix_addr(__cpa_addr(cpa, cpa->numpages));
+	end = start + cpa->numpages * PAGE_SIZE;
 	if (cpa->force_flush_all)
 		end = TLB_FLUSH_ALL;
 
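
The new form avoids evaluating __cpa_addr() at index cpa->numpages, one past the last valid page, where the lookup can overflow; because start is already translated, the exclusive end of a contiguous flush range is plain arithmetic. A trivial sketch with made-up values (PAGE_SIZE assumed to be 4 KiB):

/* End-of-range arithmetic sketch; the address and page count are made up. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long start = 0xffff888000000000UL;	/* already-translated start */
	unsigned long numpages = 16;

	/* Exclusive end: first byte past the last of the numpages pages. */
	unsigned long end = start + numpages * PAGE_SIZE;

	printf("flush [%#lx, %#lx) covers %lu pages\n",
	       start, end, (end - start) / PAGE_SIZE);
	return 0;
}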

arch/x86/mm/tlb.c

Lines changed: 22 additions & 2 deletions
@@ -911,11 +911,31 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		 * CR3 and cpu_tlbstate.loaded_mm are not all in sync.
 		 */
 		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
-		barrier();
 
-		/* Start receiving IPIs and then read tlb_gen (and LAM below) */
+		/*
+		 * Make sure this CPU is set in mm_cpumask() such that we'll
+		 * receive invalidation IPIs.
+		 *
+		 * Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic
+		 * operation, or explicitly provide one. Such that:
+		 *
+		 *	switch_mm_irqs_off()				flush_tlb_mm_range()
+		 *	  smp_store_release(loaded_mm, SWITCHING);	atomic64_inc_return(tlb_gen)
+		 *	  smp_mb(); // here				// smp_mb() implied
+		 *	  atomic64_read(tlb_gen);			this_cpu_read(loaded_mm);
+		 *
+		 * we properly order against flush_tlb_mm_range(), where the
+		 * loaded_mm load can happen in native_flush_tlb_multi() ->
+		 * should_flush_tlb().
+		 *
+		 * This way switch_mm() must see the new tlb_gen or
+		 * flush_tlb_mm_range() must see the new loaded_mm, or both.
+		 */
 		if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
 			cpumask_set_cpu(cpu, mm_cpumask(next));
+		else
+			smp_mb();
+
 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
 		ns = choose_new_asid(next, next_tlb_gen);
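
The scheme in the new comment is the classic store-buffering pattern: each side stores one variable, executes a full barrier, then loads the other, which forbids the outcome where both sides read the old value. Here is a C11-atomics sketch of that guarantee; loaded_mm and tlb_gen are simplified stand-ins for the kernel state, and the two threads only model the roles of switch_mm_irqs_off() and flush_tlb_mm_range().

/* Store-buffering sketch in C11 atomics; a model, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_int loaded_mm;	/* stands in for cpu_tlbstate.loaded_mm */
static atomic_int tlb_gen;	/* stands in for mm->context.tlb_gen */
static int r0, r1;

static int switcher(void *arg)	/* models switch_mm_irqs_off() */
{
	(void)arg;
	atomic_store_explicit(&loaded_mm, 1, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst);	/* the patch's explicit smp_mb() */
	r0 = atomic_load_explicit(&tlb_gen, memory_order_relaxed);
	return 0;
}

static int flusher(void *arg)	/* models flush_tlb_mm_range() */
{
	(void)arg;
	atomic_fetch_add_explicit(&tlb_gen, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* barrier implied by atomic64_inc_return() */
	r1 = atomic_load_explicit(&loaded_mm, memory_order_relaxed);
	return 0;
}

int main(void)
{
	thrd_t a, b;

	thrd_create(&a, switcher, NULL);
	thrd_create(&b, flusher, NULL);
	thrd_join(a, NULL);
	thrd_join(b, NULL);

	/* r0 == 0 && r1 == 0 is forbidden: one side must see the other's store. */
	printf("r0=%d r1=%d\n", r0, r1);
	return 0;
}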
