Skip to content

Commit 68f6051

Browse files
committed
Merge tag 'kvm-s390-next-6.18-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: A bugfix and a performance improvement * Improve the interrupt-CPU selection for wakeup: change the heuristic that decides which vCPU a floating interrupt is delivered to. * Clear the pte when discarding a swapped page because of CMMA; this bug was introduced in 6.16 when refactoring gmap code.
2 parents a6ad541 + 5deafa2 commit 68f6051

File tree

5 files changed

+44
-35
lines changed

5 files changed

+44
-35
lines changed

arch/s390/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -356,7 +356,7 @@ struct kvm_s390_float_interrupt {
356356
int counters[FIRQ_MAX_COUNT];
357357
struct kvm_s390_mchk_info mchk;
358358
struct kvm_s390_ext_info srv_signal;
359-
int next_rr_cpu;
359+
int last_sleep_cpu;
360360
struct mutex ais_lock;
361361
u8 simm;
362362
u8 nimm;

arch/s390/include/asm/pgtable.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2055,4 +2055,26 @@ static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
20552055
return res;
20562056
}
20572057

2058+
static inline pgste_t pgste_get_lock(pte_t *ptep)
2059+
{
2060+
unsigned long value = 0;
2061+
#ifdef CONFIG_PGSTE
2062+
unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE);
2063+
2064+
do {
2065+
value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr);
2066+
} while (value & PGSTE_PCL_BIT);
2067+
value |= PGSTE_PCL_BIT;
2068+
#endif
2069+
return __pgste(value);
2070+
}
2071+
2072+
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
2073+
{
2074+
#ifdef CONFIG_PGSTE
2075+
barrier();
2076+
WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT);
2077+
#endif
2078+
}
2079+
20582080
#endif /* _S390_PAGE_H */

arch/s390/kvm/interrupt.c

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1323,6 +1323,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
13231323
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
13241324
no_timer:
13251325
kvm_vcpu_srcu_read_unlock(vcpu);
1326+
vcpu->kvm->arch.float_int.last_sleep_cpu = vcpu->vcpu_idx;
13261327
kvm_vcpu_halt(vcpu);
13271328
vcpu->valid_wakeup = false;
13281329
__unset_cpu_idle(vcpu);
@@ -1949,18 +1950,15 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
19491950
if (!online_vcpus)
19501951
return;
19511952

1952-
/* find idle VCPUs first, then round robin */
1953-
sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
1954-
if (sigcpu == online_vcpus) {
1955-
do {
1956-
sigcpu = kvm->arch.float_int.next_rr_cpu++;
1957-
kvm->arch.float_int.next_rr_cpu %= online_vcpus;
1958-
/* avoid endless loops if all vcpus are stopped */
1959-
if (nr_tries++ >= online_vcpus)
1960-
return;
1961-
} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1953+
for (sigcpu = kvm->arch.float_int.last_sleep_cpu; ; sigcpu++) {
1954+
sigcpu %= online_vcpus;
1955+
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1956+
if (!is_vcpu_stopped(dst_vcpu))
1957+
break;
1958+
/* avoid endless loops if all vcpus are stopped */
1959+
if (nr_tries++ >= online_vcpus)
1960+
return;
19621961
}
1963-
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
19641962

19651963
/* make the VCPU drop out of the SIE, or wake it up if sleeping */
19661964
switch (type) {

arch/s390/mm/gmap_helpers.c

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#include <linux/pagewalk.h>
1616
#include <linux/ksm.h>
1717
#include <asm/gmap_helpers.h>
18+
#include <asm/pgtable.h>
1819

1920
/**
2021
* ptep_zap_swap_entry() - discard a swap entry.
@@ -47,6 +48,7 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
4748
{
4849
struct vm_area_struct *vma;
4950
spinlock_t *ptl;
51+
pgste_t pgste;
5052
pte_t *ptep;
5153

5254
mmap_assert_locked(mm);
@@ -60,8 +62,16 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
6062
ptep = get_locked_pte(mm, vmaddr, &ptl);
6163
if (unlikely(!ptep))
6264
return;
63-
if (pte_swap(*ptep))
65+
if (pte_swap(*ptep)) {
66+
preempt_disable();
67+
pgste = pgste_get_lock(ptep);
68+
6469
ptep_zap_swap_entry(mm, pte_to_swp_entry(*ptep));
70+
pte_clear(mm, vmaddr, ptep);
71+
72+
pgste_set_unlock(ptep, pgste);
73+
preempt_enable();
74+
}
6575
pte_unmap_unlock(ptep, ptl);
6676
}
6777
EXPORT_SYMBOL_GPL(gmap_helper_zap_one_page);

arch/s390/mm/pgtable.c

Lines changed: 1 addition & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#include <asm/tlbflush.h>
2525
#include <asm/mmu_context.h>
2626
#include <asm/page-states.h>
27+
#include <asm/pgtable.h>
2728
#include <asm/machine.h>
2829

2930
pgprot_t pgprot_writecombine(pgprot_t prot)
@@ -115,28 +116,6 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
115116
return old;
116117
}
117118

118-
static inline pgste_t pgste_get_lock(pte_t *ptep)
119-
{
120-
unsigned long value = 0;
121-
#ifdef CONFIG_PGSTE
122-
unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE);
123-
124-
do {
125-
value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr);
126-
} while (value & PGSTE_PCL_BIT);
127-
value |= PGSTE_PCL_BIT;
128-
#endif
129-
return __pgste(value);
130-
}
131-
132-
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
133-
{
134-
#ifdef CONFIG_PGSTE
135-
barrier();
136-
WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT);
137-
#endif
138-
}
139-
140119
static inline pgste_t pgste_get(pte_t *ptep)
141120
{
142121
unsigned long pgste = 0;

0 commit comments

Comments
 (0)