Commit 2609708

vittyvk authored and bonzini committed
KVM: x86: hyper-v: Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls gently
Currently, HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST{,EX} calls are handled exactly the same way as HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE{,EX}: by flushing the whole VPID, which is sub-optimal. Switch to handling these requests with 'flush_tlb_gva()' hooks instead. Use the newly introduced TLB flush fifo to queue the requests.

Reviewed-by: Sean Christopherson <[email protected]>
Signed-off-by: Vitaly Kuznetsov <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 56b5354 commit 2609708
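To make the new precise-flush path easier to follow: each entry queued on the TLB flush fifo packs a guest virtual address in its upper bits and, in its lower 12 bits, the number of additional pages to flush after that address, so one entry can describe up to 4096 consecutive pages. The stand-alone C sketch below mirrors the decode loop added to kvm_hv_vcpu_flush_tlb() in the diff; the SKETCH_* constants and the flush_one_gva() callback are placeholders for illustration, not kernel API.

#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1ULL << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK  (~(SKETCH_PAGE_SIZE - 1))

/*
 * Decode one queued flush entry: the upper bits hold the starting GVA,
 * the lower 12 bits hold the count of additional pages to flush.
 */
static void flush_entry(uint64_t entry, void (*flush_one_gva)(uint64_t gva))
{
        uint64_t gva = entry & SKETCH_PAGE_MASK;
        uint64_t extra = entry & ~SKETCH_PAGE_MASK;
        uint64_t j;

        for (j = 0; j <= extra; j++)
                flush_one_gva(gva + j * SKETCH_PAGE_SIZE);
}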

File tree

1 file changed (+95, -16 lines)


arch/x86/kvm/hyperv.c

Lines changed: 95 additions & 16 deletions
@@ -1800,7 +1800,14 @@ static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
                                     sparse_banks, consumed_xmm_halves, offset);
 }
 
-static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu)
+static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[],
+                                        int consumed_xmm_halves, gpa_t offset)
+{
+        return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt,
+                                  entries, consumed_xmm_halves, offset);
+}
+
+static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, u64 *entries, int count)
 {
         struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
         struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
@@ -1811,24 +1818,64 @@ static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu)
 
         tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
 
-        kfifo_in_spinlocked_noirqsave(&tlb_flush_fifo->entries, &flush_all_entry,
-                                      1, &tlb_flush_fifo->write_lock);
+        spin_lock(&tlb_flush_fifo->write_lock);
+
+        /*
+         * All entries should fit on the fifo leaving one free for 'flush all'
+         * entry in case another request comes in. In case there's not enough
+         * space, just put 'flush all' entry there.
+         */
+        if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
+                WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
+                goto out_unlock;
+        }
+
+        /*
+         * Note: full fifo always contains 'flush all' entry, no need to check the
+         * return value.
+         */
+        kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);
+
+out_unlock:
+        spin_unlock(&tlb_flush_fifo->write_lock);
 }
 
 int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
 {
         struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
         struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+        u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
+        int i, j, count;
+        gva_t gva;
 
-        if (!hv_vcpu)
+        if (!tdp_enabled || !hv_vcpu)
                 return -EINVAL;
 
         tlb_flush_fifo = &hv_vcpu->tlb_flush_fifo;
 
+        count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);
+
+        for (i = 0; i < count; i++) {
+                if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
+                        goto out_flush_all;
+
+                /*
+                 * Lower 12 bits of 'address' encode the number of additional
+                 * pages to flush.
+                 */
+                gva = entries[i] & PAGE_MASK;
+                for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
+                        static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
+
+                ++vcpu->stat.tlb_flush;
+        }
+        return 0;
+
+out_flush_all:
         kfifo_reset_out(&tlb_flush_fifo->entries);
 
-        /* Precise flushing isn't implemented yet. */
-        return -EOPNOTSUPP;
+        /* Fall back to full flush. */
+        return -ENOSPC;
 }
 
 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
@@ -1837,11 +1884,21 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
         struct hv_tlb_flush_ex flush_ex;
         struct hv_tlb_flush flush;
         DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
+        /*
+         * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
+         * entries on the TLB flush fifo. The last entry, however, needs to be
+         * always left free for 'flush all' entry which gets placed when
+         * there is not enough space to put all the requested entries.
+         */
+        u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
+        u64 *tlb_flush_entries;
         u64 valid_bank_mask;
         u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
         struct kvm_vcpu *v;
         unsigned long i;
         bool all_cpus;
+        int consumed_xmm_halves = 0;
+        gpa_t data_offset;
 
         /*
          * The Hyper-V TLFS doesn't allow more than 64 sparse banks, e.g. the
@@ -1857,10 +1914,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                         flush.address_space = hc->ingpa;
                         flush.flags = hc->outgpa;
                         flush.processor_mask = sse128_lo(hc->xmm[0]);
+                        consumed_xmm_halves = 1;
                 } else {
                         if (unlikely(kvm_read_guest(kvm, hc->ingpa,
                                                     &flush, sizeof(flush))))
                                 return HV_STATUS_INVALID_HYPERCALL_INPUT;
+                        data_offset = sizeof(flush);
                 }
 
                 trace_kvm_hv_flush_tlb(flush.processor_mask,
@@ -1884,10 +1943,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                         flush_ex.flags = hc->outgpa;
                         memcpy(&flush_ex.hv_vp_set,
                                &hc->xmm[0], sizeof(hc->xmm[0]));
+                        consumed_xmm_halves = 2;
                 } else {
                         if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
                                                     sizeof(flush_ex))))
                                 return HV_STATUS_INVALID_HYPERCALL_INPUT;
+                        data_offset = sizeof(flush_ex);
                 }
 
                 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
@@ -1902,26 +1963,44 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                 if (hc->var_cnt != hweight64(valid_bank_mask))
                         return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
-                if (all_cpus)
-                        goto do_flush;
+                if (!all_cpus) {
+                        if (!hc->var_cnt)
+                                goto ret_success;
 
-                if (!hc->var_cnt)
-                        goto ret_success;
+                        if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
+                                                  consumed_xmm_halves, data_offset))
+                                return HV_STATUS_INVALID_HYPERCALL_INPUT;
+                }
+
+                /*
+                 * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU
+                 * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs'
+                 * case (HV_GENERIC_SET_ALL). Always adjust data_offset and
+                 * consumed_xmm_halves to make sure TLB flush entries are read
+                 * from the correct offset.
+                 */
+                data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
+                consumed_xmm_halves += hc->var_cnt;
+        }
 
-                if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, 2,
-                                          offsetof(struct hv_tlb_flush_ex,
-                                                   hv_vp_set.bank_contents)))
+        if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
+            hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
+            hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
+                tlb_flush_entries = NULL;
+        } else {
+                if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries,
+                                                 consumed_xmm_halves, data_offset))
                         return HV_STATUS_INVALID_HYPERCALL_INPUT;
+                tlb_flush_entries = __tlb_flush_entries;
         }
 
-do_flush:
         /*
          * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
          * analyze it here, flush TLB regardless of the specified address space.
          */
         if (all_cpus) {
                 kvm_for_each_vcpu(i, v, kvm)
-                        hv_tlb_flush_enqueue(v);
+                        hv_tlb_flush_enqueue(v, tlb_flush_entries, hc->rep_cnt);
 
                 kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
         } else {
@@ -1931,7 +2010,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                 v = kvm_get_vcpu(kvm, i);
                 if (!v)
                         continue;
-                hv_tlb_flush_enqueue(v);
+                hv_tlb_flush_enqueue(v, tlb_flush_entries, hc->rep_cnt);
         }
 
         kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
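
For readers who want to model the enqueue policy of hv_tlb_flush_enqueue() outside the kernel, here is a minimal sketch assuming a plain bounded array instead of a kfifo, with the write_lock and kernel types omitted; FIFO_SIZE, FLUSH_ALL_ENTRY and enqueue_flush() are illustrative names only, not the kernel's.

#include <stddef.h>
#include <stdint.h>

#define FIFO_SIZE       16
#define FLUSH_ALL_ENTRY ((uint64_t)-1)

struct flush_fifo {
        uint64_t entries[FIFO_SIZE];
        size_t used;
};

static void enqueue_flush(struct flush_fifo *f, const uint64_t *e, size_t count)
{
        size_t avail = FIFO_SIZE - f->used;
        size_t i;

        /* Queue precise entries only while one slot stays free for 'flush all'. */
        if (count && e && count < avail) {
                for (i = 0; i < count; i++)
                        f->entries[f->used++] = e[i];
                return;
        }

        /*
         * Not enough room (or no precise entries): collapse the request into a
         * single 'flush all' entry. A full fifo already ends with one, so it
         * is safe to drop the request in that case.
         */
        if (avail)
                f->entries[f->used++] = FLUSH_ALL_ENTRY;
}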

0 commit comments
