@@ -18,6 +18,7 @@
 #include <asm/mmu_context.h>
 #include <asm/sclp.h>
 #include <asm/nmi.h>
+#include <asm/dis.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 
@@ -27,7 +28,8 @@ struct vsie_page {
 	struct kvm_s390_sie_block *scb_o;	/* 0x0200 */
 	/* the shadow gmap in use by the vsie_page */
 	struct gmap *gmap;			/* 0x0208 */
-	__u8 reserved[0x1000 - 0x0210];		/* 0x0210 */
+	__u8 reserved[0x0800 - 0x0210];		/* 0x0210 */
+	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
 } __packed;
 
 /* trigger a validity icpt for the given scb */
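
Editorial note, not part of the patch: as the offsets imply, the new fac buffer is 0x800 bytes and runs from offset 0x0800 to the end of the 4 KiB vsie_page, which is why reserved now ends at 0x0800 instead of 0x1000. A minimal sketch of compile-time checks that would make this layout assumption explicit (hypothetical helper name, assuming the usual kernel BUILD_BUG_ON/offsetof helpers):

/*
 * Sketch only: layout assumptions of the vsie_page.  fac is expected to
 * start at offset 0x0800 and to fill the structure up to PAGE_SIZE.
 */
static inline void vsie_page_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct vsie_page, fac) != 0x800);
	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
}
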
@@ -194,6 +196,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	scb_s->ecb2 = 0;
 	scb_s->ecb3 = 0;
 	scb_s->ecd = 0;
+	scb_s->fac = 0;
 
 	rc = prepare_cpuflags(vcpu, vsie_page);
 	if (rc)
@@ -521,6 +524,44 @@ static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 	vsie_page->scb_s.icptcode = 0;
 }
 
+/* rewind the psw and clear the vsie icpt, so we can retry execution */
+static void retry_vsie_icpt(struct vsie_page *vsie_page)
+{
+	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+	int ilen = insn_length(scb_s->ipa >> 8);
+
+	/* take care of EXECUTE instructions */
+	if (scb_s->icptstatus & 1) {
+		ilen = (scb_s->icptstatus >> 4) & 0x6;
+		if (!ilen)
+			ilen = 4;
+	}
+	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
+	clear_vsie_icpt(vsie_page);
+}
+
+/*
+ * Try to shadow + enable the guest 2 provided facility list.
+ * Retry instruction execution if enabled for and provided by guest 2.
+ *
+ * Returns: - 0 if handled (retry or guest 2 icpt)
+ *          - > 0 if control has to be given to guest 2
+ */
+static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+	__u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U;
+
+	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
+		retry_vsie_icpt(vsie_page);
+		if (read_guest_real(vcpu, fac, &vsie_page->fac,
+				    sizeof(vsie_page->fac)))
+			return set_validity_icpt(scb_s, 0x1090U);
+		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
+	}
+	return 0;
+}
+
 /*
  * Run the vsie on a shadow scb and a shadow gmap, without any further
  * sanity checks, handling SIE faults.
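
Editorial note, not part of the patch: retry_vsie_icpt() rewinds the guest-3 PSW so the intercepted STFLE can simply be re-executed once the shadow facility list is in place. The length comes from insn_length(), presumably the reason for the new <asm/dis.h> include, which derives it from the first opcode byte; if the STFLE was the target of an EXECUTE (icptstatus & 1), the PSW points past the EXECUTE itself, so the EXECUTE's length (4, or 6 for EXECUTE RELATIVE LONG) is taken from the interception status instead. A small standalone sketch of the length derivation, assuming the same formula as <asm/dis.h>:

/*
 * Sketch only: instruction length from the first opcode byte.
 * Top bits 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.
 */
static inline int insn_length_sketch(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}

/* insn_length_sketch(0xb2) == 4 (STFLE), insn_length_sketch(0xc6) == 6 (EXRL) */
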
@@ -558,6 +599,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		return handle_fault(vcpu, vsie_page);
 
 	switch (scb_s->icptcode) {
+	case ICPT_INST:
+		if (scb_s->ipa == 0xb2b0)
+			rc = handle_stfle(vcpu, vsie_page);
+		break;
 	case ICPT_STOP:
 		/* stop not requested by g2 - must have been a kick */
 		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
@@ -690,7 +735,7 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 
 	mutex_lock(&kvm->arch.vsie.mutex);
 	if (kvm->arch.vsie.page_count < nr_vcpus) {
-		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
 		if (!page) {
 			mutex_unlock(&kvm->arch.vsie.mutex);
 			return ERR_PTR(-ENOMEM);
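
Editorial note, not part of the patch: the GFP_DMA flag ties in with handle_stfle() above. The SIE block's fac field is a __u32, and the 0x7ffffff8 mask applied to the guest-2 value suggests a 31-bit, doubleword-aligned address; on s390, GFP_DMA memory lies below 2 GiB, so the shadow facility list embedded in the vsie_page stays addressable through that field. A hedged sketch of the resulting invariant (hypothetical helper, not in the patch):

/*
 * Sketch only: the shadow facility list must fit a 31-bit,
 * doubleword-aligned address, which the GFP_DMA allocation (below
 * 2 GiB on s390) and the 0x0800 struct offset provide.
 */
static inline __u32 shadow_fac_origin(struct vsie_page *vsie_page)
{
	unsigned long addr = (unsigned long)&vsie_page->fac;

	WARN_ON_ONCE(addr & ~0x7ffffff8UL);
	return (__u32)addr;
}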