Skip to content

Commit ab234a2

Browse files
jgross1 authored and suryasaimadhu committed
x86/pv: Rework arch_local_irq_restore() to not use popf
POPF is a rather expensive operation, so don't use it for restoring irq flags. Instead, test whether interrupts are enabled in the flags parameter and enable interrupts via STI in that case. This results in the restore_fl paravirt op no longer being needed.

Suggested-by: Andy Lutomirski <[email protected]>
Signed-off-by: Juergen Gross <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent afd3052 commit ab234a2

File tree

10 files changed

+8
-93
lines changed

10 files changed

+8
-93
lines changed

arch/x86/include/asm/irqflags.h

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -35,15 +35,6 @@ extern __always_inline unsigned long native_save_fl(void)
3535
return flags;
3636
}
3737

38-
extern inline void native_restore_fl(unsigned long flags);
39-
extern inline void native_restore_fl(unsigned long flags)
40-
{
41-
asm volatile("push %0 ; popf"
42-
: /* no output */
43-
:"g" (flags)
44-
:"memory", "cc");
45-
}
46-
4738
static __always_inline void native_irq_disable(void)
4839
{
4940
asm volatile("cli": : :"memory");
@@ -79,11 +70,6 @@ static __always_inline unsigned long arch_local_save_flags(void)
7970
return native_save_fl();
8071
}
8172

82-
static __always_inline void arch_local_irq_restore(unsigned long flags)
83-
{
84-
native_restore_fl(flags);
85-
}
86-
8773
static __always_inline void arch_local_irq_disable(void)
8874
{
8975
native_irq_disable();
@@ -152,6 +138,12 @@ static __always_inline int arch_irqs_disabled(void)
152138

153139
return arch_irqs_disabled_flags(flags);
154140
}
141+
142+
static __always_inline void arch_local_irq_restore(unsigned long flags)
143+
{
144+
if (!arch_irqs_disabled_flags(flags))
145+
arch_local_irq_enable();
146+
}
155147
#else
156148
#ifdef CONFIG_X86_64
157149
#ifdef CONFIG_XEN_PV

arch/x86/include/asm/paravirt.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -648,11 +648,6 @@ static inline notrace unsigned long arch_local_save_flags(void)
648648
return PVOP_CALLEE0(unsigned long, irq.save_fl);
649649
}
650650

651-
static inline notrace void arch_local_irq_restore(unsigned long f)
652-
{
653-
PVOP_VCALLEE1(irq.restore_fl, f);
654-
}
655-
656651
static inline notrace void arch_local_irq_disable(void)
657652
{
658653
PVOP_VCALLEE0(irq.irq_disable);

arch/x86/include/asm/paravirt_types.h

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -168,16 +168,13 @@ struct pv_cpu_ops {
168168
struct pv_irq_ops {
169169
#ifdef CONFIG_PARAVIRT_XXL
170170
/*
171-
* Get/set interrupt state. save_fl and restore_fl are only
172-
* expected to use X86_EFLAGS_IF; all other bits
173-
* returned from save_fl are undefined, and may be ignored by
174-
* restore_fl.
171+
* Get/set interrupt state. save_fl is expected to use X86_EFLAGS_IF;
172+
* all other bits returned from save_fl are undefined.
175173
*
176174
* NOTE: These functions callers expect the callee to preserve
177175
* more registers than the standard C calling convention.
178176
*/
179177
struct paravirt_callee_save save_fl;
180-
struct paravirt_callee_save restore_fl;
181178
struct paravirt_callee_save irq_disable;
182179
struct paravirt_callee_save irq_enable;
183180

arch/x86/kernel/irqflags.S

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,3 @@ SYM_FUNC_START(native_save_fl)
1313
ret
1414
SYM_FUNC_END(native_save_fl)
1515
EXPORT_SYMBOL(native_save_fl)
16-
17-
/*
18-
* void native_restore_fl(unsigned long flags)
19-
* %eax/%rdi: flags
20-
*/
21-
SYM_FUNC_START(native_restore_fl)
22-
push %_ASM_ARG1
23-
popf
24-
ret
25-
SYM_FUNC_END(native_restore_fl)
26-
EXPORT_SYMBOL(native_restore_fl)

arch/x86/kernel/paravirt.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -320,7 +320,6 @@ struct paravirt_patch_template pv_ops = {
320320

321321
/* Irq ops. */
322322
.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
323-
.irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
324323
.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
325324
.irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
326325
.irq.safe_halt = native_safe_halt,

arch/x86/kernel/paravirt_patch.c

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@ struct patch_xxl {
2525
const unsigned char mmu_read_cr2[3];
2626
const unsigned char mmu_read_cr3[3];
2727
const unsigned char mmu_write_cr3[3];
28-
const unsigned char irq_restore_fl[2];
2928
const unsigned char cpu_wbinvd[2];
3029
const unsigned char mov64[3];
3130
};
@@ -37,7 +36,6 @@ static const struct patch_xxl patch_data_xxl = {
3736
.mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax
3837
.mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax
3938
.mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
40-
.irq_restore_fl = { 0x57, 0x9d }, // push %rdi; popfq
4139
.cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
4240
.mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
4341
};
@@ -71,7 +69,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
7169
switch (type) {
7270

7371
#ifdef CONFIG_PARAVIRT_XXL
74-
PATCH_CASE(irq, restore_fl, xxl, insn_buff, len);
7572
PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
7673
PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
7774
PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);

arch/x86/xen/enlighten_pv.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1035,8 +1035,6 @@ void __init xen_setup_vcpu_info_placement(void)
10351035
*/
10361036
if (xen_have_vcpu_info_placement) {
10371037
pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
1038-
pv_ops.irq.restore_fl =
1039-
__PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
10401038
pv_ops.irq.irq_disable =
10411039
__PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
10421040
pv_ops.irq.irq_enable =

arch/x86/xen/irq.c

Lines changed: 0 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -42,28 +42,6 @@ asmlinkage __visible unsigned long xen_save_fl(void)
4242
}
4343
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
4444

45-
__visible void xen_restore_fl(unsigned long flags)
46-
{
47-
struct vcpu_info *vcpu;
48-
49-
/* convert from IF type flag */
50-
flags = !(flags & X86_EFLAGS_IF);
51-
52-
/* See xen_irq_enable() for why preemption must be disabled. */
53-
preempt_disable();
54-
vcpu = this_cpu_read(xen_vcpu);
55-
vcpu->evtchn_upcall_mask = flags;
56-
57-
if (flags == 0) {
58-
barrier(); /* unmask then check (avoid races) */
59-
if (unlikely(vcpu->evtchn_upcall_pending))
60-
xen_force_evtchn_callback();
61-
preempt_enable();
62-
} else
63-
preempt_enable_no_resched();
64-
}
65-
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
66-
6745
asmlinkage __visible void xen_irq_disable(void)
6846
{
6947
/* There's a one instruction preempt window here. We need to
@@ -118,7 +96,6 @@ static void xen_halt(void)
11896

11997
static const struct pv_irq_ops xen_irq_ops __initconst = {
12098
.save_fl = PV_CALLEE_SAVE(xen_save_fl),
121-
.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
12299
.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
123100
.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
124101

arch/x86/xen/xen-asm.S

Lines changed: 0 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -72,34 +72,6 @@ SYM_FUNC_START(xen_save_fl_direct)
7272
ret
7373
SYM_FUNC_END(xen_save_fl_direct)
7474

75-
76-
/*
77-
* In principle the caller should be passing us a value return from
78-
* xen_save_fl_direct, but for robustness sake we test only the
79-
* X86_EFLAGS_IF flag rather than the whole byte. After setting the
80-
* interrupt mask state, it checks for unmasked pending events and
81-
* enters the hypervisor to get them delivered if so.
82-
*/
83-
SYM_FUNC_START(xen_restore_fl_direct)
84-
FRAME_BEGIN
85-
testw $X86_EFLAGS_IF, %di
86-
setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
87-
/*
88-
* Preempt here doesn't matter because that will deal with any
89-
* pending interrupts. The pending check may end up being run
90-
* on the wrong CPU, but that doesn't hurt.
91-
*/
92-
93-
/* check for unmasked and pending */
94-
cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
95-
jnz 1f
96-
call check_events
97-
1:
98-
FRAME_END
99-
ret
100-
SYM_FUNC_END(xen_restore_fl_direct)
101-
102-
10375
/*
10476
* Force an event check by making a hypercall, but preserve regs
10577
* before making the call.

arch/x86/xen/xen-ops.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,6 @@ static inline void __init xen_efi_init(struct boot_params *boot_params)
131131
__visible void xen_irq_enable_direct(void);
132132
__visible void xen_irq_disable_direct(void);
133133
__visible unsigned long xen_save_fl_direct(void);
134-
__visible void xen_restore_fl_direct(unsigned long);
135134

136135
__visible unsigned long xen_read_cr2(void);
137136
__visible unsigned long xen_read_cr2_direct(void);

0 commit comments

Comments (0)