commit afd30525a659ac0ae0904f0cb4a2ca75522c3123 upstream.

USERGS_SYSRET64 is used to return from a syscall via SYSRET, but a Xen
PV guest will nevertheless use the IRET hypercall, as there is no
sysret PV hypercall defined.

So instead of testing all the prerequisites for doing a sysret and
then mangling the stack for Xen PV again for doing an iret just use
the iret exit from the beginning.

This can easily be done via an ALTERNATIVE like it is done for the
sysenter compat case already.

It should be noted that this drops the optimization in Xen for not
restoring a few registers when returning to user mode, but it seems
as if the saved instructions in the kernel more than compensate for
this drop (a kernel build in a Xen PV guest was slightly faster with
this patch applied).

While at it remove the stale sysret32 remnants.

[ pawan: Brad Spengler and Salvatore Bonaccorso <carnil@debian.org>
  reported a problem with the 5.10 backport commit edc702b4a820
  ("x86/entry_64: Add VERW just before userspace transition").

  When CONFIG_PARAVIRT_XXL=y, CLEAR_CPU_BUFFERS is not executed in
  the syscall_return_via_sysret path, as USERGS_SYSRET64 is runtime
  patched to:

  .cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
			    0x48, 0x0f, 0x07 },	// swapgs; sysretq

  which is missing CLEAR_CPU_BUFFERS. It turns out dropping
  USERGS_SYSRET64 simplifies the code, allowing CLEAR_CPU_BUFFERS to
  be explicitly added to the syscall_return_via_sysret path. Below is
  with CONFIG_PARAVIRT_XXL=y and this patch applied:

  syscall_return_via_sysret:
  ...
  <+342>: swapgs
  <+345>: xchg   %ax,%ax
  <+347>: verw   -0x1a2(%rip)        <------
  <+354>: sysretq
]

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Link: https://lkml.kernel.org/r/20210120135555.32594-6-jgross@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
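For reference, the resulting exit path in entry_64.S has roughly the
following shape (a sketch of the idea, not the verbatim hunk): on Xen PV
the ALTERNATIVE is patched into a jump to the IRET path before any of
the SYSRET prerequisite checks run, while the bare-metal path ends in a
plain swapgs/sysretq, with CLEAR_CPU_BUFFERS made explicit by the later
VERW backport:

	/* On Xen PV the IRET path must be used unconditionally. */
	ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
		X86_FEATURE_XENPV

	/* ... SYSRET prerequisite checks, register restore ... */

	swapgs
	CLEAR_CPU_BUFFERS
	sysretq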
326 lines
8.1 KiB
x86 Assembly
Executable file
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with one 'and' operation.  If there are pending events,
 * then enter the hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter: if we get preempted, the
	 * interrupt path deals with any pending events.  The pending
	 * check may end up being run on the wrong CPU, but that
	 * doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	RET
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
SYM_FUNC_START(xen_save_fl_direct)
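	/*
	 * The mask byte is 1 when events are masked (interrupts disabled)
	 * and 0 when they are unmasked.  setz turns "mask == 0" into
	 * %ah = 1, and addb %ah, %ah doubles that to %ah = 2, i.e. bit 9
	 * of %rax, which is exactly X86_EFLAGS_IF (0x200).
	 */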
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
SYM_FUNC_START(xen_restore_fl_direct)
	FRAME_BEGIN
	testw $X86_EFLAGS_IF, %di
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter: if we get preempted, the
	 * interrupt path deals with any pending events.  The pending
	 * check may end up being run on the wrong CPU, but that
	 * doesn't hurt.
	 */

	/* check for unmasked and pending */
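	/*
	 * The mask byte immediately follows the pending byte in struct
	 * vcpu_info, so the 16-bit word read here is (mask << 8) | pending.
	 * It equals 0x0001 only when an event is pending while the mask is
	 * clear, the one case that requires poking the hypervisor.
	 */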
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_restore_fl_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
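	/*
	 * xen_force_evtchn_callback() is a C function, so save exactly
	 * the registers the SysV AMD64 calling convention allows it to
	 * clobber.
	 */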
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)

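/*
 * A Xen PV guest cannot read %cr2 directly; the hypervisor instead
 * makes the faulting address available in the arch.cr2 field of the
 * vcpu_info structure, so fetch it from there.
 */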
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct);

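/*
 * Xen pushes %rcx and %r11 on top of the standard iret frame for every
 * trap, so strip them off before jumping to the kernel's native handler,
 * which expects a bare hardware frame.
 */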
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_ENTRY
	pop %rcx
	pop %r11
	jmp \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_EMPTY
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

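/* Each hypercall stub in the hypercall page is 32 bytes long. */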
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
SYM_CODE_START(xen_iret)
	UNWIND_HINT_EMPTY
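	/* Push 0 as the "flags" word of the iret hypercall frame above. */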
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * Xen PV doesn't use the trampoline stack: PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * under Xen PV would move %rsp up to the top of the kernel stack and leave
 * the IRET frame below %rsp, where it could be corrupted if an #NMI
 * interrupts.  And having swapgs_restore_regs_and_return_to_usermode() push
 * an IRET frame at the same address would be pointless anyway, so use a
 * dedicated return path instead.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq $8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_ENTRY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
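	/* After the two pops the frame is: rip, cs, rflags, rsp, ss. */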
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_ENTRY
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER32_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

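/*
 * Without IA32 emulation the compat syscall/sysenter entries simply
 * fail the system call with -ENOSYS and return to user mode via the
 * iret hypercall.
 */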
SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif /* CONFIG_IA32_EMULATION */