/*
	Asm versions of Xen pv-ops, suitable for either direct use or
	inlining.  The inline versions are the same as the direct-use
	versions, with the pre- and post-amble chopped off.

	This code is encoded for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with direct forms (i.e., vcpu in pda) of the
	operations here; the indirect forms are better handled in C,
	since they're generally too large to inline anyway.
 */
#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/percpu.h>

#include <xen/interface/xen.h>
#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
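/*
	ENDPATCH marks how much of each routine below is safe for the
	pv-op patcher to copy when inlining it at a call site; RELOC
	records (roughly) where the relative displacement of the one
	call/jmp inside that region lives, so the patcher can adjust
	it after copying, or 0 if there is none.
 */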
/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000
/*
	FIXME: x86_64 now can support direct access to percpu variables
	via a segment override.  Update xen accordingly.

	Until then, the BUG (ud2a) at the top of each direct op makes
	sure these versions never actually get used.
 */
#define BUG	ud2a
/*
	Enable events.  This clears the event mask and tests the
	pending event status in a single and operation.  If there are
	pending events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	BUG

	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
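/*
	Roughly what this routine does, as a C sketch (illustrative
	only; the real C fallback is xen_irq_enable(), and the field
	names are from struct vcpu_info in xen/interface/xen.h):

		vcpu->evtchn_upcall_mask = 0;
		barrier();
		if (vcpu->evtchn_upcall_pending)
			xen_force_evtchn_callback();
 */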
/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
	BUG

	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)
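/*
	C sketch of the above, for comparison:
	vcpu->evtchn_upcall_mask = 1;  No pending check is needed,
	since masking can't make an event deliverable.
 */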
/*
	(xen_)save_fl is used to get the current interrupt enable
	status.  Callers expect the status to be in X86_EFLAGS_IF, and
	other bits may be set in the return value.  We take advantage
	of this by making sure that X86_EFLAGS_IF has the right value
	(and other bits in that byte are 0), but other bits in the
	return value are undefined.  We need to toggle the state of
	the bit, because Xen and x86 use opposite senses (mask vs
	enable).
 */
ENTRY(xen_save_fl_direct)
	BUG

	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)
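/*
	C sketch: return vcpu->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
	The setz/add pair builds 0x02 in %ah -- bit 9 of %eax, which
	is X86_EFLAGS_IF -- without a branch.
 */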
/*
	In principle the caller should be passing us a value returned
	from xen_save_fl_direct, but for robustness' sake we test only
	the X86_EFLAGS_IF flag rather than the whole byte.  After
	setting the interrupt mask state, we check for unmasked
	pending events and enter the hypervisor to get them delivered
	if so.
 */
ENTRY(xen_restore_fl_direct)
	BUG

	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jne 1f

2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)
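/*
	Roughly, in C (same caveats as the sketch for
	xen_irq_enable_direct above; "flags" is the caller's argument):

		vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
		barrier();
		if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending)
			xen_force_evtchn_callback();
 */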
/*
	Force an event check by making a hypercall,
	but preserve regs before making the call.
 */
check_events:
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret
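/*
	check_events is reached from patched call sites with arbitrary
	register state, so it must preserve every register the C
	calling convention would let xen_force_evtchn_callback clobber
	-- hence the full set of caller-saved GPR pushes above.
 */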
/* Xen saves %rcx and %r11 on the exception frame; restore them and
   strip them off so the frame matches what a native CPU exception
   would have pushed. */
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx
	mov 8+8(%rsp), %r11
	ret $16
ENDPROC(xen_adjust_exception_frame)
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
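/*
	Each hypercall occupies a 32-byte slot in the hypercall page,
	so hypercall N sits at hypercall_page + N*32; jumping into the
	slot issues the hypercall.
 */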
/*
	Xen64 iret frame:

	ss
	rsp
	rflags
	cs
	rip		<-- standard iret frame

	flags

	rcx		}
	r11		}<-- pushed by hypercall page
rsp ->	rax		}
 */
ENTRY(xen_iret)
	pushq $0	/* "flags" word: no VGCF flags */
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
/*
	sysexit is not used for 64-bit processes, so it's only ever
	used to return to 32-bit compat userspace.
 */
ENTRY(xen_sysexit)
	pushq $__USER32_DS
	pushq %rcx
	pushq $X86_EFLAGS_IF
	pushq $__USER32_CS
	pushq %rdx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)
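/*
	The pushes above hand the iret hypercall the frame sysexit
	would have implied: ss = __USER32_DS, rsp = %rcx, rflags with
	IF set, cs = __USER32_CS, rip = %rdx (sysexit takes the user
	rip in %rdx and the user rsp in %rcx), plus a zero VGCF flags
	word.
 */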
ENTRY(xen_sysret64)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(old_rsp)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
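/*
	sysret would have resumed at rip = %rcx with rflags = %r11, so
	the frame built above is ss = __USER_DS, the saved user rsp,
	rflags = %r11, cs = __USER_CS, rip = %rcx; VGCF_in_syscall
	tells the iret hypercall to do a syscall-style return.
 */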
ENTRY(xen_sysret32)
	/* We're already on the usermode stack at this point, but still
	   with the kernel gs, so we can easily switch back */
	movq %rsp, PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack), %rsp

	pushq $__USER32_DS
	pushq PER_CPU_VAR(old_rsp)
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)
/*
	Xen handles syscall callbacks much like ordinary exceptions,
	which means we have:
	 - kernel gs
	 - kernel rsp
	 - an iret-like stack frame on the stack (including rcx and r11):
		ss
		rsp
		rflags
		cs
		rip
		r11
	rsp->	rcx

	In all the entrypoints, we undo all that to make it look like
	a CPU-generated syscall/sysenter and jump to the normal
	entrypoint.
 */
.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx
	mov 1*8(%rsp), %r11
	mov 5*8(%rsp), %rsp
.endm
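/*
	In the frame shown above, %rcx and %r11 are the two slots at
	the stack bottom, and the saved user rsp is five slots up;
	loading that into %rsp last both switches back to the user
	stack and discards the rest of the frame.
 */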
/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)
#else /* !CONFIG_IA32_EMULATION */

ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $VGCF_in_syscall
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */