/*
 * include/asm-x86_64/calling.h
 *
 * Some macros to handle stack frames in assembly.
 */

#define R15 0
#define R14 8
#define R13 16
#define R12 24
#define RBP 32
#define RBX 40
/* arguments: interrupts/non-tracing syscalls only save up to here */
#define R11 48
#define R10 56
#define R9 64
#define R8 72
#define RAX 80
#define RCX 88
#define RDX 96
#define RSI 104
#define RDI 112
#define ORIG_RAX 120		/* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall. */
#define RIP 128
#define CS 136
#define EFLAGS 144
#define RSP 152
#define SS 160
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
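
/*
 * Illustrative sketch (not part of the original header): after SAVE_ALL,
 * %rsp points at the R15 slot, so the offsets above index the frame
 * directly:
 *
 *      movq ORIG_RAX(%rsp),%rax        # saved syscall number / error code
 *      movq RIP(%rsp),%rcx             # return address from the hw frame
 *
 * With only the SAVE_ARGS part of the frame in place, %rsp points at the
 * R11 slot instead, and the same slots are reached by subtracting
 * ARGOFFSET, e.g. ORIG_RAX-ARGOFFSET(%rsp).
 */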

/*
 * Save the caller-clobbered registers of the frame (%rdi..%r11);
 * \norcx skips %rcx, \nor891011 skips %r8-%r11, and \addskip reserves
 * that many extra bytes above the saved registers.
 */
        .macro SAVE_ARGS addskip=0,norcx=0,nor891011=0
        subq $9*8+\addskip,%rsp
        CFI_ADJUST_CFA_OFFSET 9*8+\addskip
        movq %rdi,8*8(%rsp)
        CFI_REL_OFFSET rdi,8*8
        movq %rsi,7*8(%rsp)
        CFI_REL_OFFSET rsi,7*8
        movq %rdx,6*8(%rsp)
        CFI_REL_OFFSET rdx,6*8
        .if \norcx
        .else
        movq %rcx,5*8(%rsp)
        CFI_REL_OFFSET rcx,5*8
        .endif
        movq %rax,4*8(%rsp)
        CFI_REL_OFFSET rax,4*8
        .if \nor891011
        .else
        movq %r8,3*8(%rsp)
        CFI_REL_OFFSET r8,3*8
        movq %r9,2*8(%rsp)
        CFI_REL_OFFSET r9,2*8
        movq %r10,1*8(%rsp)
        CFI_REL_OFFSET r10,1*8
        movq %r11,(%rsp)
        CFI_REL_OFFSET r11,0*8
        .endif
        .endm
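
/*
 * Hedged usage sketch (labels hypothetical; the real consumer is the
 * kernel's entry.S, which does roughly this on the syscall path): the
 * syscall instruction leaves the user return address in %rcx, so %rcx is
 * not saved (norcx=1), and addskip=8 leaves one spare slot that becomes
 * ORIG_RAX:
 *
 *      hypothetical_syscall_entry:
 *              SAVE_ARGS 8,1
 *              movq %rax,ORIG_RAX-ARGOFFSET(%rsp)      # syscall number
 *              movq %rcx,RIP-ARGOFFSET(%rsp)           # user return address
 */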

#define ARG_SKIP 9*8
        .macro RESTORE_ARGS skiprax=0,addskip=0,skiprcx=0,skipr11=0,skipr8910=0,skiprdx=0
        .if \skipr11
        .else
        movq (%rsp),%r11
        CFI_RESTORE r11
        .endif
        .if \skipr8910
        .else
        movq 1*8(%rsp),%r10
        CFI_RESTORE r10
        movq 2*8(%rsp),%r9
        CFI_RESTORE r9
        movq 3*8(%rsp),%r8
        CFI_RESTORE r8
        .endif
        .if \skiprax
        .else
        movq 4*8(%rsp),%rax
        CFI_RESTORE rax
        .endif
        .if \skiprcx
        .else
        movq 5*8(%rsp),%rcx
        CFI_RESTORE rcx
        .endif
        .if \skiprdx
        .else
        movq 6*8(%rsp),%rdx
        CFI_RESTORE rdx
        .endif
        movq 7*8(%rsp),%rsi
        CFI_RESTORE rsi
        movq 8*8(%rsp),%rdi
        CFI_RESTORE rdi
        .if ARG_SKIP+\addskip > 0
        addq $ARG_SKIP+\addskip,%rsp
        CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
        .endif
        .endm
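
/*
 * Hedged sketch of the matching fast-path exit (roughly the kernel's
 * sysret path): %rcx must carry the return RIP for sysretq, so the %rcx
 * slot is skipped, and addskip=-ARG_SKIP keeps %rsp where it is since
 * the switch back to the user stack happens separately:
 *
 *      movq RIP-ARGOFFSET(%rsp),%rcx   # sysretq takes the RIP in %rcx
 *      RESTORE_ARGS 0,-ARG_SKIP,1      # skiprax=0,addskip=-ARG_SKIP,skiprcx=1
 *      ...                             # restore user %rsp, swapgs
 *      sysretq
 */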

        .macro LOAD_ARGS offset
        movq \offset(%rsp),%r11
        movq \offset+8(%rsp),%r10
        movq \offset+16(%rsp),%r9
        movq \offset+24(%rsp),%r8
        movq \offset+40(%rsp),%rcx      # the RAX slot at \offset+32 is skipped
        movq \offset+48(%rsp),%rdx
        movq \offset+56(%rsp),%rsi
        movq \offset+64(%rsp),%rdi
        movq \offset+72(%rsp),%rax      # %rax comes from the ORIG_RAX slot
        .endm
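
/*
 * Hedged usage note: LOAD_ARGS exists for the syscall-tracing path, where
 * a tracer may have rewritten the saved frame, e.g. (hook name varies by
 * kernel version):
 *
 *      call syscall_trace_enter        # tracer may modify the saved frame
 *      LOAD_ARGS ARGOFFSET             # R11 slot of a full frame is at ARGOFFSET
 */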

#define REST_SKIP 6*8
        .macro SAVE_REST
        subq $REST_SKIP,%rsp
        CFI_ADJUST_CFA_OFFSET REST_SKIP
        movq %rbx,5*8(%rsp)
        CFI_REL_OFFSET rbx,5*8
        movq %rbp,4*8(%rsp)
        CFI_REL_OFFSET rbp,4*8
        movq %r12,3*8(%rsp)
        CFI_REL_OFFSET r12,3*8
        movq %r13,2*8(%rsp)
        CFI_REL_OFFSET r13,2*8
        movq %r14,1*8(%rsp)
        CFI_REL_OFFSET r14,1*8
        movq %r15,(%rsp)
        CFI_REL_OFFSET r15,0*8
        .endm
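
/*
 * Illustrative note (not in the original): SAVE_REST extends a frame begun
 * by SAVE_ARGS down to the R15 slot, after which the offsets defined at the
 * top of this file index the frame directly:
 *
 *      SAVE_ARGS
 *      ...
 *      SAVE_REST
 *      movq RBP(%rsp),%rdi     # full frame: slots addressable by name
 */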

        .macro RESTORE_REST
        movq (%rsp),%r15
        CFI_RESTORE r15
        movq 1*8(%rsp),%r14
        CFI_RESTORE r14
        movq 2*8(%rsp),%r13
        CFI_RESTORE r13
        movq 3*8(%rsp),%r12
        CFI_RESTORE r12
        movq 4*8(%rsp),%rbp
        CFI_RESTORE rbp
        movq 5*8(%rsp),%rbx
        CFI_RESTORE rbx
        addq $REST_SKIP,%rsp
        CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
        .endm

        .macro SAVE_ALL
        SAVE_ARGS
        SAVE_REST
        .endm
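
/*
 * Hedged end-to-end sketch (stub and handler names are hypothetical): for
 * an exception where the CPU pushed SS..RIP plus an error code (which lands
 * in the ORIG_RAX slot), SAVE_ALL completes a frame laid out like
 * struct pt_regs, so its address can be passed straight to a C handler:
 *
 *      hypothetical_stub:
 *              SAVE_ALL
 *              movq %rsp,%rdi          # struct pt_regs * argument
 *              call hypothetical_c_handler
 *              RESTORE_ALL
 *              addq $8,%rsp            # pop the error code (ORIG_RAX slot)
 *              iretq
 */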

        .macro RESTORE_ALL addskip=0
        RESTORE_REST
        RESTORE_ARGS 0,\addskip
        .endm

/*
 * icebp emits the one-byte, undocumented 0xf1 opcode ("ICEBP"/int1),
 * which raises a debug trap much as int3 raises a breakpoint trap.
 */
        .macro icebp
        .byte 0xf1
        .endm