/*
 *  x86 SMM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/helper-proto.h"

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(X86CPU *cpu)
{
}

void helper_rsm(CPUX86State *env)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void cpu_smm_update(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    bool smm_enabled = (env->hflags & HF_SMM_MASK);

    if (cpu->smram) {
        memory_region_set_enabled(cpu->smram, smm_enabled);
    }
}

void do_smm_enter(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    if (env->hflags2 & HF2_NMI_MASK) {
        env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
    } else {
        env->hflags2 |= HF2_NMI_MASK;
    }
    cpu_smm_update(cpu);

    sm_state = env->smbase + 0x8000;
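
    /*
     * Save the current CPU state into the SMRAM state save area at
     * SMBASE + 0x8000: the 64-bit layout when TARGET_X86_64 is defined,
     * the legacy 32-bit layout otherwise.
     */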
#ifdef TARGET_X86_64
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        x86_stw_phys(cs, sm_state + offset, dt->selector);
        x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
        x86_stq_phys(cs, sm_state + offset + 8, dt->base);
    }

    x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
    x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);

    x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
    x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
    x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
    x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
    x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);

    x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
    x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
    x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
    x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);

    x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
    x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
    x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
    x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
    x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
    x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
    x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
    x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
    for (i = 8; i < 16; i++) {
        x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
    x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
    x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
    x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);

    x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
    x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
    x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);

    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
    x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
#else
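    /* Legacy 32-bit state save map. */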
    x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
    x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
    x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
    x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
    x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
    x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
    x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
    x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
    x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
    x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
    x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
    x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
    x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
    x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);

    x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
    x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
    x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
    x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
    x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
    x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
    x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
    x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);

    x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
    x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
        x86_stl_phys(cs, sm_state + offset + 8, dt->base);
        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
        x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);

    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
    x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                              DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;

    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
}
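
/*
 * RSM: leave SMM by reloading the CPU state that do_smm_enter() (possibly
 * modified by the SMI handler) left in the SMRAM state save area.
 */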
void helper_rsm(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
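    /* Reload the saved state; offsets mirror those used in do_smm_enter(). */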
#ifdef TARGET_X86_64
    cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));

    env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);

    env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
    env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
    env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);

    env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
    env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
    env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;

    env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
    env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
    env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
    env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
    env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
    env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
    }
    env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);

    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               x86_lduw_phys(cs, sm_state + offset),
                               x86_ldq_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_lduw_phys(cs, sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
    }
#else
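    /* Restore from the legacy 32-bit state save map. */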
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
    env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
    env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
    env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
    env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
    env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
    env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);

    env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
    env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
    env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
    env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);

    env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               x86_ldl_phys(cs,
                                            sm_state + 0x7fa8 + i * 4) & 0xffff,
                               x86_ldl_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_ldl_phys(cs,
                                             sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));

    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
    }
#endif
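
    /*
     * Leave SMM: clear HF_SMM_MASK and, unless the SMI was taken while an
     * NMI was being handled, unmask NMIs again.
     */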
    if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(cpu);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */