/*
 *  x86 SMM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/log.h"

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(X86CPU *cpu)
{
}

void helper_rsm(CPUX86State *env)
{
}

#else
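
/*
 * SMRAM state-save area revision ID.  helper_rsm() below checks bit 17
 * (0x20000) of the saved revision ID before it reloads env->smbase,
 * i.e. that bit advertises SMBASE relocation support; the low word
 * distinguishes the 64-bit save-state format from the legacy one.
 */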
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
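
/*
 * Enable or disable the SMRAM memory region so that it tracks the
 * CPU's HF_SMM_MASK flag; invoked on every SMM entry and exit below.
 */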
void cpu_smm_update(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    bool smm_enabled = (env->hflags & HF_SMM_MASK);

    if (cpu->smram) {
        memory_region_set_enabled(cpu->smram, smm_enabled);
    }
}
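
/*
 * Enter System Management Mode: save the current CPU state into the
 * SMRAM state-save area at smbase + 0x8000, then load the reset-like
 * SMM execution environment (flat 4 GiB segments, EIP = 0x8000).
 */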
void do_smm_enter(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    if (env->hflags2 & HF2_NMI_MASK) {
        env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
    } else {
        env->hflags2 |= HF2_NMI_MASK;
    }
    cpu_smm_update(cpu);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit save-state format */
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        x86_stw_phys(cs, sm_state + offset, dt->selector);
        x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
        x86_stq_phys(cs, sm_state + offset + 8, dt->base);
    }

    x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
    x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);

    x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
    x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
    x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
    x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
    x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);

    x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
    x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
    x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
    x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);

    x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
    x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
    x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
    x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
    x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
    x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
    x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
    x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
    for (i = 8; i < 16; i++) {
        x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
    x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
    x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
    x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);

    x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
    x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
    x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);

    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
    x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
#else
    /* legacy 32-bit save-state format */
    x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
    x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
    x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
    x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
    x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
    x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
    x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
    x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
    x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
    x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
    x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
    x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
    x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
    x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);

    x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
    x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
    x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
    x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
    x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
    x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
    x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
    x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);

    x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
    x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
        x86_stl_phys(cs, sm_state + offset + 8, dt->base);
        x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
        x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);

    x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
    x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                              DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;

    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_G_MASK | DESC_A_MASK);
}
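
/*
 * RSM: restore the CPU state that do_smm_enter() saved in the SMRAM
 * state-save area, optionally reload SMBASE (if the saved revision ID
 * advertises relocation), and leave System Management Mode.
 */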
void helper_rsm(CPUX86State *env)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));

    env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);

    env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
    env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
    env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);

    env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
    env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
    env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;

    env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
    env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
    env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
    env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
    env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
    env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
    }
    env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);

    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               x86_lduw_phys(cs, sm_state + offset),
                               x86_ldq_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_lduw_phys(cs, sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
    }
#else
    /* legacy 32-bit save-state format */
    cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
    cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
    env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
    env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
    env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
    env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
    env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
    env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
    env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
    env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
    env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
    env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);

    env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
    env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
    env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
    env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
    env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
    env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
    env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);

    env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
    env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               x86_ldl_phys(cs,
                                            sm_state + 0x7fa8 + i * 4) & 0xffff,
                               x86_ldl_phys(cs, sm_state + offset + 8),
                               x86_ldl_phys(cs, sm_state + offset + 4),
                               (x86_ldl_phys(cs,
                                             sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));

    val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
    }
#endif
    if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(cpu);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */