/*
 * Suspend support specific for x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

struct saved_context saved_context;

unsigned long saved_context_eax, saved_context_ebx, saved_context_ecx, saved_context_edx;
unsigned long saved_context_esp, saved_context_ebp, saved_context_esi, saved_context_edi;
unsigned long saved_context_r08, saved_context_r09, saved_context_r10, saved_context_r11;
unsigned long saved_context_r12, saved_context_r13, saved_context_r14, saved_context_r15;
unsigned long saved_context_eflags;

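/*
 * The saved_context_* words above are used by the register save/restore
 * code in suspend_asm.S, which addresses them as individual symbols.
 */
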
void __save_processor_state(struct saved_context *ctxt)
{
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
	asm volatile ("str %0"  : "=m" (ctxt->tr));

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
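
	/*
	 * On x86-64 the FS and GS segment bases are held in MSRs rather
	 * than in the descriptors, so they are saved separately from the
	 * selectors above; MSR_KERNEL_GS_BASE holds the base that swapgs
	 * exchanges in on kernel entry.
	 */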
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	/*
	 * control registers
	 */
	rdmsrl(MSR_EFER, ctxt->efer);
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
}

void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary
	 */
	kernel_fpu_end();
}

void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));

	/*
	 * segment registers
	 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
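
	/*
	 * %gs is reloaded with load_gs_index() rather than a plain mov,
	 * which would clobber the active GS base; the FS/GS bases are then
	 * restored explicitly through the MSRs below.
	 */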
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);

	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
}

void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}

void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	/*
	 * This just modifies memory; it should not be necessary.  But...
	 * it is necessary, because 386 hardware has the concept of a busy
	 * TSS or some similar stupidity.
	 */
	set_tss_desc(cpu, t);
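
	/*
	 * ltr in load_TR_desc() below faults on a busy TSS descriptor, so
	 * reset the type to 9 ("available 64-bit TSS") first.
	 */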
	cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */

	/*
	 * Now maybe reload the debug registers
	 */
	if (current->thread.debugreg7) {
		loaddebug(&current->thread, 0);
		loaddebug(&current->thread, 1);
		loaddebug(&current->thread, 2);
		loaddebug(&current->thread, 3);
		/* no 4 and 5 */
		loaddebug(&current->thread, 6);
		loaddebug(&current->thread, 7);
	}
}

#ifdef CONFIG_HIBERNATION
/* Defined in arch/x86_64/kernel/suspend_asm.S */
extern int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3;

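/*
 * The temporary page tables below are built only from pages returned by
 * get_safe_page(), i.e. pages that are free in both the boot kernel and the
 * image kernel, so copying the image back cannot overwrite them; the restore
 * code is moved to such a safe page for the same reason.
 */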
pgd_t *temp_level4_pgt;

void *relocated_restore_code;

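/*
 * res_phys_pud_init() fills one PUD page with 2 MB (PMD-level) mappings
 * covering the physical range [address, end), mirroring the kernel's normal
 * direct mapping.
 */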
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}

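/*
 * res_kernel_text_pud_init() recreates the kernel text mapping at 'start'
 * with 2 MB global pages; a single PMD page is enough to cover
 * KERNEL_TEXT_SIZE.
 */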
static int res_kernel_text_pud_init(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	unsigned long paddr;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;
	set_pud(pud + pud_index(start), __pud(__pa(pmd) | _KERNPG_TABLE));
	for (paddr = 0; paddr < KERNEL_TEXT_SIZE; pmd++, paddr += PMD_SIZE) {
		unsigned long pe;

		pe = __PAGE_KERNEL_LARGE_EXEC | _PAGE_GLOBAL | paddr;
		pe &= __supported_pte_mask;
		set_pmd(pmd, __pmd(pe));
	}

	return 0;
}

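/*
 * set_up_temporary_mappings() builds the whole throwaway hierarchy: a fresh
 * level-4 table, a direct mapping of physical memory up to end_pfn, and the
 * kernel text mapping, so the resume code can run with temp_level4_pgt
 * loaded into cr3.
 */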
static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	pud_t *pud;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(end_pfn);

	for (; start < end; start = next) {
		pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}

	/* Set up the kernel text mapping from scratch */
	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;
	error = res_kernel_text_pud_init(pud, __START_KERNEL_map);
	if (!error)
		set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
			__pgd(__pa(pud) | _PAGE_TABLE));

	return error;
}

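/*
 * swsusp_arch_resume() runs in the boot kernel.  Once the temporary page
 * tables are ready and the restore code sits in a safe page,
 * restore_image() copies the image pages into place and jumps to
 * restore_jump_address in the image kernel, so on success it does not
 * return here.
 */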
int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
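	/*
	 * The begin pfn rounds down and the end pfn rounds up (PAGE_ALIGN),
	 * so every page that overlaps the nosave section is treated as
	 * nosave.
	 */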
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL
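
/*
 * The magic value guards against resuming with an image whose header layout
 * does not match this kernel; arch_hibernation_header_restore() rejects a
 * mismatch with -EINVAL.
 */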

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
#endif /* CONFIG_HIBERNATION */