/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

/* Defined in hibernate_asm_64.S */
extern int restore_image(void);
extern const void core_restore_code, restore_registers;

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3;

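/*
 * Temporary page tables: built by set_up_temporary_mappings() below and
 * switched to by restore_image() (see hibernate_asm_64.S) while the
 * image pages are copied back into place.
 */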
pgd_t *temp_level4_pgt;

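/*
 * Location of the temporary copy of the page-copying loop; the loop is
 * moved out of the way so that it cannot be overwritten by the image
 * pages it is restoring.
 */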
void *relocated_restore_code;

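/*
 * res_phys_pud_init() populates one top-level entry's worth of the
 * temporary direct mapping: pmd pages are allocated with
 * get_safe_page() so that they cannot collide with pages of the image,
 * and the physical range [address, end) is mapped with 2 MB large
 * pages (PMD_SIZE each).
 */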
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}

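/*
 * The boot kernel's own page tables may live in pages that belong to
 * the image being restored, so the copy loop cannot rely on them.
 * set_up_temporary_mappings() therefore builds a self-contained set of
 * page tables in safe pages: the kernel text mapping is reused (the
 * image and boot kernels share __START_KERNEL_map), while the direct
 * mapping of physical memory is rebuilt from scratch.
 */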
static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(end_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);

		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

	restore_image();
	return 0;
}

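/*
 * Pages between __nosave_begin and __nosave_end hold data declared
 * with __nosavedata; the hibernation core uses pfn_is_nosave() to
 * leave those pages out of the image, so their contents are neither
 * saved nor overwritten on restore.
 */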
/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

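/*
 * Architecture specific part of the hibernation image header; filled
 * in by the image kernel in arch_hibernation_header_save() and read
 * back by the boot kernel in arch_hibernation_header_restore() below.
 */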
struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL

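/*
 * RESTORE_MAGIC is written into the header on hibernation and verified
 * on resume; an image whose record does not carry the expected magic
 * (for instance one created by an incompatible kernel) is rejected
 * with -EINVAL instead of being restored.
 */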
/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}