/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
struct pbe *pagedir_nosave;
static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;

				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}
struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;

	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *)get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}
int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem\n");
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	return 0;
}
int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;

		highmem_copy = save->next;
		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long)save->data);
		kfree(save);
	}
	return 0;
}
#endif /* CONFIG_HIGHMEM */
static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
/**
 *	saveable - Determine whether a page should be cloned or not.
 *
 *	We save a page if it's Reserved, and not in the range of pages
 *	statically defined as 'unsaveable', or if it isn't reserved, and
 *	isn't part of a free chunk of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}
static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address, (void *)pbe->orig_address,
					PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}
/**
 *	free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}
/**
 *	fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}
/**
 *	create_pbe_list - Create a list of PBEs on top of a given chain
 *	of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}
/**
 *	On resume it is necessary to trace and eventually free the unsafe
 *	pages that have been allocated, because they are needed for I/O
 *	(on x86-64 we likely will "eat" these pages once again while
 *	creating the temporary page translation tables)
 */

struct eaten_page {
	struct eaten_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct eaten_page *eaten_pages = NULL;

static void release_eaten_pages(void)
{
	struct eaten_page *p, *q;

	p = eaten_pages;
	while (p) {
		q = p->next;
		/* We don't want swsusp_free() to free this page again */
		ClearPageNosave(virt_to_page(p));
		free_page((unsigned long)p);
		p = q;
	}
	eaten_pages = NULL;
}
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	which had been used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *
 *	Allocated but unusable (ie eaten) memory pages should be marked
 *	so that swsusp_free() can release them
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	if (safe_needed)
		do {
			res = (void *)get_zeroed_page(gfp_mask);
			if (res && PageNosaveFree(virt_to_page(res))) {
				/* This is for swsusp_free() */
				SetPageNosave(virt_to_page(res));
				((struct eaten_page *)res)->next = eaten_pages;
				eaten_pages = res;
			}
		} while (res && PageNosaveFree(virt_to_page(res)));
	else
		res = (void *)get_zeroed_page(gfp_mask);
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}
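
/*
 * Note (summarizing the flow above): when safe_needed is set, a page
 * returned by get_zeroed_page() whose PG_nosave_free bit is set would
 * collide with data from the image, so it is chained onto eaten_pages
 * and the allocation is retried; release_eaten_pages() frees the whole
 * chain once the image memory has been allocated safely.
 */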
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}
/**
 *	alloc_pagedir - Allocate the page directory.
 *
 *	First, determine exactly how many pages we need and
 *	allocate them.
 *
 *	We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 *	struct pbe elements (pbes) and the last element in the page points
 *	to the next page.
 *
 *	On each page we set up a list of struct pbe elements.
 */

struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}
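
/*
 * Layout sketch (illustrative, with a made-up PBES_PER_PAGE of 4): a
 * pagedir for nr_pages == 6 spans two chained pages.  alloc_pagedir()
 * links the last pbe of page 0 to page 1, and create_pbe_list() threads
 * the remaining elements:
 *
 *	page 0: pbe[0] -> pbe[1] -> pbe[2] -> pbe[3] ---+
 *	page 1: pbe[4] -> pbe[5] -> NULL   <------------+
 */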
/**
 *	Free pages we allocated for suspend. Suspend pages are allocated
 *	before atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long)page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	pagedir_nosave = NULL;
	buffer = NULL;
}
/**
 *	enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
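
/*
 * Worked example (hypothetical numbers): with nr_pages == 10000 and
 * PBES_PER_PAGE == 341 (4 KB pages, 12-byte pbes on a 32-bit box),
 * the snapshot needs 10000 copy pages plus (10000 + 340) / 341 == 30
 * pagedir pages plus PAGES_FOR_IO, so enough_free_mem() succeeds only
 * if strictly more pages than that total are free.
 */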
static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}
static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%u pages copied)\n", nr_pages);
	return 0;
}
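
/*
 * Meta-page accounting (illustrative): each meta page holds
 * PAGE_SIZE / sizeof(long) original addresses, i.e. 1024 with 4 KB
 * pages and 4-byte longs, so the hypothetical 10000-page image from
 * the example above needs (10000 * 4 + 4095) >> 12 == 10 meta pages.
 */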
static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}
/**
 *	pack_orig_addresses - the .orig_address fields of the PBEs from the
 *	list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		buf[j] = pbe->orig_address;
		pbe = pbe->next;
	}
	if (!pbe)
		for (; j < PAGE_SIZE / sizeof(long); j++)
			buf[j] = 0;
	return pbe;
}
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of the data stream
 *	condition, and a negative number is returned on error.  In such
 *	cases the structure pointed to by @handle is not updated and should
 *	not be used any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		handle->pbe = pagedir_nosave;
	}
	if (handle->prev < handle->page) {
		if (handle->page <= nr_meta_pages) {
			handle->pbe = pack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe)
				handle->pbe = pagedir_nosave;
		} else {
			handle->buffer = (void *)handle->pbe->address;
			handle->pbe = handle->pbe->next;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else
		handle->page_offset += count;
	handle->offset += count;
	return count;
}
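
/*
 * Usage sketch (illustrative, not part of this file): a writer drains
 * the snapshot in page-sized chunks.  data_of() yields the address to
 * read from; write_chunk() is a hypothetical sink for the data.
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		if (write_chunk(data_of(handle), ret))
 *			break;
 *	(ret == 0 means end of the data stream, ret < 0 an error)
 */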
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *p;

	if (!pblist) /* a sanity check */
		return -EINVAL;

	/* Clear page flags */
	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn))
				ClearPageNosaveFree(pfn_to_page(zone_pfn +
					zone->zone_start_pfn));
	}

	/* Mark orig addresses */
	for_each_pbe (p, pblist) {
		if (virt_addr_valid(p->orig_address))
			SetPageNosaveFree(virt_to_page(p->orig_address));
		else
			return -EFAULT;
	}

	return 0;
}
static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
	/* We assume both lists contain the same number of elements */
	while (src) {
		dst->orig_address = src->orig_address;
		dst = dst->next;
		src = src->next;
	}
}
static int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, system_utsname.sysname))
		reason = "system type";
	if (strcmp(info->uts.release, system_utsname.release))
		reason = "kernel release";
	if (strcmp(info->uts.version, system_utsname.version))
		reason = "version";
	if (strcmp(info->uts.machine, system_utsname.machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 *	load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                       struct swsusp_info *info)
{
	int error;
	struct pbe *pblist;

	error = check_header(info);
	if (!error) {
		pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
		if (!pblist)
			return -ENOMEM;
		pagedir_nosave = pblist;
		handle->pbe = pblist;
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 *	unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 *	the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
		pbe->orig_address = buf[j];
		pbe = pbe->next;
	}
	return pbe;
}
/**
 *	create_image - use metadata contained in the PBE list
 *	pointed to by pagedir_nosave to mark the pages that will
 *	be overwritten in the process of restoring the system
 *	memory state from the image and allocate memory for
 *	the image avoiding these pages
 */

static int create_image(struct snapshot_handle *handle)
{
	int error = 0;
	struct pbe *p, *pblist;

	p = pagedir_nosave;
	error = mark_unsafe_pages(p);
	if (!error) {
		pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
		if (pblist)
			copy_page_backup_list(pblist, p);
		free_pagedir(p);
		if (!pblist)
			error = -ENOMEM;
	}
	if (!error)
		error = alloc_data_pages(pblist, GFP_ATOMIC, 1);
	if (!error) {
		release_eaten_pages();
		pagedir_nosave = pblist;
	} else {
		pagedir_nosave = NULL;
		handle->pbe = NULL;
		nr_copy_pages = 0;
		nr_meta_pages = 0;
	}
	return error;
}
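
/*
 * Note: create_image() trades the unsafe pagedir built by load_header()
 * (whose pages may sit exactly where image data must go) for a freshly
 * allocated safe one: mark_unsafe_pages() flags the final destinations,
 * alloc_pagedir(..., 1) then avoids them, and copy_page_backup_list()
 * carries the original addresses over before the old chain is freed.
 */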
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be
 *	used any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	int error = 0;

	if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages)
		return 0;
	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, 0);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	if (handle->prev < handle->page) {
		if (!handle->prev) {
			error = load_header(handle, (struct swsusp_info *)buffer);
			if (error)
				return error;
		} else if (handle->prev <= nr_meta_pages) {
			handle->pbe = unpack_orig_addresses(buffer, handle->pbe);
			if (!handle->pbe) {
				error = create_image(handle);
				if (error)
					return error;
				handle->pbe = pagedir_nosave;
				handle->buffer = (void *)handle->pbe->address;
			}
		} else {
			handle->pbe = handle->pbe->next;
			handle->buffer = (void *)handle->pbe->address;
		}
		handle->prev = handle->page;
	}
	handle->buf_offset = handle->page_offset;
	if (handle->page_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->page_offset;
		handle->page_offset = 0;
		handle->page++;
	} else
		handle->page_offset += count;
	handle->offset += count;
	return count;
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
		handle->page <= nr_meta_pages + nr_copy_pages);
}
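
/*
 * Restore-side usage sketch (illustrative): feeding the image back in
 * mirrors the read side above.  read_chunk() is a hypothetical data
 * source filling the buffer that data_of() points at.
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle, PAGE_SIZE)) > 0)
 *		if (read_chunk(data_of(handle), ret))
 *			break;
 *	if (ret >= 0 && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 */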