/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.  The unsafe pages have PageNosaveFree set
 *	and we count them using unsafe_pages.
 *
 *	Each allocated image page is marked as PageNosave and PageNosaveFree
 *	so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}
/**
 *	free_image_page - free page represented by @addr, allocated with
 *	get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/**
 *	struct chain_allocator is used for allocating small objects out of
 *	a linked list of pages called 'the chain'.
 *
 *	The chain grows each time when there is no room for a new object in
 *	the current page.  The allocated objects cannot be freed individually.
 *	It is only possible to free them all at once, by freeing the entire
 *	chain.
 *
 *	NOTE: The chain allocator may be inefficient if the allocated objects
 *	are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
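/*
 * Example (an illustrative sketch only, not part of the original flow):
 * carving two bm_block descriptors out of a fresh chain and releasing
 * the whole chain at once afterwards.
 *
 *	struct chain_allocator ca;
 *	struct bm_block *a, *b;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	a = chain_alloc(&ca, sizeof(struct bm_block));
 *	b = chain_alloc(&ca, sizeof(struct bm_block));
 *	... use a and b, then free everything at once ...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 */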
/**
 *	Data types related to memory bitmaps.
 *
 *	Memory bitmap is a structure consisting of many linked lists of
 *	objects.  The main list's elements are of type struct zone_bitmap
 *	and each of them corresponds to one zone.  For each zone bitmap
 *	object there is a list of objects of type struct bm_block that
 *	represent each block of the bitmap in which information is stored.
 *
 *	struct memory_bitmap contains a pointer to the main list of zone
 *	bitmap objects, a struct bm_position used for browsing the bitmap,
 *	and a pointer to the list of pages used for allocating all of the
 *	zone bitmap objects and bitmap block objects.
 *
 *	NOTE: It has to be possible to lay out the bitmap in memory
 *	using only allocations of order 0.  Additionally, the bitmap is
 *	designed to work with an arbitrary number of zones (this is over the
 *	top for now, but let's avoid making unnecessary assumptions ;-).
 *
 *	struct zone_bitmap contains a pointer to a list of bitmap block
 *	objects and a pointer to the bitmap block object that has been
 *	most recently used for setting bits.  Additionally, it contains the
 *	pfns that correspond to the start and end of the represented zone.
 *
 *	struct bm_block contains a pointer to the memory page in which
 *	information is stored (in the form of a block of the bitmap).
 *	It also contains the pfns that correspond to the start and end of
 *	the represented memory area.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)

struct bm_block {
	struct list_head hook;	/* hook into a list of bitmap blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned long *data;	/* bitmap representing pages */
};

static inline unsigned long bm_block_bits(struct bm_block *bb)
{
	return bb->end_pfn - bb->start_pfn;
}
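/*
 * Illustrative arithmetic (assuming 4 KiB pages): BM_BITS_PER_BLOCK is
 * PAGE_SIZE << 3 = 32768, so a single bm_block covers 32768 page frames
 * (128 MiB of memory), and a pfn inside a block corresponds to bit
 * (pfn - bb->start_pfn) of bb->data, which is what memory_bm_find_bit()
 * computes below.
 */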
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct bm_block *block;
	int bit;
};

struct memory_bitmap {
	struct list_head blocks;	/* list of bitmap blocks */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
	bm->cur.bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
 *	create_bm_block_list - create a list of block bitmap objects
 *	@pages - number of pages the blocks have to cover
 *	@list - list to put the allocated blocks into
 *	@ca - chain allocator to be used for allocating memory
 */
static int create_bm_block_list(unsigned long pages,
				struct list_head *list,
				struct chain_allocator *ca)
{
	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return -ENOMEM;
		list_add(&bb->hook, list);
	}

	return 0;
}

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 *	free_mem_extents - free a list of memory extents
 *	@list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}
/**
 *	create_mem_extents - create a list of memory extents representing
 *	                     contiguous ranges of PFNs
 *	@list - list to put the extents into
 *	@gfp_mask - mask to use for memory allocations
 *
 *	Returns 0 on success and -ENOMEM on error.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		if (!populated_zone(zone))
			continue;

		zone_start = zone->zone_start_pfn;
		zone_end = zone->zone_start_pfn + zone->spanned_pages;

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
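/*
 * Example (illustrative): if the extent list already contains [0, 1000)
 * and [4096, 8192), a zone spanning pfns [512, 5000) first widens the
 * first extent to [0, 5000) and then swallows the second one, leaving a
 * single extent [0, 8192) on the list.
 */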
/**
 *	memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->blocks);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct bm_block *bb;
		unsigned long pfn = ext->start;
		unsigned long pages = ext->end - ext->start;

		bb = list_entry(bm->blocks.prev, struct bm_block, hook);

		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
		if (error)
			goto Error;

		list_for_each_entry_continue(bb, &bm->blocks, hook) {
			bb->data = get_image_page(gfp_mask, safe_needed);
			if (!bb->data) {
				error = -ENOMEM;
				goto Error;
			}

			bb->start_pfn = pfn;
			if (pages >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				pages -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += pages;
			}
			bb->end_pfn = pfn;
		}
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}
/**
 *	memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct bm_block *bb;

	list_for_each_entry(bb, &bm->blocks, hook)
		if (bb->data)
			free_image_page(bb->data, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->blocks);
}
/**
 *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 *	to the given pfn.  The cur.block member of @bm is updated, so that
 *	subsequent lookups of nearby pfns start from the block found here.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
				void **addr, unsigned int *bit_nr)
{
	struct bm_block *bb;

	/*
	 * Check if the pfn corresponds to the current bitmap block and find
	 * the block where it fits if this is not the case.
	 */
	bb = bm->cur.block;
	if (pfn < bb->start_pfn)
		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn)
				break;

	if (pfn >= bb->end_pfn)
		list_for_each_entry_continue(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
				break;

	if (&bb->hook == &bm->blocks)
		return -EFAULT;

	/* The block has been found */
	bm->cur.block = bb;
	pfn -= bb->start_pfn;
	bm->cur.bit = pfn + 1;
	*bit_nr = pfn;
	*addr = bb->data;
	return 0;
}
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);
	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call
 *	to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct bm_block *bb;
	int bit;

	bb = bm->cur.block;
	do {
		bit = bm->cur.bit;
		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
		if (bit < bm_block_bits(bb))
			goto Return_pfn;

		bb = list_entry(bb->hook.next, struct bm_block, hook);
		bm->cur.block = bb;
		bm->cur.bit = 0;
	} while (&bb->hook != &bm->blocks);

	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.bit = bit + 1;
	return bb->start_pfn + bit;
}
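/*
 * Typical iteration over all bits set in a bitmap (the same pattern, with
 * slight variations, is used by copy_data_pages() and
 * duplicate_memory_bitmap() below; do_something() is a stand-in for the
 * per-pfn work):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *			pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);
 */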
/**
 *	This structure represents a range of page frames the contents of which
 *	should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 *	register_nosave_region - register a range of page frames the contents
 *	of which should not be saved during the suspend (to be used in the
 *	early initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* during init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = alloc_bootmem_low(sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
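/*
 * Illustrative use from early platform setup code (the pfn range below
 * is hypothetical):
 *
 *	register_nosave_region(PFN_DOWN(0x000a0000), PFN_UP(0x00100000));
 *
 * Calls whose ranges touch the end of the previously registered region
 * are merged into it by the extension path above.
 */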
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 *	mark_nosave_pages - set bits corresponding to the page frames the
 *	contents of which should not be saved in a given bitmap.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
				region->start_pfn << PAGE_SHIFT,
				region->end_pfn << PAGE_SHIFT);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
/**
 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 *	frames that should not be saved and free page frames.  The pointers
 *	forbidden_pages_map and free_pages_map are only modified if everything
 *	goes well, because we don't want the bits to be used before both bitmaps
 *	are set up.
 */

int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 *	so that the bitmaps themselves are not referred to while they are being
 *	freed.
 */

void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	BUG_ON(!(forbidden_pages_map && free_pages_map));

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}
/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	that will be needed for setting up the suspend image data structures
 *	for a given zone (usually the returned value is greater than the exact
 *	number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return 2 * res;
}
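/*
 * Worked example (illustrative, assuming 4 KiB pages): a zone spanning
 * 1048576 pfns (4 GiB) needs DIV_ROUND_UP(1048576, 32768) = 32 bitmap
 * blocks plus one page for the 32 struct bm_block descriptors (on a
 * 64-bit build); the result is doubled because two bitmaps (orig_bm and
 * copy_bm) are set up when the image is created.
 */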
#ifdef CONFIG_HIGHMEM
/**
 *	count_free_highmem_pages - compute the total number of free highmem
 *	pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_zone(zone)
		if (populated_zone(zone) && is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 *	saveable_highmem_page - Determine whether a highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 *	and it isn't a part of a free chunk of pages.
 */

static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	return page;
}

/**
 *	count_highmem_pages - compute the total number of saveable highmem
 *	pages.
 */

unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */
/**
 *	saveable_page - Determine whether a non-highmem page should be included
 *	in the suspend image.
 *
 *	We should save the page if it isn't Nosave, and is not in the range
 *	of pages statically defined as 'unsaveable', and it isn't a part of
 *	a free chunk of pages.
 */

static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	return page;
}

/**
 *	count_data_pages - compute the total number of saveable non-highmem
 *	pages.
 */

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}
/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 *	safe_copy_page - check if the page we are going to copy is marked as
 *		present in the kernel page tables (this always is the case if
 *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
 *		kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page, KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;

/**
 *	swsusp_free - free pages allocated for the suspend.
 *
 *	Suspend pages are allocated before the atomic copy is made, so we
 *	need to release them after the resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}
#ifdef CONFIG_HIGHMEM
/**
 *	count_pages_for_highmem - compute the number of non-highmem pages
 *	that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages();

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 *	enough_free_mem - Make sure we have enough free memory for the
 *	snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = 0, meta = 0;

	for_each_zone(zone) {
		meta += snapshot_additional_pages(zone);
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);
	}

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, meta, free);

	return free > nr_pages + PAGES_FOR_IO + meta;
}
#ifdef CONFIG_HIGHMEM
/**
 *	get_highmem_buffer - if there are some highmem pages in the suspend
 *	image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 *	alloc_highmem_image_pages - allocate some highmem pages for the image.
 *	Try to allocate as many pages as needed, but if the number of free
 *	highmem pages is less than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 *	swsusp_alloc - allocate memory for the suspend image
 *
 *	We first try to allocate as many highmem pages as there are
 *	saveable highmem pages in the system.  If that fails, we allocate
 *	non-highmem pages for the copies of the remaining highmem ones.
 *
 *	In this approach it is likely that the copies of highmem pages will
 *	also be located in the high memory, because of the way in which
 *	copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	int error;

	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	if (nr_highmem > 0) {
		error = get_highmem_buffer(PG_ANY);
		if (error)
			goto Free;

		nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
	}
	while (nr_pages-- > 0) {
		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);

		if (!page)
			goto Free;

		memory_bm_set_bit(copy_bm, page_to_pfn(page));
	}
	return 0;

 Free:
	swsusp_free();
	return -ENOMEM;
}
/* Memory bitmap used for marking saveable pages (during suspend) or the
 * suspend image pages (during resume)
 */
static struct memory_bitmap orig_bm;
/* Memory bitmap used on suspend for marking allocated pages that will contain
 * the copies of saveable pages.  During resume it is initially used for
 * marking the suspend image pages, but then its set bits are duplicated in
 * @orig_bm and it is released.  Next, on systems with high memory, it may be
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  This specially means we must _not_
	 * touch swap space!  Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
		nr_pages);

	return 0;
}
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}
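/*
 * Illustrative layout of the image stream: one header page, then
 * nr_meta_pages pages of original pfns, then nr_copy_pages data pages.
 * E.g. with 4 KiB pages and 64-bit longs, saving 100000 pages takes
 * DIV_ROUND_UP(100000 * 8, 4096) = 196 meta pages, so the stream is
 * 1 + 196 + 100000 pages long.
 */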
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = num_physpages;
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 *	are stored in the array @buf[] (1 page at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of the data stream
 *	condition, and a negative number is returned on error.  In such
 *	cases the structure pointed to by @handle is not updated and should
 *	not be used before the next call to this function.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			struct page *page;

			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
			if (PageHighMem(page)) {
				/* Highmem pages are copied to the buffer,
				 * because we can't return with a kmapped
				 * highmem page (we may not be called again).
				 */
				void *kaddr;

				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(buffer, kaddr, PAGE_SIZE);
				kunmap_atomic(kaddr, KM_USER0);
				handle->buffer = buffer;
			} else {
				handle->buffer = page_address(page);
			}
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
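/*
 * Sketch of a caller (hypothetical; the real consumers are the swap and
 * user-space snapshot drivers, and write_out() is a stand-in for the
 * actual data sink):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		write_out(data_of(handle), ret);
 *	return ret;
 */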
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}

static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != num_physpages)
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 *	load_header - check the image header and copy the data from it
 */

static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 *	the corresponding bit in the memory bitmap @bm
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		if (memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}
/* List of "safe" pages that may be used to store data loaded from the suspend
 * image
 */
static struct linked_page *safe_pages_list;

#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 *	count_highmem_image_pages - compute the number of highmem pages in the
 *	suspend image.  The bits in the memory bitmap @bm that correspond to the
 *	image pages are assumed to be set.
 */

static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

/**
 *	prepare_highmem_image - try to allocate as many highmem pages as
 *	there are highmem image pages (@nr_highmem_p points to the variable
 *	containing the number of highmem image pages).  The pages that are
 *	"safe" (ie. will not be overwritten when the suspend image is
 *	restored) have the corresponding bits set in @bm (it must be
 *	uninitialized).
 *
 *	NOTE: This function should not be called if there are no highmem
 *	image pages.
 */

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;
static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
/**
 *	get_highmem_page_buffer - for given highmem image page find the buffer
 *	that suspend_write_next() should set for its caller to write to.
 *
 *	If the page is to be saved to its "original" page frame or a copy of
 *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
 *	the copy of the page is to be made in normal memory, so the address of
 *	the copy is returned.
 *
 *	If @buffer is returned, the caller of suspend_write_next() will write
 *	the page's contents to @buffer, so they will have to be copied to the
 *	right location on the next call to suspend_write_next() and it is done
 *	with the help of copy_last_highmem_page().  For this purpose, if
 *	@buffer is returned, @last_highmem_page is set to the page to which
 *	the data will have to be copied from @buffer.
 */

static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
/**
 *	copy_last_highmem_page - copy the contents of a highmem image from
 *	@buffer, where the caller of snapshot_write_next() has placed them,
 *	to the right location represented by @last_highmem_page.
 */

static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page, KM_USER0);
		memcpy(dst, buffer, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
/**
 *	prepare_image - use the memory bitmap @bm to mark the pages that will
 *	be overwritten in the process of restoring the system memory state
 *	from the suspend image ("unsafe" pages) and allocate memory for the
 *	image.
 *
 *	The idea is to allocate a new memory bitmap first and then allocate
 *	as many pages as needed for the image data, but not to assign these
 *	pages to specific tasks initially.  Instead, we just mark them as
 *	allocated and create a list of "safe" pages that will be used
 *	later.  On systems with high memory a list of "safe" highmem pages is
 *	also created.
 */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
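/*
 * Illustrative numbers (assuming 4 KiB pages and a 64-bit build, where
 * struct pbe is three pointers, i.e. 24 bytes): LINKED_PAGE_DATA_SIZE is
 * 4096 - 8 = 4088 bytes, so PBES_PER_LINKED_PAGE = 4088 / 24 = 170 PBEs
 * fit in one linked page.
 */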
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
/**
 *	get_buffer - compute the address that snapshot_write_next() should
 *	set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	before the next call to this function.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (handle->offset == 0) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	}
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (handle->prev == 0) {
			error = load_header(buffer);
			if (error)
				return error;

			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
			if (error)
				return error;

		} else if (handle->prev <= nr_meta_pages) {
			error = unpack_orig_pfns(buffer, &copy_bm);
			if (error)
				return error;

			if (handle->prev == nr_meta_pages) {
				error = prepare_image(&orig_bm, &copy_bm);
				if (error)
					return error;

				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
				memory_bm_position_reset(&orig_bm);
				restore_pblist = NULL;
				handle->buffer = get_buffer(&orig_bm, &ca);
				handle->sync_read = 0;
				if (IS_ERR(handle->buffer))
					return PTR_ERR(handle->buffer);
			}
		} else {
			copy_last_highmem_page();
			handle->buffer = get_buffer(&orig_bm, &ca);
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
			if (handle->buffer != buffer)
				handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
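/*
 * Sketch of a caller feeding an image back in (hypothetical; read_in()
 * is a stand-in for the actual data source):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	do {
 *		ret = snapshot_write_next(&handle, PAGE_SIZE);
 *		if (ret > 0)
 *			read_in(data_of(handle), ret);
 *	} while (ret > 0);
 *	snapshot_write_finalize(&handle);
 */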
/**
 *	snapshot_write_finalize - must be called after the last call to
 *	snapshot_write_next() in case the last page in the image happens
 *	to be a highmem page and its contents should be stored in the
 *	highmem.  Additionally, it releases the memory that will not be
 *	used any more.
 */

void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Free only if we have loaded the image entirely */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1, KM_USER0);
	kaddr2 = kmap_atomic(p2, KM_USER1);
	memcpy(buf, kaddr1, PAGE_SIZE);
	memcpy(kaddr1, kaddr2, PAGE_SIZE);
	memcpy(kaddr2, buf, PAGE_SIZE);
	kunmap_atomic(kaddr1, KM_USER0);
	kunmap_atomic(kaddr2, KM_USER1);
}

/**
 *	restore_highmem - for each highmem page that was allocated before
 *	the suspend and included in the suspend image, and also has been
 *	allocated by the "resume" kernel, swap its current (ie. "before
 *	resume") contents with the previous (ie. "before suspend") one.
 *
 *	If the resume eventually fails, we can call this function once
 *	again and restore the "before resume" highmem state.
 */

int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */