[PATCH] namespaces: utsname: use init_utsname when appropriate
[linux-2.6/verdex.git] / kernel / power / snapshot.c
blob 99f9b7d177d6a843f5bcb3ade50e67379e169013
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 *
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
/* List of PBEs used for creating and restoring the suspend image */
struct pbe *restore_pblist;

static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static void *buffer;
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}

struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;

static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%10000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page))
			continue;
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}

int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}

int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;
		highmem_copy = save->next;

		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long) save->data);
		kfree(save);
	}
	return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked with the PG_nosave_free flag
 *	and we count them using allocated_unsafe_pages.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE);
}

/**
 *	free_image_page - free page represented by @addr, allocated with
 *	alloc_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	ClearPageNosave(virt_to_page(addr));
	if (clear_nosave_free)
		ClearPageNosaveFree(virt_to_page(addr));
	free_page((unsigned long)addr);
}
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/**
 *	struct chain_allocator is used for allocating small objects out of
 *	a linked list of pages called 'the chain'.
 *
 *	The chain grows each time there is no room for a new object in
 *	the current page.  The allocated objects cannot be freed individually.
 *	It is only possible to free them all at once, by freeing the entire
 *	chain.
 *
 *	NOTE: The chain allocator may be inefficient if the allocated objects
 *	are not much smaller than PAGE_SIZE.
 */
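/*
 * Usage sketch (illustrative only; it mirrors what memory_bm_create()
 * below does with struct bm_block objects, and GFP_KERNEL is an
 * arbitrary choice here, the callers in this file pass GFP_ATOMIC):
 *
 *	struct chain_allocator ca;
 *	struct bm_block *bb;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	bb = chain_alloc(&ca, sizeof(struct bm_block));
 *	if (!bb)
 *		return -ENOMEM;
 *	... further chain_alloc() calls pack more objects into the same
 *	page until it fills up, then the chain grows by one page ...
 *	chain_free(&ca, PG_UNSAFE_CLEAR);	releases all objects at once
 */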
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = alloc_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
{
	free_list_of_pages(ca->chain, clear_page_nosave);
	memset(ca, 0, sizeof(struct chain_allocator));
}
/**
 *	Data types related to memory bitmaps.
 *
 *	Memory bitmap is a structure consisting of many linked lists of
 *	objects.  The main list's elements are of type struct zone_bitmap
 *	and each of them corresponds to one zone.  For each zone bitmap
 *	object there is a list of objects of type struct bm_block that
 *	represent the blocks of bit chunks in which information is
 *	stored.
 *
 *	struct memory_bitmap contains a pointer to the main list of zone
 *	bitmap objects, a struct bm_position used for browsing the bitmap,
 *	and a pointer to the list of pages used for allocating all of the
 *	zone bitmap objects and bitmap block objects.
 *
 *	NOTE: It has to be possible to lay out the bitmap in memory
 *	using only allocations of order 0.  Additionally, the bitmap is
 *	designed to work with an arbitrary number of zones (this is over the
 *	top for now, but let's avoid making unnecessary assumptions ;-).
 *
 *	struct zone_bitmap contains a pointer to a list of bitmap block
 *	objects and a pointer to the bitmap block object that has been
 *	most recently used for setting bits.  Additionally, it contains the
 *	pfns that correspond to the start and end of the represented zone.
 *
 *	struct bm_block contains a pointer to the memory page in which
 *	information is stored (in the form of a block of bit chunks
 *	of type unsigned long each).  It also contains the pfns that
 *	correspond to the start and end of the represented memory area and
 *	the number of bit chunks in the block.
 *
 *	NOTE: Memory bitmaps are used for two types of operations only:
 *	"set a bit" and "find the next bit set".  Moreover, the searching
 *	is always carried out after all of the "set a bit" operations
 *	on the given bitmap.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
#define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
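/*
 * Concrete geometry, assuming PAGE_SIZE = 4096 and sizeof(long) = 8
 * (a 64-bit build; other configurations scale accordingly):
 *
 *	BM_CHUNKS_PER_BLOCK = 4096 / 8  = 512 chunks per bitmap page
 *	BM_BITS_PER_CHUNK   = 8 << 3    = 64 bits per chunk
 *	BM_BITS_PER_BLOCK   = 4096 << 3 = 32768 bits per bitmap page
 *
 * One order-0 bitmap page therefore tracks 32768 page frames, i.e.
 * 128 MB of memory with 4 KB pages.
 */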
struct bm_block {
	struct bm_block *next;		/* next element of the list */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned int size;	/* number of bit chunks */
	unsigned long *data;	/* chunks of bits representing pages */
};

struct zone_bitmap {
	struct zone_bitmap *next;	/* next element of the list */
	unsigned long start_pfn;	/* minimal pfn in this zone */
	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
	struct bm_block *bm_blocks;	/* list of bitmap blocks */
	struct bm_block *cur_block;	/* recently used bitmap block */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct zone_bitmap *zone_bm;
	struct bm_block *block;
	int chunk;
	int bit;
};

struct memory_bitmap {
	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
{
	bm->cur.chunk = 0;
	bm->cur.bit = -1;
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;

	zone_bm = bm->zone_bm_list;
	bm->cur.zone_bm = zone_bm;
	bm->cur.block = zone_bm->bm_blocks;
	memory_bm_reset_chunk(bm);
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
 *	create_bm_block_list - create a list of block bitmap objects
 */

static inline struct bm_block *
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
{
	struct bm_block *bblist = NULL;

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return NULL;

		bb->next = bblist;
		bblist = bb;
	}
	return bblist;
}

/**
 *	create_zone_bm_list - create a list of zone bitmap objects
 */

static inline struct zone_bitmap *
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
{
	struct zone_bitmap *zbmlist = NULL;

	while (nr_zones-- > 0) {
		struct zone_bitmap *zbm;

		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
		if (!zbm)
			return NULL;

		zbm->next = zbmlist;
		zbmlist = zbm;
	}
	return zbmlist;
}
/**
 *	memory_bm_create - allocate memory for a memory bitmap
 */

static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of zones */
	nr = 0;
	for_each_zone (zone)
		if (populated_zone(zone) && !is_highmem(zone))
			nr++;

	/* Allocate the list of zone bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone (zone) {
		unsigned long pfn;

		if (!populated_zone(zone) || is_highmem(zone))
			continue;

		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		/* Allocate the list of bitmap block objects */
		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;

		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = alloc_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;

			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

Free:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}
/**
 *	memory_bm_free - free memory occupied by the memory bitmap @bm
 */

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct zone_bitmap *zone_bm;

	/* Free the list of bit blocks for each zone_bitmap object */
	zone_bm = bm->zone_bm_list;
	while (zone_bm) {
		struct bm_block *bb;

		bb = zone_bm->bm_blocks;
		while (bb) {
			if (bb->data)
				free_image_page(bb->data, clear_nosave_free);
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	free_list_of_pages(bm->p_list, clear_nosave_free);
	bm->zone_bm_list = NULL;
}
/**
 *	memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
 *	to the given pfn.  The cur_zone_bm member of @bm and the cur_block
 *	member of @bm->cur_zone_bm are updated.
 *
 *	If the bit cannot be set, the function returns -EINVAL.
 */

static int
memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;

	/* Check if the pfn is from the current zone */
	zone_bm = bm->cur.zone_bm;
	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
		zone_bm = bm->zone_bm_list;
		/* We don't assume that the zones are sorted by pfns */
		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
			zone_bm = zone_bm->next;
			if (unlikely(!zone_bm))
				return -EINVAL;
		}
		bm->cur.zone_bm = zone_bm;
	}
	/* Check if the pfn corresponds to the current bitmap block */
	bb = zone_bm->cur_block;
	if (pfn < bb->start_pfn)
		bb = zone_bm->bm_blocks;

	while (pfn >= bb->end_pfn) {
		bb = bb->next;
		if (unlikely(!bb))
			return -EINVAL;
	}
	zone_bm->cur_block = bb;
	pfn -= bb->start_pfn;
	set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
	return 0;
}
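/*
 * Bit addressing example (4 KB pages and 64-bit longs assumed): for
 * pfn 70000 in a block with start_pfn 65536, the offset within the
 * block is 70000 - 65536 = 4464, so the bit is set in chunk
 * 4464 / 64 = 69, at bit position 4464 % 64 = 48.
 */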
/* Two auxiliary functions for memory_bm_next_pfn */

/* Find the first set bit in the given chunk, if there is one */

static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
{
	bit++;
	while (bit < BM_BITS_PER_CHUNK) {
		if (test_bit(bit, chunk_p))
			return bit;

		bit++;
	}
	return -1;
}

/* Find a chunk containing some bits set in the given block of bits */

static inline int next_chunk_in_block(int n, struct bm_block *bb)
{
	n++;
	while (n < bb->size) {
		if (bb->data[n])
			return n;

		n++;
	}
	return -1;
}
/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call
 *	to this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int chunk;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			chunk = bm->cur.chunk;
			bit = bm->cur.bit;
			do {
				bit = next_bit_in_chunk(bit, bb->data + chunk);
				if (bit >= 0)
					goto Return_pfn;

				chunk = next_chunk_in_block(chunk, bb);
				bit = -1;
			} while (chunk >= 0);
			bb = bb->next;
			bm->cur.block = bb;
			memory_bm_reset_chunk(bm);
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			memory_bm_reset_chunk(bm);
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

Return_pfn:
	bm->cur.chunk = chunk;
	bm->cur.bit = bit;
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}
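/*
 * The expected access pattern for the two functions above is a reset
 * followed by a scan up to the end marker, as in copy_data_pages()
 * and mark_unsafe_pages() below (act_on_pfn() is a placeholder):
 *
 *	memory_bm_position_reset(bm);
 *	do {
 *		pfn = memory_bm_next_pfn(bm);
 *		if (pfn != BM_END_OF_MAP)
 *			act_on_pfn(pfn);
 *	} while (pfn != BM_END_OF_MAP);
 */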
/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	needed for setting up the suspend image data structures for the given
 *	zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return res;
}
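/*
 * Worked example, assuming 4 KB pages and a 40-byte struct bm_block
 * (a typical 64-bit layout): for a zone spanning 1 GB, i.e. 262144
 * page frames,
 *
 *	res  = DIV_ROUND_UP(262144, 32768) = 8 bitmap data pages
 *	res += DIV_ROUND_UP(8 * 40, 4096)  = 1 page of bm_block objects
 *
 * giving an estimate of 9 additional pages for that zone.
 */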
/**
 *	pfn_is_nosave - check if the given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
/**
 *	saveable_page - Determine whether a page should be cloned or not.
 *	@pfn: The page
 *
 *	We save a page if it isn't Nosave, is not in the range of pages
 *	statically defined as 'unsaveable', and isn't part of a free
 *	chunk of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			n += !!saveable_page(pfn);
	}
	return n;
}
static inline void copy_data_page(long *dst, long *src)
{
	int n;

	/* copy_page and memcpy are not usable for copying task structs. */
	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone (zone) {
		unsigned long max_zone_pfn;

		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	do {
		pfn = memory_bm_next_pfn(orig_bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			struct page *page;
			void *src;

			page = pfn_to_page(pfn);
			src = page_address(page);
			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
			copy_data_page(page_address(page), src);
		}
	} while (pfn != BM_END_OF_MAP);
}
/**
 *	swsusp_free - free pages allocated for the suspend.
 *
 *	Suspend pages are allocated before the atomic copy is made, so we
 *	need to release them after the resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}
/**
 *	enough_free_mem - Make sure we have enough free memory for the
 *	snapshot image.
 *
 *	Returns TRUE or FALSE after checking the number of available
 *	free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int free = 0, meta = 0;

	for_each_zone (zone)
		if (!is_highmem(zone)) {
			free += zone->free_pages;
			meta += snapshot_additional_pages(zone);
		}

	pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, meta, free);

	return free > nr_pages + PAGES_FOR_IO + meta;
}
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages)
{
	int error;

	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	while (nr_pages-- > 0) {
		struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD);
		if (!page)
			goto Free;

		SetPageNosave(page);
		SetPageNosaveFree(page);
		memory_bm_set_bit(copy_bm, page_to_pfn(page));
	}
	return 0;

Free:
	swsusp_free();
	return -ENOMEM;
}

/* Memory bitmap used for marking saveable pages */
static struct memory_bitmap orig_bm;
/* Memory bitmap used for marking allocated pages that will contain the copies
 * of saveable pages
 */
static struct memory_bitmap copy_bm;
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section: \n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages))
		return -ENOMEM;

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_copy_pages = nr_pages;
	nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
	return 0;
}
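/*
 * Sizing note (a worked example, assuming 64-bit longs and 4 KB pages):
 * each saved page costs one unsigned long of pfn metadata, so copying
 * 100000 pages needs 800000 bytes of pfn data, i.e.
 * (800000 + 4095) >> 12 = 196 meta pages on top of the page copies.
 */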
static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}
/**
 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 *	are stored in the array @buf[] (1 page at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call @handle should point to a zeroed snapshot_handle
 *	structure.  The structure is updated and the same pointer should be
 *	passed on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	otherwise cross a page boundary.
 *
 *	The function returns 0 to indicate the end of the data stream, and
 *	a negative number is returned on error.  In such cases the structure
 *	pointed to by @handle is not updated and should not be used any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			unsigned long pfn = memory_bm_next_pfn(&copy_bm);

			handle->buffer = page_address(pfn_to_page(pfn));
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
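/*
 * Sketch of a consumer loop (the real consumer is the swap-writing
 * code; write_page() here is a hypothetical output routine):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0)
 *		write_page(data_of(handle), ret);
 *	return ret;	ret == 0 means the whole image has been read
 */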
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_zone (zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				SetPageNosaveFree(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
static inline int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		reason = "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		reason = "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		reason = "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 *	load_header - check the image header and copy data from it
 */

static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 *	the corresponding bit in the memory bitmap @bm
 */

static inline void
unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		memory_bm_set_bit(bm, buf[j]);
	}
}
/**
 *	prepare_image - use the memory bitmap @bm to mark the pages that will
 *	be overwritten in the process of restoring the system memory state
 *	from the suspend image ("unsafe" pages) and allocate memory for the
 *	image.
 *
 *	The idea is to allocate a new memory bitmap first and then allocate
 *	as many pages as needed for the image data, but not to assign these
 *	pages to specific tasks initially.  Instead, we just mark them as
 *	allocated and create a list of "safe" pages that will be used later.
 */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

static struct linked_page *safe_pages_list;

static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages;
	struct linked_page *sp_list, *lp;
	int error;

	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = alloc_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!PageNosaveFree(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		SetPageNosave(virt_to_page(lp));
		SetPageNosaveFree(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

Free:
	swsusp_free();
	return error;
}
/**
 *	get_buffer - compute the address that snapshot_write_next() should
 *	set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

	if (PageNosave(page) && PageNosaveFree(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_address = (unsigned long)page_address(page);
	pbe->address = (unsigned long)safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return (void *)pbe->address;
}
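/*
 * The pages stored via PBEs are moved into their original page frames
 * at the very end of resume by the architecture-specific code, which
 * conceptually does the equivalent of (sketch):
 *
 *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 *		copy_page((void *)pbe->orig_address, (void *)pbe->address);
 */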
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call @handle should point to a zeroed snapshot_handle
 *	structure.  The structure is updated and the same pointer should be
 *	passed on every subsequent call.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	otherwise cross a page boundary.
 *
 *	The function returns 0 to indicate the "end of file" condition, and
 *	a negative number is returned on error.  In such cases the structure
 *	pointed to by @handle is not updated and should not be used any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = alloc_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset)
		handle->buffer = buffer;
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (handle->prev == 0) {
			error = load_header(buffer);
			if (error)
				return error;

			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
			if (error)
				return error;

		} else if (handle->prev <= nr_meta_pages) {
			unpack_orig_pfns(buffer, &copy_bm);
			if (handle->prev == nr_meta_pages) {
				error = prepare_image(&orig_bm, &copy_bm);
				if (error)
					return error;

				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
				memory_bm_position_reset(&orig_bm);
				restore_pblist = NULL;
				handle->buffer = get_buffer(&orig_bm, &ca);
				handle->sync_read = 0;
				if (!handle->buffer)
					return -ENOMEM;
			}
		} else {
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
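/*
 * Sketch of a producer loop feeding the image back in on resume
 * (read_page() is a hypothetical input routine; the real caller is
 * the swap-reading code):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	do {
 *		ret = snapshot_write_next(&handle, PAGE_SIZE);
 *		if (ret > 0)
 *			read_page(data_of(handle), ret);
 *	} while (ret > 0);
 *	if (!ret && !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;	the image is incomplete
 */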
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages ||
		 handle->cur <= nr_meta_pages + nr_copy_pages);
}

void snapshot_free_unused_memory(struct snapshot_handle *handle)
{
	/* Free only if we have loaded the image entirely */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
}