[PATCH] Change the name of pagedir_nosave
kernel/power/snapshot.c (linux-2.6/linux-loongson.git)
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
/* List of PBEs used for creating and restoring the suspend image */
struct pbe *restore_pblist;

static unsigned int nr_copy_pages;
static unsigned int nr_meta_pages;
static unsigned long *buffer;
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned long zone_pfn;
        unsigned int n = 0;

        for_each_zone (zone)
                if (is_highmem(zone)) {
                        mark_free_pages(zone);
                        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
                                struct page *page;
                                unsigned long pfn = zone_pfn + zone->zone_start_pfn;
                                if (!pfn_valid(pfn))
                                        continue;
                                page = pfn_to_page(pfn);
                                if (PageReserved(page))
                                        continue;
                                if (PageNosaveFree(page))
                                        continue;
                                n++;
                        }
                }
        return n;
}
struct highmem_page {
        char *data;
        struct page *page;
        struct highmem_page *next;
};

static struct highmem_page *highmem_copy;
static int save_highmem_zone(struct zone *zone)
{
        unsigned long zone_pfn;
        mark_free_pages(zone);
        for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
                struct page *page;
                struct highmem_page *save;
                void *kaddr;
                unsigned long pfn = zone_pfn + zone->zone_start_pfn;

                if (!(pfn%10000))
                        printk(".");
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                /*
                 * This condition results from rvmalloc() sans vmalloc_32()
                 * and architectural memory reservations. This should be
                 * corrected eventually when the cases giving rise to this
                 * are better understood.
                 */
                if (PageReserved(page))
                        continue;
                BUG_ON(PageNosave(page));
                if (PageNosaveFree(page))
                        continue;
                save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
                if (!save)
                        return -ENOMEM;
                save->next = highmem_copy;
                save->page = page;
                save->data = (void *) get_zeroed_page(GFP_ATOMIC);
                if (!save->data) {
                        kfree(save);
                        return -ENOMEM;
                }
                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(save->data, kaddr, PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                highmem_copy = save;
        }
        return 0;
}
int save_highmem(void)
{
        struct zone *zone;
        int res = 0;

        pr_debug("swsusp: Saving Highmem");
        drain_local_pages();
        for_each_zone (zone) {
                if (is_highmem(zone))
                        res = save_highmem_zone(zone);
                if (res)
                        return res;
        }
        printk("\n");
        return 0;
}
int restore_highmem(void)
{
        printk("swsusp: Restoring Highmem\n");
        while (highmem_copy) {
                struct highmem_page *save = highmem_copy;
                void *kaddr;
                highmem_copy = save->next;

                kaddr = kmap_atomic(save->page, KM_USER0);
                memcpy(kaddr, save->data, PAGE_SIZE);
                kunmap_atomic(kaddr, KM_USER0);
                free_page((long) save->data);
                kfree(save);
        }
        return 0;
}
#else
static inline unsigned int count_highmem_pages(void) {return 0;}
static inline int save_highmem(void) {return 0;}
static inline int restore_highmem(void) {return 0;}
#endif
/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend.
 *
 * The unsafe pages are marked with the PG_nosave_free flag
 * and we count them using unsafe_pages.
 */

static unsigned int unsafe_pages;

static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && PageNosaveFree(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        SetPageNosave(virt_to_page(res));
                        unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                SetPageNosave(virt_to_page(res));
                SetPageNosaveFree(virt_to_page(res));
        }
        return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)alloc_image_page(gfp_mask, 1);
}
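/*
 * Illustrative use (a sketch of a hypothetical caller, not part of this
 * file): architecture resume code typically allocates pages for temporary
 * page tables this way, so that they cannot collide with the image data:
 *
 *	pgd_t *resume_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
 *	if (!resume_pgd)
 *		return -ENOMEM;
 */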
/**
 * free_image_page - free page represented by @addr, allocated with
 * alloc_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
        ClearPageNosave(virt_to_page(addr));
        if (clear_nosave_free)
                ClearPageNosaveFree(virt_to_page(addr));
        free_page((unsigned long)addr);
}

/**
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
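/*
 * Worked example (assuming PAGE_SHIFT == 12): with __nosave_begin at
 * physical 0x00102000 and __nosave_end at 0x00104800,
 * nosave_begin_pfn == 0x102 and, because of the PAGE_ALIGN(),
 * nosave_end_pfn == 0x105, so pfns 0x102-0x104 are treated as 'nosave'.
 */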
/**
 * saveable - Determine whether a page should be cloned or not.
 * @pfn: The page
 *
 * We save a page if it isn't Nosave, is not in the range of pages
 * statically defined as 'unsaveable', and is not part of a free chunk
 * of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);

        if (PageNosave(page))
                return NULL;
        if (PageReserved(page) && pfn_is_nosave(pfn))
                return NULL;
        if (PageNosaveFree(page))
                return NULL;

        return page;
}
unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_zone (zone) {
                if (is_highmem(zone))
                        continue;
                mark_free_pages(zone);
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        n += !!saveable_page(pfn);
        }
        return n;
}
static inline void copy_data_page(long *dst, long *src)
{
        int n;

        /* copy_page and memcpy are not usable for copying task structs. */
        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}
static void copy_data_pages(struct pbe *pblist)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        struct pbe *pbe;

        pbe = pblist;
        for_each_zone (zone) {
                if (is_highmem(zone))
                        continue;
                mark_free_pages(zone);
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
                        struct page *page = saveable_page(pfn);

                        if (page) {
                                void *ptr = page_address(page);

                                BUG_ON(!pbe);
                                copy_data_page((void *)pbe->address, ptr);
                                pbe->orig_address = (unsigned long)ptr;
                                pbe = pbe->next;
                        }
                }
        }
        BUG_ON(pbe);
}
/**
 * free_pagedir - free pages allocated with alloc_pagedir()
 */

static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
{
        struct pbe *pbe;

        while (pblist) {
                pbe = (pblist + PB_PAGE_SKIP)->next;
                free_image_page(pblist, clear_nosave_free);
                pblist = pbe;
        }
}
/**
 * fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage, unsigned int n)
{
        struct pbe *p;

        p = pbpage;
        pbpage += n - 1;
        do
                p->next = p + 1;
        while (++p < pbpage);
}
/**
 * create_pbe_list - Create a list of PBEs on top of a given chain
 * of memory pages allocated with alloc_pagedir()
 *
 * This function assumes that pages allocated by alloc_image_page() will
 * always be zeroed.
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
        struct pbe *pbpage;
        unsigned int num = PBES_PER_PAGE;

        for_each_pb_page (pbpage, pblist) {
                if (num >= nr_pages)
                        break;

                fill_pb_page(pbpage, PBES_PER_PAGE);
                num += PBES_PER_PAGE;
        }
        if (pbpage) {
                num -= PBES_PER_PAGE;
                fill_pb_page(pbpage, nr_pages - num);
        }
}
/**
 * alloc_pagedir - Allocate the page directory.
 *
 * First, determine exactly how many pages we need and
 * allocate them.
 *
 * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 * struct pbe elements (pbes) and the last element in the page points
 * to the next page.
 *
 * On each page we set up a list of struct_pbe elements.
 */

static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask,
                                 int safe_needed)
{
        unsigned int num;
        struct pbe *pblist, *pbe;

        if (!nr_pages)
                return NULL;

        pblist = alloc_image_page(gfp_mask, safe_needed);
        pbe = pblist;
        for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) {
                if (!pbe) {
                        free_pagedir(pblist, 1);
                        return NULL;
                }
                pbe += PB_PAGE_SKIP;
                pbe->next = alloc_image_page(gfp_mask, safe_needed);
                pbe = pbe->next;
        }
        create_pbe_list(pblist, nr_pages);
        return pblist;
}
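/*
 * The resulting chain, schematically (a sketch: PBES_PER_PAGE pbes fit
 * in one page, and the last pbe of each page, reached via PB_PAGE_SKIP,
 * links to the next page):
 *
 *	page 0: [pbe][pbe]...[pbe] --.next--> page 1
 *	page 1: [pbe][pbe]...[pbe] --.next--> page 2
 *	...                          .next--> NULL
 *
 * for_each_pb_page() and for_each_pbe() rely on these links to walk
 * the whole directory.
 */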
/**
 * Free pages we allocated for suspend. Suspend pages are allocated
 * before atomic copy, so we need to free them after resume.
 */

void swsusp_free(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;

        for_each_zone(zone) {
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                if (PageNosave(page) && PageNosaveFree(page)) {
                                        ClearPageNosave(page);
                                        ClearPageNosaveFree(page);
                                        free_page((long) page_address(page));
                                }
                        }
        }
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
        buffer = NULL;
}
/**
 * enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 * Returns TRUE or FALSE after checking the number of available
 * free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_zone (zone)
                if (!is_highmem(zone))
                        n += zone->free_pages;
        pr_debug("swsusp: available memory: %u pages\n", n);
        return n > (nr_pages + PAGES_FOR_IO +
                (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
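/*
 * Worked example (illustrative numbers only; the real PBES_PER_PAGE
 * depends on sizeof(struct pbe)): for nr_pages == 1000 and, say,
 * PBES_PER_PAGE == 341, the check requires more than
 * 1000 + PAGES_FOR_IO + 3 free lowmem pages: one frame per data page,
 * headroom for I/O, and 3 pages to hold the PBE list itself.
 */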
static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
        struct pbe *p;

        for_each_pbe (p, pblist) {
                p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
                if (!p->address)
                        return -ENOMEM;
        }
        return 0;
}
static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
        struct pbe *pblist;

        if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
                printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
                return NULL;
        }

        if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
                printk(KERN_ERR "suspend: Allocating image pages failed.\n");
                swsusp_free();
                return NULL;
        }

        return pblist;
}
asmlinkage int swsusp_save(void)
{
        unsigned int nr_pages;

        pr_debug("swsusp: critical section:\n");

        drain_local_pages();
        nr_pages = count_data_pages();
        printk("swsusp: Need to copy %u pages\n", nr_pages);

        pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
                 nr_pages,
                 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
                 PAGES_FOR_IO, nr_free_pages());

        if (!enough_free_mem(nr_pages)) {
                printk(KERN_ERR "swsusp: Not enough free memory\n");
                return -ENOMEM;
        }

        restore_pblist = swsusp_alloc(nr_pages);
        if (!restore_pblist)
                return -ENOMEM;

        /* During allocating of suspend pagedir, new cold pages may appear.
         * Kill them.
         */
        drain_local_pages();
        copy_data_pages(restore_pblist);

        /*
         * End of critical section. From now on, we can write to memory,
         * but we should not touch disk. This specifically means we must
         * _not_ touch swap space! Except we must write out our image of
         * course.
         */

        nr_copy_pages = nr_pages;
        nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);
        return 0;
}
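/*
 * Metadata arithmetic, for illustration (assuming sizeof(long) == 4 and
 * PAGE_SIZE == 4096): an image of 1000 data pages needs 1000 * 4 = 4000
 * bytes of orig_address metadata, i.e. nr_meta_pages == 1, and the
 * header set up by init_header() below then reports
 * info->pages == 1000 + 1 + 1 (data + meta + header).
 */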
static void init_header(struct swsusp_info *info)
{
        memset(info, 0, sizeof(struct swsusp_info));
        info->version_code = LINUX_VERSION_CODE;
        info->num_physpages = num_physpages;
        memcpy(&info->uts, &system_utsname, sizeof(system_utsname));
        info->cpus = num_online_cpus();
        info->image_pages = nr_copy_pages;
        info->pages = nr_copy_pages + nr_meta_pages + 1;
        info->size = info->pages;
        info->size <<= PAGE_SHIFT;
}
/**
 * pack_orig_addresses - the .orig_address fields of the PBEs from the
 * list starting at @pbe are stored in the array @buf[] (1 page)
 */

static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe)
{
        int j;

        for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
                buf[j] = pbe->orig_address;
                pbe = pbe->next;
        }
        if (!pbe)
                for (; j < PAGE_SIZE / sizeof(long); j++)
                        buf[j] = 0;
        return pbe;
}
/**
 * snapshot_read_next - used for reading the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure. The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to read from the snapshot. It must not be zero.
 *
 * On success the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro. The number returned
 * may be smaller than @count, but this only happens if the read would
 * cross a page boundary otherwise.
 *
 * The function returns 0 to indicate the end of data stream condition,
 * and a negative number is returned on error. In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
        if (handle->cur > nr_meta_pages + nr_copy_pages)
                return 0;
        if (!buffer) {
                /* This makes the buffer be freed by swsusp_free() */
                buffer = alloc_image_page(GFP_ATOMIC, 0);
                if (!buffer)
                        return -ENOMEM;
        }
        if (!handle->offset) {
                init_header((struct swsusp_info *)buffer);
                handle->buffer = buffer;
                handle->pbe = restore_pblist;
        }
        if (handle->prev < handle->cur) {
                if (handle->cur <= nr_meta_pages) {
                        handle->pbe = pack_orig_addresses(buffer, handle->pbe);
                        if (!handle->pbe)
                                handle->pbe = restore_pblist;
                } else {
                        handle->buffer = (void *)handle->pbe->address;
                        handle->pbe = handle->pbe->next;
                }
                handle->prev = handle->cur;
        }
        handle->buf_offset = handle->cur_offset;
        if (handle->cur_offset + count >= PAGE_SIZE) {
                count = PAGE_SIZE - handle->cur_offset;
                handle->cur_offset = 0;
                handle->cur++;
        } else {
                handle->cur_offset += count;
        }
        handle->offset += count;
        return count;
}
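/*
 * Sketch of the read protocol (a hypothetical caller, for illustration;
 * the actual users live elsewhere, e.g. the swap writer):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0) {
 *		// store ret bytes from data_of(handle)
 *	}
 *	// ret == 0: whole image transferred; ret < 0: error
 */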
/**
 * mark_unsafe_pages - mark the pages that cannot be used for storing
 * the image during resume, because they conflict with the pages that
 * had been used before suspend
 */

static int mark_unsafe_pages(struct pbe *pblist)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        struct pbe *p;

        if (!pblist) /* a sanity check */
                return -EINVAL;

        /* Clear page flags */
        for_each_zone (zone) {
                max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (pfn_valid(pfn))
                                ClearPageNosaveFree(pfn_to_page(pfn));
        }

        /* Mark orig addresses */
        for_each_pbe (p, pblist) {
                if (virt_addr_valid(p->orig_address))
                        SetPageNosaveFree(virt_to_page(p->orig_address));
                else
                        return -EFAULT;
        }

        unsafe_pages = 0;

        return 0;
}
static void copy_page_backup_list(struct pbe *dst, struct pbe *src)
{
        /* We assume both lists contain the same number of elements */
        while (src) {
                dst->orig_address = src->orig_address;
                dst = dst->next;
                src = src->next;
        }
}
static int check_header(struct swsusp_info *info)
{
        char *reason = NULL;

        if (info->version_code != LINUX_VERSION_CODE)
                reason = "kernel version";
        if (info->num_physpages != num_physpages)
                reason = "memory size";
        if (strcmp(info->uts.sysname, system_utsname.sysname))
                reason = "system type";
        if (strcmp(info->uts.release, system_utsname.release))
                reason = "kernel release";
        if (strcmp(info->uts.version, system_utsname.version))
                reason = "version";
        if (strcmp(info->uts.machine, system_utsname.machine))
                reason = "machine";
        if (reason) {
                printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
                return -EPERM;
        }
        return 0;
}
/**
 * load_header - check the image header and copy data from it
 */

static int load_header(struct snapshot_handle *handle,
                       struct swsusp_info *info)
{
        int error;
        struct pbe *pblist;

        error = check_header(info);
        if (!error) {
                pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0);
                if (!pblist)
                        return -ENOMEM;
                restore_pblist = pblist;
                handle->pbe = pblist;
                nr_copy_pages = info->image_pages;
                nr_meta_pages = info->pages - info->image_pages - 1;
        }
        return error;
}
/**
 * unpack_orig_addresses - copy the elements of @buf[] (1 page) to
 * the PBEs in the list starting at @pbe
 */

static inline struct pbe *unpack_orig_addresses(unsigned long *buf,
                                                struct pbe *pbe)
{
        int j;

        for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) {
                pbe->orig_address = buf[j];
                pbe = pbe->next;
        }
        return pbe;
}
/**
 * prepare_image - use metadata contained in the PBE list
 * pointed to by restore_pblist to mark the pages that will
 * be overwritten in the process of restoring the system
 * memory state from the image ("unsafe" pages) and allocate
 * memory for the image
 *
 * The idea is to allocate the PBE list first and then
 * allocate as many pages as needed for the image data,
 * but not to assign these pages to the PBEs initially.
 * Instead, we just mark them as allocated and create a list
 * of "safe" pages to be used later.
 */

struct safe_page {
        struct safe_page *next;
        char padding[PAGE_SIZE - sizeof(void *)];
};

static struct safe_page *safe_pages;
static int prepare_image(struct snapshot_handle *handle)
{
        int error = 0;
        unsigned int nr_pages = nr_copy_pages;
        struct pbe *p, *pblist = NULL;

        p = restore_pblist;
        error = mark_unsafe_pages(p);
        if (!error) {
                pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1);
                if (pblist)
                        copy_page_backup_list(pblist, p);
                free_pagedir(p, 0);
                if (!pblist)
                        error = -ENOMEM;
        }
        safe_pages = NULL;
        if (!error && nr_pages > unsafe_pages) {
                nr_pages -= unsafe_pages;
                while (nr_pages--) {
                        struct safe_page *ptr;

                        ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC);
                        if (!ptr) {
                                error = -ENOMEM;
                                break;
                        }
                        if (!PageNosaveFree(virt_to_page(ptr))) {
                                /* The page is "safe", add it to the list */
                                ptr->next = safe_pages;
                                safe_pages = ptr;
                        }
                        /* Mark the page as allocated */
                        SetPageNosave(virt_to_page(ptr));
                        SetPageNosaveFree(virt_to_page(ptr));
                }
        }
        if (!error) {
                restore_pblist = pblist;
        } else {
                handle->pbe = NULL;
                swsusp_free();
        }
        return error;
}
static void *get_buffer(struct snapshot_handle *handle)
{
        struct pbe *pbe = handle->pbe, *last = handle->last_pbe;
        struct page *page = virt_to_page(pbe->orig_address);

        if (PageNosave(page) && PageNosaveFree(page)) {
                /*
                 * We have allocated the "original" page frame and we can
                 * use it directly to store the read page
                 */
                pbe->address = 0;
                if (last && last->next)
                        last->next = NULL;
                return (void *)pbe->orig_address;
        }
        /*
         * The "original" page frame has not been allocated and we have to
         * use a "safe" page frame to store the read page
         */
        pbe->address = (unsigned long)safe_pages;
        safe_pages = safe_pages->next;
        if (last)
                last->next = pbe;
        handle->last_pbe = pbe;
        return (void *)pbe->address;
}
/**
 * snapshot_write_next - used for writing the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure. The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to write to the image. It must not be zero.
 *
 * On success the function returns a positive number. Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro. The number returned
 * may be smaller than @count, but this only happens if the write would
 * cross a page boundary otherwise.
 *
 * The function returns 0 to indicate the "end of file" condition,
 * and a negative number is returned on error. In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
        int error = 0;

        if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
                return 0;
        if (!buffer) {
                /* This makes the buffer be freed by swsusp_free() */
                buffer = alloc_image_page(GFP_ATOMIC, 0);
                if (!buffer)
                        return -ENOMEM;
        }
        if (!handle->offset)
                handle->buffer = buffer;
        handle->sync_read = 1;
        if (handle->prev < handle->cur) {
                if (!handle->prev) {
                        error = load_header(handle,
                                        (struct swsusp_info *)buffer);
                        if (error)
                                return error;
                } else if (handle->prev <= nr_meta_pages) {
                        handle->pbe = unpack_orig_addresses(buffer,
                                        handle->pbe);
                        if (!handle->pbe) {
                                error = prepare_image(handle);
                                if (error)
                                        return error;
                                handle->pbe = restore_pblist;
                                handle->last_pbe = NULL;
                                handle->buffer = get_buffer(handle);
                                handle->sync_read = 0;
                        }
                } else {
                        handle->pbe = handle->pbe->next;
                        handle->buffer = get_buffer(handle);
                        handle->sync_read = 0;
                }
                handle->prev = handle->cur;
        }
        handle->buf_offset = handle->cur_offset;
        if (handle->cur_offset + count >= PAGE_SIZE) {
                count = PAGE_SIZE - handle->cur_offset;
                handle->cur_offset = 0;
                handle->cur++;
        } else {
                handle->cur_offset += count;
        }
        handle->offset += count;
        return count;
}
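/*
 * Sketch of the write (restore) protocol, mirroring the read side
 * (hypothetical caller, for illustration only):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle, PAGE_SIZE)) > 0) {
 *		// fill data_of(handle) with the next ret image bytes
 *	}
 *	if (ret < 0)
 *		return ret;
 *	if (!snapshot_image_loaded(&handle))
 *		return -ENODATA;	// image incomplete
 */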
int snapshot_image_loaded(struct snapshot_handle *handle)
{
        return !(!handle->pbe || handle->pbe->next || !nr_copy_pages ||
                handle->cur <= nr_meta_pages + nr_copy_pages);
}