/*
 * kernel/power/tuxonice_atomic_copy.c
 *
 * Copyright 2004-2008 Nigel Cunningham (nigel at tuxonice net)
 * Copyright (C) 2006 Red Hat, inc.
 *
 * Distributed under GPLv2.
 *
 * Routines for doing the atomic save/restore.
 */

#include <linux/suspend.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/console.h>
#include <linux/ftrace.h>
#include "tuxonice.h"
#include "tuxonice_storage.h"
#include "tuxonice_power_off.h"
#include "tuxonice_ui.h"
#include "power.h"
#include "tuxonice_io.h"
#include "tuxonice_prepare_image.h"
#include "tuxonice_pageflags.h"
#include "tuxonice_checksum.h"
#include "tuxonice_builtin.h"
#include "tuxonice_atomic_copy.h"
#include "tuxonice_alloc.h"
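
/*
 * Number of pages pageset1 has grown by as a result of the atomic copy,
 * compared against extra_pd1_pages_allowance in __toi_post_context_save.
 */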
long extra_pd1_pages_used;
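
/* ftrace state saved in toi_go_atomic and restored in toi_end_atomic. */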
static int ftrace_save;

/**
 * free_pbe_list: Free page backup entries used by the atomic copy code.
 * @list:	List to free.
 * @highmem:	Whether the list is in highmem.
 *
 * Normally, this function isn't used. If, however, we need to abort before
 * doing the atomic copy, we use this to free the pbes previously allocated.
 **/
static void free_pbe_list(struct pbe **list, int highmem)
{
	while (*list) {
		int i;
		struct pbe *free_pbe, *next_page = NULL;
		struct page *page;

		if (highmem) {
			page = (struct page *) *list;
			free_pbe = (struct pbe *) kmap(page);
		} else {
			page = virt_to_page(*list);
			free_pbe = *list;
		}

		for (i = 0; i < PBES_PER_PAGE; i++) {
			if (!free_pbe)
				break;
			if (highmem)
				toi__free_page(29, free_pbe->address);
			else
				toi_free_page(29,
					(unsigned long) free_pbe->address);
			free_pbe = free_pbe->next;
		}

		if (highmem) {
			if (free_pbe)
				next_page = free_pbe;
			kunmap(page);
		} else {
			if (free_pbe)
				next_page = free_pbe;
		}

		toi__free_page(29, page);
		*list = (struct pbe *) next_page;
	}
}

/**
 * copyback_post: Post atomic-restore actions.
 *
 * After doing the atomic restore, we have a few more things to do:
 * 1) We want to retain some values across the restore, so we now copy
 * these from the nosave variables to the normal ones.
 * 2) Set the status flags.
 * 3) Resume devices.
 * 4) Tell userui so it can redraw & restore settings.
 * 5) Reread the page cache.
 **/
void copyback_post(void)
{
	struct toi_boot_kernel_data *bkd =
		(struct toi_boot_kernel_data *) boot_kernel_data_buffer;

	/*
	 * The boot kernel's data may be larger (newer version) or
	 * smaller (older version) than ours. Copy the minimum
	 * of the two sizes, so that we don't overwrite valid values
	 * from pre-atomic copy.
	 */
	memcpy(&toi_bkd, (char *) boot_kernel_data_buffer,
		min_t(int, sizeof(struct toi_boot_kernel_data),
			bkd->size));

	if (toi_activate_storage(1))
		panic("Failed to reactivate our storage.");

	toi_ui_post_atomic_restore();

	toi_cond_pause(1, "About to reload secondary pagedir.");

	if (read_pageset2(0))
		panic("Unable to successfully reread the page cache.");

	/*
	 * If the user wants to sleep again after resuming from full-off,
	 * it's most likely to be in order to suspend to ram, so we'll
	 * do this check after loading pageset2, to give them the fastest
	 * wakeup when they are ready to use the computer again.
	 */
	toi_check_resleep();
}

/**
 * toi_copy_pageset1: Do the atomic copy of pageset1.
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	source_index = get_next_bit_on(&pageset1_map, max_pfn + 1);
	dest_index = get_next_bit_on(&pageset1_copy_map, max_pfn + 1);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1,
		    was_present;

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ?
			kmap_atomic(origpage, KM_USER0) :
			page_address(origpage);

		copyvirt = PageHighMem(copypage) ?
			kmap_atomic(copypage, KM_USER1) :
			page_address(copypage);

		was_present = kernel_page_present(origpage);
		if (!was_present)
			kernel_map_pages(origpage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present)
			kernel_map_pages(origpage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt, KM_USER0);

		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt, KM_USER1);

		source_index = get_next_bit_on(&pageset1_map, source_index);
		dest_index = get_next_bit_on(&pageset1_copy_map, dest_index);
	}
}

/**
 * __toi_post_context_save: Steps after saving the cpu context.
 *
 * Steps taken after saving the CPU state to make the actual
 * atomic copy.
 *
 * Called from swsusp_save in snapshot.c via toi_post_context_save.
 **/
int __toi_post_context_save(void)
{
	long old_ps1_size = pagedir1.size;

	check_checksums();

	free_checksum_pages();

	toi_recalculate_image_contents(1);

	extra_pd1_pages_used = pagedir1.size - old_ps1_size;

	if (extra_pd1_pages_used > extra_pd1_pages_allowance) {
		printk(KERN_INFO "Pageset1 has grown by %ld pages. "
			"extra_pages_allowance is currently only %lu.\n",
			pagedir1.size - old_ps1_size,
			extra_pd1_pages_allowance);
		set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
		return -1;
	}

	if (!test_action_state(TOI_TEST_FILTER_SPEED) &&
	    !test_action_state(TOI_TEST_BIO))
		toi_copy_pageset1();

	return 0;
}

/**
 * toi_hibernate: High level code for doing the atomic copy.
 *
 * High-level code which prepares to do the atomic copy. Loosely based
 * on the swsusp version, but with the following twists:
 * - We set toi_running so the swsusp code uses our code paths.
 * - We give better feedback regarding what goes wrong if there is a problem.
 * - We use an extra function to call the assembly, just in case this code
 *   is in a module (return address).
 **/
int toi_hibernate(void)
{
	int error;

	toi_running = 1; /* For the swsusp code we use :< */

	error = toi_lowlevel_builtin();

	toi_running = 0;
	return error;
}

/**
 * toi_atomic_restore: Prepare to do the atomic restore.
 *
 * Get ready to do the atomic restore. This gets us into the same state
 * we were in prior to calling do_toi_lowlevel while hibernating:
 * hot-unplugging secondary cpus and freezing processes, before starting
 * the thread that will do the restore.
 **/
int toi_atomic_restore(void)
{
	int error;

	toi_running = 1;

	toi_prepare_status(DONT_CLEAR_BAR, "Atomic restore.");

	if (add_boot_kernel_data_pbe())
		goto Failed;

	if (toi_go_atomic(PMSG_QUIESCE, 0))
		goto Failed;

	/* We'll ignore saved state, but this gets preempt count (etc) right */
	save_processor_state();

	error = swsusp_arch_resume();
	/*
	 * Code below is only ever reached in case of failure. Otherwise
	 * execution continues at the place where swsusp_arch_suspend was
	 * called.
	 *
	 * We don't know whether it's safe to continue (this shouldn't
	 * happen), so let's err on the side of caution.
	 */
	BUG();

Failed:
	free_pbe_list(&restore_pblist, 0);
#ifdef CONFIG_HIGHMEM
	free_pbe_list(&restore_highmem_pblist, 1);
#endif
	if (test_action_state(TOI_PM_PREPARE_CONSOLE))
		pm_restore_console();
	toi_running = 0;
	return 1;
}
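
/**
 * toi_go_atomic: Quiesce the system ready for the atomic copy/restore.
 *
 * Step through suspending the console, ftrace, devices, secondary cpus
 * and irqs (plus, when hibernating, the platform firmware hooks). If any
 * step fails, record an abort result and unwind the steps already taken
 * via toi_end_atomic, returning 1. Returns 0 once everything is quiesced.
 **/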
int toi_go_atomic(pm_message_t state, int suspend_time)
{
	toi_prepare_status(DONT_CLEAR_BAR, "Doing atomic copy/restore.");

	if (suspend_time && toi_platform_begin()) {
		set_abort_result(TOI_PLATFORM_PREP_FAILED);
		toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 0);
		return 1;
	}

	suspend_console();
	ftrace_save = __ftrace_enabled_save();

	if (device_suspend(state)) {
		set_abort_result(TOI_DEVICE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_RESUME_CONSOLE, suspend_time, 1);
		return 1;
	}

	if (suspend_time && toi_platform_pre_snapshot()) {
		set_abort_result(TOI_PRE_SNAPSHOT_FAILED);
		toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH, suspend_time, 0);
		return 1;
	}

	if (!suspend_time && toi_platform_pre_restore()) {
		set_abort_result(TOI_PRE_RESTORE_FAILED);
		toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 0);
		return 1;
	}

	if (test_action_state(TOI_LATE_CPU_HOTPLUG)) {
		if (disable_nonboot_cpus()) {
			set_abort_result(TOI_CPU_HOTPLUG_FAILED);
			toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG,
					suspend_time, 0);
			return 1;
		}
	}

	if (suspend_time && arch_prepare_suspend()) {
		set_abort_result(TOI_ARCH_PREPARE_FAILED);
		toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG, suspend_time, 0);
		return 1;
	}

	device_pm_lock();
	local_irq_disable();

	/* At this point, device_suspend() has been called, but *not*
	 * device_power_down(). We *must* device_power_down() now.
	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
	 * become desynchronized with the actual state of the hardware
	 * at resume time, and evil weirdness ensues.
	 */

	if (device_power_down(state)) {
		set_abort_result(TOI_DEVICE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_IRQS, suspend_time, 0);
		return 1;
	}

	return 0;
}
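
/**
 * toi_end_atomic: Unwind the steps taken by toi_go_atomic.
 * @stage:	Step to start unwinding from.
 *
 * The switch cases below deliberately fall through, so that starting
 * at any stage also undoes every step taken before it.
 **/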
void toi_end_atomic(int stage, int suspend_time, int error)
{
	switch (stage) {
	case ATOMIC_ALL_STEPS:
		if (!suspend_time)
			toi_platform_leave();
		device_power_up(error ? PMSG_RECOVER :
			(suspend_time ? PMSG_THAW : PMSG_RESTORE));
	case ATOMIC_STEP_IRQS:
		local_irq_enable();
		device_pm_unlock();
	case ATOMIC_STEP_CPU_HOTPLUG:
		if (test_action_state(TOI_LATE_CPU_HOTPLUG))
			enable_nonboot_cpus();
	case ATOMIC_STEP_PLATFORM_FINISH:
		toi_platform_finish();
	case ATOMIC_STEP_DEVICE_RESUME:
		if (suspend_time && error)
			toi_platform_recover();
		device_resume(error ? PMSG_RECOVER :
			(suspend_time ? PMSG_THAW : PMSG_RESTORE));
	case ATOMIC_STEP_RESUME_CONSOLE:
		__ftrace_enabled_restore(ftrace_save);
		resume_console();
	case ATOMIC_STEP_PLATFORM_END:
		toi_platform_end();

		toi_prepare_status(DONT_CLEAR_BAR, "Post atomic.");
	}
}