arch/x86/kernel/crash_dump_32.c
/*
 * Memory preserving reboot related code.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>

#include <asm/uaccess.h>
static void *kdump_buf_page;
/* Stores the physical address of the elf header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
static inline bool is_crashed_pfn_valid(unsigned long pfn)
{
#ifndef CONFIG_X86_PAE
	/*
	 * A non-PAE kdump kernel executed from a PAE one would crop the
	 * high pte bits and end up poking unwanted memory counted again
	 * from address 0; we don't want that. The pte must fit into an
	 * unsigned long. In effect the test checks that the high 12 bits
	 * of the pfn are zero (the pfn will be shifted left by PAGE_SHIFT).
	 */
	return pte_pfn(pfn_pte(pfn, __pgprot(0))) == pfn;
#else
	return true;
#endif
}
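/*
 * Illustrative example (added; not part of the original file), assuming
 * PAGE_SHIFT == 12 and a 32-bit unsigned long pte on non-PAE:
 *
 *	pfn 0x00180000 corresponds to physical address 0x180000000, which
 *	lies above 4GB.  Building a pte from it truncates the high bits,
 *	so pte_pfn(pfn_pte(pfn, __pgprot(0))) comes back as 0x00080000,
 *	which is != pfn, and is_crashed_pfn_valid() rejects the page
 *	instead of letting the copy poke memory near address 0.
 */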
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Calling copy_to_user() in atomic context is not desirable, so the data
 * is first copied to a pre-allocated kernel page and then copied to user
 * space from non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	if (!is_crashed_pfn_valid(pfn))
		return -EFAULT;

	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr, KM_PTE0);
	} else {
		if (!kdump_buf_page) {
			printk(KERN_WARNING "Kdump: Kdump buffer page not"
				" allocated\n");
			kunmap_atomic(vaddr, KM_PTE0);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr, KM_PTE0);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}
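/*
 * Illustrative caller sketch (added; not part of this file).  The
 * /proc/vmcore read path in fs/proc/vmcore.c walks "oldmem" one page at
 * a time and hands each chunk to copy_oldmem_page().  The loop below is
 * a simplified sketch of that pattern; the function name and details are
 * assumptions rather than the real implementation, so it is kept out of
 * the build.
 */
#if 0
static ssize_t oldmem_read_sketch(char *buf, size_t count,
				  u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t tmp, read = 0;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	while (count) {
		/* Never cross a page boundary in a single copy. */
		nr_bytes = min(count, (size_t)(PAGE_SIZE - offset));

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		count -= nr_bytes;
		pfn++;
		offset = 0;
	}

	return read;
}
#endif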
/*
 * copy_oldmem_page() holds an atomic mapping and must not sleep, so the
 * bounce page used for user-space copies is allocated once at boot.
 */
static int __init kdump_buf_page_init(void)
{
	int ret = 0;

	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
			" page\n");
		ret = -ENOMEM;
	}

	return ret;
}
arch_initcall(kdump_buf_page_init);