linux-2.6-ps3.git / arch/tile/kernel/machine_kexec.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * based on machine_kexec.c from other architectures in linux-2.6.18
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/elf.h>
#include <linux/highmem.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
#include <hv/hypervisor.h>

/*
 * This stuff is not in elf.h and is not in any other kernel include.
 * This stuff is needed below in the little boot notes parser to
 * extract the command line so we can pass it to the hypervisor.
 */
struct Elf32_Bhdr {
	Elf32_Word b_signature;
	Elf32_Word b_size;
	Elf32_Half b_checksum;
	Elf32_Half b_records;
};
#define ELF_BOOT_MAGIC		0x0E1FB007
#define EBN_COMMAND_LINE	0x00000004
#define roundupsz(X) (((X) + 3) & ~3)
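
/*
 * Layout assumed by the parser below: a boot-notes page begins with an
 * Elf32_Bhdr whose b_signature is ELF_BOOT_MAGIC and whose b_size covers
 * the whole block (checksummed with ip_compute_csum), followed by a
 * sequence of ordinary Elf32_Nhdr note records; the record of type
 * EBN_COMMAND_LINE carries the command line string in its descriptor.
 */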

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

void machine_shutdown(void)
{
	/*
	 * Normally we would stop all the other processors here, but
	 * the check in machine_kexec_prepare below ensures we'll only
	 * get this far if we've been booted with "nosmp" on the
	 * command line or without CONFIG_SMP so there's nothing to do
	 * here (for now).
	 */
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * Cannot happen.  This type of kexec is disabled on this
	 * architecture (and enforced in machine_kexec_prepare below).
	 */
}

int machine_kexec_prepare(struct kimage *image)
{
	if (num_online_cpus() > 1) {
		pr_warning("%s: detected attempt to kexec "
			   "with num_online_cpus() > 1\n",
			   __func__);
		return -ENOSYS;
	}
	if (image->type != KEXEC_TYPE_DEFAULT) {
		pr_warning("%s: detected attempt to kexec "
			   "with unsupported type: %d\n",
			   __func__,
			   image->type);
		return -ENOSYS;
	}
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	/*
	 * We did nothing in machine_kexec_prepare,
	 * so we have nothing to do here.
	 */
}

/*
 * If we can find elf boot notes on this page, return the command
 * line.  Otherwise, silently return null.  Somewhat kludgy, but no
 * good way to do this without significantly rearchitecting the
 * architecture-independent kexec code.
 */
static unsigned char *kexec_bn2cl(void *pg)
{
	struct Elf32_Bhdr *bhdrp;
	Elf32_Nhdr *nhdrp;
	unsigned char *desc;
	unsigned char *command_line;
	__sum16 csum;

	bhdrp = (struct Elf32_Bhdr *) pg;

	/*
	 * This routine is invoked for every source page, so make
	 * sure to quietly ignore every impossible page.
	 */
	if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
	    bhdrp->b_size > PAGE_SIZE)
		return 0;

	/*
	 * If we get a checksum mismatch, warn with the checksum
	 * so we can diagnose better.
	 */
	csum = ip_compute_csum(pg, bhdrp->b_size);
	if (csum != 0) {
		pr_warning("%s: bad checksum %#x (size %d)\n",
			   __func__, csum, bhdrp->b_size);
		return 0;
	}

	nhdrp = (Elf32_Nhdr *) (bhdrp + 1);

	while (nhdrp->n_type != EBN_COMMAND_LINE) {

		desc = (unsigned char *) (nhdrp + 1);
		desc += roundupsz(nhdrp->n_descsz);

		nhdrp = (Elf32_Nhdr *) desc;

		/* still in bounds? */
		if ((unsigned char *) (nhdrp + 1) >
		    ((unsigned char *) pg) + bhdrp->b_size) {

			pr_info("%s: out of bounds\n", __func__);
			return 0;
		}
	}

	command_line = (unsigned char *) (nhdrp + 1);
	desc = command_line;

	while (*desc != '\0') {
		desc++;
		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
			pr_info("%s: ran off end of page\n",
				__func__);
			return 0;
		}
	}

	return command_line;
}
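
/*
 * Walk the new image's source pages looking for a boot-notes page.  If
 * one is found, pass its command line to the hypervisor; otherwise set
 * an empty command line.
 */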
static void kexec_find_and_set_command_line(struct kimage *image)
{
	kimage_entry_t *ptr, entry;

	unsigned char *command_line = 0;
	unsigned char *r;
	HV_Errno hverr;
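
	/*
	 * Kimage entry list encoding (from the generic kexec code):
	 * IND_DONE terminates the list, IND_INDIRECTION points at the
	 * next page of entries, and IND_SOURCE marks a page holding data
	 * for the new image.  Only source pages can carry boot notes, so
	 * only those are handed to kexec_bn2cl().
	 */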
	for (ptr = &image->head;
	     (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		     phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {

		if ((entry & IND_SOURCE)) {
			void *va =
				kmap_atomic_pfn(entry >> PAGE_SHIFT);
			r = kexec_bn2cl(va);
			if (r) {
				command_line = r;
				break;
			}
			kunmap_atomic(va);
		}
	}

	if (command_line != 0) {
		pr_info("setting new command line to \"%s\"\n",
			command_line);

		hverr = hv_set_command_line(
			(HV_VirtAddr) command_line, strlen(command_line));
		kunmap_atomic(command_line);
	} else {
		pr_info("%s: no command line found; making empty\n",
			__func__);
		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
	}
	if (hverr)
		pr_warning("%s: hv_set_command_line returned error: %d\n",
			   __func__, hverr);
}

/*
 * The kexec code range-checks all its PAs, so to avoid having it run
 * amok and allocate memory and then sequester it from every other
 * controller, we force it to come from controller zero.  We also
 * disable the oom-killer since if we do end up running out of memory,
 * that almost certainly won't help.
 */
struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
{
	gfp_mask |= __GFP_THISNODE | __GFP_NORETRY;
	return alloc_pages_node(0, gfp_mask, order);
}

static void setup_quasi_va_is_pa(void)
{
	HV_PTE *pgtable;
	HV_PTE pte;
	int i;

	/*
	 * Flush our TLB to prevent conflicts between the previous contents
	 * and the new stuff we're about to add.
	 */
	local_flush_tlb_all();

	/* setup VA is PA, at least up to PAGE_OFFSET */
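
	/*
	 * The loop below rewrites each top-level entry below PAGE_OFFSET
	 * as a huge-page identity mapping (VA == PA), cached without L3,
	 * which lets the relocation helper dereference the physical
	 * addresses in the kimage list directly.
	 */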
	pgtable = (HV_PTE *)current->mm->pgd;
	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);

	for (i = 0; i < pgd_index(PAGE_OFFSET); i++)
		pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte);
}

NORET_TYPE void machine_kexec(struct kimage *image)
{
	void *reboot_code_buffer;
	NORET_TYPE void (*rnk)(unsigned long, void *, unsigned long)
		ATTRIB_NORET;

	/* Mask all interrupts before starting to reboot. */
	interrupt_mask_set_mask(~0ULL);

	kexec_find_and_set_command_line(image);

	/*
	 * Adjust the home caching of the control page to be cached on
	 * this cpu, and copy the assembly helper into the control
	 * code page, which we map in the vmalloc area.
	 */
	homecache_change_page_home(image->control_code_page, 0,
				   smp_processor_id());
	reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
				  __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
	memcpy(reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);
	__flush_icache_range(
		(unsigned long) reboot_code_buffer,
		(unsigned long) reboot_code_buffer + relocate_new_kernel_size);

	setup_quasi_va_is_pa();
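
	/*
	 * relocate_new_kernel receives the head of the kimage entry list,
	 * the address of its own copy in the control page, and the entry
	 * point of the new kernel image; it does not return.
	 */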
	/* now call it */
	rnk = reboot_code_buffer;
	(*rnk)(image->head, reboot_code_buffer, image->start);
}