arch/powerpc/kernel/setup_64.c
/*
 *
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/mmu_context.h>
#include <asm/code-patching.h>
#include <asm/kvm_ppc.h>
#include <asm/hugetlb.h>

#include "setup.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int boot_cpuid = 0;
int __initdata spinning_secondaries;
u64 ppc64_pft_size;
/* Pick defaults since we might want to patch instructions
 * before we've read this from the device tree.
 */
struct ppc64_caches ppc64_caches = {
	.dline_size = 0x40,
	.log_dline_size = 6,
	.iline_size = 0x40,
	.log_iline_size = 6
};
EXPORT_SYMBOL_GPL(ppc64_caches);

/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
int dcache_bsize;
int icache_bsize;
int ucache_bsize;
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
static void check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			long smt;
			int rc;

			rc = strict_strtol(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, (int)smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#else
#define check_smt_enabled()
#endif /* CONFIG_SMP */
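/*
 * Editorial example: on a 4-thread-per-core system, "smt-enabled=2" on
 * the kernel command line yields smt_enabled_at_boot == 2 (clamped to
 * threads_per_core), "smt-enabled=off" yields 0, and with no command
 * line option the ibm,smt-enabled property under /options, if present,
 * chooses between all threads ("on") and none ("off").
 */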
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally, provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * enabled.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
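/*
 * Editorial illustration (assuming the usual 64-bit kernel linear
 * mapping base of 0xC000000000000000): with translation off, the CPU
 * ignores the top two address bits, so a real-mode access to
 * 0xC000000000003F00 lands on physical address 0x3F00. This trick only
 * covers globals that live within the RMO region mentioned above.
 */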
void __init early_setup(unsigned long dt_ptr)
{
	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);

	/* Initialize lockdep early or else spinlocks will blow */
	lockdep_init();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);

	/* Fix up paca fields required for the boot cpu */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to "work" until we setup percpu data */
	get_paca()->data_offset = 0;

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * Reserve any gigantic pages requested on the command line.
	 * memblock needs to have been initialized by the time this is
	 * called since this will reserve memory.
	 */
	reserve_hugetlb_gpages();

	DBG(" <- early_setup()\n");
}
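/*
 * Editorial note: the paca is deliberately set up twice above -- first
 * boot_paca, under the assumption that we are cpu 0, then the real
 * paca[boot_cpuid] once early_init_devtree() has told us the logical
 * id of the boot cpu.
 */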
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts enabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();
}
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = __pa(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
#endif /* CONFIG_SMP || CONFIG_KEXEC */
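/*
 * Editorial note: the release mechanism is a single store -- writing
 * the physical address of generic_secondary_smp_init into
 * __secondary_hold_spinloop, which the secondaries poll from their
 * early assembly spinloop. Assuming udelay(1) is roughly one
 * microsecond per iteration, the wait loop above gives them on the
 * order of 100ms to check in before we move on regardless.
 */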
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures
 * (at least until we get rid of them completely). This is mostly some
 * cache information about the CPU that will be used by cache flush
 * routines and/or provided to userland
 */
static void __init initialize_cache_info(void)
{
	struct device_node *np;
	unsigned long num_cpus = 0;

	DBG(" -> initialize_cache_info()\n");

	for_each_node_by_type(np, "cpu") {
		num_cpus += 1;

		/*
		 * We're assuming *all* of the CPUs have the same
		 * d-cache and i-cache sizes... -Peter
		 */
		if (num_cpus == 1) {
			const u32 *sizep, *lsizep;
			u32 size, lsize;

			size = 0;
			lsize = cur_cpu_spec->dcache_bsize;
			sizep = of_get_property(np, "d-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = of_get_property(np, "d-cache-block-size",
						 NULL);
			/* fallback if block size missing */
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "d-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find dcache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.dsize = size;
			ppc64_caches.dline_size = lsize;
			ppc64_caches.log_dline_size = __ilog2(lsize);
			ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;

			size = 0;
			lsize = cur_cpu_spec->icache_bsize;
			sizep = of_get_property(np, "i-cache-size", NULL);
			if (sizep != NULL)
				size = *sizep;
			lsizep = of_get_property(np, "i-cache-block-size",
						 NULL);
			if (lsizep == NULL)
				lsizep = of_get_property(np,
							 "i-cache-line-size",
							 NULL);
			if (lsizep != NULL)
				lsize = *lsizep;
			if (sizep == NULL || lsizep == NULL)
				DBG("Argh, can't find icache properties ! "
				    "sizep: %p, lsizep: %p\n", sizep, lsizep);

			ppc64_caches.isize = size;
			ppc64_caches.iline_size = lsize;
			ppc64_caches.log_iline_size = __ilog2(lsize);
			ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
		}
	}

	DBG(" <- initialize_cache_info()\n");
}
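/*
 * Editorial example (hypothetical property values, not from this tree):
 * a cpu node in the device tree might carry
 *
 *	d-cache-size       = <0x8000>;	// 32KB
 *	d-cache-block-size = <0x80>;	// 128-byte blocks
 *	i-cache-size       = <0x8000>;
 *	i-cache-block-size = <0x80>;
 *
 * which the code above folds into ppc64_caches, falling back to the
 * cputable defaults whenever a property is absent.
 */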
/*
 * Do some initial setup of the system. The parameters are those which
 * were passed in from the bootloader.
 */
void __init setup_system(void)
{
	DBG(" -> setup_system()\n");

	/* Apply the CPU-specific and firmware-specific fixups to kernel
	 * text (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(cur_cpu_spec->cpu_features,
			  &__start___ftr_fixup, &__stop___ftr_fixup);
	do_feature_fixups(cur_cpu_spec->mmu_features,
			  &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
	do_feature_fixups(powerpc_firmware_features,
			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
	do_lwsync_fixups(cur_cpu_spec->cpu_features,
			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
	do_final_fixups();

	/*
	 * Unflatten the device-tree passed by prom_init or kexec
	 */
	unflatten_device_tree();

	/*
	 * Fill the ppc64_caches & systemcfg structures with information
	 * retrieved from the device-tree.
	 */
	initialize_cache_info();

#ifdef CONFIG_PPC_RTAS
	/*
	 * Initialize RTAS if available
	 */
	rtas_initialize();
#endif /* CONFIG_PPC_RTAS */

	/*
	 * Check if we have an initrd provided via the device-tree
	 */
	check_for_initrd();

	/*
	 * Do some platform-specific early initializations, which include
	 * setting up the hash table pointers and some interrupt-mapping
	 * related options that will be used by finish_device_tree()
	 */
	if (ppc_md.init_early)
		ppc_md.init_early();

	/*
	 * We can discover serial ports now since the above set up the
	 * hash table management for us, so ioremap works. We do that early
	 * so that further code can be debugged
	 */
	find_legacy_serial_ports();

	/*
	 * Register early console
	 */
	register_early_udbg_console();

	/*
	 * Initialize xmon
	 */
	xmon_setup();

	smp_setup_cpu_maps();
	check_smt_enabled();

#ifdef CONFIG_SMP
	/* Release secondary cpus out of their spinloops at 0x60 now that
	 * we can map physical -> logical CPU ids
	 */
	smp_release_cpus();
#endif

	printk("Starting Linux PPC64 %s\n", init_utsname()->version);

	printk("-----------------------------------------------------\n");
	printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
	printk("physicalMemorySize            = 0x%llx\n",
	       memblock_phys_mem_size());
	if (ppc64_caches.dline_size != 0x80)
		printk("ppc64_caches.dcache_line_size = 0x%x\n",
		       ppc64_caches.dline_size);
	if (ppc64_caches.iline_size != 0x80)
		printk("ppc64_caches.icache_line_size = 0x%x\n",
		       ppc64_caches.iline_size);
#ifdef CONFIG_PPC_STD_MMU_64
	if (htab_address)
		printk("htab_address                  = 0x%p\n", htab_address);
	printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
	if (PHYSICAL_START > 0)
		printk("physical_start                = 0x%llx\n",
		       (unsigned long long)PHYSICAL_START);
	printk("-----------------------------------------------------\n");

	DBG(" <- setup_system()\n");
}
/* This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 * used to allocate interrupt or emergency stacks for which our
 * exception entry path doesn't deal with being interrupted.
 */
static u64 safe_stack_limit(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS, the first segment is bolted */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
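/*
 * Editorial note, assuming the conventional SID_SHIFT (28) and
 * SID_SHIFT_1T (40) definitions: on Book3S this limit works out to
 * 256MB with 256MB segments and 1TB with 1T segments, i.e. the extent
 * of the first, bolted segment of the linear mapping.
 */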
static void __init irqstack_early_init(void)
{
	u64 limit = safe_stack_limit();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
		hardirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc_base(THREAD_SIZE,
						 THREAD_SIZE, limit));
	}
}
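/*
 * Editorial note: the THREAD_SIZE alignment matters beyond placement
 * below the limit -- the kernel of this era locates a stack's
 * thread_info by masking the stack pointer with ~(THREAD_SIZE - 1),
 * which only works if each stack base is THREAD_SIZE-aligned.
 */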
#ifdef CONFIG_PPC_BOOK3E
static void __init exc_lvl_early_init(void)
{
	extern unsigned int interrupt_base_book3e;
	extern unsigned int exc_debug_debug_book3e;

	unsigned int i;

	for_each_possible_cpu(i) {
		critirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
		dbgirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
		mcheckirq_ctx[i] = (struct thread_info *)
			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
			     (unsigned long)&exc_debug_debug_book3e, 0);
}
#else
#define exc_lvl_early_init()
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled.
 */
static void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, we need to get at them in real mode. This means they
	 * must also be within the RMO region.
	 */
	limit = min(safe_stack_limit(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		unsigned long sp;
		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
		sp += THREAD_SIZE;
		paca[i].emergency_sp = __va(sp);
	}
}
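/*
 * Editorial note: the stack grows downward on powerpc, which is why
 * emergency_sp is set to the *top* of the allocated THREAD_SIZE block
 * (sp + THREAD_SIZE) rather than to its base.
 */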
/*
 * Called from start_kernel; this initializes bootmem, which is used
 * to manage page allocation until mem_init is called.
 */
void __init setup_arch(char **cmdline_p)
{
	ppc64_boot_msg(0x12, "Setup Arch");

	*cmdline_p = cmd_line;

	/*
	 * Set cache line size based on type of cpu as a default.
	 * Systems with OF can look in the properties on the cpu node(s)
	 * for a possibly more accurate value.
	 */
	dcache_bsize = ppc64_caches.dline_size;
	icache_bsize = ppc64_caches.iline_size;

	/* reboot on panic */
	panic_timeout = 180;

	if (ppc_md.panic)
		setup_panic();

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = klimit;

	irqstack_early_init();
	exc_lvl_early_init();
	emergency_stack_init();

#ifdef CONFIG_PPC_STD_MMU_64
	stabs_alloc();
#endif
	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

	paging_init();

	/* Initialize the MMU context management stuff */
	mmu_context_init();

	kvm_linear_init();

	ppc64_boot_msg(0x15, "Setup Done");
}
/* ToDo: do something useful if ppc_md is not yet setup. */
#define PPC64_LINUX_FUNCTION 0x0f000000
#define PPC64_IPL_MESSAGE 0xc0000000
#define PPC64_TERM_MESSAGE 0xb0000000

static void ppc64_do_msg(unsigned int src, const char *msg)
{
	if (ppc_md.progress) {
		char buf[128];

		sprintf(buf, "%08X\n", src);
		ppc_md.progress(buf, 0);
		snprintf(buf, 128, "%s", msg);
		ppc_md.progress(buf, 0);
	}
}

/* Print a boot progress message. */
void ppc64_boot_msg(unsigned int src, const char *msg)
{
	ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
	printk("[boot]%04x %s\n", src, msg);
}
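/*
 * Editorial example: ppc64_boot_msg(0x12, "Setup Arch") in setup_arch()
 * prints "[boot]0012 Setup Arch" on the console and, if the platform
 * set ppc_md.progress, also reports the code 0xCF000012
 * (PPC64_LINUX_FUNCTION | PPC64_IPL_MESSAGE | 0x12) through it.
 */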
#ifdef CONFIG_SMP

#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca[cpu].data_offset = __per_cpu_offset[cpu];
	}
}
#endif
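/*
 * Editorial sketch of how these offsets are consumed (generic percpu
 * behaviour, not specific to this file): a per-cpu variable's
 * link-time address is biased by __per_cpu_offset[cpu], roughly
 *
 *	ptr = (void *)((unsigned long)&var + __per_cpu_offset[cpu]);
 *
 * Mirroring the same offset into paca[cpu].data_offset lets low-level
 * assembly reach the per-cpu area through the PACA.
 */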
#ifdef CONFIG_PPC_INDIRECT_IO
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif /* CONFIG_PPC_INDIRECT_IO */