/*
 * Common prep/pmac/chrp boot and setup code.
 */
5 #include <linux/module.h>
6 #include <linux/string.h>
7 #include <linux/sched.h>
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/reboot.h>
11 #include <linux/delay.h>
12 #include <linux/initrd.h>
13 #include <linux/tty.h>
14 #include <linux/bootmem.h>
15 #include <linux/seq_file.h>
16 #include <linux/root_dev.h>
17 #include <linux/cpu.h>
18 #include <linux/console.h>
19 #include <linux/lmb.h>
23 #include <asm/processor.h>
24 #include <asm/pgtable.h>
25 #include <asm/setup.h>
28 #include <asm/cputable.h>
29 #include <asm/bootx.h>
30 #include <asm/btext.h>
31 #include <asm/machdep.h>
32 #include <asm/uaccess.h>
33 #include <asm/system.h>
34 #include <asm/pmac_feature.h>
35 #include <asm/sections.h>
36 #include <asm/nvram.h>
39 #include <asm/serial.h>
#if defined CONFIG_KGDB
#include <asm/kgdb.h>
#endif

/* Early-boot entry for BootX-loaded kernels (defined in platforms/powermac). */
extern void bootx_init(unsigned long r4, unsigned long phys);
/*
 * CPU the kernel booted on.  NOTE(review): the definition was missing from
 * the recovered source; the EXPORT below requires it -- confirm initializer
 * against upstream (setup_32.c initializes it to -1).
 */
int boot_cpuid = -1;
EXPORT_SYMBOL_GPL(boot_cpuid);

/* ISA DMA limits/modes consumed by legacy drivers; set up per-platform. */
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
#ifdef CONFIG_VGA_CONSOLE
/* Virtual base the VGA console aperture is remapped to on ppc32. */
unsigned long vgacon_remap_base;
EXPORT_SYMBOL(vgacon_remap_base);
#endif
/*
 * These are used in binfmt_elf.c to put aux entries on the stack
 * for each elf executable being started.
 */
/*
 * We're called here very early in the boot.  We determine the machine
 * type and call the appropriate low-level setup functions.
 *  -- Cort <cort@fsmlabs.com>
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings).  -- paulus
 */
84 notrace
unsigned long __init
early_init(unsigned long dt_ptr
)
86 unsigned long offset
= reloc_offset();
87 struct cpu_spec
*spec
;
89 /* First zero the BSS -- use memset_io, some platforms don't have
91 memset_io((void __iomem
*)PTRRELOC(&__bss_start
), 0,
92 __bss_stop
- __bss_start
);
95 * Identify the CPU type and fix up code sections
96 * that depend on which cpu we have.
98 spec
= identify_cpu(offset
, mfspr(SPRN_PVR
));
100 do_feature_fixups(spec
->cpu_features
,
101 PTRRELOC(&__start___ftr_fixup
),
102 PTRRELOC(&__stop___ftr_fixup
));
104 do_lwsync_fixups(spec
->cpu_features
,
105 PTRRELOC(&__start___lwsync_fixup
),
106 PTRRELOC(&__stop___lwsync_fixup
));
108 return KERNELBASE
+ offset
;
/*
 * Find out what kind of machine we're on and save any data we need
 * from the early boot process (devtree is copied on pmac by prom_init()).
 * This is called very early on the boot process, after a minimal
 * MMU environment has been set up but before MMU_init is called.
 */
118 notrace
void __init
machine_init(unsigned long dt_ptr
, unsigned long phys
)
120 /* Enable early debugging if any specified (see udbg.h) */
123 /* Do some early initialization based on the flat device tree */
124 early_init_devtree(__va(dt_ptr
));
129 if (cpu_has_feature(CPU_FTR_CAN_DOZE
) ||
130 cpu_has_feature(CPU_FTR_CAN_NAP
))
131 ppc_md
.power_save
= ppc6xx_idle
;
135 if (cpu_has_feature(CPU_FTR_CAN_DOZE
) ||
136 cpu_has_feature(CPU_FTR_CAN_NAP
))
137 ppc_md
.power_save
= e500_idle
;
140 ppc_md
.progress("id mach(): done", 0x200);
143 #ifdef CONFIG_BOOKE_WDT
144 /* Checks wdt=x and wdt_period=xx command-line option */
145 notrace
int __init
early_parse_wdt(char *p
)
147 if (p
&& strncmp(p
, "0", 1) != 0)
148 booke_wdt_enabled
= 1;
152 early_param("wdt", early_parse_wdt
);
154 int __init
early_parse_wdt_period (char *p
)
157 booke_wdt_period
= simple_strtoul(p
, NULL
, 0);
161 early_param("wdt_period", early_parse_wdt_period
);
162 #endif /* CONFIG_BOOKE_WDT */
164 /* Checks "l2cr=xxxx" command-line option */
165 int __init
ppc_setup_l2cr(char *str
)
167 if (cpu_has_feature(CPU_FTR_L2CR
)) {
168 unsigned long val
= simple_strtoul(str
, NULL
, 0);
169 printk(KERN_INFO
"l2cr set to %lx\n", val
);
170 _set_L2CR(0); /* force invalidate by disable cache */
171 _set_L2CR(val
); /* and enable it */
175 __setup("l2cr=", ppc_setup_l2cr
);
177 /* Checks "l3cr=xxxx" command-line option */
178 int __init
ppc_setup_l3cr(char *str
)
180 if (cpu_has_feature(CPU_FTR_L3CR
)) {
181 unsigned long val
= simple_strtoul(str
, NULL
, 0);
182 printk(KERN_INFO
"l3cr set to %lx\n", val
);
183 _set_L3CR(val
); /* and enable it */
187 __setup("l3cr=", ppc_setup_l3cr
);
189 #ifdef CONFIG_GENERIC_NVRAM
191 /* Generic nvram hooks used by drivers/char/gen_nvram.c */
192 unsigned char nvram_read_byte(int addr
)
194 if (ppc_md
.nvram_read_val
)
195 return ppc_md
.nvram_read_val(addr
);
198 EXPORT_SYMBOL(nvram_read_byte
);
200 void nvram_write_byte(unsigned char val
, int addr
)
202 if (ppc_md
.nvram_write_val
)
203 ppc_md
.nvram_write_val(addr
, val
);
205 EXPORT_SYMBOL(nvram_write_byte
);
207 void nvram_sync(void)
209 if (ppc_md
.nvram_sync
)
212 EXPORT_SYMBOL(nvram_sync
);
#endif /* CONFIG_GENERIC_NVRAM */
216 static DEFINE_PER_CPU(struct cpu
, cpu_devices
);
218 int __init
ppc_init(void)
222 /* clear the progress line */
224 ppc_md
.progress(" ", 0xffff);
226 /* register CPU devices */
227 for_each_possible_cpu(cpu
) {
228 struct cpu
*c
= &per_cpu(cpu_devices
, cpu
);
230 register_cpu(c
, cpu
);
233 /* call platform init */
234 if (ppc_md
.init
!= NULL
) {
240 arch_initcall(ppc_init
);
#ifdef CONFIG_IRQSTACKS
/*
 * Allocate per-CPU soft/hard IRQ stacks from the LMB allocator.  The
 * recovered source lacked the loop variable, braces and the #else/#endif
 * pairing with the stub macro below; restored here.
 */
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}
}
#else
#define irqstack_early_init()
#endif
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/*
 * Allocate per-CPU exception-level stacks (critical, plus debug and
 * machine-check on BookE).  dbgirq/mcheckirq contexts only exist on
 * BookE, hence the inner #ifdef -- NOTE(review): guard reconstructed
 * from the dropped original line numbering; confirm against upstream.
 */
static void __init exc_lvl_early_init(void)
{
	unsigned int i;

	/* interrupt stacks must be in lowmem, we get that for free on ppc32
	 * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
	for_each_possible_cpu(i) {
		critirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
		dbgirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		mcheckirq_ctx[i] = (struct thread_info *)
			__va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
#endif
	}
}
#else
#define exc_lvl_early_init()
#endif
282 /* Warning, IO base is not yet inited */
283 void __init
setup_arch(char **cmdline_p
)
285 *cmdline_p
= cmd_line
;
287 /* so udelay does something sensible, assume <= 1000 bogomips */
288 loops_per_jiffy
= 500000000 / HZ
;
290 unflatten_device_tree();
293 if (ppc_md
.init_early
)
296 find_legacy_serial_ports();
298 smp_setup_cpu_maps();
300 /* Register early console */
301 register_early_udbg_console();
305 #if defined(CONFIG_KGDB)
306 if (ppc_md
.kgdb_map_scc
)
307 ppc_md
.kgdb_map_scc();
309 if (strstr(cmd_line
, "gdb")) {
311 ppc_md
.progress("setup_arch: kgdb breakpoint", 0x4000);
312 printk("kgdb breakpoint activated\n");
318 * Set cache line size based on type of cpu as a default.
319 * Systems with OF can look in the properties on the cpu node(s)
320 * for a possibly more accurate value.
322 dcache_bsize
= cur_cpu_spec
->dcache_bsize
;
323 icache_bsize
= cur_cpu_spec
->icache_bsize
;
325 if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE
))
326 ucache_bsize
= icache_bsize
= dcache_bsize
;
328 /* reboot on panic */
334 init_mm
.start_code
= (unsigned long)_stext
;
335 init_mm
.end_code
= (unsigned long) _etext
;
336 init_mm
.end_data
= (unsigned long) _edata
;
337 init_mm
.brk
= klimit
;
339 exc_lvl_early_init();
341 irqstack_early_init();
343 /* set up the bootmem stuff with available memory */
345 if ( ppc_md
.progress
) ppc_md
.progress("setup_arch: bootmem", 0x3eab);
347 #ifdef CONFIG_DUMMY_CONSOLE
348 conswitchp
= &dummy_con
;
351 if (ppc_md
.setup_arch
)
353 if ( ppc_md
.progress
) ppc_md
.progress("arch: exit", 0x3eab);