/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>

#include "compat.h"
#include "atags.h"
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
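
/*
 * Runtime endianness probe (endian_test below): reading the union's long
 * through a char yields 'l' on a little-endian CPU and 'b' on a big-endian
 * one.  The resulting character is appended to the architecture and ELF
 * platform names in setup_processor(), so the reported machine string ends
 * in 'l' or 'b' (for example something like "armv5tejl").
 */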
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};
static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};
static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
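
/*
 * The macros above follow the pre-ARMv7 CP15 cache type register layout:
 * bits [28:25] hold the cache type, bit 24 is set for separate (Harvard)
 * I/D caches, bits [23:12] describe the D-cache and bits [11:0] the
 * I-cache.  Within each geometry field, dump_cache() and c_show_cache()
 * decode the size as mult << (8 + size), the associativity as
 * (mult << assoc) >> 1 and the line length as 8 << len, where mult is 2,
 * or 3 when the M bit is set.
 */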
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
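	/*
	 * Note: the asm below briefly switches into IRQ, abort and undefined
	 * modes (with IRQs and FIQs masked) purely so that each mode's banked
	 * stack pointer can be pointed at this CPU's entry in 'stacks', then
	 * drops back to SVC mode.  Three words per mode are enough because
	 * the vector entry code only saves a few registers on the mode stack
	 * before switching to the SVC stack.
	 */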
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}
static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
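	/*
	 * For example (illustrative values only), start = 0x30000200 with
	 * size = 0x100000 ends up as a bank starting at 0x30001000 with
	 * size 0xff000.
	 */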
	size -= start & ~PAGE_MASK;

	bank = &meminfo.bank[meminfo.nr_banks++];

	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);
}
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
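/*
 * For example, "mem=64M@0x30000000" registers one 64MB bank at physical
 * address 0x30000000 (the address here is only illustrative); if the
 * "@start" part is omitted, the bank begins at PHYS_OFFSET.  Each mem=
 * option adds one bank.
 */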
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
/*
 * Initial parsing of the command line.
 */
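/*
 * At the start of each whitespace-separated word we walk the table of
 * __early_param handlers that the linker gathers between __early_begin
 * and __early_end; a matching option is handed to its handler and dropped
 * from the copy, while everything else is copied into command_line.
 */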
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this
 * tag is not parsed in any way).
 */
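/*
 * For illustration only, a typical boot loader supplied list looks like:
 *
 *	ATAG_CORE	flags, page size, root device
 *	ATAG_MEM	one tag per memory bank (size, start)
 *	ATAG_CMDLINE	kernel command line string
 *	ATAG_NONE	zero-length terminator
 */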
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
 * This holds our defaults.
 */
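/*
 * If no usable tag list is passed in, these defaults describe a single
 * MEM_SIZE bank starting at PHYS_OFFSET, which is enough to get the
 * kernel going on a board whose boot loader supplies nothing.
 */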
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	NULL
};
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};