/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
#endif

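/*
 * Explanatory note: each of the IRQ, abort and undefined-instruction
 * exception modes gets a tiny three-word stack per CPU below.  This is
 * believed to be just enough room for the vector entry stubs (see
 * entry-armv.S) to stash r0, lr and spsr of the interrupted context
 * before switching to SVC mode.
 */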
struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

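/*
 * The ENDIANNESS trick, worked through: the union stores the byte
 * sequence 'l','?','?','b', and (char)endian_test.l takes the least
 * significant byte of the word.  On a little-endian CPU that is the
 * first byte stored ('l'), on a big-endian CPU it is the last one
 * ('b'), so the suffix appended to the utsname machine and ELF
 * platform strings in setup_processor() (e.g. "v5l" vs "v5b")
 * reflects the running byte order.
 */
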
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *cache_types[16] = {
        "write-through",
        "write-back",
        "write-back",
        "undefined 3",
        "undefined 4",
        "undefined 5",
        "write-back",
        "write-back",
        "undefined 8",
        "undefined 9",
        "undefined 10",
        "undefined 11",
        "undefined 12",
        "undefined 13",
        "write-back",
        "undefined 15",
};

static const char *cache_clean[16] = {
        "not required",
        "read-block",
        "cp15 c7 ops",
        "undefined 3",
        "undefined 4",
        "undefined 5",
        "cp15 c7 ops",
        "cp15 c7 ops",
        "undefined 8",
        "undefined 9",
        "undefined 10",
        "undefined 11",
        "undefined 12",
        "undefined 13",
        "cp15 c7 ops",
        "undefined 15",
};

static const char *cache_lockdown[16] = {
        "not supported",
        "not supported",
        "not supported",
        "undefined 3",
        "undefined 4",
        "undefined 5",
        "format A",
        "format B",
        "undefined 8",
        "undefined 9",
        "undefined 10",
        "undefined 11",
        "undefined 12",
        "undefined 13",
        "format C",
        "undefined 15",
};

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#define CACHE_TYPE(x)   (((x) >> 25) & 15)
#define CACHE_S(x)      ((x) & (1 << 24))
#define CACHE_DSIZE(x)  (((x) >> 12) & 4095)    /* only if S=1 */
#define CACHE_ISIZE(x)  ((x) & 4095)

#define CACHE_SIZE(y)   (((y) >> 6) & 7)
#define CACHE_ASSOC(y)  (((y) >> 3) & 7)
#define CACHE_M(y)      ((y) & (1 << 2))
#define CACHE_LINE(y)   ((y) & 3)

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
        unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

        printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
                cpu, prefix,
                mult << (8 + CACHE_SIZE(cache)),
                (mult << CACHE_ASSOC(cache)) >> 1,
                8 << CACHE_LINE(cache),
                1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
                                CACHE_LINE(cache)));
}

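/*
 * Worked example (field values chosen purely for illustration): a cache
 * field with SIZE=5, ASSOC=2, LINE=2 and M=0 gives mult = 2, so the
 * printk above reports 2 << 13 = 16384 bytes, (2 << 2) >> 1 = 4-way
 * associativity, 8 << 2 = 32 byte lines and 1 << (6 + 5 - 2 - 2) = 128
 * sets, which is self-consistent: 128 sets * 4 ways * 32 bytes = 16 KB.
 */
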
static void __init dump_cpu_info(int cpu)
{
        unsigned int info = read_cpuid(CPUID_CACHETYPE);

        if (info != processor_id) {
                printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
                       cache_types[CACHE_TYPE(info)]);
                if (CACHE_S(info)) {
                        dump_cache("I cache", cpu, CACHE_ISIZE(info));
                        dump_cache("D cache", cpu, CACHE_DSIZE(info));
                } else {
                        dump_cache("cache", cpu, CACHE_ISIZE(info));
                }
        }

        if (arch_is_coherent())
                printk("Cache coherency enabled\n");
}

int cpu_architecture(void)
{
        int cpu_arch;

        if ((processor_id & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((processor_id & 0x0008f000) == 0x00007000) {
                cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((processor_id & 0x00080000) == 0x00000000) {
                cpu_arch = (processor_id >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else {
                /* the revised CPUID */
                cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
        }

        return cpu_arch;
}

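/*
 * Example of the "revised CPUID" branch (part numbers quoted from
 * memory, treat as illustrative): an ARM1176 ID has 0xb in bits 15:12,
 * giving 0xb - 0xb + CPU_ARCH_ARMv6 = ARMv6, while a Cortex-A8 has 0xc
 * there and therefore decodes to ARMv7.
 */
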
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(processor_id);
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", processor_id);
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, processor_id, (int)processor_id & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        if (system_state == SYSTEM_BOOTING)
                dump_cpu_info(cpu);

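        /*
         * Note on the inline assembly below: ARM banks a separate stack
         * pointer for each of the IRQ, abort and undefined-instruction
         * modes.  Each "msr cpsr_c"/"add sp" pair briefly enters one of
         * those modes and points its banked sp at the matching slot of
         * this CPU's struct stack, and the final msr returns to SVC mode.
         */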
        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr cpsr_c, %1\n\t"
        "add sp, %0, %2\n\t"
        "msr cpsr_c, %3\n\t"
        "add sp, %0, %4\n\t"
        "msr cpsr_c, %5\n\t"
        "add sp, %0, %6\n\t"
        "msr cpsr_c, %7"
            :
            : "r" (stk),
              "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}

static void __init early_initrd(char **p)
{
        unsigned long start, size;

        start = memparse(*p, p);
        if (**p == ',') {
                size = memparse((*p) + 1, p);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
}
__early_param("initrd=", early_initrd);

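/*
 * Example boot argument handled above (address and size are only
 * illustrative): "initrd=0x00800000,8M" records the physical start and
 * length of a boot-loader-provided initrd; memparse() accepts the usual
 * size suffixes.
 */
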
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
        struct membank *bank;

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;

        bank = &meminfo.bank[meminfo.nr_banks++];

        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;
        bank->node  = PHYS_TO_NID(start);
}

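/*
 * Worked example with 4K pages (addresses are illustrative): for
 * start = 0x20001234 and size = 0x100000, the offset into the page is
 * 0x234, so size becomes 0xffdcc; the bank is then recorded as
 * start = PAGE_ALIGN(0x20001234) = 0x20002000 and
 * size = 0xffdcc & PAGE_MASK = 0xff000, i.e. the largest whole-page
 * region inside the requested range.
 */
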
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(*p, p);
        if (**p == '@')
                start = memparse(*p + 1, p);

        arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

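/*
 * For instance, "mem=64M@0x10000000" (values illustrative) registers a
 * 64MB bank starting at physical 0x10000000; a plain "mem=64M" falls
 * back to PHYS_OFFSET for the start address.
 */
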
/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
        char c = ' ', *to = command_line;
        int len = 0;

        for (;;) {
                if (c == ' ') {
                        extern struct early_params __early_begin, __early_end;
                        struct early_params *p;

                        for (p = &__early_begin; p < &__early_end; p++) {
                                int len = strlen(p->arg);

                                if (memcmp(from, p->arg, len) == 0) {
                                        if (to != command_line)
                                                to -= 1;
                                        from += len;
                                        p->fn(&from);

                                        while (*from != ' ' && *from != '\0')
                                                from++;
                                        break;
                                }
                        }
                }
                c = *from++;
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *to++ = c;
        }
        *to = '\0';
        *cmdline_p = command_line;
}

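/*
 * Sketch of the behaviour above (command line chosen for illustration):
 * given "console=ttySA0 mem=32M", the "mem=" early handler consumes
 * "32M", the whole token (and the space before it) is dropped rather
 * than copied, and *cmdline_p ends up pointing at just "console=ttySA0".
 */
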
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
        struct resource *res;
        int i;

        kernel_code.start   = virt_to_phys(&_text);
        kernel_code.end     = virt_to_phys(&_etext - 1);
        kernel_data.start   = virt_to_phys(&__data_start);
        kernel_data.end     = virt_to_phys(&_end - 1);

        for (i = 0; i < mi->nr_banks; i++) {
                unsigned long virt_start, virt_end;

                if (mi->bank[i].size == 0)
                        continue;

                virt_start = __phys_to_virt(mi->bank[i].start);
                virt_end   = virt_start + mi->bank[i].size - 1;

                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __virt_to_phys(virt_start);
                res->end   = __virt_to_phys(virt_end);
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */

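/*
 * Layout sketch (field names as in <asm/setup.h>): every tag starts
 * with a struct tag_header { u32 size; u32 tag; }, where size counts
 * 32-bit words including the header, so a list looks roughly like
 *
 *   ATAG_CORE hdr | core data | ATAG_MEM hdr | mem data | ... | ATAG_NONE
 *
 * and tag_next() advances by hdr.size words to reach the next entry.
 */
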
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_WARNING
                       "Ignoring memory bank 0x%08x size %dKB\n",
                       tag->u.mem.start, tag->u.mem.size / 1024);
                return -EINVAL;
        }
        arm_add_memory(tag->u.mem.start, tag->u.mem.size);
        return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines   = 30,
        .orig_video_cols    = 80,
        .orig_video_mode    = 0,
        .orig_video_ega_bx  = 0,
        .orig_video_isVGA   = 1,
        .orig_video_points  = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
               "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                               "Ignoring unrecognised tag 0x%08x\n",
                               t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE, PHYS_OFFSET },
        { 0, ATAG_NONE }
};

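/*
 * Reading the defaults above: the built-in list is an ATAG_CORE whose
 * flags word is 1 (so the root stays read-only), page size PAGE_SIZE
 * and root device 0xff, followed by an ATAG_MEM describing MEM_SIZE
 * bytes at PHYS_OFFSET, then the ATAG_NONE terminator.  setup_arch()
 * falls back to this list when the boot loader supplies nothing usable.
 */
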
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (init_machine)
                init_machine();
        return 0;
}
arch_initcall(customize_machine);

void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code   = (unsigned long) &_etext;
        init_mm.end_data   = (unsigned long) &_edata;
        init_mm.brk        = (unsigned long) &_end;

        memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
        boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
        parse_cmdline(cmdline_p, from);
        paging_init(&meminfo, mdesc);
        request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
        smp_init_cpus();
#endif

        cpu_init();

        /*
         * Set up various architecture-specific pointers
         */
        init_arch_irq = mdesc->init_irq;
        system_timer = mdesc->timer;
        init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}

static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

        return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
        unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

        seq_printf(m, "%s size\t\t: %d\n"
                      "%s assoc\t\t: %d\n"
                      "%s line length\t: %d\n"
                      "%s sets\t\t: %d\n",
                type, mult << (8 + CACHE_SIZE(cache)),
                type, (mult << CACHE_ASSOC(cache)) >> 1,
                type, 8 << CACHE_LINE(cache),
                type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
                            CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

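        /*
         * BogoMIPS arithmetic, illustrated (numbers are only an example):
         * with HZ = 100 and loops_per_jiffy = 745472, the integer part is
         * 745472 / 5000 = 149 and the fractional part is
         * (745472 / 50) % 100 = 9, so the line reads "BogoMIPS : 149.09".
         */
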
        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((processor_id & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
        } else {
                if ((processor_id & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (processor_id >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (processor_id >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (processor_id >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

        {
                unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
                if (cache_info != processor_id) {
                        seq_printf(m, "Cache type\t: %s\n"
                                      "Cache clean\t: %s\n"
                                      "Cache lockdown\t: %s\n"
                                      "Cache format\t: %s\n",
                                   cache_types[CACHE_TYPE(cache_info)],
                                   cache_clean[CACHE_TYPE(cache_info)],
                                   cache_lockdown[CACHE_TYPE(cache_info)],
                                   CACHE_S(cache_info) ? "Harvard" : "Unified");

                        if (CACHE_S(cache_info)) {
                                c_show_cache(m, "I", CACHE_ISIZE(cache_info));
                                c_show_cache(m, "D", CACHE_DSIZE(cache_info));
                        } else {
                                c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
                        }
                }
        }

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};