[PATCH] 64bit resource: C99 changes for struct resource declarations
[linux-2.6/suspend2-2.6.18.git] arch/arm/kernel/setup.c
blob d69412728854fb49030aa5fe1e10b4014a31dd35

/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
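
/*
 * endian_test overlays a four-character array on a long; casting the
 * long back to char yields its low-order byte, which sits at c[0] on a
 * little-endian build and at c[3] on a big-endian one.  ENDIANNESS is
 * therefore 'l' or 'b', and setup_processor() appends it to the utsname
 * machine and ELF platform strings.
 */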

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
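
/*
 * Both resource tables above use C99 designated initializers rather
 * than positional ones.  As the patch title suggests, this is
 * presumably so the 64-bit resource work can change the type (and, if
 * need be, the layout) of the start/end members without breaking these
 * declarations.
 */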

static const char *cache_types[16] = {
        "write-through",
        "write-back",
        "write-back",
        "undefined 3",
        "undefined 4",
        "undefined 5",
        "write-back",
        "write-back",
        "undefined 8",
        "undefined 9",
        "undefined 10",
        "undefined 11",
        "undefined 12",
        "undefined 13",
        "write-back",
        "undefined 15",
};

static const char *cache_clean[16] = {
        "not required",
        "read-block",
        "cp15 c7 ops",
        "undefined 3",
        "undefined 4",
        "undefined 5",
        "cp15 c7 ops",
        "cp15 c7 ops",
        "undefined 8",
        "undefined 9",
        "undefined 10",
        "undefined 11",
        "undefined 12",
        "undefined 13",
        "cp15 c7 ops",
        "undefined 15",
};

static const char *cache_lockdown[16] = {
        "not supported",
        "not supported",
        "not supported",
        "undefined 3",
        "undefined 4",
        "undefined 5",
        "format A",
        "format B",
        "undefined 8",
        "undefined 9",
        "undefined 10",
        "undefined 11",
        "undefined 12",
        "undefined 13",
        "format C",
        "undefined 15",
};

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};
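
/*
 * The macros below pick apart the value returned by
 * read_cpuid(CPUID_CACHETYPE): bits [28:25] give the cache class,
 * bit 24 the "separate I/D caches" flag, and bits [23:12]/[11:0] the
 * D-side and I-side geometry words.  Within each geometry word,
 * bits [8:6] encode the size, [5:3] the associativity, bit 2 the
 * multiplier ("M") bit and [1:0] the line length; dump_cache() and
 * c_show_cache() turn these fields into byte counts.
 */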

#define CACHE_TYPE(x)   (((x) >> 25) & 15)
#define CACHE_S(x)      ((x) & (1 << 24))
#define CACHE_DSIZE(x)  (((x) >> 12) & 4095)    /* only if S=1 */
#define CACHE_ISIZE(x)  ((x) & 4095)

#define CACHE_SIZE(y)   (((y) >> 6) & 7)
#define CACHE_ASSOC(y)  (((y) >> 3) & 7)
#define CACHE_M(y)      ((y) & (1 << 2))
#define CACHE_LINE(y)   ((y) & 3)

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
        unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

        printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
                cpu, prefix,
                mult << (8 + CACHE_SIZE(cache)),
                (mult << CACHE_ASSOC(cache)) >> 1,
                8 << CACHE_LINE(cache),
                1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
                        CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
        unsigned int info = read_cpuid(CPUID_CACHETYPE);

        if (info != processor_id) {
                printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
                       cache_types[CACHE_TYPE(info)]);
                if (CACHE_S(info)) {
                        dump_cache("I cache", cpu, CACHE_ISIZE(info));
                        dump_cache("D cache", cpu, CACHE_DSIZE(info));
                } else {
                        dump_cache("cache", cpu, CACHE_ISIZE(info));
                }
        }

        if (arch_is_coherent())
                printk("Cache coherency enabled\n");
}

int cpu_architecture(void)
{
        int cpu_arch;

        if ((processor_id & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((processor_id & 0x0008f000) == 0x00007000) {
                cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((processor_id & 0x00080000) == 0x00000000) {
                cpu_arch = (processor_id >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else {
                /* the revised CPUID */
                cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
        }

        return cpu_arch;
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(processor_id);
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", processor_id);
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
               cpu_name, processor_id, (int)processor_id & 15,
               proc_arch[cpu_architecture()]);

        sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
        elf_hwcap &= ~HWCAP_VFP;
#endif

        cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        if (system_state == SYSTEM_BOOTING)
                dump_cpu_info(cpu);

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    sp, %0, %2\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    sp, %0, %4\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    sp, %0, %6\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}

static void __init early_initrd(char **p)
{
        unsigned long start, size;

        start = memparse(*p, p);
        if (**p == ',') {
                size = memparse((*p) + 1, p);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
}
__early_param("initrd=", early_initrd);

static void __init arm_add_memory(unsigned long start, unsigned long size)
{
        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;

        meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
        meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
        meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
        meminfo.nr_banks += 1;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(*p, p);
        if (**p == '@')
                start = memparse(*p + 1, p);

        arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
        char c = ' ', *to = command_line;
        int len = 0;

        for (;;) {
                if (c == ' ') {
                        extern struct early_params __early_begin, __early_end;
                        struct early_params *p;

                        for (p = &__early_begin; p < &__early_end; p++) {
                                int len = strlen(p->arg);

                                if (memcmp(from, p->arg, len) == 0) {
                                        if (to != command_line)
                                                to -= 1;
                                        from += len;
                                        p->fn(&from);

                                        while (*from != ' ' && *from != '\0')
                                                from++;
                                        break;
                                }
                        }
                }
                c = *from++;
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *to++ = c;
        }
        *to = '\0';
        *cmdline_p = command_line;
}

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
        struct resource *res;
        int i;

        kernel_code.start = virt_to_phys(&_text);
        kernel_code.end   = virt_to_phys(&_etext - 1);
        kernel_data.start = virt_to_phys(&__data_start);
        kernel_data.end   = virt_to_phys(&_end - 1);

        for (i = 0; i < mi->nr_banks; i++) {
                unsigned long virt_start, virt_end;

                if (mi->bank[i].size == 0)
                        continue;

                virt_start = __phys_to_virt(mi->bank[i].start);
                virt_end   = virt_start + mi->bank[i].size - 1;

                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __virt_to_phys(virt_start);
                res->end   = __virt_to_phys(virt_end);
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
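
/*
 * As an illustration (not taken from this file), a minimal list built by
 * a boot loader might look like:
 *
 *      ATAG_CORE   hdr.size = 5: flags, pagesize, rootdev
 *      ATAG_MEM    hdr.size = 4: size and physical start of one bank
 *      ATAG_NONE   hdr.size = 0: terminator
 *
 * which is essentially what the init_tags default list further down
 * provides when no usable boot data is passed in.
 */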
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_WARNING
                       "Ignoring memory bank 0x%08x size %dKB\n",
                       tag->u.mem.start, tag->u.mem.size / 1024);
                return -EINVAL;
        }
        arm_add_memory(tag->u.mem.start, tag->u.mem.size);
        return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
               "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                               "Ignoring unrecognised tag 0x%08x\n",
                               t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE, PHYS_OFFSET },
        { 0, ATAG_NONE }
};
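
/*
 * The defaults above describe a single MEM_SIZE-byte bank starting at
 * PHYS_OFFSET; setup_arch() starts from this list and falls back to it
 * when the boot loader hands over nothing usable.
 */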

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (init_machine)
                init_machine();
        return 0;
}
arch_initcall(customize_machine);

void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code   = (unsigned long) &_etext;
        init_mm.end_data   = (unsigned long) &_edata;
        init_mm.brk        = (unsigned long) &_end;

        memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
        saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
        parse_cmdline(cmdline_p, from);
        paging_init(&meminfo, mdesc);
        request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
        smp_init_cpus();
#endif

        cpu_init();

        /*
         * Set up various architecture-specific pointers
         */
        init_arch_irq = mdesc->init_irq;
        system_timer = mdesc->timer;
        init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}

static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

        return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
        unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

        seq_printf(m, "%s size\t\t: %d\n"
                      "%s assoc\t\t: %d\n"
                      "%s line length\t: %d\n"
                      "%s sets\t\t: %d\n",
                type, mult << (8 + CACHE_SIZE(cache)),
                type, (mult << CACHE_ASSOC(cache)) >> 1,
                type, 8 << CACHE_LINE(cache),
                type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
                            CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((processor_id & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
        } else {
                if ((processor_id & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (processor_id >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (processor_id >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (processor_id >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

        {
                unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
                if (cache_info != processor_id) {
                        seq_printf(m, "Cache type\t: %s\n"
                                      "Cache clean\t: %s\n"
                                      "Cache lockdown\t: %s\n"
                                      "Cache format\t: %s\n",
                                   cache_types[CACHE_TYPE(cache_info)],
                                   cache_clean[CACHE_TYPE(cache_info)],
                                   cache_lockdown[CACHE_TYPE(cache_info)],
                                   CACHE_S(cache_info) ? "Harvard" : "Unified");

                        if (CACHE_S(cache_info)) {
                                c_show_cache(m, "I", CACHE_ISIZE(cache_info));
                                c_show_cache(m, "D", CACHE_DSIZE(cache_info));
                        } else {
                                c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
                        }
                }
        }

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};