MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
[linux-2.6.19-moxart.git] arch/arm/kernel/setup.c
blob 05e4a490fbeea430e98999af4254c56d5820f9d5
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
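/*
 * Casting the overlaid word to char picks up its least significant byte:
 * c[0] ('l') on a little-endian CPU, c[3] ('b') on a big-endian one.  The
 * character is appended to the architecture name below, giving utsname
 * machine strings such as "armv4l" or "armv4b".
 */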
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}
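/*
 * Worked example of the decode above (hypothetical field values): with
 * CACHE_SIZE() = 5, CACHE_ASSOC() = 2, CACHE_LINE() = 2 and CACHE_M() = 0,
 * mult is 2, so the size is 2 << 13 = 16384 bytes, the associativity is
 * (2 << 2) >> 1 = 4 ways, the line length is 8 << 2 = 32 bytes and there
 * are 1 << (6 + 5 - 2 - 2) = 128 sets; 128 sets * 4 ways * 32 bytes does
 * indeed give the 16KB total.
 */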
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}
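/*
 * Example (illustrative ID, not tied to a particular part): a main ID
 * register with bit [19] clear, bits [15:12] neither 0 nor 7, and an
 * architecture field of 6 in bits [18:16] takes the third branch above,
 * so cpu_arch = 6 + CPU_ARCH_ARMv3, which lands on CPU_ARCH_ARMv5TEJ
 * given the consecutive CPU_ARCH_* numbering and is reported as "5TEJ"
 * via proc_arch[] in /proc/cpuinfo.
 */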
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
	elf_hwcap &= ~HWCAP_VFP;
#endif
#ifndef CONFIG_IWMMXT
	elf_hwcap &= ~HWCAP_IWMMXT;
#endif

	cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}
static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
	meminfo.nr_banks += 1;
}
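/*
 * For example (hypothetical values, 4K pages): start = 0x60000400 and
 * size = 0x02000000 become size = 0x01fffc00 after the adjustment, so the
 * bank is registered as start = PAGE_ALIGN(0x60000400) = 0x60001000 with
 * size 0x01fff000 -- always contained within the region that was passed in.
 */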
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);
/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
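/*
 * The net effect shows up in /proc/iomem; with a single RAM bank the tree
 * looks roughly like this (addresses illustrative only):
 *
 *	00000000-00ffffff : System RAM
 *	  00008000-001fffff : Kernel text
 *	  00200000-0024ffff : Kernel data
 */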
/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
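/*
 * A minimal list as laid out by a boot loader might look like this (each
 * header is a word count followed by the tag value, then the payload):
 *
 *	{ 5, ATAG_CORE }	{ flags, pagesize, rootdev }
 *	{ 4, ATAG_MEM }		{ size, start }
 *	{ 0, ATAG_NONE }	terminator, never parsed
 *
 * which is exactly the shape of the init_tags defaults further down.
 */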
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
#ifndef CONFIG_CMDLINE_FORCE
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
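/*
 * These defaults give setup_arch() something sane to parse when the boot
 * loader does not hand over a usable tag list: an ATAG_CORE followed by a
 * single MEM_SIZE (16MB unless overridden above) bank at PHYS_OFFSET and
 * the ATAG_NONE terminator.
 */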
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	NULL
};
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
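/*
 * Put together, this produces the familiar /proc/cpuinfo layout, roughly
 * (values illustrative only):
 *
 *	Processor	: <cpu_name> rev 5 (v5l)
 *	BogoMIPS	: 99.73
 *	Features	: swp half thumb fastmult edsp java
 *	CPU implementer	: 0x41
 *	CPU architecture: 5TEJ
 *	...
 *	Hardware	: <machine_name>
 *	Revision	: 0000
 *	Serial		: 0000000000000000
 */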
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};