/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
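/*
 * endian_test overlays 'l' at the lowest address and 'b' at the highest,
 * so reading the first byte of the unsigned long yields 'l' on a
 * little-endian kernel and 'b' on a big-endian one.  ENDIANNESS is the
 * suffix appended to the utsname machine and ELF platform strings in
 * setup_processor() (e.g. "armv7" + 'l' -> "armv7l").
 */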
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
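/*
 * Read the cache type register and record in the global "cacheid" whether
 * the caches are VIVT, VIPT aliasing or VIPT non-aliasing, and whether the
 * instruction cache is ASID-tagged, so the cache maintenance code can pick
 * the right flushing strategy.
 */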
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
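/*
 * Identify the boot CPU: look it up in the linker-generated processor
 * table, install its per-CPU function tables (MULTI_* builds), print the
 * CPU banner and record the ELF hwcaps and platform strings.
 */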
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}
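/*
 * Record one bank of physical RAM in the global meminfo table.  Called
 * for each ATAG_MEM tag and for each "mem=size@start" command line
 * parameter; the bank is page-aligned and empty banks are rejected.
 */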
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
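/*
 * Pass the initial ramdisk parameters (load flag, prompt flag, start
 * block and size) from the boot tags through to the RAM disk driver's
 * module variables.
 */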
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}
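/*
 * Register the standard resources: System RAM banks plus the kernel text
 * and data regions in /proc/iomem, optional video RAM, and the legacy
 * parallel port ranges (lp0-lp2) in /proc/ioports where the machine
 * reserves them.
 */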
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_data);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */
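/*
 * Turn any ATAG_MEM tags into ATAG_NONE so they are ignored by the tag
 * parser.  Used when the machine's fixup callback has already populated
 * meminfo, to keep the tag list from overriding it.
 */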
static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}
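/*
 * setup_arch - ARM-specific boot-time setup, called from start_kernel().
 *
 * Identifies the CPU and machine, locates and parses the ATAG list and
 * kernel command line, initialises the memory layout and paging, and
 * records the machine descriptor's IRQ, timer and init_machine hooks for
 * use by later initcalls.
 */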
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	arch_nr_irqs = mdesc->nr_irqs;
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}
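/*
 * Register a CPU device for every possible CPU so it shows up under
 * /sys/devices/system/cpu; all CPUs are marked hotpluggable.
 */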
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
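/*
 * Names for the HWCAP_* bits, in bit order; printed on the "Features"
 * line of /proc/cpuinfo by c_show() below.
 */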
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
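/*
 * seq_file iterator callbacks for /proc/cpuinfo.  The whole file is a
 * single record, so c_start() returns a non-NULL token exactly once and
 * c_next() terminates the sequence.
 */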
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};