/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cputype.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;
static struct stack stacks[NR_CPUS];
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);
static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
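/*
 * Note on how the endianness probe works: the union aliases a four-byte
 * character array with an unsigned long, so casting .l to char reads the
 * low-order byte - 'l' on a little-endian CPU, 'b' on a big-endian one.
 * That single character is appended to the utsname machine string and to
 * elf_platform in setup_processor() below (e.g. "armv7l" / "v7l").
 */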
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
static struct resource io_res[] = {
        {
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
int cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) == 0x00000003 ||
                    (mmfr0 & 0x000000f0) == 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
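/*
 * A note on the way-size check above: line_size * num_sets is the size of
 * one cache way.  If a single way is larger than PAGE_SIZE, virtual address
 * bits above the page offset take part in the cache index, so two virtual
 * mappings of the same physical page can land in different I-cache lines -
 * i.e. the VIPT I-cache can alias.
 */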
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if ((cachetype & (3 << 14)) == 1 << 14)
                                cacheid |= CACHEID_ASID_TAGGED;
                        else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                } else if (cachetype & (1 << 23)) {
                        cacheid = CACHEID_VIPT_ALIASING;
                } else {
                        cacheid = CACHEID_VIPT_NONALIASING;
                        if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
                                cacheid |= CACHEID_VIPT_I_ALIASING;
                }
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}
static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_proc_init();
}
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}
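/*
 * Explanatory note on the inline asm above: the sequence switches the CPU
 * through IRQ, ABT and UND modes in turn, pointing each mode's banked stack
 * pointer at the matching field of this CPU's struct stack, and finally
 * returns to SVC mode.  Interrupts stay masked (PSR_I_BIT | PSR_F_BIT) for
 * the whole sequence so no exception can be taken on a half-set-up stack.
 */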
static struct machine_desc * __init setup_machine(unsigned int nr)
{
        struct machine_desc *list;

        /*
         * locate machine in the list of supported machines.
         */
        list = lookup_machine_type(nr);
        if (!list) {
                printk("Machine configuration botched (nr %d), unable "
                       "to continue.\n", nr);
                while (1);
        }

        printk("Machine: %s\n", list->name);

        return list;
}
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size, start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
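/*
 * Usage sketch (illustrative): passing "mem=64M" on the kernel command line
 * registers a single 64 MiB bank starting at PHYS_OFFSET, while
 * "mem=64M@0x20000000" places it at the (hypothetical) physical address
 * 0x20000000.  Any banks discovered earlier from the boot tags are
 * discarded the first time a mem= option is seen.
 */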
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
        struct resource *res;
        int i;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for (i = 0; i < mi->nr_banks; i++) {
                if (mi->bank[i].size == 0)
                        continue;

                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = mi->bank[i].start;
                res->end   = mi->bank[i].start + mi->bank[i].size - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}
/*
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
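/*
 * Illustrative layout of a minimal tag list as described above (a sketch,
 * not taken from a real boot loader):
 *
 *   ATAG_CORE  (flags, pagesize, rootdev)   <- must come first
 *   ATAG_MEM   (size, start)                <- one per memory bank
 *   ...                                     <- further optional tags
 *   ATAG_NONE  (hdr.size == 0)              <- zero-length terminator
 *
 * This mirrors the default init_tags structure defined further down.
 */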
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};
static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
#ifdef CONFIG_BLK_DEV_RAM
static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
#endif
static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
        strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}
/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE, PHYS_OFFSET },
        { 0, ATAG_NONE }
};
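/*
 * Explanatory note: init_tags is the fallback tag list used when the boot
 * loader does not hand over a valid one.  It advertises a single default
 * memory bank of MEM_SIZE bytes starting at PHYS_OFFSET and is terminated
 * by the zero-length ATAG_NONE entry, exactly as the tag-parsing comment
 * above requires.
 */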
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (init_machine)
                init_machine();
        return 0;
}
arch_initcall(customize_machine);
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}
/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
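/*
 * Usage sketch (illustrative): booting with "crashkernel=64M@32M" asks
 * reserve_crashkernel() to set aside 64 MiB at the 32 MiB physical offset;
 * the region is published as crashk_res so a kexec-loaded dump capture
 * kernel can later run from it.
 */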
/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;

        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */
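/*
 * Explanatory note: in a kdump setup the kexec loader passes something like
 * "elfcorehdr=<address>" to the capture kernel; memparse() accepts the
 * usual K/M/G suffixed numbers, and the parsed value is the address where
 * the crashed kernel's ELF core header was stored.
 */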
static void __init squash_mem_tags(struct tag *tag)
{
        for (; tag->hdr.size; tag = tag_next(tag))
                if (tag->hdr.tag == ATAG_MEM)
                        tag->hdr.tag = ATAG_NONE;
}
void __init setup_arch(char **cmdline_p)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc;
        char *from = default_command_line;

        setup_processor();
        mdesc = setup_machine(machine_arch_type);
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->boot_params)
                tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
#endif
        if (tags->hdr.tag != ATAG_CORE)
                tags = (struct tag *)&init_tags;

        if (mdesc->fixup)
                mdesc->fixup(mdesc, tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                parse_tags(tags);
        }

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(&meminfo, mdesc);

        reserve_crashkernel();

        cpu_init();

        /*
         * Set up various architecture-specific pointers
         */
        arch_nr_irqs = mdesc->nr_irqs;
        init_arch_irq = mdesc->init_irq;
        system_timer = mdesc->timer;
        init_machine = mdesc->init_machine;

#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
}
static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif
static const char *hwcap_str[] = {
static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
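        /*
         * Note on the arithmetic above: loops_per_jiffy is calibrated per
         * jiffy, so dividing by (500000/HZ) yields the integer BogoMIPS
         * value, and dividing by (5000/HZ) modulo 100 supplies the two
         * fractional digits printed after the decimal point.
         */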
        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};