/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>		/* struct cpuinfo_arm used below */
#include <asm/elf.h>		/* ELF_PLATFORM_SIZE, HWCAP_* used below */
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

struct processor processor;
struct cpu_tlb_fns cpu_tlb;
struct cpu_user_fns cpu_user;
struct cpu_cache_fns cpu_cache;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
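
/*
 * A worked example of the trick above: the initializer places 'l' in the
 * lowest-addressed byte of the word and 'b' in the highest.  Casting the
 * unsigned long to char keeps only its least significant byte, which sits
 * at the lowest address on a little-endian CPU ('l') and at the highest on
 * a big-endian one ('b').  The resulting character is appended to the
 * architecture and ELF platform names in setup_processor(), e.g. a
 * trailing 'l' on little-endian builds.
 */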

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 * (start/end values are filled in by request_standard_resources())
 */
static struct resource mem_res[] = {
	{	/* video RAM */
		.flags = IORESOURCE_MEM
	},
	{
		.name  = "Kernel text",
		.flags = IORESOURCE_MEM
	},
	{
		.name  = "Kernel data",
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* lp0, lp1 and lp2: legacy printer-port ranges (fixed port numbers omitted) */
static struct resource io_res[] = {
	{
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *cache_types[16] = {
	/* ... */
};

static const char *cache_clean[16] = {
	/* ... */
};

static const char *cache_lockdown[16] = {
	/* ... */
};

static const char *proc_arch[] = {
	/* ... */
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)
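
/*
 * For illustration (example field values only), the way these fields are
 * combined in dump_cache()/c_show_cache(): with M = 0 (so mult = 2),
 * CACHE_SIZE = 5, CACHE_ASSOC = 2 and CACHE_LINE = 2 the formulas give
 *
 *	size  = 2 << (8 + 5)         = 16384 bytes (16KB)
 *	assoc = (2 << 2) >> 1        = 4 ways
 *	line  = 8 << 2               = 32 bytes
 *	sets  = 1 << (6 + 5 - 2 - 2) = 128
 *
 * and 128 sets * 4 ways * 32 bytes = 16KB, so the decode is self-consistent.
 */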

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}

int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}
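
/*
 * Example decode (illustrative values): an ARM926EJ-S typically reports a
 * main ID of 0x41069265.  Bit 19 is clear and bits [15:12] are neither 0
 * nor 7, so the "old style" branch above applies: bits [18:16] give
 * architecture code 6, which is offset by CPU_ARCH_ARMv3 to yield
 * CPU_ARCH_ARMv5TEJ (assuming the usual CPU_ARCH_* numbering from
 * <asm/system.h>).
 */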

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

	processor = *list->proc;
	cpu_tlb = *list->tlb;
	cpu_user = *list->user;
	cpu_cache = *list->cache;

	printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()]);

	sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
	elf_hwcap &= ~HWCAP_VFP;
#endif
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
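
/*
 * Roughly speaking, the inline assembly above switches the CPU into IRQ,
 * abort and undefined mode in turn (with IRQs/FIQs masked), points each
 * mode's banked sp at the matching 3-word array in this CPU's struct
 * stack, and finally returns to SVC mode.  The exception entry code only
 * uses these tiny stacks to stash a few registers before it moves over to
 * the normal SVC-mode kernel stack.
 */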

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);

static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	meminfo.bank[meminfo.nr_banks].start = PAGE_ALIGN(start);
	meminfo.bank[meminfo.nr_banks].size  = size & PAGE_MASK;
	meminfo.bank[meminfo.nr_banks].node  = PHYS_TO_NID(start);
	meminfo.nr_banks += 1;
}
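
/*
 * Worked example of the rounding above (example values): a bootloader
 * passing start = 0x20000400 and size = 0x00100000 ends up with
 *
 *	size  -= 0x400             ->  0x000ffc00
 *	start  = PAGE_ALIGN(start) ->  0x20001000
 *	size  &= PAGE_MASK         ->  0x000ff000
 *
 * i.e. the partial pages at both ends of the region are dropped from the
 * bank rather than rounded into it (assuming 4KB pages).
 */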

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}
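
/*
 * For example (hypothetical command line): given
 * "console=ttyS0 mem=64M@0x10000000 root=/dev/nfs", the loop above matches
 * the "mem=" prefix against the __early_param table, hands the rest of the
 * token to early_mem() and then skips over it, so the command line passed
 * on to the rest of the kernel is "console=ttyS0 root=/dev/nfs".
 */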

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
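
/*
 * The net effect is a small resource tree: each non-empty memory bank shows
 * up as a "System RAM" region under iomem_resource (later visible in
 * /proc/iomem), with "Kernel text" and "Kernel data" nested inside the bank
 * that contains them, plus the optional video RAM region.  The lp0-lp2
 * ranges are I/O port resources and are claimed under ioport_resource
 * instead.
 */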

/*
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);
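
/*
 * A rough sketch of the list layout being parsed here (per <asm/setup.h>):
 * each tag starts with a struct tag_header giving the tag id and the tag's
 * total size in 32-bit words (header included), followed by the matching
 * member of the union in struct tag.  tag_next() advances by hdr.size
 * words, and a header with size 0 terminates the walk in parse_tags()
 * below.
 */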

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
			tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};
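
/*
 * In other words, the defaults above form a minimal, already-terminated
 * tag list: one ATAG_CORE (flags = 1, pagesize = PAGE_SIZE, rootdev = 0xff)
 * and one ATAG_MEM describing MEM_SIZE bytes starting at PHYS_OFFSET.  It
 * is used by setup_arch() whenever the boot loader does not hand over a
 * usable tag list of its own.
 */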

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	/* ... one name per HWCAP_* bit ... */
	NULL
};

static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

/* seq_file operations behind /proc/cpuinfo */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};