/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/debug.h>
#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86-style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/*
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow
	 */
	if (start + size - 1 == PHYS_ADDR_MAX)
		--size;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}
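
/*
 * Illustrative sketch (not part of the original file): a board's
 * plat_mem_setup() would typically register its RAM with a call such as
 * the one below; the zero base and 128MB size are made-up values.
 *
 *	add_memory_region(0, SZ_128M, BOOT_MEM_RAM);
 *
 * Overlapping or adjacent regions of the same type are merged by the
 * loop above rather than creating a new map entry.
 */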

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min,
				 phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		 ((unsigned long long) size) / SZ_1M,
		 (unsigned long long) start,
		 ((unsigned long long) sz_min) / SZ_1M,
		 ((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
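
/*
 * Illustrative sketch (not part of the original file): a platform that
 * may carry anywhere from 32MB to 256MB of RAM could probe it as below.
 * The loop above relies on partial address decoding: once the probed
 * size exceeds the fitted RAM, detect_magic reappears at the aliased
 * address and the memcmp() matches.
 *
 *	detect_memory_region(0, SZ_32M, SZ_256M);
 */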

static bool __init __maybe_unused memory_region_available(phys_addr_t start,
							  phys_addr_t size)
{
	int i;
	bool in_ram = false, free = true;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		phys_addr_t start_, end_;

		start_ = boot_mem_map.map[i].addr;
		end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			if (start >= start_ && start + size <= end_)
				in_ram = true;
			break;
		case BOOT_MEM_RESERVED:
			if ((start >= start_ && start < end_) ||
			    (start < start_ && start + size >= start_))
				free = false;
			break;
		default:
			continue;
		}
	}

	return in_ram && free;
}

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);
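
/*
 * Illustrative sketch (not part of the original file): a bootloader that
 * placed an 8MB initrd at KSEG0 address 0x82000000 would pass something
 * like the following on the command line; the addresses are made up.
 *
 *	rd_start=0x82000000 rd_size=0x800000
 *
 * rd_start seeds initrd_start and initrd_end, and rd_size then extends
 * initrd_end by the given length.
 */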

/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can now safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/* In some conditions (e.g. big endian bootloader with a little endian
   kernel), the initrd might appear byte swapped. Try to detect this and
   byte swap it if needed. */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}
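
/*
 * Illustrative arithmetic (not part of the original file): the bootmem
 * bitmap needs one bit per page, rounded up to a whole number of longs.
 * For 128MB of 4KB pages, pages = 32768, so bootmap_bytes() returns
 * DIV_ROUND_UP(32768, 8) = 4096 bytes, which is already long-aligned.
 */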

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	phys_addr_t ramstart = PHYS_ADDR_MAX;
	bool bootmap_valid = false;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available
	 * and the lowest used RAM address
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		ramstart = min(ramstart, boot_mem_map.map[i].addr);

#ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
#endif

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
				  BOOT_MEM_RESERVED);

	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;
#endif

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * check that mapstart doesn't overlap with any of
	 * memory regions that have been reserved through eg. DTB
	 */
	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);

	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
						bootmap_size);
	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
		unsigned long mapstart_addr;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RESERVED:
			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
						  boot_mem_map.map[i].size);
			if (PHYS_PFN(mapstart_addr) < mapstart)
				break;

			bootmap_valid = memory_region_available(mapstart_addr,
								bootmap_size);
			if (bootmap_valid)
				mapstart = PHYS_PFN(mapstart_addr);
			break;
		default:
			break;
		}
	}

	if (!bootmap_valid)
		panic("No memory area to place a bootmap bitmap");

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
					      min_low_pfn, max_low_pfn))
		panic("Unexpected memory size required for bootmap");

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			if (start > min_low_pfn && end < max_low_pfn)
				reserve_bootmem(boot_mem_map.map[i].addr,
						boot_mem_map.map[i].size,
						BOOTMEM_DEFAULT);
			continue;
		}

		/*
		 * We are rounding up the start address of usable memory
		 * and at the end of the usable range downwards.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel,
		 * but it is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator. To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);

	return 0;
}
early_param("mem", early_parse_mem);
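
/*
 * Illustrative sketch (not part of the original file): passing
 * "mem=64M@0x10000000" on the kernel command line discards the
 * firmware-provided map and registers a single 64MB RAM region at
 * physical 0x10000000; "mem=64M" alone implies a start address of 0.
 */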

static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p + 1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p + 1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);
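
/*
 * Illustrative sketch (not part of the original file): the memmap=
 * forms accepted above are "memmap=nn@ss" (add nn bytes of RAM at ss)
 * and "memmap=nn$ss" (reserve nn bytes at ss), e.g.
 *
 *	memmap=4M$0x1f000000
 *
 * carves a 4MB hole out of the usable map, for instance for a device
 * that decodes that physical range.
 */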

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment, that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			/* Already there */
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	if (!memory_region_available(crash_base, crash_size)) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */
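
/*
 * Illustrative sketch (not part of the original file): booting with
 * e.g. "crashkernel=16M@0x02000000" makes parse_crashkernel() return
 * that size/base pair; the region is checked against the memory map via
 * memory_region_available() and then published through crashk_res for
 * the kdump kernel.
 */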

#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM \
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
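
	/*
	 * Illustrative summary (not part of the original file): with
	 * CONFIG_MIPS_CMDLINE_DTB_EXTEND, a DTB command line of
	 * "console=ttyS0" and a bootloader (arcs_cmdline) line of "debug"
	 * combine into "console=ttyS0 debug"; CONFIG_CMDLINE_OVERRIDE
	 * instead discards both in favour of the built-in CONFIG_CMDLINE.
	 */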

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps. The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}
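
/*
 * Illustrative note (not part of the original file): after
 * resource_init(), /proc/iomem on a typical board lists each RAM region
 * as "System RAM", with "Kernel code", "Kernel data" and "Kernel bss"
 * nested under whichever region actually contains the kernel image.
 */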

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif
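
/*
 * Illustrative note (not part of the original file): on platforms where
 * DMA coherency is switchable, booting with "coherentio" selects
 * hardware-maintained coherency and "nocoherentio" falls back to
 * software cache maintenance; with neither parameter,
 * IO_COHERENCE_DEFAULT lets the platform decide based on hw_coherentio.
 */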