/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>
#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif
#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);
unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);
#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif
char __initdata command_line[COMMAND_LINE_SIZE];
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
	*init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;
/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
	int nr_map;
	struct bfin_memmap_entry {
		unsigned long long addr; /* start of memory segment */
		unsigned long long size;
		unsigned long type;
	} map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;
/* for memmap sanitization */
struct change_member {
	struct bfin_memmap_entry *pentry; /* pointer to original entry */
	unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
	unsigned int cpu;

	generate_cplb_tables_all();
	/* Generate per-CPU I&D CPLB tables */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		generate_cplb_tables_cpu(cpu);
}
#endif
void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
	bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
	bfin_dcache_init(dcplb_tbl[cpu]);
#endif

	/*
	 * In cache coherence emulation mode, we need to have the
	 * D-cache enabled before running any atomic operation which
	 * might involve cache invalidation (i.e. spinlock, rwlock).
	 * So printk's are deferred until then.
	 */
#ifdef CONFIG_BFIN_ICACHE
	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	       " cacheable"
# else
	       " uncacheable"
# endif
	       " in instruction cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
		       " cacheable"
# else
		       " uncacheable"
# endif
		       " in instruction cache\n");
#else
	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif
#ifdef CONFIG_BFIN_DCACHE
	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
	printk(KERN_INFO "  External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
	       " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
	       " cacheable (write-through)"
# else
	       " uncacheable"
# endif
	       " in data cache\n");
	if (L2_LENGTH)
		printk(KERN_INFO "  L2 SRAM        :"
# if defined CONFIG_BFIN_L2_WRITEBACK
		       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
		       " cacheable (write-through)"
# else
		       " uncacheable"
# endif
		       " in data cache\n");
#else
	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

	cpudata->idle = current;
	cpudata->imemctl = bfin_read_IMEM_CONTROL();
	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}
void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
	generate_cplb_tables();
#endif
	bfin_setup_caches(0);
}
void __init bfin_relocate_l1_mem(void)
{
	unsigned long text_l1_len = (unsigned long)_text_l1_len;
	unsigned long data_l1_len = (unsigned long)_data_l1_len;
	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
	unsigned long l2_len = (unsigned long)_l2_len;

	early_shadow_stamp();

	/*
	 * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
	 * we know that everything about l1 text/data is nice and aligned,
	 * so copy by 4 byte chunks, and don't worry about overlapping
	 * src/dest.
	 *
	 * We can't use the dma_memcpy functions, since they can call
	 * scheduler functions which might be in L1 :( and core writes
	 * into L1 instruction cause bad access errors, so we are stuck,
	 * we are required to use DMA, but can't use the common dma
	 * functions.  We can't use memcpy either - since that might be
	 * going to be in the relocated L1
	 */

	blackfin_dma_early_init();

	/* if necessary, copy L1 text to L1 instruction SRAM */
	if (L1_CODE_LENGTH && text_l1_len)
		early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

	/* if necessary, copy L1 data to L1 data bank A SRAM */
	if (L1_DATA_A_LENGTH && data_l1_len)
		early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

	/* if necessary, copy L1 data B to L1 data bank B SRAM */
	if (L1_DATA_B_LENGTH && data_b_l1_len)
		early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

	early_dma_memcpy_done();

	/* if necessary, copy L2 text/data to L2 SRAM */
	if (L2_LENGTH && l2_len)
		memcpy(_stext_l2, _l2_lma, l2_len);
}
/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
				     unsigned long long size, int type)
{
	int i;

	i = bfin_memmap.nr_map;

	if (i == BFIN_MEMMAP_MAX) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	bfin_memmap.map[i].addr = start;
	bfin_memmap.map[i].size = size;
	bfin_memmap.map[i].type = type;
	bfin_memmap.nr_map++;
}
/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_entry;
	int old_nr, new_nr, chg_nr;
	int i;
	/*
		Visually we're performing the following (1,2,3,4 = memory types)

		Sample memory map (w/overlaps):
		   ____22__________________
		   ______________________4_
		   ____1111________________
		   _44_____________________
		   11111111________________
		   ____________________33__
		   ___________44___________
		   __________33333_________
		   ______________22________
		   ___________________2222_
		   _________111111111______
		   _____________________11_
		   _________________4______

		Sanitized equivalent (no overlap):
		   1_______________________
		   _44_____________________
		   ___1____________________
		   ____22__________________
		   ______11________________
		   _________1______________
		   __________3_____________
		   ___________44___________
		   _____________33_________
		   _______________2________
		   ________________1_______
		   _________________4______
		   ___________________2____
		   ____________________33__
		   ______________________4_
	*/
	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;

	/* bail out if we find any unreasonable addresses in memmap */
	for (i = 0; i < old_nr; i++)
		if (map[i].addr + map[i].size < map[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2*old_nr; i++)
		change_point[i] = &change_point_list[i];
	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (map[i].size != 0) {
			change_point[chgidx]->addr = map[i].addr;
			change_point[chgidx++]->pentry = &map[i];
			change_point[chgidx]->addr = map[i].addr + map[i].size;
			change_point[chgidx++]->pentry = &map[i];
		}
	}
	chg_nr = chgidx;	/* true number of change-points */
	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			/* if <current_addr> > <last_addr>, swap */
			/* or, if current=<start_addr> & last=<end_addr>, swap */
			if ((change_point[i]->addr < change_point[i-1]->addr) ||
				((change_point[i]->addr == change_point[i-1]->addr) &&
				 (change_point[i]->addr == change_point[i]->pentry->addr) &&
				 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
			   ) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}
	/* create a new memmap, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_entry = 0;		/* index for creating new memmap entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */
	/* loop through change-points, determining effect on the new memmap */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping memmap entries */
		if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
			/* add map entry to overlap list (> 1 entry implies an overlap) */
			overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
		} else {
			/* remove entry from list (order independent, so swap with last) */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] == change_point[chgidx]->pentry)
					overlap_list[i] = overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/* if there are overlapping entries, decide which "type" to use */
		/* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/* continue building up new memmap based on this information */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_map[new_entry].size =
					change_point[chgidx]->addr - last_addr;
				/* move forward only if the new size was non-zero */
				if (new_map[new_entry].size != 0)
					if (++new_entry >= BFIN_MEMMAP_MAX)
						break;	/* no more space left for new entries */
			}
			if (current_type != 0) {
				new_map[new_entry].addr = change_point[chgidx]->addr;
				new_map[new_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	new_nr = new_entry;	/* retain count for new entries */

	/* copy new mapping into original location */
	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
	*pnr_map = new_nr;

	return 0;
}
static void __init print_memory_map(char *who)
{
	int i;

	for (i = 0; i < bfin_memmap.nr_map; i++) {
		printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
			bfin_memmap.map[i].addr,
			bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
		switch (bfin_memmap.map[i].type) {
		case BFIN_MEMMAP_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BFIN_MEMMAP_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
			break;
		}
	}
}
static __init int parse_memmap(char *arg)
{
	unsigned long long start_at, mem_size;

	if (!arg)
		return -EINVAL;

	mem_size = memparse(arg, &arg);
	if (*arg == '@') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
	} else if (*arg == '$') {
		start_at = memparse(arg+1, &arg);
		add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
	}

	return 0;
}
/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
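/*
 * For example, a boot command line such as (illustrative values only)
 *
 *	mem=32M max_mem=64M$ memmap=1M$31M
 *
 * gives the kernel 32MB to manage, declares 64MB of physical memory with
 * the reserved portion dcacheable, and marks the 1MB at offset 31MB as a
 * RESERVED region via parse_memmap().
 */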
static __init void parse_cmdline_early(char *cmdline_p)
{
	char c = ' ', *to = cmdline_p;
	unsigned int memsize;
	for (;;) {
		if (c == ' ') {
			if (!memcmp(to, "mem=", 4)) {
				to += 4;
				memsize = memparse(to, &to);
				if (memsize)
					_ramend = memsize;

			} else if (!memcmp(to, "max_mem=", 8)) {
				to += 8;
				memsize = memparse(to, &to);
				if (memsize) {
					physical_mem_end = memsize;
					if (*to != ' ') {
						if (*to == '$'
						    || *(to + 1) == '$')
							reserved_mem_dcache_on = 1;
						if (*to == '#'
						    || *(to + 1) == '#')
							reserved_mem_icache_on = 1;
					}
				}
			} else if (!memcmp(to, "clkin_hz=", 9)) {
				to += 9;
				early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
			} else if (!memcmp(to, "earlyprintk=", 12)) {
				to += 12;
				setup_early_printk(to);
#endif
			} else if (!memcmp(to, "memmap=", 7)) {
				to += 7;
				parse_memmap(to);
			}
		}
		c = *(to++);
		if (!c)
			break;
	}
}
/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:		kernel image
 *  [memory_start, memory_end]:		dynamic memory managed by kernel
 *  [memory_end, _ramend]:		reserved memory
 *	[memory_mtd_start(memory_end),
 *		memory_mtd_start + mtd_size]:	rootfs (if any)
 *	[_ramend - DMA_UNCACHED_REGION,
 *		_ramend]:			uncached DMA region
 *  [_ramend, physical_mem_end]:	memory not managed by kernel
 */
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;
#endif
	unsigned long max_mem;

	_rambase = (unsigned long)_stext;
	_ramstart = (unsigned long)_end;

	if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
		console_init();
		panic("DMA region exceeds memory limit: %lu.",
			_ramend - _ramstart);
	}
	max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;
#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
	/* Due to a Hardware Anomaly we need to limit the size of usable
	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
	 */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
	if (max_mem >= 56 * 1024 * 1024)
		max_mem = 56 * 1024 * 1024;
# else
	if (max_mem >= 60 * 1024 * 1024)
		max_mem = 60 * 1024 * 1024;
# endif				/* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif				/* ANOMALY_05000263 */
#ifdef CONFIG_MPU
	/* Round up to multiple of 4MB */
	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
	memory_start = PAGE_ALIGN(_ramstart);
#endif
#if defined(CONFIG_MTD_UCLINUX)
	/* generic memory mapped MTD driver */
	memory_mtd_end = memory_end;

	mtd_phys = _ramstart;
	mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
	if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
		mtd_size =
		    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
	if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
		mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
	if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
	    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
		mtd_size =
		    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

		/* ROM_FS is XIP, so if we found it, we need to limit memory */
		if (memory_end > max_mem) {
			pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
				max_mem >> 20);
			memory_end = max_mem;
		}
	}
# endif				/* CONFIG_ROMFS_FS */
	/* Since the default MTD_UCLINUX has no magic number, we just blindly
	 * read 8 bytes past the end of the kernel's image, and look at it.
	 * When no image is attached, mtd_size is set to a random number,
	 * so do some basic sanity checks before operating on things.
	 */
	if (mtd_size == 0 || memory_end <= mtd_size) {
		pr_emerg("Could not find valid ram mtd attached.\n");
	} else {
		memory_end -= mtd_size;

		/* Relocate MTD image to the top of memory after the uncached memory area */
		uclinux_ram_map.phys = memory_mtd_start = memory_end;
		uclinux_ram_map.size = mtd_size;
		pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
			_end, mtd_size, (void *)memory_mtd_start);
		dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
	}
#endif				/* CONFIG_MTD_UCLINUX */
	/* We need to limit memory, since everything could have a text section
	 * of userspace in it, and expose anomaly 05000263.  If the anomaly
	 * doesn't exist, or we don't need to - then don't.
	 */
	if (memory_end > max_mem) {
		pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n",
			max_mem >> 20);
		memory_end = max_mem;
	}
#ifdef CONFIG_MPU
	page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
	page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif
	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)0;
	printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
	printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

	printk(KERN_INFO "Memory map:\n"
	       "  fixedcode = 0x%p-0x%p\n"
	       "  text      = 0x%p-0x%p\n"
	       "  rodata    = 0x%p-0x%p\n"
	       "  bss       = 0x%p-0x%p\n"
	       "  data      = 0x%p-0x%p\n"
	       "    stack   = 0x%p-0x%p\n"
	       "  init      = 0x%p-0x%p\n"
	       "  available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
	       "  rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
	       "  DMA Zone  = 0x%p-0x%p\n"
#endif
		, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
		_stext, _etext,
		__start_rodata, __end_rodata,
		__bss_start, __bss_stop,
		_sdata, _edata,
		(void *)&init_thread_union,
		(void *)((int)(&init_thread_union) + 0x2000),
		__init_begin, __init_end,
		(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
		, (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
		, (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
		);
}
/*
 * Find the lowest, highest page frame number we have available
 */
void __init find_min_max_pfn(void)
{
	int i;

	max_pfn = 0;
	min_low_pfn = memory_end;
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		unsigned long start, end;
		/* RAM? */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		start = PFN_UP(bfin_memmap.map[i].addr);
		end = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);
		if (start >= end)
			continue;
		if (end > max_pfn)
			max_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
	}
}
static __init void setup_bootmem_allocator(void)
{
	int bootmap_size;
	int i;
	unsigned long start_pfn, end_pfn;
	unsigned long curr_pfn, last_pfn, size;

	/* mark memory between memory_start and memory_end usable */
	add_memory_region(memory_start,
		memory_end - memory_start, BFIN_MEMMAP_RAM);
	/* sanity check for overlap */
	sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
	print_memory_map("boot memmap");
	/* initialize globals in linux/bootmem.h */
	find_min_max_pfn();
	/* pfn of the last usable page frame */
	if (max_pfn > memory_end >> PAGE_SHIFT)
		max_pfn = memory_end >> PAGE_SHIFT;
	/* pfn of last page frame directly mapped by kernel */
	max_low_pfn = max_pfn;
	/* pfn of the first usable page frame after kernel image */
	if (min_low_pfn < memory_start >> PAGE_SHIFT)
		min_low_pfn = memory_start >> PAGE_SHIFT;
	start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
	end_pfn = memory_end >> PAGE_SHIFT;
	/*
	 * give all the memory to the bootmap allocator, tell it to put the
	 * boot mem_map at the start of memory.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0),
			memory_start >> PAGE_SHIFT,	/* map goes here */
			start_pfn, end_pfn);
	/* register the memmap regions with the bootmem allocator */
	for (i = 0; i < bfin_memmap.nr_map; i++) {
		/*
		 * Reserve usable memory
		 */
		if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
		if (curr_pfn >= end_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
				bfin_memmap.map[i].size);

		if (last_pfn > end_pfn)
			last_pfn = end_pfn;

		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;

		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}
	/* reserve memory before memory_start, including bootmap */
	reserve_bootmem(PAGE_OFFSET,
		memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
		BOOTMEM_DEFAULT);
}
#define EBSZ_TO_MEG(ebsz) \
({ \
	int meg = 0; \
	switch (ebsz & 0xf) { \
	case 0x1: meg =  16; break; \
	case 0x3: meg =  32; break; \
	case 0x5: meg =  64; break; \
	case 0x7: meg = 128; break; \
	case 0x9: meg = 256; break; \
	case 0xb: meg = 512; break; \
	} \
	meg; \
})
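/*
 * Worked example (hypothetical register value): if EBIU_SDBCTL reads
 * 0x0013, the low nibble is 0x3, so EBSZ_TO_MEG(0x0013) evaluates to 32,
 * i.e. a 32MB SDRAM bank.  get_mem_size() below applies this per byte on
 * BF561 parts, which have four external banks.
 */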
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
	int ret = 0;
	u32 sdbctl = bfin_read_EBIU_SDBCTL();
	ret += EBSZ_TO_MEG(sdbctl >>  0);
	ret += EBSZ_TO_MEG(sdbctl >>  8);
	ret += EBSZ_TO_MEG(sdbctl >> 16);
	ret += EBSZ_TO_MEG(sdbctl >> 24);
	return ret;
# else
	return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
	u32 ddrctl = bfin_read_EBIU_DDRCTL1();
	int ret = 0;
	switch (ddrctl & 0xc0000) {
	case DEVSZ_64:  ret =  64 / 8; break;
	case DEVSZ_128: ret = 128 / 8; break;
	case DEVSZ_256: ret = 256 / 8; break;
	case DEVSZ_512: ret = 512 / 8; break;
	}
	switch (ddrctl & 0x30000) {
	case DEVWD_4:  ret *= 2; /* fall through */
	case DEVWD_8:  ret *= 2; /* fall through */
	case DEVWD_16: break;
	}
	if ((ddrctl & 0xc000) == 0x4000)
		ret *= 2;
	return ret;
#endif
	BUG();
}
void __init setup_arch(char **cmdline_p)
{
	unsigned long sclk, cclk;

	enable_shadow_console();

	/* Check to make sure we are running on the right processor */
	if (unlikely(CPUID != bfin_cpuid()))
		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
			CPU, bfin_cpuid(), bfin_revid());
#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif
#if defined(CONFIG_CMDLINE_BOOL)
	strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
	command_line[sizeof(command_line) - 1] = 0;
#endif
	/* Keep a copy of command line */
	*cmdline_p = &command_line[0];
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

	memset(&bfin_memmap, 0, sizeof(bfin_memmap));
	/* If the user does not specify things on the command line, use
	 * what the bootloader set things up as
	 */
	physical_mem_end = 0;
	parse_cmdline_early(&command_line[0]);

	if (_ramend == 0)
		_ramend = get_mem_size() * 1024 * 1024;

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

	memory_setup();
	/* Initialize Async memory banks */
	bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
	bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
	bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
	bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
	bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
	bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif

	cclk = get_cclk();
	sclk = get_sclk();
	if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
		panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
	if (ANOMALY_05000266) {
		bfin_read_IMDMA_D0_IRQ_STATUS();
		bfin_read_IMDMA_D1_IRQ_STATUS();
	}
#endif
	printk(KERN_INFO "Hardware Trace ");
	if (bfin_read_TBUFCTL() & 0x1)
		printk(KERN_CONT "Active ");
	else
		printk(KERN_CONT "Off ");
	if (bfin_read_TBUFCTL() & 0x2)
		printk(KERN_CONT "and Enabled\n");
	else
		printk(KERN_CONT "and Disabled\n");

	printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF);
	/* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
	_bfin_swrst = bfin_read_SWRST();
#else
	/* Clear boot mode field */
	_bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif
#ifdef CONFIG_SMP
	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
	if (_bfin_swrst & RESET_DOUBLE) {
#endif
		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		/* We assume the crashing kernel, and the current symbol table match */
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx);
	} else if (_bfin_swrst & RESET_WDOG)
		printk(KERN_INFO "Recovering from Watchdog event\n");
	else if (_bfin_swrst & RESET_SOFTWARE)
		printk(KERN_NOTICE "Reset caused by Software reset\n");
	printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n");
	if (bfin_compiled_revid() == 0xffff)
		printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n",
			CPU, bfin_revid());
	else if (bfin_compiled_revid() == -1)
		printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
	else
		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
	if (likely(CPUID == bfin_cpuid())) {
		if (bfin_revid() != bfin_compiled_revid()) {
			if (bfin_compiled_revid() == -1)
				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
					bfin_revid());
			else if (bfin_compiled_revid() != 0xffff) {
				printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
					bfin_compiled_revid(), bfin_revid());
				if (bfin_compiled_revid() > bfin_revid())
					panic("Error: you are missing anomaly workarounds for this rev");
			}
		}
		if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
			printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
				CPU, bfin_revid());
	}
	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
		cclk / 1000000, sclk / 1000000);

	setup_bootmem_allocator();

	paging_init();
	/* Copy atomic sequences to their fixed location, and sanity check that
	   these locations are the ones that we advertise to userspace.  */
	memcpy((void *)FIXED_CODE_START, &fixed_code_start,
		FIXED_CODE_END - FIXED_CODE_START);
	BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
		!= SIGRETURN_STUB - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
		!= ATOMIC_XCHG32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
		!= ATOMIC_CAS32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
		!= ATOMIC_ADD32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
		!= ATOMIC_SUB32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
		!= ATOMIC_IOR32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
		!= ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
		!= ATOMIC_XOR32 - FIXED_CODE_START);
	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);
#ifdef CONFIG_SMP
	platform_init_cpus();
#endif

	init_exception_vectors();
	bfin_cache_init();	/* Initialize caches for the boot CPU */
}
static int __init topology_init(void)
{
	unsigned int cpu;
	/* Record CPU-private information for the boot processor. */
	bfin_setup_cpudata(0);

	for_each_possible_cpu(cpu) {
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
	}

	return 0;
}

subsys_initcall(topology_init);
/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
	return cached_clkin_hz;
}
static int __init early_init_clkin_hz(char *buf)
{
	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
		panic("cannot change clkin_hz when reprogramming clocks");
#endif
	return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);
/* Get the voltage input multiplier */
static u_long get_vco(void)
{
	static u_long cached_vco;
	u_long msel, pll_ctl;

	/* The assumption here is that VCO never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_vco)
		return cached_vco;

	pll_ctl = bfin_read_PLL_CTL();
	msel = (pll_ctl >> 9) & 0x3F;
	if (0 == msel)
		msel = 64;

	cached_vco = get_clkin_hz();
	cached_vco >>= (1 & pll_ctl);	/* DF bit */
	cached_vco *= msel;
	return cached_vco;
}
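/*
 * Sketch of the arithmetic with hypothetical values: CLKIN = 25MHz and
 * MSEL = 16 with DF = 0 gives VCO = 25MHz * 16 = 400MHz; with DF = 1 the
 * input is halved first, so the same MSEL would yield 200MHz.
 */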
/* Get the Core clock */
u_long get_cclk(void)
{
	static u_long cached_cclk_pll_div, cached_cclk;
	u_long csel, ssel;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV();
	if (ssel == cached_cclk_pll_div)
		return cached_cclk;
	else
		cached_cclk_pll_div = ssel;

	csel = ((ssel >> 4) & 0x03);
	ssel &= 0xf;
	if (ssel && ssel < (1 << csel))	/* SCLK > CCLK */
		cached_cclk = get_vco() / ssel;
	else
		cached_cclk = get_vco() >> csel;
	return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);
/* Get the System clock */
u_long get_sclk(void)
{
	static u_long cached_sclk;
	u_long ssel;

	/* The assumption here is that SCLK never changes at runtime.
	 * If, someday, we support that, then we'll have to change this.
	 */
	if (cached_sclk)
		return cached_sclk;

	if (bfin_read_PLL_STAT() & 0x1)
		return get_clkin_hz();

	ssel = bfin_read_PLL_DIV() & 0xf;
	if (0 == ssel) {
		printk(KERN_WARNING "Invalid System Clock\n");
		ssel = 1;
	}

	cached_sclk = get_vco() / ssel;
	return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);
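/*
 * Continuing the hypothetical 400MHz VCO example: CSEL = 0 makes
 * CCLK = VCO >> 0 = 400MHz, while SSEL = 5 makes SCLK = 400MHz / 5 =
 * 80MHz, which satisfies the CCLK >= 2*SCLK check in setup_arch().
 */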
unsigned long sclk_to_usecs(unsigned long sclk)
{
	u64 tmp = USEC_PER_SEC * (u64)sclk;
	do_div(tmp, get_sclk());
	return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
	u64 tmp = get_sclk() * (u64)usecs;
	do_div(tmp, USEC_PER_SEC);
	return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
/*
 *	Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *cpu, *mmu, *fpu, *vendor, *cache;
	uint32_t revid;
	int cpu_num = *(unsigned int *)v;
	u_long sclk, cclk;
	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);
	cpu = CPU;
	mmu = "none";
	fpu = "none";
	revid = bfin_revid();

	sclk = get_sclk();
	cclk = get_cclk();

	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
	case 0xca:
		vendor = "Analog Devices";
		break;
	default:
		vendor = "unknown";
		break;
	}

	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);
	if (CPUID == bfin_cpuid())
		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
	else
		seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
			CPUID, bfin_cpuid());
	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
		"stepping\t: %d ",
		cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
		"mpu on",
#else
		"mpu off",
#endif
		revid);
	if (bfin_revid() != bfin_compiled_revid()) {
		if (bfin_compiled_revid() == -1)
			seq_printf(m, "(Compiled for Rev none)");
		else if (bfin_compiled_revid() == 0xffff)
			seq_printf(m, "(Compiled for Rev any)");
		else
			seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
	}
	seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
		cclk/1000000, cclk%1000000,
		sclk/1000000, sclk%1000000);
	seq_printf(m, "bogomips\t: %lu.%02lu\n"
		"Calibration\t: %lu loops\n",
		(loops_per_jiffy * HZ) / 500000,
		((loops_per_jiffy * HZ) / 5000) % 100,
		(loops_per_jiffy * HZ));
	/* Check Cache configuration */
	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
	case ACACHE_BSRAM:
		cache = "dbank-A/B\t: cache/sram";
		dcache_size = 16;
		dsup_banks = 1;
		break;
	case ACACHE_BCACHE:
		cache = "dbank-A/B\t: cache/cache";
		dcache_size = 32;
		dsup_banks = 2;
		break;
	case ASRAM_BSRAM:
		cache = "dbank-A/B\t: sram/sram";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	default:
		cache = "unknown";
		dcache_size = 0;
		dsup_banks = 0;
		break;
	}
	/* Is it turned on? */
	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
		dcache_size = 0;

	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
		icache_size = 0;
	seq_printf(m, "cache size\t: %d KB(L1 icache) "
		"%d KB(L1 dcache) %d KB(L2 cache)\n",
		icache_size, dcache_size, 0);
	seq_printf(m, "%s\n", cache);
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
		"cacheable"
#else
		"uncacheable"
#endif
		" in instruction cache\n");
	seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
		"cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
		"cacheable (write-through)"
#else
		"uncacheable"
#endif
		" in data cache\n");
	if (icache_size)
		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
			BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
	else
		seq_printf(m, "icache setup\t: off\n");
	seq_printf(m,
		"dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
		dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
		BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
	seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
#endif
	if (cpu_num != num_possible_cpus() - 1)
		return 0;

	if (L2_LENGTH) {
		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
			"cacheable"
#else
			"uncacheable"
#endif
			" in instruction cache\n");
		seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
			"cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
			"cacheable (write-through)"
#else
			"uncacheable"
#endif
			" in data cache\n");
	}
	seq_printf(m, "board name\t: %s\n", bfin_board_name);
	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
		physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
	seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
		((int)memory_end - (int)_stext) >> 10,
		_stext,
		(void *)memory_end);
	seq_printf(m, "\n");

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		*pos = first_cpu(cpu_online_map);
	if (*pos >= num_online_cpus())
		return NULL;

	return pos;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_online_map);

	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
void __init cmdline_init(const char *r0)
{
	early_shadow_stamp();
	if (r0)
		strncpy(command_line, r0, COMMAND_LINE_SIZE);
}