/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>

extern void device_scan(void);

struct sparc_phys_banks {
	unsigned long base_addr;
	unsigned long num_bytes;
};

#define SPARC_PHYS_BANKS 32

static struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base __read_mostly;
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
unsigned long pfn_base __read_mostly;

/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to special section boundaries */
extern char _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

/* XXX Tune this... */
#define PGT_CACHE_LOW	25
#define PGT_CACHE_HIGH	50

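/* check_pgt_cache() below trims the page table quicklists: once
 * pgtable_cache_size exceeds PGT_CACHE_HIGH, cached pgd and pte pages
 * are released back to the page allocator until the count drops to
 * PGT_CACHE_LOW.
 */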
void check_pgt_cache(void)
{
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
	preempt_enable();
}

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	24
#define PG_dcache_cpu_mask	(256 - 1)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

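/* D-cache dirty tracking state is packed into page->flags: bit
 * PG_dcache_dirty (PG_arch_1) marks the page as needing a flush, and
 * bits [31:24] (PG_dcache_cpu_shift/PG_dcache_cpu_mask) record which
 * cpu dirtied it.  For example, a page last dirtied by cpu 5 carries
 * ((5UL << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty)) in its
 * flags.  The two helpers below update these bits with a casx loop so
 * they are safe against concurrent flag updates.
 */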
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

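/* update_mmu_cache() runs at PTE-install time: if the page was left
 * dirty in some cpu's D-cache, it is flushed now, locally when this
 * cpu holds the dirty data and via smp_flush_dcache_page_impl() (a
 * cross call) when another cpu does.
 */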
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
			   PG_dcache_cpu_mask);
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}

unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long) ((page - mem_map) + pfn_base);
}

struct page *pfn_to_page(unsigned long pfn)
{
	return (mem_map + (pfn - pfn_base));
}

void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n", pgtable_cache_size);
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};
static struct linux_prom_translation prom_trans[512] __initdata;

extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* Exported for kernel TLB miss handling in ktlb.S */
unsigned long prom_pmd_phys __read_mostly;
unsigned int swapper_pgd_zero __read_mostly;

/* Allocate power-of-2 aligned chunks from the end of the
 * kernel image.  Return physical address.
 */
static inline unsigned long early_alloc_phys(unsigned long size)
{
	unsigned long base;

	BUILD_BUG_ON(size & (size - 1));

	kern_size = (kern_size + (size - 1)) & ~(size - 1);
	base = kern_base + kern_size;
	kern_size += size;

	return base;
}

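/* Example: with kern_size == 0x5000, early_alloc_phys(0x2000) first
 * rounds kern_size up to 0x6000, returns kern_base + 0x6000, and
 * leaves kern_size at 0x8000.  The chunk comes out naturally aligned
 * because size must be a power of two (the BUILD_BUG_ON above) and
 * kern_base is 4MB aligned.
 */
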
static inline unsigned long load_phys32(unsigned long pa)
{
	unsigned long val;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (val)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

	return val;
}

static inline unsigned long load_phys64(unsigned long pa)
{
	unsigned long val;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (val)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

	return val;
}

static inline void store_phys32(unsigned long pa, unsigned long val)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
}

static inline void store_phys64(unsigned long pa, unsigned long val)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
}

#define BASE_PAGE_SIZE 8192

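/* The captured PROM mappings live in a small two-level table rooted
 * at prom_pmd_phys: bits [33:23] of a virtual address index a 32-bit
 * pmd entry (which holds the pte page's physical address shifted
 * right by 11), and bits [22:13] index one of 1024 64-bit ptes within
 * that 8K page.  prom_virt_to_phys() below walks exactly this
 * structure using the physical-address loads above.
 */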
/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	unsigned long pmd_phys = (prom_pmd_phys +
				  ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
	unsigned long pte_phys;
	pmd_t pmd_ent;
	pte_t pte_ent;
	unsigned long base;

	pmd_val(pmd_ent) = load_phys32(pmd_phys);
	if (pmd_none(pmd_ent)) {
		if (error)
			*error = 1;
		return 0;
	}

	pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
	pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
	pte_val(pte_ent) = load_phys64(pte_phys);
	if (!pte_present(pte_ent)) {
		if (error)
			*error = 1;
		return 0;
	}
	if (error) {
		*error = 0;
		return pte_val(pte_ent);
	}

	base = pte_val(pte_ent) & _PAGE_PADDR;
	return (base + (promva & (BASE_PAGE_SIZE - 1)));
}

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
 * scheme (also, see rant in inherit_locked_prom_mappings()).
 */
static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data)
{
	unsigned long vaddr;

	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
		unsigned long val, pte_phys, pmd_phys;
		pmd_t pmd_ent;
		int i;

		pmd_phys = (prom_pmd_phys +
			    (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
		pmd_val(pmd_ent) = load_phys32(pmd_phys);
		if (pmd_none(pmd_ent)) {
			pte_phys = early_alloc_phys(BASE_PAGE_SIZE);

			for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
				store_phys64(pte_phys+i*sizeof(pte_t),0);

			pmd_val(pmd_ent) = pte_phys >> 11UL;
			store_phys32(pmd_phys, pmd_val(pmd_ent));
		}

		pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
		pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));

		val = data;

		/* Clear diag TTE bits. */
		if (tlb_type == spitfire)
			val &= ~0x0003fe0000000000UL;

		store_phys64(pte_phys, val | _PAGE_MODIFIED);

		data += BASE_PAGE_SIZE;
	}
}

static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

#define OBP_PMD_SIZE 2048
static void __init build_obp_pgtable(int prom_trans_ents)
{
	int i;

	prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
	for (i = 0; i < OBP_PMD_SIZE; i += 4)
		store_phys32(prom_pmd_phys + i, 0);

	for (i = 0; i < prom_trans_ents; i++) {
		unsigned long start, end;

		if (!in_obp_range(prom_trans[i].virt))
			continue;

		start = prom_trans[i].virt;
		end = start + prom_trans[i].size;
		if (end > HI_OBP_ADDRESS)
			end = HI_OBP_ADDRESS;

		build_obp_range(start, end, prom_trans[i].data);
	}
}

/* Read OBP translations property into 'prom_trans[]'.
 * Return the number of entries.
 */
static int __init read_obp_translations(void)
{
	int n, node;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}
	n = n / sizeof(struct linux_prom_translation);
	return n;
}

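/* remap_kernel() below locks the kernel image into the TLBs: it
 * builds a 4MB (_PAGE_SZ4MB) TTE covering KERNBASE and loads it into
 * the highest locked I- and D-TLB entries via OBP; a kernel larger
 * than 4MB (bigkernel) takes a second locked entry for the next 4MB.
 */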
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
				 _PAGE_CP | _PAGE_CV | _PAGE_P |
				 _PAGE_L | _PAGE_W));

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via OBP. */
	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
	if (bigkernel) {
		prom_dtlb_load(tlb_ent - 1,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
		prom_itlb_load(tlb_ent - 1,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
	}
}

static void __init inherit_prom_mappings(void)
{
	int n;

	n = read_obp_translations();
	build_obp_pgtable(n);

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();
	prom_printf("done.\n");

	register_prom_callbacks();
}

/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus so we need
 * to zap them when we enter the PROM.  -DaveM
 */
static void __flush_nucleus_vptes(void)
{
	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
	int i;

	/* Only DTLB must be checked for VPTE entries. */
	if (tlb_type == spitfire) {
		for (i = 0; i < 63; i++) {
			unsigned long tag;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			tag = spitfire_get_dtlb_tag(i);
			if (((tag & ~(PAGE_MASK)) == 0) &&
			    ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		for (i = 0; i < 512; i++) {
			unsigned long tag = cheetah_get_dtlb_tag(i, 2);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 2);
			}

			if (tlb_type != cheetah_plus)
				continue;

			tag = cheetah_get_dtlb_tag(i, 3);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 3);
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
}

static int prom_ditlb_set;
struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];

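/* prom_world() switches the MMU between the kernel's view and the
 * PROM's: on enter it flushes the nucleus VPTE entries and installs
 * the locked PROM translations saved in prom_itlb[]/prom_dtlb[], and
 * on exit it tears them back out.  Interrupts are disabled around the
 * whole exchange (PSTATE_IE) so the swap appears atomic.
 */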
void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world. */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					"i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_itlb[i].tlb_tag),
						     "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
			}
		}
	} else {
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
					: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS),
						     "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
			}
		}
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}

void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecovable rule on the PROM, it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!! Thanks S(t)un!
	 */
	if (save_p) {
		for (i = 0; i < 16; i++) {
			prom_itlb[i].tlb_ent = -1;
			prom_dtlb[i].tlb_ent = -1;
		}
	}
	if (tlb_type == spitfire) {
		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_dtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* No outputs */
						     : "r" (0),
						       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_dtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_itlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* No outputs */
						     : "r" (0),
						       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_itlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_ldtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_ldtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_ldtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_litlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_litlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				cheetah_put_litlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}

	if (save_p)
		prom_ditlb_set = 1;
}

/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				"i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					     "r" (TLB_TAG_ACCESS),
					     "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}

#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);
}

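/* A context value concatenates a version number (high bits) with a
 * context number (low CTX_NR_BITS).  On wrap-around the code above
 * bumps the version and restarts numbering: mmu_context_bmap[0] = 3
 * reserves ctx 0 (the nucleus) plus ctx 1, which CTX_FIRST_VERSION's
 * low bits hand to the wrapping mm.  Any mm still holding an older
 * version then fails the CTX_VALID() test and reallocates on its
 * next context switch.
 */
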
#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif

/* OK, we have to color these pages. The page tables are accessed
 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
 * other parts of the kernel. By coloring, we make sure that the tlbmiss
 * fast handlers do not get data from old/garbage dcache lines that
 * correspond to an old/stale virtual address (user/kernel) that
 * previously mapped the pagetable page while accessing vpte range
 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
 * color is the same, then when the kernel initializes the pagetable
 * using the later address range, accesses with the first address
 * range will see the newly initialized data rather than the garbage.
 */
#ifdef DCACHE_ALIASING_POSSIBLE
#define DC_ALIAS_SHIFT	1
#else
#define DC_ALIAS_SHIFT	0
#endif

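/* Concretely: with 8K pages and a two-color aliasing D-cache (e.g. a
 * 16K direct-mapped spitfire D-cache), DC_ALIAS_SHIFT is 1 and pte
 * pages are allocated in (color, color^1) pairs; pte_alloc_one_kernel()
 * below hands out the half whose PAGE_OFFSET address matches
 * VPTE_COLOR(address) and threads the other half onto the
 * opposite-color quicklist.
 */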
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	unsigned long color;

	{
		pte_t *ptep = pte_alloc_one_fast(mm, address);

		if (ptep)
			return ptep;
	}

	color = VPTE_COLOR(address);
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#ifdef DCACHE_ALIASING_POSSIBLE
		set_page_count(page, 1);
		ClearPageCompound(page);

		set_page_count((page + 1), 1);
		ClearPageCompound(page + 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Now free the other one up, adjust cache size. */
		preempt_disable();
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
		preempt_enable();
#endif

		return pte;
	}
	return NULL;
}

void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n",
			0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}

void sparc_ultra_dump_dtlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}

extern unsigned long cmdline_memory_size;

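/* bootmem_init() below works in four steps: scan sp_banks[] (honoring
 * any "mem=" limit via cmdline_memory_size), place the bootmap after
 * the kernel image (skipping over any initrd), hand every bank to the
 * bootmem allocator with free_bootmem(), and finally reserve the
 * initrd, the kernel image, and the bootmap itself.
 */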
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan sp_banks, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
#endif
		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.   We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern struct linux_mlist_p1275 *prom_ptot_ptr;
extern unsigned int kvmap_linear_patch[1];

static void __init kernel_physical_mapping_init(void)
{
	struct linux_mlist_p1275 *p = prom_ptot_ptr;
	unsigned long mem_alloced = 0UL;

	while (p) {
		unsigned long phys_start, phys_end;

		phys_start = p->start_adr;
		phys_end = phys_start + p->num_bytes;
		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);

		p = p->theres_more;
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; ; i++) {
		if (sp_banks[i].num_bytes == 0)
			break;
		if (sp_banks[i].num_bytes >= size)
			return sp_banks[i].base_addr;
	}

	return ~0UL;
}

static void __init prom_probe_memory(void)
{
	struct linux_mlist_p1275 *mlist;
	unsigned long bytes, base_paddr, tally;
	int i;

	i = 0;
	mlist = *prom_meminfo()->p1275_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0) {
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}

/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift;
	unsigned long real_end, i;

	prom_probe_memory();

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long top;

		if (sp_banks[i].base_addr < phys_base)
			phys_base = sp_banks[i].base_addr;
		top = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
	}
	pfn_base = phys_base >> PAGE_SHIFT;

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	__flush_tlb_all();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kernel_physical_mapping_init();
#endif

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}

/* Ok, it seems that the prom can allocate some more memory chunks
 * as a side effect of some prom calls we perform during the
 * boot sequence.  My most likely theory is that it is from the
 * prom_set_traptable() call, and OBP is allocating a scratchpad
 * for saving client program register state etc.
 */
static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more != 0; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if (lowest == thislist[i].start_adr)
			continue;
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}

void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (i = 0; i < num_regs; i++) {
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;
	sort_memlist(avail);

	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}

static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

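/* sparc64_valid_addr_bitmap, sized and filled in mem_init() below,
 * carries one bit per 4MB of physical address space (hence the
 * ">> 22" indexing); kern_addr_valid() consults it to tell real
 * memory from holes.
 */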
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif