/*
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/starfire.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
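/* Illustrative note (not literal ktlb.S code): a linear-area TLB miss
 * roughly computes
 *
 *	use_256mb = test_bit(paddr >> 28, kpte_linear_bitmap);
 *	tte       = vaddr ^ kern_linear_pte_xor[use_256mb];
 *
 * so the XOR value both strips the linear-mapping virtual base and
 * supplies the page-size, cacheability and protection bits.
 */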
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
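/* A sketch of the page->flags encoding these macros rely on: the low
 * half carries the normal page flag bits (including PG_dcache_dirty
 * via PG_arch_1), while the field starting at PG_dcache_cpu_shift (32)
 * holds the number of the cpu whose D-cache last dirtied the page.
 * The width of that cpu field is ilog2(roundup_pow_of_two(NR_CPUS))
 * bits; the exact layout described here is illustrative.
 */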
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}
unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
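/* Note: update_mmu_cache() only preloads the software TSB; the TLB
 * entry itself is installed later by the TSB miss handlers.  The tag
 * stored is simply the virtual address shifted down by 22 bits, which
 * is what the miss handlers compare against.
 */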
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;
/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}
/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];

		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, ret);
		prom_halt();
	}
}
static unsigned long kern_large_tte(unsigned long paddr);
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}
void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);

		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;

			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
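/* A context value is the concatenation of a generation "version" in
 * the high bits and a context number in the low CTX_NR_BITS.  When the
 * number space wraps, the version is bumped and (on SMP) broadcast via
 * smp_new_mmu_context_version(), forcing mms holding a stale version
 * to reallocate a context on their next switch.
 */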
static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)
static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}
struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}
static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;

	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif
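/* nid_range() walks [start, end) a page at a time and returns the
 * first address that belongs to a different NUMA node than 'start',
 * writing the starting node's id through *nid.  The stub used without
 * CONFIG_NEED_MULTIPLE_NODES simply assigns everything to node 0 and
 * returns 'end'.
 */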
/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}
static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);
struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}
int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}
static void add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}
static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}
static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}
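/* To summarize the two tables built by the helpers above: 'mblocks'
 * describe how real addresses map to physical addresses (base, size
 * and the congruence offset consumed by ra_to_pa()), while 'mlgroups'
 * carry the latency, address-match and address-mask values that later
 * become the per-node masks in node_masks[].
 */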
static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}
static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);

		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}
static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu_mask(cpu, mask)
		numa_cpu_lookup_table[cpu] = index;
	numa_cpumask_lookup_table[index] = mask;

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu_mask(cpu, mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}
static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}
static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}
static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}
static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}
	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif
static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start_pfn, end_pfn;

		if (!size)
			continue;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	allocate_node_data(0);

	node_set_online(0);
}
static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}
static void __init trim_reserved_in_node(int nid)
{
	int i;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long start = lmb.reserved.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.reserved, i);
		unsigned long end = start + size;

		reserve_range_in_node(nid, start, end);
	}
}
static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}
static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}

static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif
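/* Background note for the CONFIG_DEBUG_PAGEALLOC path above (a summary,
 * not code taken from ktlb.S): with page-alloc debugging the linear
 * area is backed by real page tables built in kernel_map_range(), and
 * the instruction at kvmap_linear_patch[0] is overwritten with a nop
 * so linear-area misses take the full page-table walk instead of the
 * 4MB/256MB kern_linear_pte_xor fast path; kernel_map_pages() can then
 * unmap and remap individual pages as they are freed and allocated.
 */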
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}
1588 #ifndef CONFIG_DEBUG_PAGEALLOC
1589 #define NUM_KTSB_DESCR 2
1591 #define NUM_KTSB_DESCR 1
1593 static struct hv_tsb_descr ktsb_descr
[NUM_KTSB_DESCR
];
1594 extern struct tsb swapper_tsb
[KERNEL_TSB_NENTRIES
];
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}
void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}
/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Dummy function */
void __init setup_per_cpu_areas(void)
{
}
void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checkes make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	lmb_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	lmb_enforce_memory_limit(cmdline_memory_size);

	lmb_analyze();
	lmb_dump_all();

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* We must setup the per-cpu areas before we pull in the
	 * PROM and the MDESC.  The code there fills in cpu and
	 * other information into per-cpu data structures.
	 */
	real_setup_per_cpu_areas();

	prom_build_devicetree();

	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}
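/* A rough summary of the ordering constraints in paging_init(): pgprot
 * values and TSB/TLB handler patching must be in place before the new
 * handlers can run, the OBP memory lists must be captured before lmb
 * allocations begin, and the kernel must be locked into the TLB via
 * inherit_prom_mappings() before the trap table is switched over and
 * the remaining firmware mappings are flushed.
 */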
int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void __init setup_valid_addr_bitmap_from_pavail(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i) {
		if (NODE_DATA(i)->node_spanned_pages != 0) {
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(i));
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free) {
			p = virt_to_page(page);

			ClearPageReserved(p);
			init_page_count(p);
			__free_page(p);
			num_physpages++;
			totalram_pages++;
		}
	}
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];

int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
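/* Descriptive note (not code from the miss handler itself):
 * vmemmap_table[] acts as a flat array of 4MB TTEs, one per
 * VMEMMAP_CHUNK of the virtual memmap; the vmemmap TLB miss path
 * indexes it with (vaddr - VMEMMAP_BASE) >> VMEMMAP_CHUNK_SHIFT to
 * find the backing translation filled in above.
 */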
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
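/* The protection_map index is the standard VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED nibble: entries 0x0-0x7 are the private (copy-on-write)
 * combinations, 0x8-0xf the shared ones, and the exec bit is masked
 * off for the combinations that lack VM_EXEC.
 */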
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}