/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/starfire.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
unsigned long kern_linear_pte_xor[2] __read_mostly;
/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
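/* Illustrative sketch, not part of the original file: with one bit per
 * 256MB chunk, a physical address picks its bitmap slot with a 28-bit
 * shift, mirroring what mark_kpte_bitmap() does further below.  The
 * helper name is hypothetical.
 */
#if 0
static inline int kpte_linear_bit_is_set(unsigned long paddr)
{
	unsigned long bit = paddr >> 28;	/* 256MB granularity */

	return test_bit(bit, kpte_linear_bitmap);
}
#endif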
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n");
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * it's size.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
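/* In other words: the number of the cpu that owns the dirty D-cache
 * lines is kept in page->flags starting at bit 32, and the dirty state
 * itself lives in the PG_arch_1 bit, so dcache_dirty_cpu() recovers the
 * cpu with a single shift and mask.
 */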
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}
unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
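/* Illustrative sketch, not from the original file: the slot selection
 * above reduces to a simple hash of the faulting address, with the tag
 * being the address shifted down by 22 bits.  The helper name is
 * hypothetical.
 */
#if 0
static unsigned long example_tsb_slot(unsigned long address,
				      unsigned long hash_shift,
				      unsigned long nentries)
{
	return (address >> hash_shift) & (nentries - 1UL);
}
#endif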
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

unsigned long kern_locked_tte_data;
/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}
/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}
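/* As used by remap_kernel() below: each call asks the sun4v hypervisor
 * to install a permanent (locked) translation for 'vaddr' in context 0
 * of the given MMU (HV_MMU_DMMU or HV_MMU_IMMU).
 */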
static unsigned long kern_large_tte(unsigned long paddr);
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}
void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
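/* In short: the low CTX_NR_BITS of sparc64_ctx_val are the context
 * number handed to the MMU, the bits above them (CTX_VERSION_MASK) act
 * as a generation counter that is bumped whenever the number space
 * wraps, and the page-size bits (CTX_PGSZ_MASK) are preserved across
 * reallocation.
 */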
static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);
#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)
static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}
struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}
static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;

	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif
/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}
static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);
struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u64 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}
static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u64 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}
int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	/* This is the right thing to do on currently supported
	 * SUN4U NUMA platforms as well, as the PCI controller does
	 * not sit behind any particular memory controller.
	 */
	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}
static void __init add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}
static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}
static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}
static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}
static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}
static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu_mask(cpu, mask)
		numa_cpu_lookup_table[cpu] = index;
	numa_cpumask_lookup_table[index] = mask;

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu_mask(cpu, mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}
static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}
static int __init numa_parse_jbus(void)
{
	unsigned long cpu, index;

	/* NUMA node id is encoded in bits 36 and higher, and there is
	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
	 */
	index = 0;
	for_each_present_cpu(cpu) {
		numa_cpu_lookup_table[cpu] = index;
		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
		node_masks[index].val = cpu << 36UL;

		index++;
	}
	num_node_masks = index;

	add_node_ranges();

	for (index = 0; index < num_node_masks; index++) {
		allocate_node_data(index);
		node_set_online(index);
	}

	return 0;
}
static int __init numa_parse_sun4u(void)
{
	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32UL) == __JALAPENO_ID ||
		    (ver >> 32UL) == __SERRANO_ID)
			return numa_parse_jbus();
	}
	return -1;
}
static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}

	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif
static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start_pfn, end_pfn;

		if (!size)
			continue;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	allocate_node_data(0);

	node_set_online(0);
}
static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}
static void __init trim_reserved_in_node(int nid)
{
	int i;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long start = lmb.reserved.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.reserved, i);
		unsigned long end = start + size;

		reserve_range_in_node(nid, start, end);
	}
}
static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}
static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	return end_pfn;
}
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}
static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}
static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);
#endif
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}
/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	};

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}
void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}
/* paging_init() sets up the page tables */

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Dummy function */
void __init setup_per_cpu_areas(void)
{
}
void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checkes make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	lmb_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	lmb_enforce_memory_limit(cmdline_memory_size);

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* We must setup the per-cpu areas before we pull in the
	 * PROM and the MDESC.  The code there fills in cpu and
	 * other information into per-cpu data structures.
	 */
	real_setup_per_cpu_areas();

	prom_build_devicetree();

	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}
int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;

/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void __init setup_valid_addr_bitmap_from_pavail(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
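	/* One bit in sparc64_valid_addr_bitmap covers a 4MB chunk (hence
	 * the "22 - PAGE_SHIFT" shift of the pfn), and 64 bits fit into
	 * one long (hence the further shift by 6); the "i << 3" below is
	 * then the bitmap size in bytes.
	 */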
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i) {
		if (NODE_DATA(i)->node_spanned_pages != 0) {
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(i));
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
void free_initmem(void)
{
	unsigned long addr, initend;
	int do_free = 1;

	/* If the physical memory maps were trimmed by kernel command
	 * line options, don't even try freeing this initmem stuff up.
	 * The kernel image could have been in the trimmed out region
	 * and if so the freeing below will free invalid page structs.
	 */
	if (cmdline_memory_size)
		do_free = 0;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);

		if (do_free) {
			p = virt_to_page(page);

			ClearPageReserved(p);
			init_page_count(p);
			__free_page(p);
			num_physpages++;
			totalram_pages++;
		}
	}
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];

int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
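/* Note on the block above: each vmemmap_table slot covers one
 * VMEMMAP_CHUNK of the virtual memmap and is backed lazily by a 4MB
 * block (vmemmap_alloc_block(1UL << 22, node)) mapped with a single
 * 4MB TTE built from pte_base.
 */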
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
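/* For reference (standard Linux vm_flags encoding, not stated in this
 * file): the protection_map index is the low four bits of vm_flags,
 * i.e. bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC and
 * bit 3 = VM_SHARED, which is why indices 0x2/0x3 (private writable)
 * get the copy-on-write protections while 0xa/0xb get the shared ones.
 */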
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		};
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		};
	}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}