/*
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>
#include <linux/lmb.h>
#include <linux/mmzone.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/starfire.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/hypervisor.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>

#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
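/* Illustrative sketch (not part of the original file): how a physical
 * address would map to a bit of kpte_linear_bitmap, assuming the 256MB
 * chunking defined by KPTE_BITMAP_CHUNK_SZ above.  The helper name
 * kpte_chunk_index() is hypothetical:
 *
 *	static inline unsigned long kpte_chunk_index(unsigned long paddr)
 *	{
 *		return paddr >> 28;	// 256MB == 1UL << 28
 *	}
 *
 * test_bit(kpte_chunk_index(paddr), kpte_linear_bitmap) then selects
 * between the 4MB and 256MB linear-mapping PTE templates.
 */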
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
EXPORT_SYMBOL(mem_map_zero);

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int num_kernel_image_mappings;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
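/* Illustrative note (not part of the original file): with the macros
 * above, page->flags on sparc64 packs the dirtying cpu number starting
 * at bit 32 and the dirty marker in PG_dcache_dirty, roughly
 *
 *	flags = (cpu << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty)
 *		| <generic page flag bits in the low 32 bits>;
 *
 * so dcache_dirty_cpu(page) shifts the cpu field back down and masks it
 * to ilog2(roundup_pow_of_two(NR_CPUS)) bits.
 */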
static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}
/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries.  */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];

		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);

	if (ret != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int i, tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
	} else {
		for (i = 0; i < num_kernel_image_mappings; i++) {
			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
static void __init inherit_prom_mappings(void)
{
	/* Now fixup OBP's idea about where we really are mapped. */
	printk("Remapping the kernel... ");
	remap_kernel();
	printk("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
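/* Sizing note (illustrative, not part of the original file): with
 * CTX_NR_BITS context-number bits there are MAX_CTX_NR = 1UL <<
 * CTX_NR_BITS possible contexts, and BITS_TO_LONGS() rounds that bit
 * count up to the number of unsigned longs backing mmu_context_bmap,
 * i.e. CTX_BMAP_SLOTS = DIV_ROUND_UP(MAX_CTX_NR, BITS_PER_LONG).
 */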
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;

			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
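/* Illustrative note (not part of the original file): a context value is
 * a version field above the context-number bits plus the number itself
 * in the low bits, roughly
 *
 *	sparc64_ctx_val = (version & CTX_VERSION_MASK) | (nr & CTX_NR_MASK);
 *
 * so bumping the version above invalidates every previously handed-out
 * context number at once, and the CTX_VALID() comparison in
 * mmu_context.h catches stale mms lazily on their next switch.
 */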
static int numa_enabled = 1;
static int numa_debug;

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	return 0;
}
early_param("numa", early_numa);

#define numadbg(f, a...) \
do {	if (numa_debug) \
		printk(KERN_INFO f, ## a); \
} while (0)
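/* Usage sketch (not from the original source): because early_numa()
 * uses strstr(), booting with "numa=off" disables the NUMA parsing
 * below, "numa=debug" makes the numadbg() statements print via
 * printk(KERN_INFO ...), and "numa=off,debug" does both, e.g. on the
 * boot loader command line:
 *
 *	linux ... numa=debug
 */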
static void __init find_ramdisk(unsigned long phys_base)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image;

		/* Older versions of the bootloader only supported a
		 * 32-bit physical address for the ramdisk image
		 * location, stored at sparc_ramdisk_image.  Newer
		 * SILO versions set sparc_ramdisk_image to zero and
		 * provide a full 64-bit physical address at
		 * sparc_ramdisk_image64.
		 */
		ramdisk_image = sparc_ramdisk_image;
		if (!ramdisk_image)
			ramdisk_image = sparc_ramdisk_image64;

		/* Another bootloader quirk.  The bootloader normalizes
		 * the physical address to KERNBASE, so we have to
		 * factor that back out and add in the lowest valid
		 * physical page address to get the true physical address.
		 */
		ramdisk_image -= KERNBASE;
		ramdisk_image += phys_base;

		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
			ramdisk_image, sparc_ramdisk_size);

		initrd_start = ramdisk_image;
		initrd_end = ramdisk_image + sparc_ramdisk_size;

		lmb_reserve(initrd_start, sparc_ramdisk_size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
}
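/* Worked example (illustrative, not from the original source): if the
 * bootloader reports the image at KERNBASE + 0x800000 and the lowest
 * usable physical page is phys_base = 0x40000000, then
 *
 *	ramdisk_image = (KERNBASE + 0x800000) - KERNBASE + 0x40000000
 *	              = 0x40800000
 *
 * which is the true physical address handed to lmb_reserve(), while
 * initrd_start/initrd_end are then re-expressed as virtual addresses by
 * adding PAGE_OFFSET.
 */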
struct node_mem_mask {
	unsigned long mask;
	unsigned long val;
	unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];

#ifdef CONFIG_NEED_MULTIPLE_NODES

struct mdesc_mblock {
	u64	base;
	u64	size;
	u64	offset; /* RA-to-PA */
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
static unsigned long ra_to_pa(unsigned long addr)
{
	int i;

	for (i = 0; i < num_mblocks; i++) {
		struct mdesc_mblock *m = &mblocks[i];

		if (addr >= m->base &&
		    addr < (m->base + m->size)) {
			addr += m->offset;
			break;
		}
	}
	return addr;
}

static int find_node(unsigned long addr)
{
	int i;

	addr = ra_to_pa(addr);
	for (i = 0; i < num_node_masks; i++) {
		struct node_mem_mask *p = &node_masks[i];

		if ((addr & p->mask) == p->val)
			return i;
	}
	return -1;
}
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = find_node(start);
	start += PAGE_SIZE;

	while (start < end) {
		int n = find_node(start);

		if (n != *nid)
			break;
		start += PAGE_SIZE;
	}

	if (start > end)
		start = end;

	return start;
}
#else
static unsigned long nid_range(unsigned long start, unsigned long end,
			       int *nid)
{
	*nid = 0;
	return end;
}
#endif

/* This must be invoked after performing all of the necessary
 * add_active_range() calls for 'nid'.  We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
static void __init allocate_node_data(int nid)
{
	unsigned long paddr, num_pages, start_pfn, end_pfn;
	struct pglist_data *p;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
			      SMP_CACHE_BYTES, nid, nid_range);
	if (!paddr) {
		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
		prom_halt();
	}
	NODE_DATA(nid) = __va(paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	p = NODE_DATA(nid);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	p->node_start_pfn = start_pfn;
	p->node_spanned_pages = end_pfn - start_pfn;

	if (p->node_spanned_pages) {
		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);

		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
				      nid_range);
		if (!paddr) {
			prom_printf("Cannot allocate bootmap for nid[%d]\n",
				    nid);
			prom_halt();
		}
		node_masks[nid].bootmem_paddr = paddr;
	}
}
static void init_node_masks_nonnuma(void)
{
	int i;

	numadbg("Initializing tables for non-numa.\n");

	node_masks[0].mask = node_masks[0].val = 0;
	num_node_masks = 1;

	for (i = 0; i < NR_CPUS; i++)
		numa_cpu_lookup_table[i] = 0;

	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
}

#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

struct mdesc_mlgroup {
	u64	node;
	u64	latency;
	u64	match;
	u64	mask;
};
static struct mdesc_mlgroup *mlgroups;
static int num_mlgroups;
static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
				   u32 cfg_handle)
{
	u64 arc;

	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const u64 *val;

		val = mdesc_get_property(md, target,
					 "cfg-handle", NULL);
		if (val && *val == cfg_handle)
			return 0;
	}
	return -ENODEV;
}

static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
				    u32 cfg_handle)
{
	u64 arc, candidate, best_latency = ~(u64)0;

	candidate = MDESC_NODE_NULL;
	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *val;

		if (strcmp(name, "pio-latency-group"))
			continue;

		val = mdesc_get_property(md, target, "latency", NULL);
		if (!val)
			continue;

		if (*val < best_latency) {
			candidate = target;
			best_latency = *val;
		}
	}

	if (candidate == MDESC_NODE_NULL)
		return -ENODEV;

	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
}
int of_node_to_nid(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct mdesc_handle *md;
	u32 cfg_handle;
	int count, nid;
	u64 grp;

	if (!mlgroups)
		return -1;

	regs = of_get_property(dp, "reg", NULL);
	if (!regs)
		return -1;

	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	md = mdesc_grab();

	count = 0;
	nid = -1;
	mdesc_for_each_node_by_name(md, grp, "group") {
		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
			nid = count;
			break;
		}
		count++;
	}

	mdesc_release(md);

	return nid;
}

static void add_node_ranges(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start, end;

		start = lmb.memory.region[i].base;
		end = start + size;
		while (start < end) {
			unsigned long this_end;
			int nid;

			this_end = nid_range(start, end, &nid);

			numadbg("Adding active range nid[%d] "
				"start[%lx] end[%lx]\n",
				nid, start, this_end);

			add_active_range(nid,
					 start >> PAGE_SHIFT,
					 this_end >> PAGE_SHIFT);

			start = this_end;
		}
	}
}
static int __init grab_mlgroups(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mlgroups = __va(paddr);
	num_mlgroups = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
		struct mdesc_mlgroup *m = &mlgroups[count++];
		const u64 *val;

		m->node = node;

		val = mdesc_get_property(md, node, "latency", NULL);
		m->latency = *val;
		val = mdesc_get_property(md, node, "address-match", NULL);
		m->match = *val;
		val = mdesc_get_property(md, node, "address-mask", NULL);
		m->mask = *val;

		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
			"match[%lx] mask[%lx]\n",
			count - 1, m->node, m->latency, m->match, m->mask);
	}

	return 0;
}

static int __init grab_mblocks(struct mdesc_handle *md)
{
	unsigned long paddr;
	int count = 0;
	u64 node;

	mdesc_for_each_node_by_name(md, node, "mblock")
		count++;
	if (!count)
		return -ENOENT;

	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
			  SMP_CACHE_BYTES);
	if (!paddr)
		return -ENOMEM;

	mblocks = __va(paddr);
	num_mblocks = count;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "mblock") {
		struct mdesc_mblock *m = &mblocks[count++];
		const u64 *val;

		val = mdesc_get_property(md, node, "base", NULL);
		m->base = *val;
		val = mdesc_get_property(md, node, "size", NULL);
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
			count - 1, m->base, m->size, m->offset);
	}

	return 0;
}
static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
					       u64 grp, cpumask_t *mask)
{
	u64 arc;

	cpus_clear(*mask);

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(md, arc);
		const char *name = mdesc_node_name(md, target);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(md, target, "id", NULL);
		if (*id < NR_CPUS)
			cpu_set(*id, *mask);
	}
}

static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
{
	int i;

	for (i = 0; i < num_mlgroups; i++) {
		struct mdesc_mlgroup *m = &mlgroups[i];
		if (m->node == node)
			return m;
	}
	return NULL;
}
static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
				      int index)
{
	struct mdesc_mlgroup *candidate = NULL;
	u64 arc, best_latency = ~(u64)0;
	struct node_mem_mask *n;

	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
		u64 target = mdesc_arc_target(md, arc);
		struct mdesc_mlgroup *m = find_mlgroup(target);
		if (!m)
			continue;
		if (m->latency < best_latency) {
			candidate = m;
			best_latency = m->latency;
		}
	}
	if (!candidate)
		return -ENOENT;

	if (num_node_masks != index) {
		printk(KERN_ERR "Inconsistent NUMA state, "
		       "index[%d] != num_node_masks[%d]\n",
		       index, num_node_masks);
		return -EINVAL;
	}

	n = &node_masks[num_node_masks++];

	n->mask = candidate->mask;
	n->val = candidate->match;

	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
		index, n->mask, n->val, candidate->latency);

	return 0;
}

static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
					 int index)
{
	cpumask_t mask;
	int cpu;

	numa_parse_mdesc_group_cpus(md, grp, &mask);

	for_each_cpu_mask(cpu, mask)
		numa_cpu_lookup_table[cpu] = index;
	numa_cpumask_lookup_table[index] = mask;

	if (numa_debug) {
		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
		for_each_cpu_mask(cpu, mask)
			printk("%d ", cpu);
		printk("]\n");
	}

	return numa_attach_mlgroup(md, grp, index);
}
static int __init numa_parse_mdesc(void)
{
	struct mdesc_handle *md = mdesc_grab();
	int i, err, count;
	u64 node;

	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
	if (node == MDESC_NODE_NULL) {
		mdesc_release(md);
		return -ENOENT;
	}

	err = grab_mblocks(md);
	if (err < 0)
		goto out;

	err = grab_mlgroups(md);
	if (err < 0)
		goto out;

	count = 0;
	mdesc_for_each_node_by_name(md, node, "group") {
		err = numa_parse_mdesc_group(md, node, count);
		if (err < 0)
			break;
		count++;
	}

	add_node_ranges();

	for (i = 0; i < num_node_masks; i++) {
		allocate_node_data(i);
		node_set_online(i);
	}

	err = 0;
out:
	mdesc_release(md);
	return err;
}

static int __init numa_parse_sun4u(void)
{
	return -1;
}

static int __init bootmem_init_numa(void)
{
	int err = -1;

	numadbg("bootmem_init_numa()\n");

	if (numa_enabled) {
		if (tlb_type == hypervisor)
			err = numa_parse_mdesc();
		else
			err = numa_parse_sun4u();
	}

	return err;
}

#else

static int bootmem_init_numa(void)
{
	return -1;
}

#endif
static void __init bootmem_init_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned int i;

	numadbg("bootmem_init_nonnuma()\n");

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	init_node_masks_nonnuma();

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
		unsigned long start_pfn, end_pfn;

		if (!size)
			continue;

		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	allocate_node_data(0);

	node_set_online(0);
}

static void __init reserve_range_in_node(int nid, unsigned long start,
					 unsigned long end)
{
	numadbg("    reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
		nid, start, end);
	while (start < end) {
		unsigned long this_end;
		int n;

		this_end = nid_range(start, end, &n);
		if (n == nid) {
			numadbg("      MATCH reserving range [%lx:%lx]\n",
				start, this_end);
			reserve_bootmem_node(NODE_DATA(nid), start,
					     (this_end - start), BOOTMEM_DEFAULT);
		} else
			numadbg("      NO MATCH, advancing start to %lx\n",
				this_end);

		start = this_end;
	}
}
static void __init trim_reserved_in_node(int nid)
{
	int i;

	numadbg("  trim_reserved_in_node(%d)\n", nid);

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long start = lmb.reserved.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.reserved, i);
		unsigned long end = start + size;

		reserve_range_in_node(nid, start, end);
	}
}

static void __init bootmem_init_one_node(int nid)
{
	struct pglist_data *p;

	numadbg("bootmem_init_one_node(%d)\n", nid);

	p = NODE_DATA(nid);

	if (p->node_spanned_pages) {
		unsigned long paddr = node_masks[nid].bootmem_paddr;
		unsigned long end_pfn;

		end_pfn = p->node_start_pfn + p->node_spanned_pages;

		numadbg("  init_bootmem_node(%d, %lx, %lx, %lx)\n",
			nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

		init_bootmem_node(p, paddr >> PAGE_SHIFT,
				  p->node_start_pfn, end_pfn);

		numadbg("  free_bootmem_with_active_regions(%d, %lx)\n",
			nid, end_pfn);
		free_bootmem_with_active_regions(nid, end_pfn);

		trim_reserved_in_node(nid);

		numadbg("  sparse_memory_present_with_active_regions(%d)\n",
			nid);
		sparse_memory_present_with_active_regions(nid);
	}
}
static unsigned long __init bootmem_init(unsigned long phys_base)
{
	unsigned long end_pfn;
	int nid;

	end_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	if (bootmem_init_numa() < 0)
		bootmem_init_nonnuma();

	/* XXX cpu notifier XXX */

	for_each_online_node(nid)
		bootmem_init_one_node(nid);

	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long __ref kernel_map_range(unsigned long pstart,
					    unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init init_kpte_bitmap(void)
{
	unsigned long i;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);
	}
}
static void __init kernel_physical_mapping_init(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long i, mem_alloced = 0UL;

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
	}

	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}
1557 #ifndef CONFIG_DEBUG_PAGEALLOC
1558 #define NUM_KTSB_DESCR 2
1560 #define NUM_KTSB_DESCR 1
1562 static struct hv_tsb_descr ktsb_descr
[NUM_KTSB_DESCR
];
1563 extern struct tsb swapper_tsb
[KERNEL_TSB_NENTRIES
];
1565 static void __init
sun4v_ktsb_init(void)
1567 unsigned long ktsb_pa
;
1569 /* First KTSB for PAGE_SIZE mappings. */
1570 ktsb_pa
= kern_base
+ ((unsigned long)&swapper_tsb
[0] - KERNBASE
);
1572 switch (PAGE_SIZE
) {
1575 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_8K
;
1576 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_8K
;
1580 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_64K
;
1581 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_64K
;
1585 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_512K
;
1586 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_512K
;
1589 case 4 * 1024 * 1024:
1590 ktsb_descr
[0].pgsz_idx
= HV_PGSZ_IDX_4MB
;
1591 ktsb_descr
[0].pgsz_mask
= HV_PGSZ_MASK_4MB
;
1595 ktsb_descr
[0].assoc
= 1;
1596 ktsb_descr
[0].num_ttes
= KERNEL_TSB_NENTRIES
;
1597 ktsb_descr
[0].ctx_idx
= 0;
1598 ktsb_descr
[0].tsb_base
= ktsb_pa
;
1599 ktsb_descr
[0].resv
= 0;
1601 #ifndef CONFIG_DEBUG_PAGEALLOC
1602 /* Second KTSB for 4MB/256MB mappings. */
1603 ktsb_pa
= (kern_base
+
1604 ((unsigned long)&swapper_4m_tsb
[0] - KERNBASE
));
1606 ktsb_descr
[1].pgsz_idx
= HV_PGSZ_IDX_4MB
;
1607 ktsb_descr
[1].pgsz_mask
= (HV_PGSZ_MASK_4MB
|
1608 HV_PGSZ_MASK_256MB
);
1609 ktsb_descr
[1].assoc
= 1;
1610 ktsb_descr
[1].num_ttes
= KERNEL_TSB4M_NENTRIES
;
1611 ktsb_descr
[1].ctx_idx
= 0;
1612 ktsb_descr
[1].tsb_base
= ktsb_pa
;
1613 ktsb_descr
[1].resv
= 0;
void __cpuinit sun4v_ktsb_register(void)
{
	unsigned long pa, ret;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
	if (ret != 0) {
		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
			    "errors with %lx\n", pa, ret);
		prom_halt();
	}
}

/* paging_init() sets up the page tables */

extern void central_probe(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

/* Dummy function */
void __init setup_per_cpu_areas(void)
{
}
void __init paging_init(void)
{
	unsigned long end_pfn, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checkes make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */

	/*
	 * Page flags must not reach into upper 32 bits that are used
	 * for the cpu number
	 */
	BUILD_BUG_ON(NR_PAGEFLAGS > 32);

	/*
	 * The bit fields placed in the high range must not reach below
	 * the 32 bit boundary. Otherwise we cannot place the cpu field
	 * at the 32 bit boundary.
	 */
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);

	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory...
	 *
	 * Read it twice in order to work around a bug in openfirmware.
	 * The call to grab this table itself can cause openfirmware to
	 * allocate memory, which in turn can take away some space from
	 * the list of available memory.  Reading it twice makes sure
	 * we really do get the final value.
	 */
	read_obp_translations();
	read_obp_memory("reg", &pall[0], &pall_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++) {
		phys_base = min(phys_base, pavail[i].phys_addr);
		lmb_add(pavail[i].phys_addr, pavail[i].reg_size);
	}

	lmb_reserve(kern_base, kern_size);

	find_ramdisk(phys_base);

	if (cmdline_memory_size)
		lmb_enforce_memory_limit(phys_base + cmdline_memory_size);

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
	       num_kernel_image_mappings);

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	init_kpte_bitmap();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* We must setup the per-cpu areas before we pull in the
	 * PROM and the MDESC.  The code there fills in cpu and
	 * other information into per-cpu data structures.
	 */
	real_setup_per_cpu_areas();

	prom_build_devicetree();

	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	/* Once the OF device tree and MDESC have been setup, we know
	 * the list of possible cpus.  Therefore we can allocate the
	 * IRQ stacks.
	 */
	for_each_possible_cpu(i) {
		/* XXX Use node local allocations... XXX */
		softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
		hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
	}

	/* Setup bootmem... */
	last_valid_pfn = end_pfn = bootmem_init(phys_base);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	max_mapnr = last_valid_pfn;
#endif
	kernel_physical_mapping_init();

	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];

		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

		max_zone_pfns[ZONE_NORMAL] = end_pfn;

		free_area_init_nodes(max_zone_pfns);
	}

	printk("Booting Linux...\n");
}
int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_rescan_ents __initdata;
/* Certain OBP calls, such as fetching "available" properties, can
 * claim physical memory.  So, along with initializing the valid
 * address bitmap, what we do here is refetch the physical available
 * memory list again, and make sure it provides at least as much
 * memory as 'pavail' does.
 */
static void setup_valid_addr_bitmap_from_pavail(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start + pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}

			prom_printf("mem_init: Lost memory in pavail\n");
			prom_printf("mem_init: OLD start[%lx] size[%lx]\n",
				    pavail[i].phys_addr,
				    pavail[i].reg_size);
			prom_printf("mem_init: NEW start[%lx] size[%lx]\n",
				    pavail_rescan[i].phys_addr,
				    pavail_rescan[i].reg_size);
			prom_printf("mem_init: Cannot continue, aborting.\n");
			prom_halt();

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	setup_valid_addr_bitmap_from_pavail();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i) {
		if (NODE_DATA(i)->node_spanned_pages != 0) {
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(i));
		}
	}
#else
	totalram_pages = free_all_bootmem();
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages -= 1;
	num_physpages = totalram_pages;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
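/* Sizing note (illustrative, not from the original source): the valid
 * address bitmap keeps one bit per 4MB (1 << 22) of physical address
 * space, so last_valid_pfn >> ((22 - PAGE_SHIFT) + 6) counts 64-bit
 * words of bitmap and (i << 3) converts that word count into the byte
 * size handed to alloc_bootmem() above.
 */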
void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif
2006 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2007 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2008 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2009 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2010 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2012 pgprot_t PAGE_KERNEL __read_mostly
;
2013 EXPORT_SYMBOL(PAGE_KERNEL
);
2015 pgprot_t PAGE_KERNEL_LOCKED __read_mostly
;
2016 pgprot_t PAGE_COPY __read_mostly
;
2018 pgprot_t PAGE_SHARED __read_mostly
;
2019 EXPORT_SYMBOL(PAGE_SHARED
);
2021 pgprot_t PAGE_EXEC __read_mostly
;
2022 unsigned long pg_iobits __read_mostly
;
2024 unsigned long _PAGE_IE __read_mostly
;
2025 EXPORT_SYMBOL(_PAGE_IE
);
2027 unsigned long _PAGE_E __read_mostly
;
2028 EXPORT_SYMBOL(_PAGE_E
);
2030 unsigned long _PAGE_CACHE __read_mostly
;
2031 EXPORT_SYMBOL(_PAGE_CACHE
);
#ifdef CONFIG_SPARSEMEM_VMEMMAP

#define VMEMMAP_CHUNK_SHIFT	22
#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)

#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
			  sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
unsigned long vmemmap_table[VMEMMAP_SIZE];
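/* Illustrative sketch (not part of the original file): each slot of
 * vmemmap_table covers one VMEMMAP_CHUNK (4MB) of the virtual memmap,
 * so for a virtual struct-page address "vaddr" the backing TTE lives
 * roughly at
 *
 *	vmemmap_table[(vaddr - VMEMMAP_BASE) >> VMEMMAP_CHUNK_SHIFT]
 *
 * which is the lookup vmemmap_populate() fills in below.
 */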
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long vstart = (unsigned long) start;
	unsigned long vend = (unsigned long) (start + nr);
	unsigned long phys_start = (vstart - VMEMMAP_BASE);
	unsigned long phys_end = (vend - VMEMMAP_BASE);
	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
	unsigned long end = VMEMMAP_ALIGN(phys_end);
	unsigned long pte_base;

	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
		    _PAGE_CP_4U | _PAGE_CV_4U |
		    _PAGE_P_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
			    _PAGE_CP_4V | _PAGE_CV_4V |
			    _PAGE_P_4V | _PAGE_W_4V);

	for (; addr < end; addr += VMEMMAP_CHUNK) {
		unsigned long *vmem_pp =
			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
		void *block;

		if (!(*vmem_pp & _PAGE_VALID)) {
			block = vmemmap_alloc_block(1UL << 22, node);
			if (!block)
				return -ENOMEM;

			*vmem_pp = pte_base | __pa(block);

			printk(KERN_INFO "[%p-%p] page_structs=%lu "
			       "node=%d entry=%lu/%lu\n", start, block, nr,
			       node,
			       addr >> VMEMMAP_CHUNK_SHIFT,
			       VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
		}
	}
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
,
2086 unsigned long page_shared
,
2087 unsigned long page_copy
,
2088 unsigned long page_readonly
,
2089 unsigned long page_exec_bit
)
2091 PAGE_COPY
= __pgprot(page_copy
);
2092 PAGE_SHARED
= __pgprot(page_shared
);
2094 protection_map
[0x0] = __pgprot(page_none
);
2095 protection_map
[0x1] = __pgprot(page_readonly
& ~page_exec_bit
);
2096 protection_map
[0x2] = __pgprot(page_copy
& ~page_exec_bit
);
2097 protection_map
[0x3] = __pgprot(page_copy
& ~page_exec_bit
);
2098 protection_map
[0x4] = __pgprot(page_readonly
);
2099 protection_map
[0x5] = __pgprot(page_readonly
);
2100 protection_map
[0x6] = __pgprot(page_copy
);
2101 protection_map
[0x7] = __pgprot(page_copy
);
2102 protection_map
[0x8] = __pgprot(page_none
);
2103 protection_map
[0x9] = __pgprot(page_readonly
& ~page_exec_bit
);
2104 protection_map
[0xa] = __pgprot(page_shared
& ~page_exec_bit
);
2105 protection_map
[0xb] = __pgprot(page_shared
& ~page_exec_bit
);
2106 protection_map
[0xc] = __pgprot(page_readonly
);
2107 protection_map
[0xd] = __pgprot(page_readonly
);
2108 protection_map
[0xe] = __pgprot(page_shared
);
2109 protection_map
[0xf] = __pgprot(page_shared
);
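/* Illustrative note (not from the original source): the protection_map
 * index is the low nibble of the vma protection flags, VM_READ = 0x1,
 * VM_WRITE = 0x2, VM_EXEC = 0x4 and VM_SHARED = 0x8.  Private writable
 * mappings (0x2, 0x3) therefore resolve to page_copy (copy-on-write)
 * while shared writable mappings (0xa, 0xb, 0xe, 0xf) resolve to
 * page_shared, and entries without VM_EXEC have page_exec_bit masked
 * off.
 */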
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			      _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			      _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		};
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		};
	}
}

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == hypervisor) {
		sun4v_mmu_demap_all();
	} else if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}