/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
extern void device_scan(void);
#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
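/* Illustrative sketch, not part of the original file: how a physical
 * address selects a bit in kpte_linear_bitmap.  Bit N covers the 256MB
 * chunk starting at N * KPTE_BITMAP_CHUNK_SZ, so the index is simply
 * paddr >> 28.  The helper name is hypothetical.
 */
static inline int kpte_chunk_uses_256mb(unsigned long paddr)
{
	return test_bit(paddr >> 28, kpte_linear_bitmap);
}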
#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings. */
struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif
#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int pavail_rescan_ents __initdata;
static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}
static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int bigkernel = 0;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	24UL
#define PG_dcache_cpu_mask	(256UL - 1UL)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}
unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
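/* Hedged sketch, not in the original source: how the TSB slot and tag
 * above are formed.  With tsb_hash_shift == PAGE_SHIFT (13 for 8K
 * pages) and an N-entry TSB, a virtual address hashes to entry
 * ((address >> 13) & (N - 1)), and the tag is address >> 22.  The
 * helper name is hypothetical.
 */
static inline unsigned long tsb_slot_of(unsigned long address,
					unsigned long hash_shift,
					unsigned long nentries)
{
	return (address >> hash_shift) & (nentries - 1UL);
}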
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;

			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long i, flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n",
	       global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
	       global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n",
	       global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
	       global_page_state(NR_SLAB_RECLAIMABLE) +
	       global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
	       global_page_state(NR_PAGETABLE));
}
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;
/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}
static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}
/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];

		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	register unsigned long arg2 asm("%o2");
	register unsigned long arg3 asm("%o3");

	func = HV_FAST_MMU_MAP_PERM_ADDR;
	arg0 = vaddr;
	arg1 = 0;
	arg2 = pte;
	arg3 = mmu;
	__asm__ __volatile__("ta	0x80"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2),
			       "=&r" (arg3)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "3" (arg2), "4" (arg3));
	if (arg0 != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, arg0);
		prom_halt();
	}
}
static unsigned long kern_large_tte(unsigned long paddr);
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		if (bigkernel) {
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		}
	} else {
		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
		if (bigkernel) {
			tlb_ent -= 1;
			prom_dtlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
			prom_itlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}
static void __init inherit_prom_mappings(void)
{
	read_obp_translations();

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();
	prom_printf("done.\n");
}
void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}
#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */
/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}
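/* Illustrative helpers, not in the original file: the context value
 * allocated above is the OR of a version field and a context number;
 * CTX_VERSION_MASK and CTX_NR_MASK select the two halves.  These
 * hypothetical accessors just make the split explicit.
 */
static inline unsigned long ctx_version(unsigned long ctx_val)
{
	return ctx_val & CTX_VERSION_MASK;
}

static inline unsigned long ctx_number(unsigned long ctx_val)
{
	return ctx_val & CTX_NR_MASK;
}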
/* Find a free area for the bootmem map, avoiding the kernel image
 * and the initial ramdisk.
 */
static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
					       unsigned long end_pfn)
{
	unsigned long avoid_start, avoid_end, bootmap_size;
	int i;

	bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_size <<= PAGE_SHIFT;

	avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	avoid_start = initrd_start;
	avoid_end = PAGE_ALIGN(initrd_end);
#endif

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
		    kern_base, PAGE_ALIGN(kern_base + kern_size),
		    avoid_start, avoid_end);
#endif

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		while (start < end) {
			if (start >= kern_base &&
			    start < PAGE_ALIGN(kern_base + kern_size)) {
				start = PAGE_ALIGN(kern_base + kern_size);
				continue;
			}
			if (start >= avoid_start && start < avoid_end) {
				start = avoid_end;
				continue;
			}

			if ((end - start) < bootmap_size)
				break;

			if (start < kern_base &&
			    (start + bootmap_size) > kern_base) {
				start = PAGE_ALIGN(kern_base + kern_size);
				continue;
			}

			if (start < avoid_start &&
			    (start + bootmap_size) > avoid_start) {
				start = avoid_end;
				continue;
			}

			/* OK, it doesn't overlap anything, use it.  */
#ifdef CONFIG_DEBUG_BOOTMEM
			prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
				    start >> PAGE_SHIFT, start);
#endif
			return start >> PAGE_SHIFT;
		}
	}

	prom_printf("Cannot find free area for bootmap, aborting.\n");
	prom_halt();
}
static void __init trim_pavail(unsigned long *cur_size_p,
			       unsigned long *end_of_phys_p)
{
	unsigned long to_trim = *cur_size_p - cmdline_memory_size;
	unsigned long avoid_start, avoid_end;
	int i;

	to_trim = PAGE_ALIGN(to_trim);

	avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	avoid_start = initrd_start;
	avoid_end = PAGE_ALIGN(initrd_end);
#endif

	/* Trim some pavail[] entries in order to satisfy the
	 * requested "mem=xxx" kernel command line specification.
	 *
	 * We must not trim off the kernel image area nor the
	 * initial ramdisk range (if any).  Also, we must not trim
	 * any pavail[] entry down to zero in order to preserve
	 * the invariant that all pavail[] entries have a non-zero
	 * size which is assumed by all of the code in here.
	 */
	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end, kern_end;
		unsigned long trim_low, trim_high, n;

		kern_end = PAGE_ALIGN(kern_base + kern_size);

		trim_low = start = pavail[i].phys_addr;
		trim_high = end = start + pavail[i].reg_size;

		if (kern_base >= start &&
		    kern_base < end) {
			trim_low = kern_base;
			if (kern_end >= end)
				continue;
		}
		if (kern_end >= start &&
		    kern_end < end) {
			trim_high = kern_end;
		}
		if (avoid_start &&
		    avoid_start >= start &&
		    avoid_start < end) {
			if (trim_low > avoid_start)
				trim_low = avoid_start;
			if (avoid_end >= end)
				continue;
		}
		if (avoid_end &&
		    avoid_end >= start &&
		    avoid_end < end) {
			if (trim_high < avoid_end)
				trim_high = avoid_end;
		}

		if (trim_high <= trim_low)
			continue;

		if (trim_low == start && trim_high == end) {
			/* Whole chunk is available for trimming.
			 * Trim all except one page, in order to keep
			 * the entry.
			 */
			n = (end - start) - PAGE_SIZE;
			if (n > to_trim)
				n = to_trim;

			if (n) {
				pavail[i].phys_addr += n;
				pavail[i].reg_size -= n;
				to_trim -= n;
			}
		} else {
			n = (trim_low - start);
			if (n > to_trim)
				n = to_trim;

			if (n) {
				pavail[i].phys_addr += n;
				pavail[i].reg_size -= n;
				to_trim -= n;
			}
			if (to_trim) {
				n = end - trim_high;
				if (n > to_trim)
					n = to_trim;
				if (n) {
					pavail[i].reg_size -= n;
					to_trim -= n;
				}
			}
		}

		if (!to_trim)
			break;
	}

	/* Recalculate.  */
	*cur_size_p = 0UL;
	for (i = 0; i < pavail_ents; i++) {
		*end_of_phys_p = pavail[i].phys_addr +
			pavail[i].reg_size;
		*cur_size_p += pavail[i].reg_size;
	}
}
/* About pages_avail, this is the value we will use to calculate
 * the zholes_size[] argument given to free_area_init_node().  The
 * page allocator uses this to calculate nr_kernel_pages,
 * nr_all_pages and zone->present_pages.  On NUMA it is used
 * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
 *
 * So this number should really be set to what the page allocator
 * actually ends up with.  This means:
 * 1) It should include bootmem map pages, we'll release those.
 * 2) It should not include the kernel image, except for the
 *    __init sections which we will also release.
 * 3) It should include the initrd image, since we'll release
 *    that too.
 */
static unsigned long __init bootmem_init(unsigned long *pages_avail,
					 unsigned long phys_base)
{
	unsigned long bootmap_size, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan pavail, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; i < pavail_ents; i++) {
		end_of_phys_memory = pavail[i].phys_addr +
			pavail[i].reg_size;
		bytes_avail += pavail[i].reg_size;
	}

	/* Determine the location of the initial ramdisk before trying
	 * to honor the "mem=xxx" command line argument.  We must know
	 * where the kernel image and the ramdisk image are so that we
	 * do not trim those two areas from the physical memory map.
	 */

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
			initrd_end = 0;
		}
	}
#endif

	if (cmdline_memory_size &&
	    bytes_avail > cmdline_memory_size)
		trim_pavail(&bytes_avail,
			    &end_of_phys_memory);

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
					 min_low_pfn, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
			    i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Add back in the initmem pages. */
	size = ((unsigned long)(__init_end) & PAGE_MASK) -
		PAGE_ALIGN((unsigned long)__init_begin);
	*pages_avail += size >> PAGE_SHIFT;

	/* Reserve the bootmem map.   We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start_pfn, end_pfn;

		start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
		end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("memory_present(0, %lx, %lx)\n",
			    start_pfn, end_pfn);
#endif
		memory_present(0, start_pfn, end_pfn);
	}

	sparse_init();

	return end_pfn;
}
static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}
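/* Worked example, not part of the original file: a bank spanning
 * [0x80000000, 0xa0000000) is 512MB and 256MB-aligned, so
 *
 *	mark_kpte_bitmap(0x80000000UL, 0xa0000000UL);
 *
 * sets bits 2 and 3 (0x80000000 >> 28 == 2), after which kernel TLB
 * misses in that range are served with 256MB TTEs via
 * kern_linear_pte_xor[1].
 */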
static void __init kernel_physical_mapping_init(void)
{
	unsigned long i;
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long mem_alloced = 0UL;
#endif

	read_obp_memory("reg", &pall[0], &pall_ents);

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);

#ifdef CONFIG_DEBUG_PAGEALLOC
		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
#endif
	}

#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif
unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}
/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings. */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}
void __cpuinit sun4v_ktsb_register(void)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	unsigned long pa;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	func = HV_FAST_MMU_TSB_CTX0;
	arg0 = NUM_KTSB_DESCR;
	arg1 = pa;
	__asm__ __volatile__("ta	%6"
			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "i" (HV_FAST_TRAP));
}
/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift, phys_base;
	unsigned long real_end, i;

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory... */
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++)
		phys_base = min(phys_base, pavail[i].phys_addr);

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);

	max_mapnr = last_valid_pfn;

	kernel_physical_mapping_init();

	prom_build_devicetree();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		zones_size[ZONE_NORMAL] = end_pfn;
		zholes_size[ZONE_NORMAL] = end_pfn - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
				    zholes_size);
	}

	device_scan();
}
static void __init taint_real_pages(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	/* Find changes discovered in the physmem available rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start +
			pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_rescan_ents; i++) {
		unsigned long start, end;

		start = pavail_rescan[i].phys_addr;
		end = start + pavail_rescan[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
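/* Hedged sketch, not in the original file: the bitmap built in
 * mem_init() has one bit per 4MB (1 << 22 bytes) of physical memory,
 * so a kern_addr_valid()-style check reduces to something like this
 * hypothetical helper.
 */
static inline int valid_phys_4mb_chunk(unsigned long paddr)
{
	return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
}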
void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif
#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
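/* Illustrative sketch, not in the original: protection_map is indexed
 * by the low four vm_flags bits (VM_READ=1, VM_WRITE=2, VM_EXEC=4,
 * VM_SHARED=8), so index 0xb (shared+write+read) above yields
 * page_shared with the exec bit cleared.  The helper is hypothetical.
 */
static inline pgprot_t prot_for_vm_flags(unsigned long vm_flags)
{
	return protection_map[vm_flags & 0xf];
}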
static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000UL;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000UL;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}
unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}
pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}

#endif /* CONFIG_MEMORY_HOTPLUG */