/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * htab_initialize is called with the MMU off (of course), but
 * the kernel has been copied down to zero so it can directly
 * reference global data.  At this point it is very difficult
 * to print debug info.
 */
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
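
/*
 * Hash table geometry and the page/segment sizes currently in use.  The
 * linear mapping, ordinary (user/vmalloc) mappings and ioremap space can
 * each use a different page size, and kernel vs. high user addresses can
 * use different segment sizes (256MB or 1TB).
 */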
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */
/* Default page-size arrays, used when none is provided by the
 * firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);

		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift, ssize);
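		/*
		 * The hash selects a PTE group (PTEG); hpteg below is the
		 * index of the first of the HPTES_PER_GROUP (8) slots in
		 * the primary group for this virtual address.
		 */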
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr,
					 tmp_mode, HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}
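
/*
 * htab_bolt_mapping() returns 0 on success or the negative error from
 * ppc_md.hpte_insert; the callers below (create_section_mapping() and
 * htab_initialize()) treat any failure as fatal via BUG_ON().
 */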
static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;

	for (; size >= 4; size -= 4, ++prop) {
		if (prop[0] == 0x28) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
	return 0;
}
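
/*
 * The "ibm,processor-segment-sizes" property lists each supported segment
 * size as log2 of the size in bytes, so 0x1c (28) is a 256MB segment and
 * 0x28 (40) is a 1TB segment, which is what the scan above looks for.
 */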
static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while (size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while (size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}

			/* Decode the base page shift into an MMU_PAGE_* index */
			switch (shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			}
			if (idx < 0)
				continue;

			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}
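
/*
 * Layout of "ibm,segment-page-sizes" as parsed above: each entry starts
 * with three cells (base page shift, SLB encoding, number of supported
 * actual page sizes) followed by that many (page shift, PTE encoding)
 * pairs; the pair whose shift matches the base shift supplies "penc".
 */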
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc == 0) {
		/*
		 * Not in the device-tree; fall back on the known size
		 * list for 16M capable GP & GR
		 */
		if (cpu_has_feature(CPU_FTR_16M_PAGE))
			memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
			       sizeof(mmu_psize_defaults_gp));
	}

#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
			mmu_io_psize = MMU_PAGE_64K;
		else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_huge_psize = MMU_PAGE_16M;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_huge_psize = MMU_PAGE_1M;

	/* Calculate HPAGE_SHIFT and sanity check it */
	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
	else
		HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}
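
/*
 * "ibm,pft-size" is a two-cell property; only the second cell is used
 * here and gives log2 of the hash table size in bytes, so
 * htab_get_table_size() below can simply return 1UL << ppc64_pft_size.
 */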
static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}
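
/*
 * Sizing example (illustrative): with 1GB of RAM, rnd_mem_size is 2^30,
 * so pteg_count = max(2^30 >> 13, 2^11) = 2^17 PTEGs.  Each PTEG is
 * 128 bytes (8 HPTEs of 16 bytes each), hence the table size is
 * 2^17 << 7 = 16MB, i.e. one PTEG for every two 4K pages of RAM.
 */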
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
				 _PAGE_ACCESSED | _PAGE_DIRTY |
				 _PAGE_COHERENT | PP_RWXX,
				 mmu_linear_psize, mmu_kernel_ssize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */
static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4 +
			   (unsigned long)insn_addr);
}
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;
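	/*
	 * pteg_count is a power of two (the table is a power-of-two number
	 * of 128-byte PTEGs), so pteg_count - 1 is the mask applied to
	 * hpt_hash() results when selecting a PTE group.
	 */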
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
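		/*
		 * The HTABSIZE field of SDR1 is log2(table size in bytes) - 18;
		 * since the table holds pteg_count 128-byte PTEGs, that equals
		 * __ilog2(pteg_count) - 11, ORed into the table's real address
		 * above.
		 */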
		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		mtspr(SPRN_SDR1, _SDR1);
	}
	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */
	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i = 0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);
#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							 __pa(base), mode_rw,
							 mmu_linear_psize,
							 mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase + 16 * MB,
							 base + size,
							 __pa(dart_table_end),
							 mode_rw,
							 mmu_linear_psize,
							 mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 mode_rw, mmu_linear_psize,
					 mmu_kernel_ssize));
	}
	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		__flush_dcache_icache(page_address(page));
		set_bit(PG_arch_1, &page->flags);
	}
	return pp;
}
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (mm->context.user_psize == MMU_PAGE_4K)
		return;
	slice_set_user_psize(mm, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
#endif /* CONFIG_PPC_64K_PAGES */
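
/*
 * hash_page() below is the main fault-time entry point: it works out the
 * segment (VSID, segment size) and page size for the faulting address,
 * finds the Linux PTE, pre-checks access permissions, and then calls
 * __hash_page_4K()/__hash_page_64K() to put a matching entry into the
 * hardware hash table.
 */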
/* Result code is:
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	pgd_t *pgdir;
	pte_t *ptep;
	cpumask_t tmp;
	struct mm_struct *mm;
	unsigned long vsid;
	int psize, ssize;
	int rc, user_region = 0, local = 0;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range.
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if (pte_val(*ptep) & _PAGE_4K_PFN) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca()->context.user_psize) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	pgd_t *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;
	int ssize;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);

	local_irq_restore(flags);
}
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016x)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}
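
/*
 * The hidx value recovered above encodes where the HPTE was placed when it
 * was inserted: _PTEIDX_SECONDARY means the secondary bucket was used (so
 * the hash is complemented), and _PTEIDX_GROUP_IX is the slot within that
 * group of HPTES_PER_GROUP entries.
 */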
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}
/*
 * low_hash_fault is called when the low-level hash code fails
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
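/*
 * CONFIG_DEBUG_PAGEALLOC support: kernel_map_pages() below removes (or
 * re-creates) the bolted hash entries for individual pages of the linear
 * mapping, so stray accesses to freed pages fault instead of silently
 * succeeding.  linear_map_hash_slots[] remembers which slot each page's
 * HPTE went into so it can be invalidated later.
 */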
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON(ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}
static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */