/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024
/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
unsigned int hugepte_shift;
#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
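/* A hugepd covers HUGEPD_SIZE bytes of address space: one hugepte table
 * holds PTRS_PER_HUGEPTE entries and therefore maps
 * HPAGE_SIZE << hugepte_shift bytes; hugepte_shift itself is fixed at
 * boot by set_huge_psize() below. */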
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;
#define hugepd_none(hpd)	((hpd).pd == 0)
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
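/* Note on the locking above: the hugepte table is allocated outside the
 * lock and installed under mm->page_table_lock; if another thread raced
 * us and already installed one, our freshly allocated table is simply
 * freed again. */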
/* Base page size affects how we walk hugetlb page tables */
#ifdef CONFIG_PPC_64K_PAGES
#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
#else
static inline
pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == PAGE_SHIFT_64K)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}

static inline
pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == PAGE_SHIFT_64K)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}
#endif
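/* In other words: for 16M pages on a 64K base page kernel the hugepd
 * lives at the PMD level, while on a 4K base page kernel it sits one
 * level higher at the PUD, which is why hpmd_offset()/hpmd_alloc() just
 * hand back the pud entry when HPAGE_SHIFT is not 64K. */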
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
	unsigned long number_of_pages)
{
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		addr += page_size;
		number_of_pages--;
	}
}
/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}
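/* The 16G regions recorded by add_gpage() are handed out LIFO here, one
 * per call, and become struct huge_bootmem_page entries for the generic
 * hugetlb code to turn into huge pages once the allocators are up. */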
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
		}
	}

	return NULL;
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);
	if (pu) {
		pm = hpmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
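/* The hugepte table is not freed immediately: pgtable_free_tlb() queues
 * it on the mmu_gather so it is only returned to the cache after the
 * TLB flush, preventing a concurrent hardware walk from seeing a
 * recycled table. */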
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (HPAGE_SHIFT == PAGE_SHIFT_64K) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
		} else {
			if (pud_none(*pud))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pud);
		}
#endif
	} while (pud++, addr = next, addr != end);

	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Comments below take from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below? no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
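	/* A quick worked example of the "- 1" trick: a ceiling of 0 means
	 * "top of the address space", and since the arithmetic is unsigned,
	 * ceiling - 1 becomes ~0UL, so the "end - 1 > ceiling - 1" test can
	 * never spuriously trim end against a zero ceiling. */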
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge). Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}
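/* pte_update() with a clear mask of ~0UL atomically zeroes the Linux
 * PTE and, via its final "huge" argument, arranges for any hash PTE to
 * be flushed using the huge page size rather than the base page size. */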
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (get_slice_psize(mm, address) != mmu_huge_psize)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}
int pud_huge(pud_t pud)
{
	return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mmu_huge_psize, 1, 0);
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
			__flush_dcache_icache(page_address(page+i));
		set_bit(PG_arch_1, &page->flags);
	}
	return rflags;
}
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;
	int ssize = user_segment_size(ea);

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);
	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;
	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
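	/* 0x2 sets the HPTE pp bits to the user read/write encoding;
	 * OR-ing in 1 when _PAGE_RW is clear gives 0x3, user read-only. */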
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot informations in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));
		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize, ssize);
		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");
		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
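		/* The chosen group slot (and the secondary-hash flag) is
		 * cached in the Linux PTE's software bits so the "case 2"
		 * path above can find and update the existing HPTE without
		 * searching the group. */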
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}
void set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
		mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
		(mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
		 mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
		 mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
		/* Return if huge page size is the same as the
		 * base page size. */
		if (mmu_psize_defs[psize].shift == PAGE_SHIFT)
			return;

		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
		mmu_huge_psize = psize;

		switch (HPAGE_SHIFT) {
		case PAGE_SHIFT_64K:
			/* We only allow 64k hpages with 4k base page,
			 * which was checked above, and always put them
			 * at the PMD */
			hugepte_shift = PMD_SHIFT;
			break;
		case PAGE_SHIFT_16M:
			/* 16M pages can be at two different levels
			 * of pagestables based on base page size */
			if (PAGE_SHIFT == PAGE_SHIFT_64K)
				hugepte_shift = PMD_SHIFT;
			else /* 4k base page */
				hugepte_shift = PUD_SHIFT;
			break;
		case PAGE_SHIFT_16G:
			/* 16G pages are always at PGD level */
			hugepte_shift = PGDIR_SHIFT;
			break;
		}
		hugepte_shift -= HPAGE_SHIFT;
	} else
		HPAGE_SHIFT = 0;
}
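/* After the adjustment above, hugepte_shift is the number of index bits
 * a hugepte table needs at its level: for example, 16M huge pages on a
 * 64K base page kernel give hugepte_shift = PMD_SHIFT - PAGE_SHIFT_16M,
 * so one hugepte table stands in for a PTE page and maps the full
 * 1 << PMD_SHIFT bytes covered by a pmd entry. */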
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize;
	int shift;

	size = memparse(str, &str);

	shift = __ffs(size);
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case PAGE_SHIFT_64K:
		mmu_psize = MMU_PAGE_64K;
		break;
#endif
	case PAGE_SHIFT_16M:
		mmu_psize = MMU_PAGE_16M;
		break;
	case PAGE_SHIFT_16G:
		mmu_psize = MMU_PAGE_16G;
		break;
	default:
		mmu_psize = -1;
	}

	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift) {
		set_huge_psize(mmu_psize);
		hugetlb_add_hstate(shift - PAGE_SHIFT);
	} else
		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
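/* Usage: passing e.g. hugepagesz=16M (or 64K/16G where supported) on
 * the kernel command line runs hugepage_setup_sz() above, which picks
 * the matching MMU page size and registers the hstate. */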
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       0, zero_ctor);
	if (! huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);