/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#define HPAGE_SHIFT_64K		16
#define HPAGE_SHIFT_16M		24

#define NUM_LOW_AREAS		(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS		(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
#define MAX_NUMBER_GPAGES	1024
/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready. */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
unsigned int hugepte_shift;
#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
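
/*
 * Note: a hugepd covers 1UL << (HPAGE_SHIFT + hugepte_shift) bytes of
 * address space, i.e. one hugepte table holding PTRS_PER_HUGEPTE pte_t
 * entries, one per huge page.  hugepte_shift is chosen in
 * set_huge_psize() below so that HUGEPD_SIZE lines up with exactly one
 * pmd or pud slot.
 */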
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;
#define hugepd_none(hpd)	((hpd).pd == 0)
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
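
/*
 * Illustrative example (assuming 16M huge pages and hugepte_shift == 6):
 * address bits [29:24] select one of the 64 hugepte entries in the table
 * that the hugepd points to.
 */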
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
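
/*
 * Note: two tasks can race to fill the same hugepd.  The re-check under
 * page_table_lock means the loser simply frees its freshly allocated
 * hugepte table and both end up using the winner's table.
 */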
/* Base page size affects how we walk hugetlb page tables */
#ifdef CONFIG_PPC_64K_PAGES
#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
#else
static inline
pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_offset(pud, addr);
	else
		return (pmd_t *) pud;
}

static inline
pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
{
	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
		return pmd_alloc(mm, pud, addr);
	else
		return (pmd_t *) pud;
}
#endif
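
/*
 * Note: with 64K base pages the hugepd always hangs off a pmd entry.
 * With 4K base pages, a 64K huge page also uses the pmd level, but a
 * 16M huge page skips it entirely and keeps its hugepd directly in the
 * pud entry, which is why these helpers may return the pud recast as a
 * pmd_t *.
 */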
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is setup.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
	       unsigned long number_of_pages)
{
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		addr += page_size;
		number_of_pages--;
	}
}
/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}
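
/*
 * Note: gigantic pages therefore flow from the device tree scan into
 * gpage_freearray via add_gpage(), and from there, one page per call,
 * onto the generic huge_boot_pages list.
 */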
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = hpmd_offset(pu, addr);
			if (pm)
				return hugepte_offset((hugepd_t *)pm, addr);
		}
	}

	return NULL;
}
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
		pm = hpmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
	}

	if (!hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	/* Huge pmd sharing is not supported here; nothing to unshare. */
	return 0;
}
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next,
					       floor, ceiling);
		} else {
			if (pud_none(*pud))
				continue;
			free_hugepte_range(tlb, (hugepd_t *)pud);
		}
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Comments below taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below? no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
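
/*
 * Illustrative example of the trimming above: if a neighbouring vma
 * below "floor" still maps the low part of a hugepd's range, rounding
 * addr down with HUGEPD_MASK makes addr < floor, so addr is bumped up
 * by HUGEPD_SIZE and that shared hugepte table survives; only tables
 * wholly contained in [floor, ceiling) are freed.
 */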
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices.
		 */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
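
/*
 * Note: pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1) clears every
 * pte bit; the trailing "1" is the "huge" argument, so the hash flush
 * logic is told to operate on the huge page size.
 */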
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (get_slice_psize(mm, address) != mmu_huge_psize)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}
int pmd_huge(pmd_t pmd)
{
	/* Huge pages live behind hugepd pointers, never in normal pmds */
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	/* Should never be reached, since pmd_huge() is always false here */
	BUG();
	return NULL;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mmu_huge_psize, 1, 0);
}
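
/*
 * Note: placement is delegated entirely to the slice layer, which only
 * hands back ranges whose slices are (or can be) marked with
 * mmu_huge_psize; this is the invariant that the
 * BUG_ON(get_slice_psize(...)) checks in this file rely on.
 */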
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			/* instruction fault: flush the huge page one base
			 * page at a time, then mark it clean */
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else
			/* data fault: leave it dirty, forbid execute */
			rflags |= HPTE_R_N;
	}
	return rflags;
}
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;
	int ssize = user_segment_size(ea);

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = hpt_va(ea, vsid, ssize);
	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));
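
	/*
	 * Note: _PAGE_BUSY acts as a per-pte lock here; the cmpxchg loop
	 * atomically sets BUSY and ACCESSED, and a fault that finds the
	 * pte already busy bails out and lets the fault be retried.
	 */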
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}
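
	/*
	 * Note: each hash bucket holds HPTES_PER_GROUP entries, with a
	 * secondary bucket at the complemented hash.  When both are full,
	 * a group is chosen (pseudo-randomly, from the timebase) and a
	 * victim evicted with hpte_remove() before the insert is retried
	 * from the "repeat" label.
	 */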
	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

out:
	return err;
}
void set_huge_psize(int psize)
{
	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable limits. */
	if (mmu_psize_defs[psize].shift &&
	    mmu_psize_defs[psize].shift < SID_SHIFT &&
	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
	     mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
		mmu_huge_psize = psize;
#ifdef CONFIG_PPC_64K_PAGES
		hugepte_shift = (PMD_SHIFT - HPAGE_SHIFT);
#else
		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
			hugepte_shift = (PMD_SHIFT - HPAGE_SHIFT);
		else
			hugepte_shift = (PUD_SHIFT - HPAGE_SHIFT);
#endif
	} else
		HPAGE_SHIFT = 0;
}
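
/*
 * Illustrative example (assuming 64K base pages, so PMD_SHIFT == 28):
 * 16M huge pages give hugepte_shift = 28 - 24 = 4, i.e. 16 hugepte
 * entries per table and HUGEPD_SIZE = 256M, exactly one pmd slot.
 */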
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;
	int mmu_psize;
	int shift;

	size = memparse(str, &str);

	/* match on the bit position of the (power of two) size */
	shift = __ffs(size);
	switch (shift) {
#ifndef CONFIG_PPC_64K_PAGES
	case HPAGE_SHIFT_64K:
		mmu_psize = MMU_PAGE_64K;
		break;
#endif
	case HPAGE_SHIFT_16M:
		mmu_psize = MMU_PAGE_16M;
		break;
	default:
		mmu_psize = -1;
		break;
	}

	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
		set_huge_psize(mmu_psize);
	else
		printk(KERN_WARNING "Invalid huge page size specified (%llu)\n",
		       size);

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
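
/*
 * Usage example: booting with "hugepagesz=16M" makes memparse() return
 * 0x1000000, whose lowest set bit is 24 == HPAGE_SHIFT_16M, so
 * MMU_PAGE_16M is handed to set_huge_psize().  Sizes that do not match
 * a supported shift fall through to the warning.
 */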
static void zero_ctor(struct kmem_cache *cache, void *addr)
{
	memset(addr, 0, kmem_cache_size(cache));
}
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       0,
					       zero_ctor);
	if (!huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);