/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

/* HugePTE layout:
 *
 * 31 30 ... 15 14 13 12 11 10  9  8  7   6   5    4    3   2  1   0
 * PFN>>12.....  -  -  -  -  -  -  - HASH_IX....  2ND  HASH  RW -  HG=1
 */

#define HUGEPTE_SHIFT		15
#define _HUGEPAGE_PFN		0xffff8000
#define _HUGEPAGE_BAD		0x00007f00
#define _HUGEPAGE_HASHPTE	0x00000008
#define _HUGEPAGE_SECONDARY	0x00000010
#define _HUGEPAGE_GROUP_IX	0x000000e0
#define _HUGEPAGE_HPTEFLAGS	(_HUGEPAGE_HASHPTE | _HUGEPAGE_SECONDARY | \
				 _HUGEPAGE_GROUP_IX)
#define _HUGEPAGE_RW		0x00000004

typedef struct { unsigned int val; } hugepte_t;
#define hugepte_val(hugepte)	((hugepte).val)
#define __hugepte(x)		((hugepte_t) { (x) })
#define hugepte_pfn(x)		\
	((unsigned long)(hugepte_val(x) >> HUGEPTE_SHIFT) << HUGETLB_PAGE_ORDER)
#define mk_hugepte(page,wr)	__hugepte( \
	((page_to_pfn(page) >> HUGETLB_PAGE_ORDER) << HUGEPTE_SHIFT) \
	| (!!(wr) * _HUGEPAGE_RW) | _PMD_HUGEPAGE )

#define hugepte_bad(x)	( !(hugepte_val(x) & _PMD_HUGEPAGE) || \
			  (hugepte_val(x) & _HUGEPAGE_BAD) )
#define hugepte_page(x)	pfn_to_page(hugepte_pfn(x))
#define hugepte_none(x)	(!(hugepte_val(x) & _HUGEPAGE_PFN))

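/* Worked example (illustrative, not from the original source): assuming
 * 4k base pages and 16M huge pages, HUGETLB_PAGE_ORDER == 12, so for a
 * huge page at pfn 0x5000:
 *
 *	mk_hugepte(page, 1)  stores (0x5000 >> 12) << 15 == 0x28000 in the
 *	                     PFN field, plus _HUGEPAGE_RW and _PMD_HUGEPAGE
 *	hugepte_pfn(pte)     recovers (0x28000 >> 15) << 12 == 0x5000
 */
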
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
				hugepte_t pte, int local);

static inline unsigned int hugepte_update(hugepte_t *p, unsigned int clr,
					  unsigned int set)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# pte_update\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n\
	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc");

	return old;
}

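/* For reference, the lwarx/stwcx. sequence above behaves like this C
 * sketch, retried until the conditional store succeeds (illustrative
 * only; the reservation instructions are what provide the atomicity):
 *
 *	do {
 *		old = *p;
 *		tmp = (old & ~clr) | set;
 *	} while (the conditional store of tmp into *p fails);
 *	return old;
 */
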
static inline void set_hugepte(hugepte_t *ptep, hugepte_t pte)
{
	hugepte_update(ptep, ~_HUGEPAGE_HPTEFLAGS,
		       hugepte_val(pte) & ~_HUGEPAGE_HPTEFLAGS);
}

static hugepte_t *hugepte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	pmd = pmd_alloc(mm, pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}

static hugepte_t *hugepte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}

static void setup_huge_pte(struct mm_struct *mm, struct page *page,
			   hugepte_t *ptep, int write_access)
{
	hugepte_t entry;
	int i;

	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
	entry = mk_hugepte(page, write_access);
	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		set_hugepte(ptep+i, entry);
}

static void teardown_huge_pte(hugepte_t *ptep)
{
	int i;

	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		pmd_clear((pmd_t *)(ptep+i));
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (! (within_hugepage_low_range(addr, len)
	       || within_hugepage_high_range(addr, len)))
		return -EINVAL;

	return 0;
}

static void flush_segments(void *parm)
{
	u16 segs = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");

	for (i = 0; i < 16; i++) {
		if (! (segs & (1U << i)))
			continue;
		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
	}

	asm volatile("isync" : : : "memory");
}

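/* Example (illustrative): with 256M segments (SID_SHIFT == 28), parm ==
 * 0x0005 invalidates the SLB entries for segments 0 and 2, i.e. the
 * effective address ranges starting at 0x00000000 and 0x20000000. */
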
static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
	unsigned long start = seg << SID_SHIFT;
	unsigned long end = (seg+1) << SID_SHIFT;
	struct vm_area_struct *vma;
	unsigned long addr;
	struct mmu_gather *tlb;

	BUG_ON(seg >= 16);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	/* Clean up any leftover PTE pages in the region */
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pgd_t *pgd = pgd_offset(mm, addr);
		pmd_t *pmd;
		struct page *page;
		pte_t *pte;
		int i;

		if (pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, addr);
		if (!pmd || pmd_none(*pmd))
			continue;

		pte = (pte_t *)pmd_page_kernel(*pmd);
		/* No VMAs, so there should be no PTEs, check just in case. */
		for (i = 0; i < PTRS_PER_PTE; i++) {
			BUG_ON(!pte_none(*pte));
			pte++;
		}
		page = pmd_page(*pmd);
		pmd_clear(pmd);
		dec_page_state(nr_page_table_pages);
		pte_free_tlb(tlb, page);
	}
	tlb_finish_mmu(tlb, start, end);
	spin_unlock(&mm->page_table_lock);

	return 0;
}

static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
{
	unsigned long i;

	newsegs &= ~(mm->context.htlb_segs);
	if (! newsegs)
		return 0; /* The segments we want are already open */

	for (i = 0; i < 16; i++)
		if ((1 << i) & newsegs)
			if (prepare_low_seg_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.htlb_segs |= newsegs;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);

	return 0;
}

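/* Example (illustrative): a request for segments 4 and 5 passes
 * newsegs == 0x0030; if segment 4 is already recorded in
 * mm->context.htlb_segs, the mask above reduces newsegs to 0x0020 and
 * only segment 5 is prepared and flushed. */
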
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (within_hugepage_high_range(addr, len))
		return 0;
	else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) {
		int err;
		/* Yes, we need both tests, in case addr+len overflows
		 * 64-bit arithmetic */
		err = open_low_hpage_segs(current->mm,
					  LOW_ESID_MASK(addr, len));
		if (err)
			printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
			       " failed (segs: 0x%04hx)\n", addr, len,
			       LOW_ESID_MASK(addr, len));
		return err;
	}

	return -EINVAL;
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	hugepte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int err = -ENOMEM;

	while (addr < end) {
		BUG_ON(! in_hugepage_area(src->context, addr));
		BUG_ON(! in_hugepage_area(dst->context, addr));

		dst_pte = hugepte_alloc(dst, addr);
		if (!dst_pte)
			goto out;

		src_pte = hugepte_offset(src, addr);
		entry = *src_pte;

		if ((addr % HPAGE_SIZE) == 0) {
			/* This is the first hugepte in a batch */
			ptepage = hugepte_page(entry);
			get_page(ptepage);
			dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		}
		set_hugepte(dst_pte, entry);

		addr += PMD_SIZE;
	}

	err = 0;
 out:
	return err;
}

int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {
		BUG_ON(!in_hugepage_area(mm->context, vaddr));

		if (pages) {
			hugepte_t *pte;
			struct page *page;

			pte = hugepte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || hugepte_none(*pte));

			page = &hugepte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}

	*position = vaddr;
	*length = remainder;

	return i;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return pmd_hugepage(pmd);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	BUG_ON(! pmd_hugepage(*pmd));

	page = hugepte_page(*(hugepte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);

	return page;
}

void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	hugepte_t *ptep;
	struct page *page;
	int cpu;
	int local = 0;
	cpumask_t tmp;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((start % HPAGE_SIZE) != 0);
	BUG_ON((end % HPAGE_SIZE) != 0);

	/* XXX are there races with checking cpu_vm_mask? - Anton */
	cpu = get_cpu();
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		hugepte_t pte;

		BUG_ON(!in_hugepage_area(mm->context, addr));

		ptep = hugepte_offset(mm, addr);
		if (!ptep || hugepte_none(*ptep))
			continue;

		pte = *ptep;
		page = hugepte_page(pte);
		teardown_huge_pte(ptep);

		if (hugepte_val(pte) & _HUGEPAGE_HASHPTE)
			flush_hash_hugepage(mm->context, addr,
					    pte, local);

		put_page(page);
	}
	put_cpu();

	mm->rss -= (end - start) >> PAGE_SHIFT;
}

int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((vma->vm_start % HPAGE_SIZE) != 0);
	BUG_ON((vma->vm_end % HPAGE_SIZE) != 0);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		hugepte_t *pte = hugepte_alloc(mm, addr);
		struct page *page;

		BUG_ON(!in_hugepage_area(mm->context, addr));

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!hugepte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (! ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		setup_huge_pte(mm, page, pte, vma->vm_flags & VM_WRITE);
	}
 out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(addr,len))
			return addr;
	}
	start_addr = addr = mm->free_area_cache;

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(addr, len)) {
			addr = TASK_HPAGE_END;
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		goto full_search;
	}
	return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* dont allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(addr,len))
			return addr;
	}

try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or cant fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(addr, len)) {
			addr = TASK_HPAGE_BASE - len;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end)))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* pull free_area_cache down to the first hole */
		if (mm->free_area_cache == vma->vm_end)
			mm->free_area_cache = vma->vm_start;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		first_time = 0;
		goto try_again;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;

	return addr;
}

static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			/* This address won't do, move up to the next
			 * usable low segment */
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len)
{
	unsigned long addr = TASK_HPAGE_BASE;
	struct vm_area_struct *vma;

	for (vma = find_vma(current->mm, addr);
	     addr + len <= TASK_HPAGE_END;
	     vma = vma->vm_next) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
		BUG_ON(! within_hugepage_high_range(addr, len));

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Because we're in a hugepage region, this alignment
		 * should not skip us over any VMAs */
	}

	return -ENOMEM;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE))
		return -EINVAL;

	if (test_thread_flag(TIF_32BIT)) {
		int lastshift = 0;
		u16 segmask, cursegs = current->mm->context.htlb_segs;

		/* First see if we can do the mapping in the existing
		 * low hpage segments */
		addr = htlb_get_low_area(len, cursegs);
		if (addr != -ENOMEM)
			return addr;

		for (segmask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; segmask >>= 1) {
			if (segmask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, cursegs | segmask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_segs(current->mm, segmask) == 0)
				return addr;
		}
		printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
		       " enough segments\n");
		return -ENOMEM;
	} else {
		return htlb_get_high_area(len);
	}
}

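/* Example (illustrative): a 32-bit task mapping len == 0x30000000 starts
 * the loop at segmask == LOW_ESID_MASK(0x100000000UL - len, len) ==
 * 0xe000 (segments 13-15), sliding the candidate mask one segment lower
 * per iteration until its low bit is reached, and returns the first
 * address whose extra segments can actually be opened. */
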
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local)
{
	hugepte_t *ptep;
	unsigned long va, vpn;
	int is_write;
	hugepte_t old_pte, new_pte;
	unsigned long hpteflags, prpn, flags;
	long slot;

	/* We have to find the first hugepte in the batch, since
	 * that's the one that will store the HPTE flags */
	ea &= HPAGE_MASK;
	ptep = hugepte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HPAGE_SHIFT;

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || hugepte_none(*ptep)))
		return 1;

	BUG_ON(hugepte_bad(*ptep));

	/*
	 * Check the user's access rights to the page. If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	is_write = access & _PAGE_RW;
	if (unlikely(is_write && !(hugepte_val(*ptep) & _HUGEPAGE_RW)))
		return 1;

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *    the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *    current values of the pp bits in the HPTE prevent access
	 *    because we are doing software DIRTY bit management and the
	 *    page is currently not DIRTY.
	 */

	spin_lock_irqsave(&mm->page_table_lock, flags);

	old_pte = *ptep;
	new_pte = old_pte;

	hpteflags = 0x2 | (! (hugepte_val(new_pte) & _HUGEPAGE_RW));

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, 1);
		if (hugepte_val(old_pte) & _HUGEPAGE_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		slot += (hugepte_val(old_pte) & _HUGEPAGE_GROUP_IX) >> 5;

		if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
			hugepte_val(old_pte) &= ~_HUGEPAGE_HPTEFLAGS;
	}

	if (likely(!(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, 1);
		unsigned long hpte_group;

		prpn = hugepte_pfn(old_pte);

repeat:
		hpte_group = ((hash & htab_data.htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* Update the linux pte with the HPTE slot */
		hugepte_val(new_pte) &= ~_HUGEPAGE_HPTEFLAGS;
		hugepte_val(new_pte) |= _HUGEPAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		hpteflags |= _PAGE_COHERENT;

		slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
					  hpteflags, 0, 1);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hugepte_val(new_pte) |= _HUGEPAGE_SECONDARY;
			hpte_group = ((~hash & htab_data.htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, prpn,
						  1, hpteflags, 0, 1);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		hugepte_val(new_pte) |= (slot<<5) & _HUGEPAGE_GROUP_IX;

		/*
		 * No need to use ldarx/stdcx here because all who
		 * might be updating the pte will hold the
		 * page_table_lock or the hash_table_lock
		 */
		*ptep = new_pte;
	}

	spin_unlock_irqrestore(&mm->page_table_lock, flags);

	return 0;
}

static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
				hugepte_t pte, int local)
{
	unsigned long vsid, vpn, va, hash, slot;

	BUG_ON(hugepte_bad(pte));
	BUG_ON(!in_hugepage_area(context, ea));

	vsid = get_vsid(context.id, ea);

	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HPAGE_SHIFT;
	hash = hpt_hash(vpn, 1);
	if (hugepte_val(pte) & _HUGEPAGE_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
	slot += (hugepte_val(pte) & _HUGEPAGE_GROUP_IX) >> 5;

	ppc_md.hpte_invalidate(slot, va, 1, local);
}