arch/ppc64/mm/hugetlbpage.c (linux-2.6.9-moxart.git, v2.6.9)
/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/tlb.h>

#include <linux/sysctl.h>

/* HugePTE layout:
 *
 * 31 30 ... 15 14 13 12 11 10 9 8  7 6 5       4    3   2  1  0
 * PFN>>12......  - - - - - - -     HASH_IX.... 2ND HASH RW -  HG=1
 */

#define HUGEPTE_SHIFT		15
#define _HUGEPAGE_PFN		0xffff8000
#define _HUGEPAGE_BAD		0x00007f00
#define _HUGEPAGE_HASHPTE	0x00000008
#define _HUGEPAGE_SECONDARY	0x00000010
#define _HUGEPAGE_GROUP_IX	0x000000e0
#define _HUGEPAGE_HPTEFLAGS	(_HUGEPAGE_HASHPTE | _HUGEPAGE_SECONDARY | \
				 _HUGEPAGE_GROUP_IX)
#define _HUGEPAGE_RW		0x00000004

typedef struct {unsigned int val;} hugepte_t;
#define hugepte_val(hugepte)	((hugepte).val)
#define __hugepte(x)		((hugepte_t) { (x) } )
#define hugepte_pfn(x)		\
	((unsigned long)(hugepte_val(x)>>HUGEPTE_SHIFT) << HUGETLB_PAGE_ORDER)
#define mk_hugepte(page,wr)	__hugepte( \
	((page_to_pfn(page)>>HUGETLB_PAGE_ORDER) << HUGEPTE_SHIFT ) \
	| (!!(wr) * _HUGEPAGE_RW) | _PMD_HUGEPAGE )

#define hugepte_bad(x)	( !(hugepte_val(x) & _PMD_HUGEPAGE) || \
			  (hugepte_val(x) & _HUGEPAGE_BAD) )
#define hugepte_page(x)	pfn_to_page(hugepte_pfn(x))
#define hugepte_none(x)	(!(hugepte_val(x) & _HUGEPAGE_PFN))

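/*
 * Each huge page is mapped by a batch of HUGEPTE_BATCH_SIZE identical
 * hugeptes occupying consecutive pmd slots (see setup_huge_pte() below).
 * Only the first hugepte of a batch carries the HPTE bookkeeping flags;
 * hash_huge_page() masks the address with HPAGE_MASK to find it.
 *
 * Illustrative example of the layout above (assuming 4k base pages and 16M
 * huge pages, i.e. HUGETLB_PAGE_ORDER == 12): mk_hugepte() for a page at
 * pfn 0x123000 with write access would give
 *	((0x123000 >> 12) << 15) | _HUGEPAGE_RW | _PMD_HUGEPAGE == 0x00918005
 */
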
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
				hugepte_t pte, int local);

static inline unsigned int hugepte_update(hugepte_t *p, unsigned int clr,
					  unsigned int set)
{
	unsigned int old, tmp;

	__asm__ __volatile__(
	"1:	lwarx	%0,0,%3		# pte_update\n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%5 \n\
	stwcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}

static inline void set_hugepte(hugepte_t *ptep, hugepte_t pte)
{
	hugepte_update(ptep, ~_HUGEPAGE_HPTEFLAGS,
		       hugepte_val(pte) & ~_HUGEPAGE_HPTEFLAGS);
}

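/*
 * Hugepages live at the pmd level of the page tables: the pmd entry itself
 * is (re)used as a hugepte_t rather than pointing at a PTE page.
 * hugepte_alloc() allocates the intermediate levels if necessary, while
 * hugepte_offset() only looks an existing entry up.
 */
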
static hugepte_t *hugepte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	pmd = pmd_alloc(mm, pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}

static hugepte_t *hugepte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pmd_t *pmd = NULL;

	BUG_ON(!in_hugepage_area(mm->context, addr));

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, addr);

	/* We shouldn't find a (normal) PTE page pointer here */
	BUG_ON(!pmd_none(*pmd) && !pmd_hugepage(*pmd));

	return (hugepte_t *)pmd;
}

static void setup_huge_pte(struct mm_struct *mm, struct page *page,
			   hugepte_t *ptep, int write_access)
{
	hugepte_t entry;
	int i;

	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
	entry = mk_hugepte(page, write_access);
	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		set_hugepte(ptep+i, entry);
}

static void teardown_huge_pte(hugepte_t *ptep)
{
	int i;

	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++)
		pmd_clear((pmd_t *)(ptep+i));
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (! (within_hugepage_low_range(addr, len)
	       || within_hugepage_high_range(addr, len)) )
		return -EINVAL;

	return 0;
}

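/*
 * Run via on_each_cpu() after low segments have been converted to hugepage
 * segments: invalidate the SLB entries for the segments in 'parm' so that
 * further SLB misses pick up the new segment state.
 */
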
static void flush_segments(void *parm)
{
	u16 segs = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");

	for (i = 0; i < 16; i++) {
		if (! (segs & (1U << i)))
			continue;
		asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
	}

	asm volatile("isync" : : : "memory");
}

static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
{
	unsigned long start = seg << SID_SHIFT;
	unsigned long end = (seg+1) << SID_SHIFT;
	struct vm_area_struct *vma;
	unsigned long addr;
	struct mmu_gather *tlb;

	BUG_ON(seg >= 16);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	/* Clean up any leftover PTE pages in the region */
	spin_lock(&mm->page_table_lock);
	tlb = tlb_gather_mmu(mm, 0);
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pgd_t *pgd = pgd_offset(mm, addr);
		pmd_t *pmd;
		struct page *page;
		pte_t *pte;
		int i;

		if (pgd_none(*pgd))
			continue;
		pmd = pmd_offset(pgd, addr);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (pmd_bad(*pmd)) {
			pmd_ERROR(*pmd);
			pmd_clear(pmd);
			continue;
		}
		pte = (pte_t *)pmd_page_kernel(*pmd);
		/* No VMAs, so there should be no PTEs, check just in case. */
		for (i = 0; i < PTRS_PER_PTE; i++) {
			BUG_ON(!pte_none(*pte));
			pte++;
		}
		page = pmd_page(*pmd);
		pmd_clear(pmd);
		dec_page_state(nr_page_table_pages);
		pte_free_tlb(tlb, page);
	}
	tlb_finish_mmu(tlb, start, end);
	spin_unlock(&mm->page_table_lock);

	return 0;
}

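/*
 * Mark the low (below 4GB) segments in 'newsegs' as hugepage segments in
 * the mm context, after checking that each one is empty (no VMAs, no
 * leftover PTE pages), then flush the stale SLB entries on all CPUs.
 */
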
static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
{
	unsigned long i;

	newsegs &= ~(mm->context.htlb_segs);
	if (! newsegs)
		return 0; /* The segments we want are already open */

	for (i = 0; i < 16; i++)
		if ((1 << i) & newsegs)
			if (prepare_low_seg_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.htlb_segs |= newsegs;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);

	return 0;
}

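/*
 * Check/prepare an address range for use by a hugepage mapping: the high
 * hugepage region is always acceptable, while a range below 4GB requires
 * the corresponding low segments to be opened first.
 */
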
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (within_hugepage_high_range(addr, len))
		return 0;
	else if ((addr < 0x100000000) && ((addr+len) < 0x100000000)) {
		int err;
		/* Yes, we need both tests, in case addr+len overflows
		 * 64-bit arithmetic */
		err = open_low_hpage_segs(current->mm,
					  LOW_ESID_MASK(addr, len));
		if (err)
			printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
			       " failed (segs: 0x%04hx)\n", addr, len,
			       LOW_ESID_MASK(addr, len));
		return err;
	}

	return -EINVAL;
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	hugepte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		BUG_ON(! in_hugepage_area(src->context, addr));
		BUG_ON(! in_hugepage_area(dst->context, addr));

		dst_pte = hugepte_alloc(dst, addr);
		if (!dst_pte)
			return -ENOMEM;

		src_pte = hugepte_offset(src, addr);
		entry = *src_pte;

		if ((addr % HPAGE_SIZE) == 0) {
			/* This is the first hugepte in a batch */
			ptepage = hugepte_page(entry);
			get_page(ptepage);
			dst->rss += (HPAGE_SIZE / PAGE_SIZE);
		}
		set_hugepte(dst_pte, entry);

		addr += PMD_SIZE;
	}

	return 0;
}

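/*
 * get_user_pages() back-end for hugepage areas: walk the range one normal
 * page at a time and hand back the constituent subpages of each huge page.
 */
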
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	WARN_ON(!is_vm_hugetlb_page(vma));

	vpfn = vaddr/PAGE_SIZE;
	while (vaddr < vma->vm_end && remainder) {
		BUG_ON(!in_hugepage_area(mm->context, vaddr));

		if (pages) {
			hugepte_t *pte;
			struct page *page;

			pte = hugepte_offset(mm, vaddr);

			/* hugetlb should be locked, and hence, prefaulted */
			WARN_ON(!pte || hugepte_none(*pte));

			page = &hugepte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

			WARN_ON(!PageCompound(page));

			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}

	*length = remainder;
	*position = vaddr;

	return i;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return pmd_hugepage(pmd);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	BUG_ON(! pmd_hugepage(*pmd));

	page = hugepte_page(*(hugepte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);

	return page;
}

void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	hugepte_t *ptep;
	struct page *page;
	int cpu;
	int local = 0;
	cpumask_t tmp;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((start % HPAGE_SIZE) != 0);
	BUG_ON((end % HPAGE_SIZE) != 0);

	/* XXX are there races with checking cpu_vm_mask? - Anton */
	cpu = get_cpu();
	tmp = cpumask_of_cpu(cpu);
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		hugepte_t pte;

		BUG_ON(!in_hugepage_area(mm->context, addr));

		ptep = hugepte_offset(mm, addr);
		if (!ptep || hugepte_none(*ptep))
			continue;

		pte = *ptep;
		page = hugepte_page(pte);
		teardown_huge_pte(ptep);

		if (hugepte_val(pte) & _HUGEPAGE_HASHPTE)
			flush_hash_hugepage(mm->context, addr,
					    pte, local);

		put_page(page);
	}
	put_cpu();

	mm->rss -= (end - start) >> PAGE_SHIFT;
}

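/*
 * Pre-fault an entire hugetlb VMA: for each huge page in the range, find
 * (or allocate and add to the page cache) the backing page and install the
 * hugepte batch, charging the hugetlbfs quota along the way.
 */
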
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON((vma->vm_start % HPAGE_SIZE) != 0);
	BUG_ON((vma->vm_end % HPAGE_SIZE) != 0);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		hugepte_t *pte = hugepte_alloc(mm, addr);
		struct page *page;

		BUG_ON(!in_hugepage_area(mm->context, addr));

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!hugepte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (! ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				free_huge_page(page);
				goto out;
			}
		}
		setup_huge_pte(mm, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(addr,len))
			return addr;
	}
	start_addr = addr = mm->free_area_cache;

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(addr, len)) {
			addr = TASK_HPAGE_END;
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		goto full_search;
	}
	return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* dont allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(addr,len))
			return addr;
	}

try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or cant fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(addr, len)) {
			addr = TASK_HPAGE_BASE - len;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end)))
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		else
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end)
				mm->free_area_cache = vma->vm_start;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;

	return addr;
}

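/*
 * Find a free, suitably aligned range for a hugepage mapping below 4GB,
 * restricted to the low segments selected by 'segmask'.
 */
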
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len)
{
	unsigned long addr = TASK_HPAGE_BASE;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	for (vma = find_vma(current->mm, addr);
	     addr + len <= TASK_HPAGE_END;
	     vma = vma->vm_next) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
		BUG_ON(! within_hugepage_high_range(addr, len));

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Because we're in a hugepage region, this alignment
		 * should not skip us over any VMAs */
	}

	return -ENOMEM;
}

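/*
 * Top-level get_unmapped_area() for hugepage mappings: 32-bit tasks
 * allocate from the low segments (opening more segments if the currently
 * open ones are full), 64-bit tasks allocate from the high hugepage range.
 */
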
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE))
		return -EINVAL;

	if (test_thread_flag(TIF_32BIT)) {
		int lastshift = 0;
		u16 segmask, cursegs = current->mm->context.htlb_segs;

		/* First see if we can do the mapping in the existing
		 * low hpage segments */
		addr = htlb_get_low_area(len, cursegs);
		if (addr != -ENOMEM)
			return addr;

		for (segmask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; segmask >>=1) {
			if (segmask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, cursegs | segmask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_segs(current->mm, segmask) == 0)
				return addr;
		}
		printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
		       " enough segments\n");
		return -ENOMEM;
	} else {
		return htlb_get_high_area(len);
	}
}

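/*
 * Insert or update the hash page table entry (HPTE) for a faulting huge
 * page.  Returns 0 once the HPTE is in place, or 1 to send the problem up
 * to do_page_fault().
 */
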
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local)
{
	hugepte_t *ptep;
	unsigned long va, vpn;
	int is_write;
	hugepte_t old_pte, new_pte;
	unsigned long hpteflags, prpn, flags;
	long slot;

	/* We have to find the first hugepte in the batch, since
	 * that's the one that will store the HPTE flags */
	ea &= HPAGE_MASK;
	ptep = hugepte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HPAGE_SHIFT;

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || hugepte_none(*ptep)))
		return 1;

	BUG_ON(hugepte_bad(*ptep));

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	is_write = access & _PAGE_RW;
	if (unlikely(is_write && !(hugepte_val(*ptep) & _HUGEPAGE_RW)))
		return 1;

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	spin_lock_irqsave(&mm->page_table_lock, flags);

	old_pte = *ptep;
	new_pte = old_pte;

	hpteflags = 0x2 | (! (hugepte_val(new_pte) & _HUGEPAGE_RW));

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, 1);
		if (hugepte_val(old_pte) & _HUGEPAGE_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
		slot += (hugepte_val(old_pte) & _HUGEPAGE_GROUP_IX) >> 5;

		if (ppc_md.hpte_updatepp(slot, hpteflags, va, 1, local) == -1)
			hugepte_val(old_pte) &= ~_HUGEPAGE_HPTEFLAGS;
	}

	if (likely(!(hugepte_val(old_pte) & _HUGEPAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, 1);
		unsigned long hpte_group;

		prpn = hugepte_pfn(old_pte);

repeat:
		hpte_group = ((hash & htab_data.htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* Update the linux pte with the HPTE slot */
		hugepte_val(new_pte) &= ~_HUGEPAGE_HPTEFLAGS;
		hugepte_val(new_pte) |= _HUGEPAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		hpteflags |= _PAGE_COHERENT;

		slot = ppc_md.hpte_insert(hpte_group, va, prpn, 0,
					  hpteflags, 0, 1);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hugepte_val(new_pte) |= _HUGEPAGE_SECONDARY;
			hpte_group = ((~hash & htab_data.htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, prpn,
						  1, hpteflags, 0, 1);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		hugepte_val(new_pte) |= (slot<<5) & _HUGEPAGE_GROUP_IX;

		/*
		 * No need to use ldarx/stdcx here because all who
		 * might be updating the pte will hold the
		 * page_table_lock or the hash_table_lock
		 * (we hold both)
		 */
		*ptep = new_pte;
	}

	spin_unlock_irqrestore(&mm->page_table_lock, flags);

	return 0;
}

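/*
 * Invalidate the HPTE backing a huge page pte that is being torn down
 * (called from unmap_hugepage_range() above).
 */
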
static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
				hugepte_t pte, int local)
{
	unsigned long vsid, vpn, va, hash, slot;

	BUG_ON(hugepte_bad(pte));
	BUG_ON(!in_hugepage_area(context, ea));

	vsid = get_vsid(context.id, ea);

	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HPAGE_SHIFT;
	hash = hpt_hash(vpn, 1);
	if (hugepte_val(pte) & _HUGEPAGE_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
	slot += (hugepte_val(pte) & _HUGEPAGE_GROUP_IX) >> 5;

	ppc_md.hpte_invalidate(slot, va, 1, local);
}