/*
 * native hashtable management.
 *
 * SMP scalability work:
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

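/*
 * Lock bit for an HPTE: bit 3 (Linux numbering) of the entry's first
 * doubleword, which should fall within the field the architecture leaves
 * for software use (HPTE_V_BOLTED lives there too).  See
 * native_lock_hpte()/native_unlock_hpte() below.
 */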
#define HPTE_LOCK_BIT 3

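/*
 * Serializes tlbie broadcasts on hardware that lacks
 * MMU_FTR_LOCKLESS_TLBIE; some such hardware will deadlock if two
 * processors tlbie at once (see the comment in native_hpte_clear()).
 */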
DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		va |= mmu_psize_defs[apsize].sllp << 6;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS, 64K base page size and 16MB large page size
			 * We don't need all the bits, but rest of the bits
			 * must be ignored by the processor.
			 * vpn covers up to 65 bits of va. (0...65) and we need
			 * 58..64 bits of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

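/*
 * __tlbiel() is the local form of the invalidation above: tlbiel only
 * affects the TLB of the executing CPU, so it needs no broadcast and no
 * tlbsync, only the surrounding ptesync that the tlbie() wrapper issues.
 * The psize/apsize split mirrors __tlbie(): with MPSS a segment's base
 * page size (psize) can differ from the actual page size (apsize) backing
 * the mapping, e.g. a 16MB page inside a 64K base-page segment.
 */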
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		va |= mmu_psize_defs[apsize].sllp << 6;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/* Add AVAL part */
		if (psize != apsize) {
			/*
			 * MPSS, 64K base page size and 16MB large page size
			 * We don't need all the bits, but rest of the bits
			 * must be ignored by the processor.
			 * vpn covers up to 65 bits of va. (0...65) and we need
			 * 58..64 bits of va.
			 */
			va |= (vpn & 0xfe);
		}
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

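/*
 * Per-HPTE locking: HPTE_LOCK_BIT is set in the entry's first doubleword
 * with test_and_set_bit_lock() and contenders spin with cpu_relax().
 * Several paths below never call native_unlock_hpte(): storing a whole
 * new value to hptep->v (zero, or a new valid entry) drops the lock bit
 * as a side effect, which is what the "this also unlocks" comments mean.
 */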
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

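/*
 * Evict one non-bolted entry from a full group, so the caller can retry
 * its insert.  Deliberately no tlbie here: as the comments in
 * native_hpte_updatepp() and native_hpte_invalidate() explain, the
 * evicted translation may still sit in the TLB, which is why those paths
 * flush even when the HPTE no longer matches.
 */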
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW(" remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

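/*
 * Returns 0 when the protection bits were updated and -1 when no HPTE
 * matching @vpn was found, so that the fault path can fall back to
 * inserting a fresh entry.
 */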
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

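/*
 * Return the slot of a valid, bolted HPTE for @vpn, or -1 if none is
 * found.  Only the primary group needs to be searched, since bolted
 * mappings are never created in the secondary group.
 */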
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));
	/*
	 * Ensure it is out of the tlb too. A bolted entry's base and
	 * actual page size are the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
	 * random entry from it. When we do that we don't invalidate the TLB
	 * (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

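/*
 * Invalidate all HPTEs backing one hugepage.  hpte_slot_array records,
 * per base-page-sized chunk of the hugepage, whether an HPTE exists and
 * which slot it occupies (hpte_valid()/hpte_hash_index()), so only the
 * populated slots are touched and a single tlbie suffices afterwards.
 */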
static void native_hugepage_invalidate(struct mm_struct *mm,
				       unsigned char *hpte_slot_array,
				       unsigned long addr, int psize)
{
	int ssize = 0, i;
	int lock_tlbie;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, vsid, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		if (!is_kernel_addr(addr)) {
			ssize = user_segment_size(addr);
			vsid = get_vsid(mm->context.id, addr, ssize);
			WARN_ON(vsid == 0);
		} else {
			vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
			ssize = mmu_kernel_ssize;
		}

		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = hptep->v;

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
	}
	/*
	 * Since this is a hugepage, we just need a single tlbie.
	 * use the last vpn.
	 */
	lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);

	asm volatile("ptesync":::"memory");
	__tlbie(vpn, psize, actual_psize, ssize);
	asm volatile("eieio; tlbsync; ptesync":::"memory");

	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);

	local_irq_restore(flags);
}

static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz        >=8KB
		 *    rrrr rrzz        >=16KB
		 *    rrrr rzzz        >=32KB
		 *    rrrr zzzz        >=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

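/*
 * Worked example for __hpte_actual_psize() above, assuming the usual
 * LP_SHIFT of 12: for a 64K actual page size, shift = 16 - 12 = 4, so
 * mask = 0xf and only the low four LP bits (the "zzzz" of "rrrr zzzz")
 * are compared against that size's penc value.
 */
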
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = hpte->v;
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize = size;
	*apsize = a_size;
}

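/*
 * hpte_decode() is the inverse of the encode helpers: from a raw HPTE
 * and its slot alone it reconstructs the segment size, base and actual
 * page sizes, and vpn.  native_hpte_clear() below depends on this at
 * kexec time, when no Linux PTE is available to consult.
 */
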
/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled. remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it. Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

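/*
 * Wire up the native (bare-metal) hash table operations.  Virtualized
 * platforms such as pseries LPARs install hypervisor-call based
 * implementations instead.
 */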
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate = native_hpte_invalidate;
	ppc_md.hpte_updatepp = native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert = native_hpte_insert;
	ppc_md.hpte_remove = native_hpte_remove;
	ppc_md.hpte_clear_all = native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate = native_hugepage_invalidate;
}