/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * From i386 code copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/traps.h>
#include <asm/syscalls.h>

#include <arch/interrupts.h>
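
/*
 * Deliver the requested signal for a fault, recording the fault
 * number in si_trapno.  If the target is init or the idle task we
 * panic instead, since killing either one would wedge the system.
 */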
static noinline void force_sig_info_fault(int si_signo, int si_code,
					  unsigned long address,
					  int fault_num,
					  struct task_struct *tsk)
{
	siginfo_t info;

	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      tsk->pid ? "init" : "the idle task");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	force_sig_info(si_signo, &info, tsk);
}

#ifndef __tilegx__
/*
 * Synthesize the fault a PL0 process would get by doing a word-load of
 * an unaligned address or a high kernel address.  Called indirectly
 * from sys_cmpxchg() in kernel/intvec.S.
 */
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
{
	if (address >= PAGE_OFFSET)
		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
				     INT_DTLB_MISS, current);
	else
		force_sig_info_fault(SIGBUS, BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
#endif
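
/*
 * Copy the kernel pmd covering "address" from the reference page
 * table (init_mm.pgd) into the supplied pgd if it is not already
 * there.  Returns the kernel pmd entry, or NULL if the reference
 * table has no mapping at this address either.
 */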
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0;	/* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
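
/*
 * While homecache.c is re-homing a page it marks the page's PTEs
 * "migrating"; a CPU that faults on such a PTE simply spins until
 * the migration completes and then retries the access.  See
 * homecache.c for the other side of this protocol.
 */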
/* Wait until this PTE has completed migration. */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrator fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}

/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	BUG_ON(PageHighMem(pgd_page));	/* oops, HIGHPTE? */
	return (pgd_t *) __va(ctx.page_table);
}

/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}

/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
			 stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
			 tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address,
				 is_kernel_mode, write))
		return 1;
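
	/*
	 * Assume "address not mapped" (SEGV_MAPERR) until the vma
	 * checks below show that a mapping exists but forbids this
	 * access, at which point si_code becomes SEGV_ACCERR.
	 */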
	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here.
	 */
	local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;	/* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;	/* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address,
				     fault_num, tsk);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
				" non-executable page - exploit attempt?"
				" (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      tsk->pid ? "init" : "the idle task");
	}

#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	bust_spinlocks(0);

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk);
	return 0;
}

#ifndef __tilegx__

/* We must release ICS before panicking or we won't get anywhere. */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)

/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_1_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and thus cannot safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
 * be used there (good), but also means we must be very sensitive
 * to not touch any data structures that might be located in memory
 * that could migrate, as we could be entering the kernel on a dataplane
 * cpu that has been deferring kernel TLB updates.  This means, for
 * example, that we can't migrate init_mm or its pgd.
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};
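
	/*
	 * The write flag arrives packed into the low bit of the PC
	 * value in "info"; instruction bundles are 8-byte aligned, so
	 * a valid PC has its low bits clear (the sanity check below
	 * relies on this).
	 */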
	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		 pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * NOTE: the one other type of access that might bring us here
	 * are the memory ops in __tns_atomic_acquire/__tns_atomic_release,
	 * but we don't have to check specially for them since we can
	 * always safely return to the address of the fault and retry,
	 * since no separate atomic locks are involved.
	 */

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}

#endif /* !__tilegx__ */

/*
 * This routine handles page faults.  It determines the address, and the
 * problem, and then passes it off to handle_page_fault() for normal DTLB
 * and ITLB issues, and for DMA or SN processor faults when we are in user
 * space.  For the latter, if we're in kernel mode, we just save the
 * interrupt away appropriately and return immediately.  We can't do
 * page faults for user code while in kernel mode.
 */
void do_page_fault(struct pt_regs *regs, int fault_num,
		   unsigned long address, unsigned long write)
{
	int is_page_fault;

	/* This case should have been handled by do_page_fault_ics(). */
	BUG_ON(write & ~1);

#if CHIP_HAS_TILE_DMA()
	/*
	 * If it's a DMA fault, suspend the transfer while we're
	 * handling the miss; we'll restart after it's handled.  If we
	 * don't suspend, it's possible that this process could swap
	 * out and back in, and restart the engine since the DMA is
	 * still in play.
	 */
	if (fault_num == INT_DMATLB_MISS ||
	    fault_num == INT_DMATLB_ACCESS ||
	    fault_num == INT_DMATLB_MISS_DWNCL ||
	    fault_num == INT_DMATLB_ACCESS_DWNCL) {
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}
#endif

	/* Validate fault num and decide if this is a first-time page fault. */
	switch (fault_num) {
	case INT_ITLB_MISS:
	case INT_DTLB_MISS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
#endif
		is_page_fault = 1;
		break;

	case INT_DTLB_ACCESS:
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
#endif
		is_page_fault = 0;
		break;

	default:
		panic("Bad fault number %d in do_page_fault", fault_num);
	}
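
	/*
	 * In kernel mode we cannot synchronously handle a fault taken
	 * by an asynchronous engine on behalf of user code, so we just
	 * record it in the thread's async_tlb state and replay it via
	 * do_async_page_fault() when we next return to user space.
	 */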
	if (EX1_PL(regs->ex1) != USER_PL) {
		struct async_tlb *async;
		switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
		case INT_DMATLB_MISS:
		case INT_DMATLB_ACCESS:
		case INT_DMATLB_MISS_DWNCL:
		case INT_DMATLB_ACCESS_DWNCL:
			async = &current->thread.dma_async_tlb;
			break;
#endif
#if CHIP_HAS_SN_PROC()
		case INT_SNITLB_MISS:
		case INT_SNITLB_MISS_DWNCL:
			async = &current->thread.sn_async_tlb;
			break;
#endif
		default:
			async = NULL;
		}
		if (async) {

			/*
			 * No vmalloc check required, so we can allow
			 * interrupts immediately at this point.
			 */
			local_irq_enable();

			set_thread_flag(TIF_ASYNC_TLB);
			if (async->fault_num != 0) {
				panic("Second async fault %d;"
				      " old fault was %d (%#lx/%ld)",
				      fault_num, async->fault_num,
				      async->address, async->is_write);
			}
			BUG_ON(fault_num == 0);
			async->fault_num = fault_num;
			async->is_fault = is_page_fault;
			async->is_write = write;
			async->address = address;
			return;
		}
	}

	handle_page_fault(regs, fault_num, is_page_fault, address, write);
}

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
 * Check an async_tlb structure to see if a deferred fault is waiting,
 * and if so pass it to the page-fault code.
 */
static void handle_async_page_fault(struct pt_regs *regs,
				    struct async_tlb *async)
{
	if (async->fault_num) {
		/*
		 * Clear async->fault_num before calling the page-fault
		 * handler so that if we re-interrupt before returning
		 * from the function we have somewhere to put the
		 * information from the new interrupt.
		 */
		int fault_num = async->fault_num;
		async->fault_num = 0;
		handle_page_fault(regs, fault_num, async->is_fault,
				  async->address, async->is_write);
	}
}
#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */

/*
 * This routine effectively re-issues asynchronous page faults
 * when we are returning to user space.
 */
void do_async_page_fault(struct pt_regs *regs)
{
	/*
	 * Clear thread flag early.  If we re-interrupt while processing
	 * code here, we will reset it and recall this routine before
	 * returning to user space.
	 */
	clear_thread_flag(TIF_ASYNC_TLB);

#if CHIP_HAS_TILE_DMA()
	handle_async_page_fault(regs, &current->thread.dma_async_tlb);
#endif
#if CHIP_HAS_SN_PROC()
	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
#endif
}
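
/*
 * Propagate any kernel pmd entries added to the reference page
 * table (init_mm.pgd) into every pgd in the system, so that no
 * task need take a vmalloc fault for those ranges afterwards.
 */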
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * as such).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
						      address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}