/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm-generic/sections.h>
/*
 * Page fault error code bits:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
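
/*
 * Worked example (illustrative, not from a real fault): a user-space
 * write to a present but read-only page arrives with error_code == 7
 * (PF_PROT|PF_WRITE|PF_USER), while a user-space read of an unmapped
 * address arrives with error_code == 4 (PF_USER alone).
 */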
static inline int notify_page_fault(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
#else
	if (!user_mode(regs)) {
#endif
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
#else
	return 0;
#endif
}
/*
 * X86_32:
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * X86_64:
 * Sometimes the CPU reports invalid exceptions on prefetch.
 * Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static int is_prefetch(struct pt_regs *regs, unsigned long addr,
		       unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

#ifdef CONFIG_X86_32
	/* Catch an obscure case of prefetch inside an NX page: */
	if ((__supported_pte_mask & _PAGE_NX) && (error_code & 16))
		return 0;
#endif

	/* If it was an exec fault on an NX page, ignore it: */
	if (error_code & PF_INSTR)
		return 0;

	instr = (unsigned char *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway.
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
#ifdef CONFIG_X86_64
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
			 * We need to figure out under what instruction mode the
			 * instruction was issued. We could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well-known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
#endif
		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;

			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
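
/*
 * Example of what the scanner above accepts (illustrative): the two
 * bytes 0x0F 0x18 begin the PREFETCHh family (prefetchnta/t0/t1/t2),
 * and 0x0F 0x0D is the AMD 3DNow! PREFETCH/PREFETCHW pair, possibly
 * preceded by the segment/REX/operand-size prefixes the loop skips.
 */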
static void force_sig_info_fault(int si_signo, int si_code,
				 unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}
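
/*
 * probe_kernel_address() returns 0 on success and -EFAULT if the read
 * faults, so bad_address() above is nonzero exactly when the page-table
 * pointer cannot be dereferenced safely.
 */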
static void dump_pagetable(unsigned long address)
{
#ifdef CONFIG_X86_32
	__typeof__(pte_val(__pte(0))) page;

	page = read_cr3();
	page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", page);
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && page & _PAGE_PRESENT) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
							 & (PTRS_PER_PMD - 1)];
		printk(KERN_CONT "*pde = %016Lx ", page);
	}
#else
	printk("*pde = %08lx ", page);
#endif

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already.
	 */
	if ((page >> PAGE_SHIFT) < max_low_pfn
	    && (page & _PAGE_PRESENT)
	    && !(page & _PAGE_PSE)) {
		page &= PAGE_MASK;
		page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
							 & (PTRS_PER_PTE - 1)];
		printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
	}

	printk("\n");
#else /* CONFIG_X86_64 */
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = (pgd_t *)read_cr3();

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = pud_offset(pgd, address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
#endif
}
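
/*
 * Illustrative 64-bit output of dump_pagetable() (made-up values):
 *
 *	PGD 6e1f5067 PUD 6e1f6067 PMD 0
 *
 * The walk stops at the first level that is missing or bad, so a
 * trailing "PMD 0" means the pmd entry for the address was empty.
 */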
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}
#ifdef CONFIG_X86_64
static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
#endif
/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP
 * cleared. Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on X86_32.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	static int warned;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
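
/*
 * Illustrative case for the check above: a kernel RIP such as
 * 0xffffffff80201234 is reported as 0x0000000080201234 when the
 * erratum bites.  is_errata93() ORs the upper 32 bits back in and,
 * if the repaired address lands in kernel or module text, resumes
 * execution at the fixed RIP.
 */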
/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps to
 * illegal addresses >4GB. We catch this in the page fault handler because
 * these addresses are not reachable. Just detect this case and return.
 * Any code segment in the LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
	    (address >> 32))
		return 1;
#endif
	return 0;
}
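
/*
 * Note: (regs->cs & (1<<2)) tests the selector's TI bit, i.e. a code
 * segment loaded from the LDT, which per the comment above implies
 * compatibility mode.  Example (illustrative): a 32-bit compat task
 * running on __USER32_CS that faults on 0x100000000 matches here,
 * since no address above 4GB can be valid for it.
 */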
void do_invalid_op(struct pt_regs *, unsigned long);

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
			    unsigned long address)
{
#ifdef CONFIG_X86_32
	if (!oops_may_print())
		return;
#endif

#ifdef CONFIG_X86_PAE
	if (error_code & PF_INSTR) {
		unsigned int level;
		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(KERN_CRIT "kernel tried to execute "
				"NX-protected page - exploit attempt? "
				"(uid: %d)\n", current->uid);
	}
#endif

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");
#ifdef CONFIG_X86_32
	printk(KERN_CONT " at %08lx\n", address);
#else
	printk(KERN_CONT " at %016lx\n", address);
#endif
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);
	dump_pagetable(address);
}
#ifdef CONFIG_X86_64
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);

	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	if (__die("Bad pagetable", regs, error_code))
		regs = NULL;
	oops_end(flags, regs, SIGKILL);
}
#endif
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;
	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
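
/*
 * Example (illustrative): a write fault (PF_WRITE) against a pte that
 * has meanwhile become writable passes both tests above and is treated
 * as spurious; against a still read-only pte it returns 0 and the
 * fault is handled normally.
 */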
/*
 * Handle a spurious fault caused by a stale TLB entry. This allows
 * us to lazily refresh the TLB when increasing the permissions of a
 * kernel page (RO -> RW or NX -> X). Doing it eagerly is very
 * expensive since that implies doing a full cross-processor TLB
 * flush, even if no stale TLB entries exist on other processors.
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static int spurious_fault(unsigned long address,
			  unsigned long error_code)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	return spurious_fault_check(error_code, pte);
}
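
/*
 * Typical sequence this path absorbs (illustrative): the kernel makes
 * a read-only page writable (e.g. via the pageattr code), skips the
 * cross-processor TLB flush, and a CPU holding the stale read-only
 * TLB entry then writes to the page.  The walk above sees the pte is
 * already writable, the fault is ignored, and the access is retried.
 */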
/*
 * X86_32:
 * Handle a fault on the vmalloc or module mapping area
 *
 * X86_64:
 * Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
#ifdef CONFIG_X86_32
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
#else
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */
	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
#endif
}
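
/*
 * Illustrative flow for the code above: vmalloc() installs new page
 * tables in init_mm only.  The first touch from a task whose pgd
 * predates that mapping faults here, and the missing top-level entry
 * is copied from the reference page table without taking any locks.
 */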
int show_unhandled_signals = 1;
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;
#ifdef CONFIG_X86_64
	unsigned long flags;
#endif

	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	tsk = current;
	mm = tsk->mm;
	prefetchw(&mm->mmap_sem);

	/* get the address */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	if (notify_page_fault(regs))
		return;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space,
	 * (error_code & 4) == 0, i.e. PF_USER is clear, and that the
	 * fault was not a protection error, (error_code & 9) == 0,
	 * i.e. neither PF_PROT nor PF_RSVD is set.
	 */
#ifdef CONFIG_X86_32
	if (unlikely(address >= TASK_SIZE)) {
#else
	if (unlikely(address >= TASK_SIZE64)) {
#endif
		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
		    vmalloc_fault(address) >= 0)
			return;

		/* Can handle a stale RO->RW TLB */
		if (spurious_fault(address, error_code))
			return;

		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		goto bad_area_nosemaphore;
	}
#ifdef CONFIG_X86_32
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
#else /* CONFIG_X86_64 */
	if (likely(regs->flags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

	/*
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet.
	 */
	if (user_mode_vm(regs))
		error_code |= PF_USER;
again:
#endif
	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock:
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:		/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:		/* read, present */
		goto bad_area;
	case 0:			/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
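
	/*
	 * Worked example (illustrative): a store to a read-only private
	 * mapping arrives with PF_PROT|PF_WRITE (value 3), hits the
	 * "default:" arm, falls through to the VM_WRITE test and is
	 * rejected as a bad area before handle_mm_fault() is called.
	 */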
#ifdef CONFIG_X86_32
survive:
#endif
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#ifdef CONFIG_X86_32
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (v8086_mode(regs)) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
#endif
	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (is_errata100(regs, address))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk(
#ifdef CONFIG_X86_32
			"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
#else
			"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
#endif
			task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			tsk->comm, task_pid_nr(tsk), address, regs->ip,
			regs->sp, error_code);
			print_vma_addr(" in ", regs->ip);
			printk("\n");
		}
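
		/*
		 * The message above looks like this (illustrative
		 * values only):
		 *
		 *   a.out[1234]: segfault at 0 ip 080483c2 sp bf9be0e0
		 *   error 4 in a.out[8048000+1000]
		 */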
		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults: */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}
	if (is_f00f_bug(regs, address))
		return;

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * X86_32:
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 *
	 * X86_64:
	 * Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
#ifdef CONFIG_X86_32
	bust_spinlocks(1);
#else
	flags = oops_begin();
#endif

	show_fault_oops(regs, error_code, address);

	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;

#ifdef CONFIG_X86_32
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
#else
	if (__die("Oops", regs, error_code))
		regs = NULL;
	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);
	oops_end(flags, regs, SIGKILL);
#endif
/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
#ifdef CONFIG_X86_32
		down_read(&mm->mmap_sem);
		goto survive;
#else
		goto again;
#endif
	}

	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER))
		goto no_context;

#ifdef CONFIG_X86_32
	/* User space => ok to do another page fault: */
	if (is_prefetch(regs, address, error_code))
		return;
#endif
	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
DEFINE_SPINLOCK(pgd_lock);
void vmalloc_sync_all(void)
{
#ifdef CONFIG_X86_32
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each_entry(page, &pgd_list, lru) {
				if (!vmalloc_sync_one(page_address(page),
						      address))
					break;
			}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#else /* CONFIG_X86_64 */
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = VMALLOC_START & PGDIR_MASK;
	unsigned long address;

	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			const pgd_t *pgd_ref = pgd_offset_k(address);
			unsigned long flags;
			struct page *page;

			if (pgd_none(*pgd_ref))
				continue;

			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd;
				pgd = (pgd_t *)page_address(page) + pgd_index(address);
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
				else
					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
			}
			spin_unlock_irqrestore(&pgd_lock, flags);
			set_bit(pgd_index(address), insync);
		}
		if (address == start)
			start = address + PGDIR_SIZE;
	}

	/* Check that there is no need to do the same for the modules area. */
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
		       (__START_KERNEL & PGDIR_MASK)));
#endif
}