/*
 * kernel - Fix some rare pmap races in i386 and x86_64.
 * [dragonfly.git] / sys / platform / pc32 / i386 / pmap_inval.c
 * blob 9b26855e26fe5869cd761532bdaba504101d1759
 */
/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/platform/pc32/i386/pmap_inval.c,v 1.5 2005/11/04 08:57:27 dillon Exp $
 */
/*
 * pmap invalidation support code.  Certain hardware requirements must
 * be dealt with when manipulating page table entries and page directory
 * entries within a pmap.  In particular, we cannot safely manipulate
 * page tables which are in active use by another cpu (even if it is
 * running in userland) for two reasons: First, TLB writebacks will
 * race against our own modifications and tests.  Second, even if we
 * were to use bus-locked instruction we can still screw up the
 * target cpu's instruction pipeline due to Intel cpu errata.
 */
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/proc.h>
52 #include <sys/vmmeter.h>
53 #include <sys/thread2.h>
55 #include <vm/vm.h>
56 #include <vm/pmap.h>
57 #include <vm/vm_object.h>
59 #include <machine/cputypes.h>
60 #include <machine/md_var.h>
61 #include <machine/specialreg.h>
62 #include <machine/smp.h>
63 #include <machine/globaldata.h>
64 #include <machine/pmap.h>
65 #include <machine/pmap_inval.h>
#ifdef SMP

/*
 * Remote-cpu callback: invalidate the entire TLB on the cpu this
 * runs on.  The argument is unused; it exists only to match the
 * cpusync callback signature.
 */
static void
_cpu_invltlb(void *dummy)
{
	cpu_invltlb();
}

/*
 * Remote-cpu callback: invalidate the TLB entry for a single page.
 * The cpusync data pointer carries the virtual address to invalidate.
 */
static void
_cpu_invl1pg(void *data)
{
	cpu_invlpg(data);
}

#endif
84 * Initialize for add or flush
86 void
87 pmap_inval_init(pmap_inval_info_t info)
89 info->pir_flags = 0;
90 crit_enter_id("inval");
94 * Add a (pmap, va) pair to the invalidation list and protect access
95 * as appropriate.
97 * CPUMASK_LOCK is used to interlock thread switchins
99 void
100 pmap_inval_interlock(pmap_inval_info_t info, pmap_t pmap, vm_offset_t va)
102 #ifdef SMP
103 cpumask_t oactive;
104 cpumask_t nactive;
106 for (;;) {
107 oactive = pmap->pm_active & ~CPUMASK_LOCK;
108 nactive = oactive | CPUMASK_LOCK;
109 if (atomic_cmpset_int(&pmap->pm_active, oactive, nactive))
110 break;
111 crit_enter();
112 lwkt_process_ipiq();
113 crit_exit();
116 if ((info->pir_flags & PIRF_CPUSYNC) == 0) {
117 info->pir_flags |= PIRF_CPUSYNC;
118 info->pir_cpusync.cs_run_func = NULL;
119 info->pir_cpusync.cs_fin1_func = NULL;
120 info->pir_cpusync.cs_fin2_func = NULL;
121 lwkt_cpusync_start(oactive, &info->pir_cpusync);
122 } else if (pmap->pm_active & ~info->pir_cpusync.cs_mask) {
123 lwkt_cpusync_add(oactive, &info->pir_cpusync);
125 #else
126 if (pmap->pm_active == 0)
127 return;
128 #endif
129 if ((info->pir_flags & (PIRF_INVLTLB|PIRF_INVL1PG)) == 0) {
130 if (va == (vm_offset_t)-1) {
131 info->pir_flags |= PIRF_INVLTLB;
132 #ifdef SMP
133 info->pir_cpusync.cs_fin2_func = _cpu_invltlb;
134 #endif
135 } else {
136 info->pir_flags |= PIRF_INVL1PG;
137 info->pir_cpusync.cs_data = (void *)va;
138 #ifdef SMP
139 info->pir_cpusync.cs_fin2_func = _cpu_invl1pg;
140 #endif
142 } else {
143 info->pir_flags |= PIRF_INVLTLB;
144 #ifdef SMP
145 info->pir_cpusync.cs_fin2_func = _cpu_invltlb;
146 #endif
150 void
151 pmap_inval_deinterlock(pmap_inval_info_t info, pmap_t pmap)
153 #ifdef SMP
154 atomic_clear_int(&pmap->pm_active, CPUMASK_LOCK);
155 #endif
159 * Synchronize changes with target cpus.
161 void
162 pmap_inval_flush(pmap_inval_info_t info)
164 #ifdef SMP
165 if (info->pir_flags & PIRF_CPUSYNC)
166 lwkt_cpusync_finish(&info->pir_cpusync);
167 #else
168 if (info->pir_flags & PIRF_INVLTLB)
169 cpu_invltlb();
170 else if (info->pir_flags & PIRF_INVL1PG)
171 cpu_invlpg(info->pir_cpusync.cs_data);
172 #endif
173 info->pir_flags = 0;
176 void
177 pmap_inval_done(pmap_inval_info_t info)
179 pmap_inval_flush(info);
180 crit_exit_id("flush");