kernel - Refactor Xinvltlb (3)
[dragonfly.git] / sys/platform/pc64/x86_64/pmap_inval.c
blob 238704bf636f09c95b7da1f8b5e993143387f3ef
/*
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * pmap invalidation support code.  Certain hardware requirements must
 * be dealt with when manipulating page table entries and page directory
 * entries within a pmap.  In particular, we cannot safely manipulate
 * page tables which are in active use by another cpu (even if it is
 * running in userland) for two reasons: First, TLB writebacks will
 * race against our own modifications and tests.  Second, even if we
 * were to use bus-locked instructions we can still screw up the
 * target cpu's instruction pipeline due to Intel cpu errata.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/thread2.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>
#if 1	/* DEBUGGING */
#define LOOPMASK	(/* 32 * */ 16 * 128 * 1024 - 1)
#endif

#define MAX_INVAL_PAGES		128
struct pmap_inval_info {
	vm_offset_t	va;
	pt_entry_t	*ptep;
	pt_entry_t	opte;
	pt_entry_t	npte;
	enum { INVDONE, INVSTORE, INVCMPSET } mode;
	int		success;
	int		npgs;
	cpumask_t	done;
	cpumask_t	mask;
#ifdef LOOPMASK
	cpumask_t	sigmask;
	int		failed;
	int		xloops;
#endif
} __cachealign;

typedef struct pmap_inval_info pmap_inval_info_t;
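
/*
 * Rough protocol summary (an illustrative paraphrase of the comments in
 * pmap_inval_intr() below, not an authoritative specification):
 *
 * 1. The originating cpu fills in its invinfo[] slot, sets info->mask and
 *    info->done to the target cpu set (including itself), and calls
 *    smp_invlpg() to fire Xinvltlb IPIs.
 * 2. Each target clears its bit in info->mask to indicate that it has
 *    quiesced inside the Xinvltlb loop.
 * 3. Once only the originator's mask bit remains, the originator performs
 *    the pte swap or cmpset and clears its own mask bit.
 * 4. Targets then execute their local invlpg/invltlb and clear their done
 *    bits; the originator does its own local invalidation, sets
 *    mode = INVDONE, and clears its done bit.
 * 5. The command is considered 100% done when all done bits are clear.
 */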
static pmap_inval_info_t	invinfo[MAXCPU];
extern cpumask_t		smp_invmask;
#ifdef LOOPMASK
#ifdef LOOPMASK_IN
extern cpumask_t		smp_in_mask;
#endif
extern cpumask_t		smp_smurf_mask;
#endif
static long pmap_inval_bulk_count;

SYSCTL_LONG(_machdep, OID_AUTO, pmap_inval_bulk_count, CTLFLAG_RW,
	    &pmap_inval_bulk_count, 0, "");
static void
pmap_inval_init(pmap_t pmap)
{
	cpulock_t olock;
	cpulock_t nlock;

	crit_enter_id("inval");

	if (pmap != &kernel_pmap) {
		for (;;) {
			olock = pmap->pm_active_lock;
			cpu_ccfence();
			nlock = olock | CPULOCK_EXCL;
			if (olock != nlock &&
			    atomic_cmpset_int(&pmap->pm_active_lock,
					      olock, nlock)) {
				break;
			}
			lwkt_process_ipiq();
			cpu_pause();
		}
		atomic_add_acq_long(&pmap->pm_invgen, 1);
	}
}
static void
pmap_inval_done(pmap_t pmap)
{
	if (pmap != &kernel_pmap) {
		atomic_clear_int(&pmap->pm_active_lock, CPULOCK_EXCL);
		atomic_add_acq_long(&pmap->pm_invgen, 1);
	}
	crit_exit_id("inval");
}
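
/*
 * Note (illustrative, not from the original source): the API functions
 * below bracket every operation with pmap_inval_init()/pmap_inval_done().
 * For non-kernel pmaps this holds CPULOCK_EXCL in pm_active_lock for the
 * duration and bumps pm_invgen on both entry and exit, e.g.:
 *
 *	pmap_inval_init(pmap);
 *	... swap pte(s), interlock with the pmap's active cpus ...
 *	pmap_inval_done(pmap);
 */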
/*
 * API function - invalidate the pte at (va) and replace *ptep with
 * npte atomically across the pmap's active cpus.
 *
 * This is a holy mess.
 *
 * Returns the previous contents of *ptep.
 */
static
void
loopdebug(const char *msg, pmap_inval_info_t *info)
{
	int p;
	int cpu = mycpu->gd_cpuid;

	cpu_lfence();
	atomic_add_long(&smp_smurf_mask.ary[0], 0);
	kprintf("%s %d mode=%d m=%08jx d=%08jx s=%08jx "
#ifdef LOOPMASK_IN
		"in=%08jx "
#endif
		"smurf=%08jx\n",
		msg, cpu, info->mode,
		info->mask.ary[0],
		info->done.ary[0],
		info->sigmask.ary[0],
#ifdef LOOPMASK_IN
		smp_in_mask.ary[0],
#endif
		smp_smurf_mask.ary[0]);
	kprintf("mdglob ");
	for (p = 0; p < ncpus; ++p)
		kprintf(" %d", CPU_prvspace[p]->mdglobaldata.gd_xinvaltlb);
	kprintf("\n");
}
#ifdef CHECKSIG

#define CHECKSIGMASK(info)	_checksigmask(info, __FILE__, __LINE__)

static
void
_checksigmask(pmap_inval_info_t *info, const char *file, int line)
{
	cpumask_t tmp;

	tmp = info->mask;
	CPUMASK_ANDMASK(tmp, info->sigmask);
	if (CPUMASK_CMPMASKNEQ(tmp, info->mask)) {
		kprintf("\"%s\" line %d: bad sig/mask %08jx %08jx\n",
			file, line, info->sigmask.ary[0], info->mask.ary[0]);
	}
}

#else

#define CHECKSIGMASK(info)

#endif
/*
 * Invalidate the specified va across all cpus associated with the pmap.
 * If va == (vm_offset_t)-1, we invltlb() instead of invlpg().  The operation
 * will be done fully synchronously with storing npte into *ptep and returning
 * opte.
 *
 * If ptep is NULL the operation will execute semi-synchronously.
 * ptep must be NULL if npgs > 1.
 */
pt_entry_t
pmap_inval_smp(pmap_t pmap, vm_offset_t va, int npgs,
	       pt_entry_t *ptep, pt_entry_t npte)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	pt_entry_t opte = 0;
	int cpu = gd->gd_cpuid;
	cpumask_t tmpmask;
	unsigned long rflags;

	/*
	 * Shortcut single-cpu case if possible.
	 */
	if (pmap == NULL)
		pmap = &kernel_pmap;
	pmap_inval_init(pmap);
	if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
		/*
		 * Convert to invltlb if there are too many pages to
		 * invlpg on.
		 */
		if (npgs > MAX_INVAL_PAGES) {
			npgs = 0;
			va = (vm_offset_t)-1;
		}

		/*
		 * Invalidate the specified pages, handle invltlb if requested.
		 */
		while (npgs) {
			--npgs;
			if (ptep) {
				opte = atomic_swap_long(ptep, npte);
				++ptep;
			}
			if (va == (vm_offset_t)-1)
				break;
			cpu_invlpg((void *)va);
			va += PAGE_SIZE;
		}
		if (va == (vm_offset_t)-1)
			cpu_invltlb();
		pmap_inval_done(pmap);

		return opte;
	}

	/*
	 * We must wait for other cpus which may still be finishing up a
	 * prior operation.
	 */
	info = &invinfo[cpu];
	while (CPUMASK_TESTNZERO(info->done)) {
#ifdef LOOPMASK
		int loops;

		loops = ++info->xloops;
		if ((loops & LOOPMASK) == 0) {
			info->failed = 1;
			loopdebug("orig_waitA", info);
			/* XXX recover from possible bug */
			CPUMASK_ASSZERO(info->done);
		}
#endif
		cpu_pause();
	}
	KKASSERT(info->mode == INVDONE);

	/*
	 * Must disable interrupts to prevent an Xinvltlb (which ignores
	 * critical sections) from trying to execute our command before we
	 * have managed to send any IPIs to the target cpus.
	 */
	rflags = read_rflags();
	cpu_disable_intr();

	/*
	 * Must set our cpu in the invalidation scan mask before
	 * any possibility of [partial] execution (remember, XINVLTLB
	 * can interrupt a critical section).
	 */
	if (CPUMASK_TESTBIT(smp_invmask, cpu)) {
		kprintf("bcpu %d already in\n", cpu);
	}
	ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);

	info->va = va;
	info->npgs = npgs;
	info->ptep = ptep;
	info->npte = npte;
	info->opte = 0;
#ifdef LOOPMASK
	info->failed = 0;
#endif
	tmpmask = pmap->pm_active;	/* volatile (bits may be cleared) */
	cpu_ccfence();
	CPUMASK_ANDMASK(tmpmask, smp_active_mask);

	/*
	 * If ptep is NULL the operation can be semi-synchronous, which means
	 * we can improve performance by flagging and removing idle cpus
	 * (see the idleinvlclr function in mp_machdep.c).
	 *
	 * Typically kernel page table operation is semi-synchronous.
	 */
	if (ptep == NULL)
		smp_smurf_idleinvlclr(&tmpmask);
	CPUMASK_ORBIT(tmpmask, cpu);
	info->mode = INVSTORE;

	/*
	 * Command may start executing the moment 'done' is initialized,
	 * disable current cpu interrupt to prevent 'done' field from
	 * changing (other cpus can't clear done bits until the originating
	 * cpu clears its mask bit, but other cpus CAN start clearing their
	 * mask bits).
	 */
	info->mask = tmpmask;
#ifdef LOOPMASK
	info->sigmask = tmpmask;
	CHECKSIGMASK(info);
#endif
	cpu_sfence();
	info->done = tmpmask;	/* execute can begin here due to races */

	/*
	 * Pass our copy of the done bits (so they don't change out from
	 * under us) to generate the Xinvltlb interrupt on the targets.
	 */
	smp_invlpg(&tmpmask);
	opte = info->opte;
	KKASSERT(info->mode == INVDONE);

	/*
	 * Target cpus will be in their loop exiting concurrently with our
	 * cleanup.  They will not lose the bitmask they obtained before so
	 * we can safely clear this bit.
	 */
	ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
	write_rflags(rflags);
	pmap_inval_done(pmap);

	return opte;
}
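
/*
 * Example (hypothetical caller, illustration only): atomically replace a
 * single pte and shoot it down on all cpus using the pmap, capturing the
 * old contents; this mirrors the call made by pmap_inval_bulk() below:
 *
 *	opte = pmap_inval_smp(pmap, va, 1, ptep, npte);
 *
 * Passing ptep == NULL with va == (vm_offset_t)-1 instead requests a
 * semi-synchronous full-TLB invalidation (see pmap_inval_bulk_flush()).
 */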
/*
 * API function - invalidate the pte at (va) and replace *ptep with npte
 * atomically only if *ptep equals opte, across the pmap's active cpus.
 *
 * Returns 1 on success, 0 on failure (caller typically retries).
 */
int
pmap_inval_smp_cmpset(pmap_t pmap, vm_offset_t va, pt_entry_t *ptep,
		      pt_entry_t opte, pt_entry_t npte)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	int success;
	int cpu = gd->gd_cpuid;
	cpumask_t tmpmask;
	unsigned long rflags;

	/*
	 * Shortcut single-cpu case if possible.
	 */
	if (pmap == NULL)
		pmap = &kernel_pmap;
	pmap_inval_init(pmap);
	if (CPUMASK_CMPMASKEQ(pmap->pm_active, gd->gd_cpumask)) {
		if (atomic_cmpset_long(ptep, opte, npte)) {
			if (va == (vm_offset_t)-1)
				cpu_invltlb();
			else
				cpu_invlpg((void *)va);
			pmap_inval_done(pmap);
			return 1;
		} else {
			pmap_inval_done(pmap);
			return 0;
		}
	}

	/*
	 * We must wait for other cpus which may still be finishing
	 * up a prior operation.
	 */
	info = &invinfo[cpu];
	while (CPUMASK_TESTNZERO(info->done)) {
#ifdef LOOPMASK
		int loops;

		loops = ++info->xloops;
		if ((loops & LOOPMASK) == 0) {
			info->failed = 1;
			loopdebug("orig_waitB", info);
			/* XXX recover from possible bug */
			CPUMASK_ASSZERO(info->done);
		}
#endif
		cpu_pause();
	}
	KKASSERT(info->mode == INVDONE);

	/*
	 * Must disable interrupts to prevent an Xinvltlb (which ignores
	 * critical sections) from trying to execute our command before we
	 * have managed to send any IPIs to the target cpus.
	 */
	rflags = read_rflags();
	cpu_disable_intr();

	/*
	 * Must set our cpu in the invalidation scan mask before
	 * any possibility of [partial] execution (remember, XINVLTLB
	 * can interrupt a critical section).
	 */
	if (CPUMASK_TESTBIT(smp_invmask, cpu)) {
		kprintf("acpu %d already in\n", cpu);
	}
	ATOMIC_CPUMASK_ORBIT(smp_invmask, cpu);

	info->va = va;
	info->npgs = 1;			/* unused */
	info->ptep = ptep;
	info->npte = npte;
	info->opte = opte;
	info->failed = 0;
	tmpmask = pmap->pm_active;	/* volatile */
	cpu_ccfence();
	CPUMASK_ANDMASK(tmpmask, smp_active_mask);
	CPUMASK_ORBIT(tmpmask, cpu);
	info->mode = INVCMPSET;		/* initialize last */
	info->success = 0;

	/*
	 * Command may start executing the moment 'done' is initialized,
	 * disable current cpu interrupt to prevent 'done' field from
	 * changing (other cpus can't clear done bits until the originating
	 * cpu clears its mask bit).
	 */
	cpu_ccfence();
	info->mask = tmpmask;
#ifdef LOOPMASK
	info->sigmask = tmpmask;
	CHECKSIGMASK(info);
#endif
	info->done = tmpmask;

	/*
	 * Calling smp_invlpg() will issue the IPIs to XINVLTLB (which can
	 * execute even from inside a critical section), and will call us
	 * back via pmap_inval_intr() with interrupts disabled.
	 *
	 * Unlike smp_invltlb(), this interface causes all cpus to stay
	 * inside XINVLTLB until the whole thing is done.  When our cpu
	 * detects that the whole thing is done we execute the requested
	 * operation and return.
	 */
	smp_invlpg(&tmpmask);
	success = info->success;
	KKASSERT(info->mode == INVDONE);

	ATOMIC_CPUMASK_NANDBIT(smp_invmask, cpu);
	write_rflags(rflags);
	pmap_inval_done(pmap);

	return success;
}
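
/*
 * Example (hypothetical caller, illustration only): the 0-on-failure
 * return is typically consumed by a retry loop along these lines, where
 * recompute_new_pte() stands in for whatever recalculation the caller
 * needs (hypothetical helper, not part of this file):
 *
 *	do {
 *		opte = *ptep;
 *		npte = recompute_new_pte(opte);
 *	} while (pmap_inval_smp_cmpset(pmap, va, ptep, opte, npte) == 0);
 */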
void
pmap_inval_bulk_init(pmap_inval_bulk_t *bulk, struct pmap *pmap)
{
	bulk->pmap = pmap;
	bulk->va_beg = 0;
	bulk->va_end = 0;
	bulk->count = 0;
}
pt_entry_t
pmap_inval_bulk(pmap_inval_bulk_t *bulk, vm_offset_t va,
		pt_entry_t *ptep, pt_entry_t npte)
{
	pt_entry_t pte;

	/*
	 * Degenerate case, localized or we don't care (e.g. because we
	 * are jacking the entire page table) or the pmap is not in-use
	 * by anyone.  No invalidations are done on any cpu.
	 */
	if (bulk == NULL) {
		pte = atomic_swap_long(ptep, npte);
		return pte;
	}

	/*
	 * If it isn't the kernel pmap we execute the operation synchronously
	 * on all cpus belonging to the pmap, which avoids concurrency bugs in
	 * the hw related to changing pte's out from under threads.
	 *
	 * Eventually I would like to implement streaming pmap invalidation
	 * for user pmaps to reduce mmap/munmap overheads for heavily-loaded
	 * threaded programs.
	 */
	if (bulk->pmap != &kernel_pmap) {
		pte = pmap_inval_smp(bulk->pmap, va, 1, ptep, npte);
		return pte;
	}

	/*
	 * This is the kernel_pmap.  All unmap operations presume that there
	 * are no other cpus accessing the addresses in question.  Implement
	 * the bulking algorithm: collect the required information and
	 * synchronize once at the end.
	 */
	pte = atomic_swap_long(ptep, npte);
	if (va == (vm_offset_t)-1) {
		bulk->va_beg = va;
	} else if (bulk->va_beg == bulk->va_end) {
		bulk->va_beg = va;
		bulk->va_end = va + PAGE_SIZE;
	} else if (va == bulk->va_end) {
		bulk->va_end = va + PAGE_SIZE;
	} else {
		bulk->va_beg = (vm_offset_t)-1;
		bulk->va_end = 0;
#if 0
		pmap_inval_bulk_flush(bulk);
		bulk->count = 1;
		if (va == (vm_offset_t)-1) {
			bulk->va_beg = va;
			bulk->va_end = 0;
		} else {
			bulk->va_beg = va;
			bulk->va_end = va + PAGE_SIZE;
		}
#endif
	}
	++bulk->count;

	return pte;
}
void
pmap_inval_bulk_flush(pmap_inval_bulk_t *bulk)
{
	if (bulk == NULL)
		return;
	if (bulk->count > 0)
		pmap_inval_bulk_count += (bulk->count - 1);
	if (bulk->va_beg != bulk->va_end) {
		if (bulk->va_beg == (vm_offset_t)-1) {
			pmap_inval_smp(bulk->pmap, bulk->va_beg, 1, NULL, 0);
		} else {
			long n;

			n = (bulk->va_end - bulk->va_beg) >> PAGE_SHIFT;
			pmap_inval_smp(bulk->pmap, bulk->va_beg, n, NULL, 0);
		}
	}
	bulk->va_beg = 0;
	bulk->va_end = 0;
	bulk->count = 0;
}
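
/*
 * Typical bulk usage (sketch only, hypothetical variables): collect a run
 * of kernel pte removals and synchronize once at the end:
 *
 *	pmap_inval_bulk_t bulk;
 *
 *	pmap_inval_bulk_init(&bulk, &kernel_pmap);
 *	for (; va < eva; va += PAGE_SIZE, ++ptep)
 *		(void)pmap_inval_bulk(&bulk, va, ptep, 0);
 *	pmap_inval_bulk_flush(&bulk);
 */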
/*
 * Called with interrupts hard-disabled.
 */
int
pmap_inval_intr(cpumask_t *cpumaskp)
{
	globaldata_t gd = mycpu;
	pmap_inval_info_t *info;
	int loopme = 0;
	int cpu;
	cpumask_t cpumask;
#ifdef LOOPMASK
	int loops;
#endif

	/*
	 * Check all cpus for invalidations we may need to service.
	 */
	cpu_ccfence();
	cpu = gd->gd_cpuid;
	cpumask = *cpumaskp;

	while (CPUMASK_TESTNZERO(cpumask)) {
		int n = BSFCPUMASK(cpumask);

#ifdef LOOPMASK
		KKASSERT(n >= 0 && n < MAXCPU);
#endif

		CPUMASK_NANDBIT(cpumask, n);
		info = &invinfo[n];

		/*
		 * Due to interrupts/races we can catch a new operation
		 * in an older interrupt.  A fence is needed once we detect
		 * the (not) done bit.
		 */
		if (!CPUMASK_TESTBIT(info->done, cpu))
			continue;
		cpu_lfence();

		/*
		 * info->mask and info->done always contain the originating
		 * cpu until the originator is done.  Targets may still be
		 * present in info->done after the originator is done (they
		 * will be finishing up their loops).
		 *
		 * Clear info->mask bits on other cpus to indicate that they
		 * have quiesced (entered the loop).  Once the other mask bits
		 * are clear we can execute the operation on the original,
		 * then clear the mask and done bits on the originator.  The
		 * targets will then finish up their side and clear their
		 * done bits.
		 *
		 * The command is considered 100% done when all done bits have
		 * been cleared.
		 */
		if (n != cpu) {
			/*
			 * Command state machine for 'other' cpus.
			 */
			if (CPUMASK_TESTBIT(info->mask, cpu)) {
				/*
				 * Other cpus indicate to the originator that
				 * they have quiesced.
				 */
				ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
				loopme = 1;
			} else if (info->ptep &&
				   CPUMASK_TESTBIT(info->mask, n)) {
				/*
				 * Other cpu must wait for the originator (n)
				 * to complete its command if ptep is not NULL.
				 */
				loopme = 1;
			} else {
				/*
				 * Other cpu detects that the originator has
				 * completed its command, or there was no
				 * command.
				 *
				 * Now that the page table entry has changed,
				 * we can follow up with our own invalidation.
				 */
				vm_offset_t va = info->va;
				int npgs;

				if (va == (vm_offset_t)-1 ||
				    info->npgs > MAX_INVAL_PAGES) {
					cpu_invltlb();
				} else {
					for (npgs = info->npgs; npgs; --npgs) {
						cpu_invlpg((void *)va);
						va += PAGE_SIZE;
					}
				}
				ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
				/* info invalid now */
				/* loopme left alone */
			}
		} else if (CPUMASK_TESTBIT(info->mask, cpu)) {
			/*
			 * Originator is waiting for other cpus
			 */
			if (CPUMASK_CMPMASKNEQ(info->mask, gd->gd_cpumask)) {
				/*
				 * Originator waits for other cpus to enter
				 * their loop (aka quiesce).
				 */
				loopme = 1;
#ifdef LOOPMASK
				loops = ++info->xloops;
				if ((loops & LOOPMASK) == 0) {
					info->failed = 1;
					loopdebug("orig_waitC", info);
					/* XXX recover from possible bug */
					mdcpu->gd_xinvaltlb = 0;
					smp_invlpg(&smp_active_mask);
				}
#endif
			} else {
				/*
				 * Originator executes operation and clears
				 * mask to allow other cpus to finish.
				 */
				KKASSERT(info->mode != INVDONE);
				if (info->mode == INVSTORE) {
					if (info->ptep)
						info->opte = atomic_swap_long(info->ptep, info->npte);
					CHECKSIGMASK(info);
					ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
					CHECKSIGMASK(info);
				} else {
					if (atomic_cmpset_long(info->ptep,
							       info->opte, info->npte)) {
						info->success = 1;
					} else {
						info->success = 0;
					}
					CHECKSIGMASK(info);
					ATOMIC_CPUMASK_NANDBIT(info->mask, cpu);
					CHECKSIGMASK(info);
				}
				loopme = 1;
			}
		} else {
			/*
			 * Originator does not have to wait for the other
			 * cpus to finish.  It clears its done bit.  A new
			 * command will not be initiated by the originator
			 * until the other cpus have cleared their done bits
			 * (asynchronously).
			 */
			vm_offset_t va = info->va;
			int npgs;

			if (va == (vm_offset_t)-1 ||
			    info->npgs > MAX_INVAL_PAGES) {
				cpu_invltlb();
			} else {
				for (npgs = info->npgs; npgs; --npgs) {
					cpu_invlpg((void *)va);
					va += PAGE_SIZE;
				}
			}
#ifdef LOOPMASK
			info->xloops = 0;
#endif
			/* leave loopme alone */
			/* other cpus may still be finishing up */
			/* can't race originator since that's us */
			info->mode = INVDONE;
			ATOMIC_CPUMASK_NANDBIT(info->done, cpu);
		}
	}
	return loopme;
}
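
/*
 * Note (inferred from the comments above, not stated in the original
 * source): a non-zero return asks the Xinvltlb vector code to keep
 * polling this function because a command is still in progress for this
 * cpu; zero means there is currently nothing left to service.
 */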