kernel - Improve physio performance (2)
[dragonfly.git] / sys / vm / vm_pager.c
blob 0112b3748613eda1ec0c5399902fe185deae9a65
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 */
/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>
extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */
static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);
/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}
/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}
/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return FALSE;
}
/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
	return;
}
static struct pagerops deadpagerops = {
	dead_pager_dealloc,
	dead_pager_getpage,
	dead_pager_putpages,
	dead_pager_haspage
};
struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&devicepagerops,	/* OBJT_MGTDEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = NELEM(pagertab);
/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
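
/*
 * Illustrative arithmetic (not part of the original comment): with
 * NPENDINGIO == 64 and MAXPHYS == 64KB, the worst-case concurrently
 * mapped pageout I/O is 64 * 64KB = 4MB, so the 8MB pager_map above
 * leaves roughly 2x headroom for other pbuf KVA consumers.
 */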
#define BSWHSIZE	16
#define BSWHMASK	(BSWHSIZE - 1)

TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;
static vm_offset_t swapbkva_mem;	/* swap buffers kva */
static vm_offset_t swapbkva_kva;	/* swap buffers kva */
static struct swqueue bswlist_mem[BSWHSIZE];	/* with preallocated memory */
static struct swqueue bswlist_kva[BSWHSIZE];	/* with kva */
static struct swqueue bswlist_raw[BSWHSIZE];	/* without kva */
static struct spinlock bswspin_mem[BSWHSIZE];
static struct spinlock bswspin_kva[BSWHSIZE];
static struct spinlock bswspin_raw[BSWHSIZE];
static int pbuf_raw_count;
static int pbuf_kva_count;
static int pbuf_mem_count;
SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0,
	   "Kernel pbuf raw reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0,
	   "Kernel pbuf kva reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_mem_count, CTLFLAG_RD, &pbuf_mem_count, 0,
	   "Kernel pbuf mem reservations");
/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	int i;

	for (i = 0; i < BSWHSIZE; ++i) {
		TAILQ_INIT(&bswlist_mem[i]);
		TAILQ_INIT(&bswlist_kva[i]);
		TAILQ_INIT(&bswlist_raw[i]);
		spin_init(&bswspin_mem[i], "bswmem");
		spin_init(&bswspin_kva[i], "bswkva");
		spin_init(&bswspin_raw[i], "bswraw");
	}
}

SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL);
/*
 * Called from the low level boot code only.
 */
static
void
vm_pager_bufferinit(void *dummy __unused)
{
	struct buf *bp;
	long i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva_mem = kmem_alloc_pageable(&pager_map, nswbuf_mem * MAXPHYS);
	if (!swapbkva_mem)
		panic("Not enough pager_map VM space for physical buffers");
	swapbkva_kva = kmem_alloc_pageable(&pager_map, nswbuf_kva * MAXPHYS);
	if (!swapbkva_kva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.
	 *
	 * mem - These pbufs have permanently allocated memory
	 * kva - These pbufs have unallocated kva reservations
	 * raw - These pbufs have no kva reservations
	 */

	/*
	 * Buffers with pre-allocated kernel memory can be convenient for
	 * copyin/copyout because no SMP page invalidation or other pmap
	 * operations are needed.
	 */
#if 1
	bp = swbuf_mem;
	for (i = 0; i < nswbuf_mem; ++i, ++bp) {
		vm_page_t m;
		vm_pindex_t pg;
		int j;

		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_mem;
		bp->b_kvasize = MAXPHYS;
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_mem[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_mem_count, 1);
		bp->b_data = bp->b_kvabase;
		bp->b_bcount = MAXPHYS;
		bp->b_xio.xio_pages = bp->b_xio.xio_internal_pages;

		pg = (vm_offset_t)bp->b_kvabase >> PAGE_SHIFT;
		vm_object_hold(&kernel_object);
		for (j = 0; j < MAXPHYS / PAGE_SIZE; ++j) {
			m = vm_page_alloc(&kernel_object, pg, VM_ALLOC_NORMAL |
							      VM_ALLOC_SYSTEM);
			KKASSERT(m != NULL);
			bp->b_xio.xio_internal_pages[j] = m;
			vm_page_wire(m);
			vm_page_flag_clear(m, PG_ZERO);
			/* early boot, no other cpus running yet */
			pmap_kenter_noinval(pg * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
			cpu_invlpg((void *)(pg * PAGE_SIZE));
			vm_page_wakeup(m);
			++pg;
		}
		vm_object_drop(&kernel_object);
		bp->b_xio.xio_npages = j;
	}
#endif

	/*
	 * Buffers with pre-assigned KVA bases.  The KVA has no memory pages
	 * assigned to it.  Saves the caller from having to reserve KVA for
	 * the page map.
	 */
	bp = swbuf_kva;
	for (i = 0; i < nswbuf_kva; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_kva;
		bp->b_kvasize = MAXPHYS;
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_kva[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_kva_count, 1);
	}

	/*
	 * RAW buffers with no KVA mappings.
	 *
	 * NOTE: We use KM_NOTLBSYNC here to reduce unnecessary IPIs
	 *	 during startup, which can really slow down emulated
	 *	 systems.
	 */
	nswbuf_raw = nbuf * 2;
	swbuf_raw = (void *)kmem_alloc3(&kernel_map,
				round_page(nswbuf_raw * sizeof(struct buf)),
				KM_NOTLBSYNC);
	smp_invltlb();
	bp = swbuf_raw;
	for (i = 0; i < nswbuf_raw; ++i, ++bp) {
		bp->b_swindex = i & BSWHMASK;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_raw[i & BSWHMASK], bp, b_freelist);
		atomic_add_int(&pbuf_raw_count, 1);
	}

	/*
	 * Allow the clustering code to use half of our pbufs.
	 */
	cluster_pbuf_freecnt = nswbuf_kva / 2;
}

SYSINIT(do_vmpg, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, vm_pager_bufferinit, NULL);
/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}
/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */
/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_LOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_UNLOCK(object);
				break;
			}
			VM_OBJECT_UNLOCK(object);
		}
	}
	return (object);
}
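
/*
 * Usage sketch (hypothetical; the list and its lock name are purely
 * illustrative, not part of this file).  The caller must hold the lock
 * protecting pg_list across the lookup and owns the reference gained
 * on success:
 *
 *	lockmgr(&my_pager_list_lock, LK_EXCLUSIVE);
 *	object = vm_pager_object_lookup(&my_pager_list, handle);
 *	lockmgr(&my_pager_list_lock, LK_RELEASE);
 *	if (object) {
 *		...use the object...
 *		vm_object_deallocate(object);
 *	}
 */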
/*
 * Initialize a physical buffer.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;	/* NULL if pbuf sans kva */
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}
/*
 * Allocate a physical buffer.
 *
 * There are a limited number of physical buffers.  We need to make
 * sure that no single subsystem is able to hog all of them, so each
 * subsystem implements a counter which is typically initialized to
 * 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * Physical buffers can be with or without KVA space reserved.  There
 * are severe limitations on the ones with KVA reserved, and fewer
 * limitations on the ones without.  getpbuf() gets one without,
 * getpbuf_kva() gets one with.
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)	/* raw */
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if (atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf0", 0);
		}
		if (pbuf_raw_count <= 0) {
			tsleep_interlock(&pbuf_raw_count, 0);
			if (atomic_fetchadd_int(&pbuf_raw_count, 0) <= 0)
				tsleep(&pbuf_raw_count, PINTERLOCKED,
				       "wswbuf0", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_raw[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
				spin_unlock(&bswspin_raw[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
			atomic_add_int(&pbuf_raw_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_raw[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}
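
/*
 * Usage sketch (hypothetical subsystem; mysubsys_pbuf_freecnt is
 * illustrative, not part of this file).  The same counter must be
 * passed to every matching getpbuf*()/relpbuf() pair, and is typically
 * sized to half the available pbufs, as cluster_pbuf_freecnt is above:
 *
 *	static int mysubsys_pbuf_freecnt = -1;
 *
 *	at init time:
 *		mysubsys_pbuf_freecnt = nswbuf_kva / 2;
 *
 *	bp = getpbuf(&mysubsys_pbuf_freecnt);	(may sleep if exhausted)
 *	...set up b_data/b_bcount and issue the I/O...
 *	relpbuf(bp, &mysubsys_pbuf_freecnt);
 */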
struct buf *
getpbuf_kva(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if (atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf0", 0);
		}
		if (pbuf_kva_count <= 0) {
			tsleep_interlock(&pbuf_kva_count, 0);
			if (atomic_fetchadd_int(&pbuf_kva_count, 0) <= 0)
				tsleep(&pbuf_kva_count, PINTERLOCKED,
				       "wswbuf0", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_kva[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
				spin_unlock(&bswspin_kva[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
			atomic_add_int(&pbuf_kva_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_kva[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}
/*
 * Allocate a pbuf with kernel memory already preallocated.  Caller must
 * not change the mapping.
 */
struct buf *
getpbuf_mem(int *pfreecnt)
{
	struct buf *bp;
	int iter;
	int loops;

	for (;;) {
		while (pfreecnt && *pfreecnt <= 0) {
			tsleep_interlock(pfreecnt, 0);
			if (atomic_fetchadd_int(pfreecnt, 0) <= 0)
				tsleep(pfreecnt, PINTERLOCKED, "wswbuf0", 0);
		}
		if (pbuf_mem_count <= 0) {
			tsleep_interlock(&pbuf_mem_count, 0);
			if (atomic_fetchadd_int(&pbuf_mem_count, 0) <= 0)
				tsleep(&pbuf_mem_count, PINTERLOCKED,
				       "wswbuf0", 0);
			continue;
		}
		iter = mycpuid & BSWHMASK;
		for (loops = BSWHSIZE; loops; --loops) {
			if (TAILQ_FIRST(&bswlist_mem[iter]) == NULL) {
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			spin_lock(&bswspin_mem[iter]);
			if ((bp = TAILQ_FIRST(&bswlist_mem[iter])) == NULL) {
				spin_unlock(&bswspin_mem[iter]);
				iter = (iter + 1) & BSWHMASK;
				continue;
			}
			TAILQ_REMOVE(&bswlist_mem[iter], bp, b_freelist);
			atomic_add_int(&pbuf_mem_count, -1);
			if (pfreecnt)
				atomic_add_int(pfreecnt, -1);
			spin_unlock(&bswspin_mem[iter]);
			initpbuf(bp);

			return bp;
		}
	}
	/* not reached */
}
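
/*
 * Usage sketch (illustrative): because a _mem pbuf's pages are
 * permanently wired and mapped at b_data, data can be staged with
 * plain copyin()/copyout() and no pmap or SMP invalidation work:
 *
 *	bp = getpbuf_mem(NULL);
 *	error = copyin(uaddr, bp->b_data, bytes);	(bytes <= MAXPHYS)
 *	...issue the I/O...
 *	relpbuf(bp, NULL);
 */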
/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)	/* raw */
{
	struct buf *bp;
	int iter = mycpuid & BSWHMASK;
	int loops;

	for (loops = BSWHSIZE; loops; --loops) {
		if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		spin_lock(&bswspin_raw[iter]);
		if (*pfreecnt <= 0 ||
		    (bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
			spin_unlock(&bswspin_raw[iter]);
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
		atomic_add_int(&pbuf_raw_count, -1);
		atomic_add_int(pfreecnt, -1);

		spin_unlock(&bswspin_raw[iter]);

		initpbuf(bp);

		return bp;
	}
	return NULL;
}
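
/*
 * Usage sketch (illustrative; mysubsys_pbuf_freecnt as above): unlike
 * getpbuf(), trypbuf() never sleeps, so callers that cannot block
 * should test for NULL and defer or fail the request:
 *
 *	if ((bp = trypbuf(&mysubsys_pbuf_freecnt)) == NULL)
 *		return (EWOULDBLOCK);
 */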
struct buf *
trypbuf_kva(int *pfreecnt)
{
	struct buf *bp;
	int iter = mycpuid & BSWHMASK;
	int loops;

	for (loops = BSWHSIZE; loops; --loops) {
		if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		spin_lock(&bswspin_kva[iter]);
		if (*pfreecnt <= 0 ||
		    (bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
			spin_unlock(&bswspin_kva[iter]);
			iter = (iter + 1) & BSWHMASK;
			continue;
		}
		TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
		atomic_add_int(&pbuf_kva_count, -1);
		atomic_add_int(pfreecnt, -1);

		spin_unlock(&bswspin_kva[iter]);

		initpbuf(bp);

		return bp;
	}
	return NULL;
}
/*
 * Release a physical buffer.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake = 0;
	int wake_free = 0;
	int iter = bp->b_swindex;

	KKASSERT(bp->b_flags & B_PAGING);
	dsched_buf_exit(bp);

	BUF_UNLOCK(bp);

	if (bp >= swbuf_mem && bp < &swbuf_mem[nswbuf_mem]) {
		KKASSERT(bp->b_kvabase);
		spin_lock(&bswspin_mem[iter]);
		TAILQ_INSERT_HEAD(&bswlist_mem[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_mem_count, 1) == nswbuf_mem / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_mem[iter]);
		if (wake)
			wakeup(&pbuf_mem_count);
	} else if (bp >= swbuf_kva && bp < &swbuf_kva[nswbuf_kva]) {
		KKASSERT(bp->b_kvabase);
		spin_lock(&bswspin_kva[iter]);
		TAILQ_INSERT_HEAD(&bswlist_kva[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_kva_count, 1) == nswbuf_kva / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_kva[iter]);
		if (wake)
			wakeup(&pbuf_kva_count);
	} else {
		KKASSERT(bp->b_kvabase == NULL);
		KKASSERT(bp >= swbuf_raw && bp < &swbuf_raw[nswbuf_raw]);
		spin_lock(&bswspin_raw[iter]);
		TAILQ_INSERT_HEAD(&bswlist_raw[iter], bp, b_freelist);
		if (atomic_fetchadd_int(&pbuf_raw_count, 1) == nswbuf_raw / 4)
			wake = 1;
		if (pfreecnt) {
			if (atomic_fetchadd_int(pfreecnt, 1) == 1)
				wake_free = 1;
		}
		spin_unlock(&bswspin_raw[iter]);
		if (wake)
			wakeup(&pbuf_raw_count);
	}
	if (wake_free)
		wakeup(pfreecnt);
}
void
pbuf_adjcount(int *pfreecnt, int n)
{
	if (n) {
		atomic_add_int(pfreecnt, n);
		wakeup(pfreecnt);
	}
}
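
/*
 * Usage sketch (illustrative; mysubsys_pbuf_freecnt as above):
 * pbuf_adjcount() lets a subsystem size its quota once the boot-time
 * pbuf counts are known, waking any threads already waiting on it:
 *
 *	pbuf_adjcount(&mysubsys_pbuf_freecnt, nswbuf_kva / 2);
 */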