/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 * $FreeBSD: src/sys/vm/vm_swap.c,v 1.96.2.2 2001/10/14 18:46:47 iedowse Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/nlookup.h>
#include <sys/sysctl.h>
#include <sys/dmap.h>		/* XXX */
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/blist.h>
#include <sys/kernel.h>

#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_param.h>

#include <sys/mutex2.h>
#include <sys/spinlock2.h>
/*
 * Indirect driver for multi-controller paging.
 */
static struct swdevt should_be_malloced[NSWAPDEV];
struct swdevt *swdevt = should_be_malloced;	/* exported to pstat/systat */
static swblk_t nswap;		/* first block after the interleaved devs */
static struct mtx swap_mtx = MTX_INITIALIZER("swpmtx");
int nswdev = NSWAPDEV;		/* exported to pstat/systat */

static int swapoff_one(int index);
struct vnode *swapdev_vp;
/*
 * (struct vnode *a_vp, struct bio *b_bio)
 *
 * vn_strategy() for swapdev_vp.  Perform swap strategy interleave device
 * selection.
 *
 * This function supports the KVABIO API.  If the underlying vnode/device
 * does not, it will make appropriate adjustments.
 */
static int
swapdev_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	swblk_t sz, off, seg, blkno, nblkno;
	struct swdevt *sp;
	int index;

	sz = howmany(bp->b_bcount, PAGE_SIZE);
	blkno = (swblk_t)(bio->bio_offset >> PAGE_SHIFT);
	/*
	 * Convert interleaved swap into per-device swap.  Note that
	 * the block size is left in PAGE_SIZE'd chunks (for the newswap)
	 * here.
	 */
	nbio = push_bio(bio);
	off = blkno % SWB_DMMAX;
	if (off + sz > SWB_DMMAX) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return (0);
	}
	seg = blkno / SWB_DMMAX;
	index = seg % nswdev;
	if (nswdev > 1) {
		seg /= nswdev;
		nbio->bio_offset = (off_t)(seg * SWB_DMMAX + off) << PAGE_SHIFT;
	} else {
		nbio->bio_offset = bio->bio_offset;
	}
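	/*
	 * Worked example of the interleave arithmetic above, assuming a
	 * hypothetical stripe size of SWB_DMMAX = 512 pages and nswdev = 2:
	 *
	 *	blkno = 1664
	 *	off   = 1664 % 512 = 128	(offset within the stripe)
	 *	seg   = 1664 / 512 = 3		(global stripe number)
	 *	index = 3 % 2 = 1		(stripe 3 lives on device 1)
	 *	seg  /= 2 -> 1			(device-local stripe number)
	 *	device page = 1 * 512 + 128 = 640
	 *
	 * i.e. global swap page 1664 maps to page 640 on device 1.
	 */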
	nblkno = (swblk_t)(nbio->bio_offset >> PAGE_SHIFT);
	sp = &swdevt[index];
	if (nblkno + sz > sp->sw_nblks) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return (0);
	}
	if (sp->sw_vp == NULL) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return (0);
	}
	/*
	 * Issue a strategy call on the appropriate swap vnode.  Note that
	 * bp->b_vp is not modified.  Strategy code is always supposed to
	 * use the passed vnode.
	 *
	 * We have to use vn_strategy() here even if we know we have a
	 * device in order to properly break up requests which exceed the
	 * device's DMA limits.
	 */
	vn_strategy(sp->sw_vp, nbio);

	return (0);
}

static int
swapdev_inactive(struct vop_inactive_args *ap)
{
	vrecycle(ap->a_vp);
	return (0);
}

static int
swapdev_reclaim(struct vop_reclaim_args *ap)
{
	return (0);
}
/*
 * Create a special vnode op vector for swapdev_vp - we only use
 * vn_strategy(), everything else returns an error.
 */
static struct vop_ops swapdev_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_strategy =		swapdev_strategy,
	.vop_inactive =		swapdev_inactive,
	.vop_reclaim =		swapdev_reclaim
};
static struct vop_ops *swapdev_vnode_vops_p = &swapdev_vnode_vops;

VNODEOP_SET(swapdev_vnode_vops);
/*
 * swapon_args(char *name)
 *
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 */
int
sys_swapon(struct sysmsg *sysmsg, const struct swapon_args *uap)
{
	struct thread *td = curthread;
	struct vattr attr;
	struct vnode *vp;
	struct nlookupdata nd;
	int error;

	error = caps_priv_check_self(SYSCAP_RESTRICTEDROOT);
	if (error)
		return (error);

	mtx_lock(&swap_mtx);
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error) {
		mtx_unlock(&swap_mtx);
		return (error);
	}
	if (vn_isdisk(vp, &error)) {
		error = swaponvp(td, vp, 0);
	} else if (vp->v_type == VREG && vp->v_tag == VT_NFS &&
		   (error = VOP_GETATTR(vp, &attr)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}
	if (error)
		vrele(vp);
	mtx_unlock(&swap_mtx);

	return (error);
}
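/*
 * Illustrative userland invocation of the syscall above.  The device path
 * is hypothetical and the prototype is commonly available via <unistd.h>:
 *
 *	#include <err.h>
 *	#include <unistd.h>
 *
 *	if (swapon("/dev/da0s1b") < 0)
 *		err(1, "swapon");
 */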
/*
 * Swfree(index) frees the index'th portion of the swap map.
 * Each of the nswdev devices provides 1/nswdev'th of the swap
 * space, which is laid out with blocks of SWB_DMMAX pages circularly
 * among the devices.
 *
 * The new swap code uses page-sized blocks.  The old swap code used
 * DEV_BSIZE'd chunks.
 *
 * XXX locking when multiple swapon's run in parallel
 */
static int
swaponvp(struct thread *td, struct vnode *vp, u_quad_t nblks)
{
	swblk_t aligned_nblks;
	int64_t dpsize;
	struct ucred *cred;
	struct swdevt *sp;
	swblk_t vsbase;
	swblk_t dvbase;
	swblk_t blk;
	cdev_t dev;
	int index;
	int error;

	cred = td->td_ucred;

	lwkt_gettoken(&vm_token);	/* needed for vm_swap_size and blist */
	mtx_lock(&swap_mtx);

	/*
	 * Setup swapdev_vp.  We support the KVABIO API for this vnode's
	 * strategy function.
	 */
	if (swapdev_vp == NULL) {
		error = getspecialvnode(VT_NON, NULL, &swapdev_vnode_vops_p,
					&swapdev_vp, 0, 0);
		if (error)
			panic("Cannot get vnode for swapdev");
		swapdev_vp->v_type = VNON;	/* Untyped */
		vsetflags(swapdev_vp, VKVABIO);
		vx_unlock(swapdev_vp);
	}
	for (sp = swdevt, index = 0; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp) {
			error = EBUSY;
			goto done;
		}
		if (sp->sw_vp == NULL)
			goto found;
	}
	error = EINVAL;
	goto done;
found:
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(vp, FREAD | FWRITE, cred, NULL);
	vn_unlock(vp);
	if (error)
		goto done;
	/*
	 * v_rdev is not valid until after the VOP_OPEN() call.  dev_psize()
	 * must be supported if a character device has been specified.
	 */
	if (vp->v_type == VCHR)
		dev = vp->v_rdev;
	else
		dev = NULL;

	if (nblks == 0 && dev != NULL) {
		dpsize = dev_dpsize(dev);
		if (dpsize == -1) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			VOP_CLOSE(vp, FREAD | FWRITE, NULL);
			vn_unlock(vp);
			error = ENXIO;
			goto done;
		}
		nblks = (u_quad_t)dpsize;
	}
	if (nblks == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NULL);
		vn_unlock(vp);
		error = ENXIO;
		goto done;
	}
	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First chop nblks off to page-align it, then convert.
	 *
	 * sw->sw_nblks is in page-sized chunks now too.
	 */
	nblks &= ~(u_quad_t)(ctodb(1) - 1);
	nblks = dbtoc(nblks);
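	/*
	 * Worked example of the conversion above, assuming the usual
	 * PAGE_SIZE = 4096 and DEV_BSIZE = 512 (so ctodb(1) == 8):
	 *
	 *	nblks = 8195 sectors
	 *	nblks &= ~7	    -> 8192	(drop the partial page)
	 *	nblks = dbtoc(8192) -> 1024	(pages)
	 */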
	/*
	 * Post-conversion nblks must not be >= BLIST_MAXBLKS, and
	 * we impose a 4-swap-device limit so we have to divide it out
	 * further.  Going beyond this will result in overflows in the
	 * blist code.
	 *
	 * Post-conversion nblks must fit within a (swblk_t), which
	 * this test also ensures.
	 */
	if (nblks > BLIST_MAXBLKS / nswdev) {
		kprintf("exceeded maximum of %ld blocks per swap unit\n",
			(long)BLIST_MAXBLKS / nswdev);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NULL);
		vn_unlock(vp);
		error = ENXIO;
		goto done;
	}
	sp->sw_vp = vp;
	sp->sw_dev = devid_from_dev(dev);
	sp->sw_device = dev;
	sp->sw_flags = SW_FREED;
	sp->sw_nused = 0;
	/*
	 * nblks, nswap, and SWB_DMMAX are PAGE_SIZE'd parameters now, not
	 * DEV_BSIZE'd.  aligned_nblks is used to calculate the
	 * size of the swap bitmap, taking into account the stripe size.
	 */
	aligned_nblks = (swblk_t)((nblks + SWB_DMMASK) &
				  ~(u_swblk_t)SWB_DMMASK);
	sp->sw_nblks = aligned_nblks;

	if (aligned_nblks * nswdev > nswap)
		nswap = aligned_nblks * nswdev;

	if (swapblist == NULL)
		swapblist = blist_create(nswap);
	else
		blist_resize(&swapblist, nswap, 0);
	for (dvbase = SWB_DMMAX; dvbase < aligned_nblks; dvbase += SWB_DMMAX) {
		blk = min(aligned_nblks - dvbase, SWB_DMMAX);
		vsbase = index * SWB_DMMAX + dvbase * nswdev;
		blist_free(swapblist, vsbase, blk);
		vm_swap_size += blk;
		vm_swap_max += blk;
	}
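	/*
	 * Example of the striping above for index = 1, nswdev = 2 and a
	 * hypothetical SWB_DMMAX = 512: the loop frees vsbase = 1536, 2560,
	 * 3584, ... in the global bitmap, i.e. every second stripe, offset
	 * by one stripe for this device.  Note that dvbase intentionally
	 * starts at SWB_DMMAX, so the first stripe of each device is never
	 * made available.
	 */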
	swap_pager_newswap();
	error = 0;
done:
	mtx_unlock(&swap_mtx);
	lwkt_reltoken(&vm_token);
	return (error);
}
/*
 * swapoff_args(char *name)
 *
 * System call swapoff(name) disables swapping on device name,
 * which must be an active swap device.  Return ENOMEM
 * if there is not enough memory to page in the contents of
 * the given device.
 */
int
sys_swapoff(struct sysmsg *sysmsg, const struct swapoff_args *uap)
{
	struct vnode *vp;
	struct nlookupdata nd;
	struct swdevt *sp;
	int error, index;

	error = caps_priv_check_self(SYSCAP_RESTRICTEDROOT);
	if (error)
		return (error);

	mtx_lock(&swap_mtx);
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error)
		goto done;

	for (sp = swdevt, index = 0; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp)
			goto found;
	}
	error = EINVAL;
	goto done;
found:
	error = swapoff_one(index);
	swap_pager_newswap();
done:
	mtx_unlock(&swap_mtx);
	return (error);
}
static int
swapoff_one(int index)
{
	swblk_t blk, aligned_nblks;
	swblk_t dvbase, vsbase;
	u_int pq_active_clean, pq_inactive_clean;
	struct swdevt *sp;
	struct vm_page marker;
	vm_page_t m;
	int q;

	mtx_lock(&swap_mtx);

	sp = &swdevt[index];
	aligned_nblks = sp->sw_nblks;
	pq_active_clean = pq_inactive_clean = 0;
	/*
	 * We can turn off this swap device safely only if the
	 * available virtual memory in the system will fit the amount
	 * of data we will have to page back in, plus an epsilon so
	 * the system doesn't become critically low on swap space.
	 */
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		bzero(&marker, sizeof(marker));
		marker.flags = PG_FICTITIOUS | PG_MARKER;
		marker.busy_count = PBUSY_LOCKED;
		marker.queue = PQ_ACTIVE + q;
		marker.pc = q;
		marker.wire_count = 1;

		vm_page_queues_spin_lock(marker.queue);
		TAILQ_INSERT_HEAD(&vm_page_queues[marker.queue].pl,
				  &marker, pageq);

		while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
			TAILQ_REMOVE(&vm_page_queues[marker.queue].pl,
				     &marker, pageq);
			TAILQ_INSERT_AFTER(&vm_page_queues[marker.queue].pl, m,
					   &marker, pageq);

			if (m->flags & (PG_MARKER | PG_FICTITIOUS))
				continue;

			if (vm_page_busy_try(m, FALSE) == 0) {
				vm_page_queues_spin_unlock(marker.queue);
				vm_page_test_dirty(m);
				if ((m->dirty & m->valid) == 0 &&
				    (m->flags & PG_NEED_COMMIT) == 0) {
					++pq_active_clean;
				}
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker.queue);
			}
		}
		TAILQ_REMOVE(&vm_page_queues[marker.queue].pl, &marker, pageq);
		vm_page_queues_spin_unlock(marker.queue);

		marker.queue = PQ_INACTIVE + q;

		vm_page_queues_spin_lock(marker.queue);
		TAILQ_INSERT_HEAD(&vm_page_queues[marker.queue].pl,
				  &marker, pageq);

		while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
			TAILQ_REMOVE(
				&vm_page_queues[marker.queue].pl,
				&marker, pageq);
			TAILQ_INSERT_AFTER(
				&vm_page_queues[marker.queue].pl,
				m, &marker, pageq);

			if (m->flags & (PG_MARKER | PG_FICTITIOUS))
				continue;

			if (vm_page_busy_try(m, FALSE) == 0) {
				vm_page_queues_spin_unlock(marker.queue);
				vm_page_test_dirty(m);
				if ((m->dirty & m->valid) == 0 &&
				    (m->flags & PG_NEED_COMMIT) == 0) {
					++pq_inactive_clean;
				}
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker.queue);
			}
		}
		TAILQ_REMOVE(&vm_page_queues[marker.queue].pl,
			     &marker, pageq);
		vm_page_queues_spin_unlock(marker.queue);
	}
	if (vmstats.v_free_count + vmstats.v_cache_count + pq_active_clean +
	    pq_inactive_clean + vm_swap_size < aligned_nblks + nswap_lowat) {
		mtx_unlock(&swap_mtx);
		return (ENOMEM);
	}
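	/*
	 * Example of the check above with illustrative numbers: removing a
	 * device with aligned_nblks = 262144 pages (1GB at 4KB pages)
	 * requires v_free_count + v_cache_count + the clean page counts +
	 * vm_swap_size to be at least 262144 + nswap_lowat; otherwise the
	 * paged-in data could not be held without critically depleting
	 * free memory and the remaining swap.
	 */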
	/*
	 * Prevent further allocations on this device
	 */
	sp->sw_flags |= SW_CLOSING;
	for (dvbase = SWB_DMMAX; dvbase < aligned_nblks; dvbase += SWB_DMMAX) {
		blk = min(aligned_nblks - dvbase, SWB_DMMAX);
		vsbase = index * SWB_DMMAX + dvbase * nswdev;
		vm_swap_size -= blist_fill(swapblist, vsbase, blk);
		vm_swap_max -= blk;
	}
	/*
	 * Page in the contents of the device and close it.  The pager is
	 * given one retry before we give up.
	 */
	if (swap_pager_swapoff(index) && swap_pager_swapoff(index)) {
		mtx_unlock(&swap_mtx);
		return (EINTR);
	}
	vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, NULL);
	vn_unlock(sp->sw_vp);
	vrele(sp->sw_vp);
	bzero(swdevt + index, sizeof(struct swdevt));
	/*
	 * Resize the bitmap based on the new largest swap device,
	 * or free the bitmap if there are no more devices.
	 */
	for (sp = swdevt, aligned_nblks = 0; sp < swdevt + nswdev; sp++) {
		aligned_nblks = max(aligned_nblks, sp->sw_nblks);
	}
	nswap = aligned_nblks * nswdev;

	if (nswap == 0) {
		blist_destroy(swapblist);
		swapblist = NULL;
		vrele(swapdev_vp);
		swapdev_vp = NULL;
	} else {
		blist_resize(&swapblist, nswap, 0);
	}

	mtx_unlock(&swap_mtx);
	return (0);
}
/*
 * Account for swap space in individual swdevt's.  The caller ensures
 * that the provided range falls into a single swdevt.
 *
 * +count	space freed
 * -count	space allocated
 */
void
swapacctspace(swblk_t base, swblk_t count)
{
	swblk_t seg;
	int index;

	vm_swap_size += count;
	seg = base / SWB_DMMAX;
	index = seg % nswdev;
	swdevt[index].sw_nused -= count;
}
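/*
 * Example of the sign convention above: the swap pager frees N pages with
 * swapacctspace(base, N), which grows vm_swap_size and shrinks the owning
 * device's sw_nused; an allocation of N pages passes -N and does the
 * reverse.
 */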
static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
	struct xswdev xs;
	struct swdevt *sp;
	int error;
	int n;

	error = 0;
	for (n = 0; n < nswdev; ++n) {
		sp = &swdevt[n];

		xs.xsw_size = sizeof(xs);
		xs.xsw_version = XSWDEV_VERSION;
		xs.xsw_blksize = PAGE_SIZE;
		xs.xsw_dev = sp->sw_dev;
		xs.xsw_flags = sp->sw_flags;
		xs.xsw_nblks = sp->sw_nblks;
		xs.xsw_used = sp->sw_nused;

		error = SYSCTL_OUT(req, &xs, sizeof(xs));
		if (error)
			break;
	}
	return (error);
}
SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswdev, 0,
	   "Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info_array, CTLFLAG_RD, sysctl_vm_swap_info,
	    "Swap statistics by device");