/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 * $FreeBSD: src/sys/vm/vm_swap.c,v 1.96.2.2 2001/10/14 18:46:47 iedowse Exp $
 * $DragonFly: src/sys/vm/vm_swap.c,v 1.36 2007/07/20 17:21:54 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/nlookup.h>
#include <sys/sysctl.h>
#include <sys/dmap.h>		/* XXX */
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/blist.h>
#include <sys/kernel.h>

#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_param.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <sys/mutex2.h>
/*
 * Indirect driver for multi-controller paging.
 */
static struct swdevt should_be_malloced[NSWAPDEV];
struct swdevt *swdevt = should_be_malloced;	/* exported to pstat/systat */
static swblk_t nswap;		/* first block after the interleaved devs */
static struct mtx swap_mtx = MTX_INITIALIZER;
int nswdev = NSWAPDEV;		/* exported to pstat/systat */

static int swapoff_one (int index);
struct vnode *swapdev_vp;
/*
 *	(struct vnode *a_vp, struct bio *b_bio)
 *
 *	vn_strategy() for swapdev_vp.  Perform swap strategy interleave device
 *	selection.
 */
static int
swapdev_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	int sz, off, seg, index, blkno, nblkno;
	struct swdevt *sp;

	sz = howmany(bp->b_bcount, PAGE_SIZE);
	blkno = (int)(bio->bio_offset >> PAGE_SHIFT);
	/*
	 * Convert interleaved swap into per-device swap.  Note that
	 * the block size is left in PAGE_SIZE'd chunks (for the newswap).
	 */
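	/*
	 * Worked example (illustrative numbers, assuming dmmax = 2048 pages
	 * and nswdev = 2): a request at blkno = 5000 falls in stripe
	 * seg = 5000 / 2048 = 2 at offset off = 5000 % 2048 = 904.
	 * Stripe 2 lives on device index = 2 % 2 = 0 and is that device's
	 * stripe 2 / 2 = 1, so the per-device page offset becomes
	 * 1 * 2048 + 904 = 2952.
	 */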
	nbio = push_bio(bio);
	seg = blkno / dmmax;
	off = blkno % dmmax;
	if (off + sz > dmmax) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(bio);
		return (0);
	}
	if (nswdev > 1) {
		index = seg % nswdev;
		seg /= nswdev;
		nbio->bio_offset = (off_t)(seg * dmmax + off) << PAGE_SHIFT;
	} else {
		index = 0;
		nbio->bio_offset = bio->bio_offset;
	}
	nblkno = (int)(nbio->bio_offset >> PAGE_SHIFT);
	sp = &swdevt[index];
	if (nblkno + sz > sp->sw_nblks) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return (0);
	}
	if (sp->sw_vp == NULL) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return (0);
	}
	/*
	 * Issue a strategy call on the appropriate swap vnode.  Note that
	 * bp->b_vp is not modified.  Strategy code is always supposed to
	 * use the vnode associated with the bio rather than bp->b_vp.
	 *
	 * We have to use vn_strategy() here even if we know we have a
	 * device in order to properly break up requests which exceed the
	 * device's DMA limits.
	 */
	vn_strategy(sp->sw_vp, nbio);
	return (0);
}
static int
swapdev_inactive(struct vop_inactive_args *ap)
{
	return (0);
}

static int
swapdev_reclaim(struct vop_reclaim_args *ap)
{
	return (0);
}
/*
 * Create a special vnode op vector for swapdev_vp - we only use
 * vn_strategy(), everything else returns an error.
 */
static struct vop_ops swapdev_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_strategy =		swapdev_strategy,
	.vop_inactive =		swapdev_inactive,
	.vop_reclaim =		swapdev_reclaim
};
static struct vop_ops *swapdev_vnode_vops_p = &swapdev_vnode_vops;

VNODEOP_SET(swapdev_vnode_vops);
/*
 * swapon_args(char *name)
 *
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 */
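/*
 * Illustrative userland usage (not part of this file; the device path
 * shown is hypothetical).  The libc wrapper simply passes the path
 * through to this system call:
 *
 *	if (swapon("/dev/da0s1b") < 0)
 *		err(1, "swapon");
 */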
int
sys_swapon(struct swapon_args *uap)
{
	struct thread *td = curthread;
	struct vattr attr;
	struct vnode *vp;
	struct nlookupdata nd;
	int error;

	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);

	mtx_lock(&swap_mtx);
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error) {
		mtx_unlock(&swap_mtx);
		return (error);
	}

	if (vn_isdisk(vp, &error)) {
		error = swaponvp(td, vp, 0);
	} else if (vp->v_type == VREG && vp->v_tag == VT_NFS &&
		   (error = VOP_GETATTR(vp, &attr)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}
	if (error)
		vrele(vp);
	mtx_unlock(&swap_mtx);
	return (error);
}
/*
 * Swfree(index) frees the index'th portion of the swap map.
 * Each of the nswdev devices provides 1/nswdev'th of the swap
 * space, which is laid out with blocks of dmmax pages circularly
 * among the devices.
 *
 * The new swap code uses page-sized blocks.  The old swap code used
 * DEV_BSIZE'd chunks.
 *
 * XXX locking when multiple swapon's run in parallel
 */
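/*
 * Layout sketch (illustrative numbers, assuming dmmax = 2048 and
 * nswdev = 2): global swap pages [0..2047] map to device 0,
 * [2048..4095] to device 1, [4096..6143] back to device 0, and so on.
 * For a given device `index', its per-device stripe starting at dvbase
 * corresponds to the global block vsbase = index * dmmax + dvbase * nswdev;
 * e.g. device 1, dvbase 2048 -> vsbase = 1 * 2048 + 2048 * 2 = 6144.
 */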
int
swaponvp(struct thread *td, struct vnode *vp, u_quad_t nblks)
{
	struct swdevt *sp;
	struct ucred *cred = td->td_ucred;
	swblk_t vsbase, dvbase, blk;
	swblk_t aligned_nblks;
	int64_t dpsize;
	cdev_t dev;
	int index;
	int error;

	if (swapdev_vp == NULL) {
		error = getspecialvnode(VT_NON, NULL, &swapdev_vnode_vops_p,
					&swapdev_vp, 0, 0);
		if (error)
			panic("Cannot get vnode for swapdev");
		swapdev_vp->v_type = VNON;	/* Untyped */
		vx_unlock(swapdev_vp);
	}
	for (sp = swdevt, index = 0 ; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp) {
			mtx_unlock(&swap_mtx);
			return (EBUSY);
		}
		if (sp->sw_vp == NULL)
			break;
	}
	if (index == nswdev) {
		mtx_unlock(&swap_mtx);
		return (EINVAL);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(vp, FREAD | FWRITE, cred, NULL);
	vn_unlock(vp);
	if (error) {
		mtx_unlock(&swap_mtx);
		return (error);
	}
	/*
	 * v_rdev is not valid until after the VOP_OPEN() call.  dev_psize()
	 * must be supported if a character device has been specified.
	 */
	if (vp->v_type == VCHR)
		dev = vp->v_rdev;
	else
		dev = NULL;

	if (nblks == 0 && dev != NULL) {
		dpsize = dev_dpsize(dev);
		if (dpsize == -1) {
			VOP_CLOSE(vp, FREAD | FWRITE);
			mtx_unlock(&swap_mtx);
			return (ENXIO);
		}
		nblks = (u_quad_t)dpsize;
	}
	if (nblks == 0) {
		VOP_CLOSE(vp, FREAD | FWRITE);
		mtx_unlock(&swap_mtx);
		return (ENXIO);
	}
	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First chop nblks off to page-align it, then convert.
	 *
	 * sw->sw_nblks is in page-sized chunks now too.
	 */
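	/*
	 * Illustrative numbers, assuming DEV_BSIZE = 512 and PAGE_SIZE =
	 * 4096: ctodb(1) = 8, so a 1000005-sector device is first masked
	 * down to 1000000 sectors and then converted to
	 * dbtoc(1000000) = 125000 pages.
	 */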
	nblks &= ~(u_quad_t)(ctodb(1) - 1);
	nblks = dbtoc(nblks);
	/*
	 * Post-conversion nblks must not be >= BLIST_MAXBLKS, and
	 * we impose a 4-swap-device limit so we have to divide it out
	 * further.  Going beyond this will result in overflows in the
	 * blist code.
	 *
	 * Post-conversion nblks must fit within a (swblk_t), which
	 * this test also ensures.
	 */
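	/*
	 * Rough magnitude check (illustrative, assuming BLIST_MAXBLKS is
	 * 0x40000000): with 4 KB pages that is 4 TB of representable swap,
	 * or about 1 TB per device once divided by the 4-device limit.
	 */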
	if (nblks > BLIST_MAXBLKS / nswdev) {
		kprintf("exceeded maximum of %d blocks per swap unit\n",
			(int)BLIST_MAXBLKS / nswdev);
		VOP_CLOSE(vp, FREAD | FWRITE);
		mtx_unlock(&swap_mtx);
		return (EINVAL);
	}
	sp->sw_vp = vp;
	sp->sw_dev = dev2udev(dev);
	sp->sw_flags = SW_FREED;
	/*
	 * nblks, nswap, and dmmax are PAGE_SIZE'd parameters now, not
	 * DEV_BSIZE'd.  aligned_nblks is used to calculate the
	 * size of the swap bitmap, taking into account the stripe size.
	 */
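	/*
	 * Example (illustrative, assuming dmmax = 2048 and nswdev = 2):
	 * a device of 125000 pages rounds up to aligned_nblks = 126976
	 * pages (62 stripes of 2048), so the interleaved address space
	 * must cover at least nswap = 126976 * 2 = 253952 pages.
	 */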
	aligned_nblks = (swblk_t)((nblks + (dmmax - 1)) & ~(u_long)(dmmax - 1));
	sp->sw_nblks = aligned_nblks;
	if (aligned_nblks * nswdev > nswap)
		nswap = aligned_nblks * nswdev;

	if (swapblist == NULL)
		swapblist = blist_create(nswap);
	else
		blist_resize(&swapblist, nswap, 0);
	for (dvbase = dmmax; dvbase < aligned_nblks; dvbase += dmmax) {
		blk = min(aligned_nblks - dvbase, dmmax);
		vsbase = index * dmmax + dvbase * nswdev;
		blist_free(swapblist, vsbase, blk);
		vm_swap_size += blk;
	}
	swap_pager_newswap();
	mtx_unlock(&swap_mtx);
	return (0);
}
/*
 * swapoff_args(char *name)
 *
 * System call swapoff(name) disables swapping on device name,
 * which must be an active swap device.  Return ENOMEM
 * if there is not enough memory to page in the contents of
 * the given device.
 */
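/*
 * Illustrative userland usage (not part of this file; the device path
 * shown is hypothetical), mirroring the swapon example above:
 *
 *	if (swapoff("/dev/da0s1b") < 0)
 *		err(1, "swapoff");
 */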
int
sys_swapoff(struct swapoff_args *uap)
{
	struct vnode *vp;
	struct nlookupdata nd;
	struct swdevt *sp;
	int error, index;

	error = priv_check(curthread, PRIV_ROOT);
	if (error)
		return (error);

	mtx_lock(&swap_mtx);
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error) {
		mtx_unlock(&swap_mtx);
		return (error);
	}
	for (sp = swdevt, index = 0; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp)
			break;
	}
	if (index < nswdev)
		error = swapoff_one(index);
	else
		error = EINVAL;
	vrele(vp);
	mtx_unlock(&swap_mtx);
	return (error);
}
static int
swapoff_one(int index)
{
	swblk_t blk, aligned_nblks;
	swblk_t dvbase, vsbase;
	u_int pq_active_clean, pq_inactive_clean;
	struct swdevt *sp;
	vm_page_t m;

	sp = &swdevt[index];
	aligned_nblks = sp->sw_nblks;
	pq_active_clean = pq_inactive_clean = 0;
	/*
	 * We can turn off this swap device safely only if the
	 * available virtual memory in the system will fit the amount
	 * of data we will have to page back in, plus an epsilon so
	 * the system doesn't become critically low on swap space.
	 */
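	/*
	 * Worked example (illustrative numbers): with 40000 free pages,
	 * 10000 cache pages, 20000 clean queued pages and
	 * vm_swap_size = 50000, removing a device of
	 * aligned_nblks = 100000 pages is allowed only while
	 * 40000 + 10000 + 20000 + 50000 = 120000 is at least
	 * 100000 + nswap_lowat; otherwise ENOMEM is returned.
	 */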
	lwkt_gettoken(&vm_token);
	TAILQ_FOREACH(m, &vm_page_queues[PQ_ACTIVE].pl, pageq) {
		if (m->flags & (PG_MARKER | PG_FICTITIOUS))
			continue;
		vm_page_test_dirty(m);
		if (m->dirty == 0)
			++pq_active_clean;
	}
	TAILQ_FOREACH(m, &vm_page_queues[PQ_INACTIVE].pl, pageq) {
		if (m->flags & (PG_MARKER | PG_FICTITIOUS))
			continue;
		vm_page_test_dirty(m);
		if (m->dirty == 0)
			++pq_inactive_clean;
	}
	lwkt_reltoken(&vm_token);
	if (vmstats.v_free_count + vmstats.v_cache_count + pq_active_clean +
	    pq_inactive_clean + vm_swap_size < aligned_nblks + nswap_lowat) {
		mtx_unlock(&swap_mtx);
		return (ENOMEM);
	}
	/*
	 * Prevent further allocations on this device
	 */
	sp->sw_flags |= SW_CLOSING;
	for (dvbase = dmmax; dvbase < aligned_nblks; dvbase += dmmax) {
		blk = min(aligned_nblks - dvbase, dmmax);
		vsbase = index * dmmax + dvbase * nswdev;
		vm_swap_size -= blist_fill(swapblist, vsbase, blk);
	}
	/*
	 * Page in the contents of the device and close it.
	 */
	if (swap_pager_swapoff(index)) {
		mtx_unlock(&swap_mtx);
		return (EINTR);
	}
	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE);
	bzero(swdevt + index, sizeof(struct swdevt));
	/*
	 * Resize the bitmap based on the new largest swap device,
	 * or free the bitmap if there are no more devices.
	 */
	for (sp = swdevt, aligned_nblks = 0; sp < swdevt + nswdev; sp++) {
		aligned_nblks = max(aligned_nblks, sp->sw_nblks);
	}
	nswap = aligned_nblks * nswdev;

	if (nswap == 0) {
		blist_destroy(swapblist);
		swapblist = NULL;
	} else {
		blist_resize(&swapblist, nswap, 0);
	}

	mtx_unlock(&swap_mtx);
	return (0);
}
/*
 * Account for swap space in individual swdevt's.  The caller ensures
 * that the provided range falls into a single swdevt.
 *
 * +count	space freed
 * -count	space allocated
 */
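/*
 * Illustrative calls (hypothetical callers): freeing 16 pages that live
 * on some device is swapacctspace(base, 16), which grows vm_swap_size
 * and shrinks that device's sw_nused; allocating the same 16 pages is
 * swapacctspace(base, -16), which does the opposite.
 */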
void
swapacctspace(swblk_t base, swblk_t count)
{
	int index;
	int seg;

	vm_swap_size += count;
	seg = base / dmmax;
	index = seg % nswdev;
	swdevt[index].sw_nused -= count;
}
static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
	struct xswdev xs;
	struct swdevt *sp;
	int error;
	int n;

	error = 0;
	for (n = 0; n < nswdev; ++n) {
		sp = &swdevt[n];
		xs.xsw_size = sizeof(xs);
		xs.xsw_version = XSWDEV_VERSION;
		xs.xsw_blksize = PAGE_SIZE;
		xs.xsw_dev = sp->sw_dev;
		xs.xsw_flags = sp->sw_flags;
		xs.xsw_nblks = sp->sw_nblks;
		xs.xsw_used = sp->sw_nused;
		error = SYSCTL_OUT(req, &xs, sizeof(xs));
		if (error)
			break;
	}
	return (error);
}
SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswdev, 0,
	"Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info_array, CTLFLAG_RD, sysctl_vm_swap_info,
	"Swap statistics by device");