/*
 * (MPSAFE)
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 * $FreeBSD: src/sys/vm/vm_swap.c,v 1.96.2.2 2001/10/14 18:46:47 iedowse Exp $
 */
#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/nlookup.h>
#include <sys/sysctl.h>
#include <sys/dmap.h>		/* XXX */
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/blist.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_param.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>
#include <sys/spinlock2.h>
/*
 * Indirect driver for multi-controller paging.
 */

#ifndef NSWAPDEV
#define NSWAPDEV	4
#endif
static struct swdevt should_be_malloced[NSWAPDEV];
struct swdevt *swdevt = should_be_malloced;	/* exported to pstat/systat */
static swblk_t nswap;		/* first block after the interleaved devs */
static struct mtx swap_mtx = MTX_INITIALIZER("swpmtx");
int nswdev = NSWAPDEV;		/* exported to pstat/systat */
swblk_t vm_swap_size;
swblk_t vm_swap_max;

static int swapoff_one(int index);
struct vnode *swapdev_vp;
/*
 * (struct vnode *a_vp, struct bio *b_bio)
 *
 * vn_strategy() for swapdev_vp.  Perform swap strategy interleave device
 * selection.
 *
 * No requirements.
 */
static int
swapdev_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	swblk_t sz, off, seg, blkno, nblkno;
	int index;
	struct swdevt *sp;

	sz = howmany(bp->b_bcount, PAGE_SIZE);
	blkno = (swblk_t)(bio->bio_offset >> PAGE_SHIFT);

	/*
	 * Convert interleaved swap into per-device swap.  Note that
	 * the block size is left in PAGE_SIZE'd chunks (for the newswap)
	 * here.
	 */
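	/*
	 * Illustrative worked example of the conversion below; the values
	 * are assumptions chosen for the example, not requirements.  With
	 * SWB_DMMAX = 32 pages and nswdev = 2, interleaved blkno 100 gives
	 * off = 100 % 32 = 4 and seg = 100 / 32 = 3, so index = 3 % 2 = 1
	 * and the per-device segment is 3 / 2 = 1, i.e. the request lands
	 * on device 1 at page 1 * 32 + 4 = 36.
	 */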
	nbio = push_bio(bio);
	if (nswdev > 1) {
		off = blkno % SWB_DMMAX;
		if (off + sz > SWB_DMMAX) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
			biodone(bio);
			return 0;
		}
		seg = blkno / SWB_DMMAX;
		index = seg % nswdev;
		seg /= nswdev;
		nbio->bio_offset = (off_t)(seg * SWB_DMMAX + off) << PAGE_SHIFT;
	} else {
		index = 0;
		nbio->bio_offset = bio->bio_offset;
	}

	nblkno = (swblk_t)(nbio->bio_offset >> PAGE_SHIFT);
	sp = &swdevt[index];
	if (nblkno + sz > sp->sw_nblks) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return 0;
	}
	if (sp->sw_vp == NULL) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return 0;
	}

	/*
	 * Issue a strategy call on the appropriate swap vnode.  Note that
	 * bp->b_vp is not modified.  Strategy code is always supposed to
	 * use the passed vp.
	 *
	 * We have to use vn_strategy() here even if we know we have a
	 * device in order to properly break up requests which exceed the
	 * device's DMA limits.
	 */
	vn_strategy(sp->sw_vp, nbio);

	return 0;
}
static int
swapdev_inactive(struct vop_inactive_args *ap)
{
	vrecycle(ap->a_vp);
	return(0);
}

static int
swapdev_reclaim(struct vop_reclaim_args *ap)
{
	return(0);
}
/*
 * Create a special vnode op vector for swapdev_vp - we only use
 * vn_strategy(), everything else returns an error.
 */
static struct vop_ops swapdev_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_strategy =		swapdev_strategy,
	.vop_inactive =		swapdev_inactive,
	.vop_reclaim =		swapdev_reclaim
};
static struct vop_ops *swapdev_vnode_vops_p = &swapdev_vnode_vops;

VNODEOP_SET(swapdev_vnode_vops);
/*
 * swapon_args(char *name)
 *
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 *
 * No requirements.
 */
int
sys_swapon(struct swapon_args *uap)
{
	struct thread *td = curthread;
	struct vattr attr;
	struct vnode *vp;
	struct nlookupdata nd;
	int error;

	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);

	mtx_lock(&swap_mtx);
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error) {
		mtx_unlock(&swap_mtx);
		return (error);
	}

	if (vn_isdisk(vp, &error)) {
		error = swaponvp(td, vp, 0);
	} else if (vp->v_type == VREG && vp->v_tag == VT_NFS &&
		   (error = VOP_GETATTR(vp, &attr)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}
	if (error)
		vrele(vp);
	mtx_unlock(&swap_mtx);

	return (error);
}
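/*
 * Minimal userland sketch of invoking the syscall above (illustrative
 * only, not part of this file).  It assumes the swapon(2) prototype from
 * <unistd.h> and uses /dev/da0s1b purely as an example device name.
 * swapon() fails with EBUSY if swapping is already enabled on the device.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		if (swapon("/dev/da0s1b") < 0)
 *			perror("swapon");
 *		return (0);
 *	}
 */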
/*
 * Swfree(index) frees the index'th portion of the swap map.
 * Each of the nswdev devices provides 1/nswdev'th of the swap
 * space, which is laid out with blocks of SWB_DMMAX pages circularly
 * among the devices.
 *
 * The new swap code uses page-sized blocks.  The old swap code used
 * DEV_BSIZE'd chunks.
 *
 * XXX locking when multiple swapon's run in parallel
 */
int
swaponvp(struct thread *td, struct vnode *vp, u_quad_t nblks)
{
	swblk_t aligned_nblks;
	int64_t dpsize;
	struct ucred *cred;
	struct swdevt *sp;
	swblk_t vsbase;
	swblk_t dvbase;
	cdev_t dev;
	int index;
	int error;
	swblk_t blk;

	cred = td->td_ucred;

	lwkt_gettoken(&vm_token);	/* needed for vm_swap_size and blist */
	mtx_lock(&swap_mtx);

	if (!swapdev_vp) {
		error = getspecialvnode(VT_NON, NULL, &swapdev_vnode_vops_p,
					&swapdev_vp, 0, 0);
		if (error)
			panic("Cannot get vnode for swapdev");
		swapdev_vp->v_type = VNON;	/* Untyped */
		vx_unlock(swapdev_vp);
	}

	for (sp = swdevt, index = 0 ; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp) {
			error = EBUSY;
			goto done;
		}
		if (!sp->sw_vp)
			goto found;
	}
	error = EINVAL;
	goto done;
    found:
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(vp, FREAD | FWRITE, cred, NULL);
	vn_unlock(vp);
	if (error)
		goto done;

	/*
	 * v_rdev is not valid until after the VOP_OPEN() call.  dev_psize()
	 * must be supported if a character device has been specified.
	 */
	if (vp->v_type == VCHR)
		dev = vp->v_rdev;
	else
		dev = NULL;

	if (nblks == 0 && dev != NULL) {
		dpsize = dev_dpsize(dev);
		if (dpsize == -1) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			VOP_CLOSE(vp, FREAD | FWRITE, NULL);
			vn_unlock(vp);
			error = ENXIO;
			goto done;
		}
		nblks = (u_quad_t)dpsize;
	}
	if (nblks == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NULL);
		vn_unlock(vp);
		error = ENXIO;
		goto done;
	}

	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First chop nblks off to page-align it, then convert.
	 *
	 * sw->sw_nblks is in page-sized chunks now too.
	 */
	nblks &= ~(u_quad_t)(ctodb(1) - 1);
	nblks = dbtoc(nblks);
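	/*
	 * Illustrative arithmetic for the conversion above, assuming the
	 * common case of DEV_BSIZE = 512 and PAGE_SIZE = 4096 (assumed
	 * values for the example only): ctodb(1) is then 8, so a device
	 * of 1000005 DEV_BSIZE blocks is first masked down to 1000000
	 * blocks and dbtoc() turns that into 125000 pages.
	 */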
	/*
	 * Post-conversion nblks must not be >= BLIST_MAXBLKS, and
	 * we impose a 4-swap-device limit so we have to divide it out
	 * further.  Going beyond this will result in overflows in the
	 * blist code.
	 *
	 * Post-conversion nblks must fit within a (swblk_t), which
	 * this test also ensures.
	 */
	if (nblks > BLIST_MAXBLKS / nswdev) {
		kprintf("exceeded maximum of %ld blocks per swap unit\n",
			(long)BLIST_MAXBLKS / nswdev);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NULL);
		vn_unlock(vp);
		error = ENXIO;
		goto done;
	}

	sp->sw_vp = vp;
	sp->sw_dev = dev2udev(dev);
	sp->sw_device = dev;
	sp->sw_flags = SW_FREED;
	sp->sw_nused = 0;

	/*
	 * nblks, nswap, and SWB_DMMAX are PAGE_SIZE'd parameters now, not
	 * DEV_BSIZE'd.  aligned_nblks is used to calculate the
	 * size of the swap bitmap, taking into account the stripe size.
	 */
	aligned_nblks = (swblk_t)((nblks + SWB_DMMASK) &
				  ~(u_swblk_t)SWB_DMMASK);
	sp->sw_nblks = aligned_nblks;

	if (aligned_nblks * nswdev > nswap)
		nswap = aligned_nblks * nswdev;

	if (swapblist == NULL)
		swapblist = blist_create(nswap);
	else
		blist_resize(&swapblist, nswap, 0);

	for (dvbase = SWB_DMMAX; dvbase < aligned_nblks; dvbase += SWB_DMMAX) {
		blk = min(aligned_nblks - dvbase, SWB_DMMAX);
		vsbase = index * SWB_DMMAX + dvbase * nswdev;
		blist_free(swapblist, vsbase, blk);
		vm_swap_size += blk;
		vm_swap_max += blk;
	}
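	/*
	 * Illustrative mapping performed by the loop above (the values are
	 * assumptions chosen for the example): with nswdev = 2 and this
	 * device at index = 1, the per-device stripe starting at
	 * dvbase = SWB_DMMAX is freed at
	 * vsbase = 1 * SWB_DMMAX + SWB_DMMAX * 2 = 3 * SWB_DMMAX in the
	 * interleaved space, which is exactly the stripe that
	 * swapdev_strategy() maps back to device 1, segment 1.  Note the
	 * loop starts at dvbase = SWB_DMMAX, so the first stripe of each
	 * device is never handed to the blist.
	 */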
	swap_pager_newswap();
	error = 0;
done:
	mtx_unlock(&swap_mtx);
	lwkt_reltoken(&vm_token);
	return (error);
}
/*
 * swapoff_args(char *name)
 *
 * System call swapoff(name) disables swapping on device name,
 * which must be an active swap device.  Return ENOMEM
 * if there is not enough memory to page in the contents of
 * the given device.
 *
 * No requirements.
 */
int
sys_swapoff(struct swapoff_args *uap)
{
	struct vnode *vp;
	struct nlookupdata nd;
	struct swdevt *sp;
	int error, index;

	error = priv_check(curthread, PRIV_ROOT);
	if (error)
		return (error);

	mtx_lock(&swap_mtx);
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error)
		goto done;

	for (sp = swdevt, index = 0; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp)
			goto found;
	}
	error = EINVAL;
	goto done;
found:
	error = swapoff_one(index);
	swap_pager_newswap();

done:
	mtx_unlock(&swap_mtx);
	return (error);
}
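/*
 * Companion userland sketch for the syscall above (illustrative only,
 * not part of this file).  As with the swapon() example earlier, the
 * swapoff(2) prototype is assumed to come from <unistd.h> and the device
 * name is just an example.  A failure with errno == ENOMEM means there
 * was not enough memory to page the device's contents back in.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		if (swapoff("/dev/da0s1b") < 0)
 *			perror("swapoff");
 *		return (0);
 *	}
 */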
static int
swapoff_one(int index)
{
	swblk_t blk, aligned_nblks;
	swblk_t dvbase, vsbase;
	u_int pq_active_clean, pq_inactive_clean;
	struct swdevt *sp;
	struct vm_page marker;
	vm_page_t m;
	int q;

	mtx_lock(&swap_mtx);

	sp = &swdevt[index];
	aligned_nblks = sp->sw_nblks;
	pq_active_clean = pq_inactive_clean = 0;

	/*
	 * We can turn off this swap device safely only if the
	 * available virtual memory in the system will fit the amount
	 * of data we will have to page back in, plus an epsilon so
	 * the system doesn't become critically low on swap space.
	 */
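	/*
	 * Stated explicitly (this restates the check performed after the
	 * page-queue scan below), the device may be taken offline only if
	 *
	 *	v_free_count + v_cache_count + pq_active_clean +
	 *	pq_inactive_clean + vm_swap_size
	 *		>= aligned_nblks + nswap_lowat
	 *
	 * where pq_active_clean and pq_inactive_clean are the clean page
	 * counts gathered by the scan.
	 */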
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		bzero(&marker, sizeof(marker));
		marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
		marker.queue = PQ_ACTIVE + q;
		marker.pc = q;
		marker.wire_count = 1;

		vm_page_queues_spin_lock(marker.queue);
		TAILQ_INSERT_HEAD(&vm_page_queues[marker.queue].pl,
				  &marker, pageq);

		while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
			TAILQ_REMOVE(&vm_page_queues[marker.queue].pl,
				     &marker, pageq);
			TAILQ_INSERT_AFTER(&vm_page_queues[marker.queue].pl, m,
					   &marker, pageq);
			if (m->flags & (PG_MARKER | PG_FICTITIOUS))
				continue;

			if (vm_page_busy_try(m, FALSE) == 0) {
				vm_page_queues_spin_unlock(marker.queue);
				if (m->dirty == 0) {
					vm_page_test_dirty(m);
					if (m->dirty == 0)
						++pq_active_clean;
				}
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker.queue);
			}
		}
		TAILQ_REMOVE(&vm_page_queues[marker.queue].pl, &marker, pageq);
		vm_page_queues_spin_unlock(marker.queue);

		marker.queue = PQ_INACTIVE + q;
		marker.pc = q;
		vm_page_queues_spin_lock(marker.queue);
		TAILQ_INSERT_HEAD(&vm_page_queues[marker.queue].pl,
				  &marker, pageq);

		while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
			TAILQ_REMOVE(
				&vm_page_queues[marker.queue].pl,
				&marker, pageq);
			TAILQ_INSERT_AFTER(
				&vm_page_queues[marker.queue].pl,
				m, &marker, pageq);
			if (m->flags & (PG_MARKER | PG_FICTITIOUS))
				continue;

			if (vm_page_busy_try(m, FALSE) == 0) {
				vm_page_queues_spin_unlock(marker.queue);
				if (m->dirty == 0) {
					vm_page_test_dirty(m);
					if (m->dirty == 0)
						++pq_inactive_clean;
				}
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker.queue);
			}
		}
		TAILQ_REMOVE(&vm_page_queues[marker.queue].pl,
			     &marker, pageq);
		vm_page_queues_spin_unlock(marker.queue);
	}

	if (vmstats.v_free_count + vmstats.v_cache_count + pq_active_clean +
	    pq_inactive_clean + vm_swap_size < aligned_nblks + nswap_lowat) {
		mtx_unlock(&swap_mtx);
		return (ENOMEM);
	}

	/*
	 * Prevent further allocations on this device
	 */
	sp->sw_flags |= SW_CLOSING;
	for (dvbase = SWB_DMMAX; dvbase < aligned_nblks; dvbase += SWB_DMMAX) {
		blk = min(aligned_nblks - dvbase, SWB_DMMAX);
		vsbase = index * SWB_DMMAX + dvbase * nswdev;
		vm_swap_size -= blist_fill(swapblist, vsbase, blk);
		vm_swap_max -= blk;
	}

	/*
	 * Page in the contents of the device and close it.
	 */
	if (swap_pager_swapoff(index) && swap_pager_swapoff(index)) {
		mtx_unlock(&swap_mtx);
		return (EINTR);
	}

	vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, NULL);
	vn_unlock(sp->sw_vp);
	vrele(sp->sw_vp);
	bzero(swdevt + index, sizeof(struct swdevt));
	/*
	 * Resize the bitmap based on the new largest swap device,
	 * or free the bitmap if there are no more devices.
	 */
	for (sp = swdevt, aligned_nblks = 0; sp < swdevt + nswdev; sp++) {
		if (sp->sw_vp)
			aligned_nblks = max(aligned_nblks, sp->sw_nblks);
	}
	nswap = aligned_nblks * nswdev;

	if (nswap == 0) {
		blist_destroy(swapblist);
		swapblist = NULL;
		vrele(swapdev_vp);
		swapdev_vp = NULL;
	} else {
		blist_resize(&swapblist, nswap, 0);
	}

	mtx_unlock(&swap_mtx);
	return (0);
}
/*
 * Account for swap space in individual swdevt's.  The caller ensures
 * that the provided range falls into a single swdevt.
 *
 * +count	space freed
 * -count	space allocated
 */
void
swapacctspace(swblk_t base, swblk_t count)
{
	int index;
	swblk_t seg;

	vm_swap_size += count;
	seg = base / SWB_DMMAX;
	index = seg % nswdev;
	swdevt[index].sw_nused -= count;
}
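/*
 * Hypothetical caller sketch (illustrative only): a pager that has just
 * allocated 16 pages starting at interleaved block 'blk' from swapblist
 * would account for them with swapacctspace(blk, -16); releasing those
 * pages back would use swapacctspace(blk, 16).
 */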
/*
 * Retrieve swap info
 */
static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
	struct xswdev xs;
	struct swdevt *sp;
	int error;
	int n;

	error = 0;
	for (n = 0; n < nswdev; ++n) {
		sp = &swdevt[n];

		xs.xsw_size = sizeof(xs);
		xs.xsw_version = XSWDEV_VERSION;
		xs.xsw_blksize = PAGE_SIZE;
		xs.xsw_dev = sp->sw_dev;
		xs.xsw_flags = sp->sw_flags;
		xs.xsw_nblks = sp->sw_nblks;
		xs.xsw_used = sp->sw_nused;

		error = SYSCTL_OUT(req, &xs, sizeof(xs));
		if (error)
			break;
	}
	return (error);
}
SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswdev, 0,
	   "Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info_array, CTLFLAG_RD, sysctl_vm_swap_info,
	    "Swap statistics by device");