kernel: Mark two more functions __printflike.
[dragonfly.git] / sys / vm / vm_swap.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 * $FreeBSD: src/sys/vm/vm_swap.c,v 1.96.2.2 2001/10/14 18:46:47 iedowse Exp $
 */
#include "opt_swap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/nlookup.h>
#include <sys/sysctl.h>
#include <sys/dmap.h>		/* XXX */
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/blist.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/conf.h>
#include <sys/stat.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_param.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>
#include <sys/mutex2.h>
#include <sys/spinlock2.h>
/*
 * Indirect driver for multi-controller paging.
 */

#ifndef NSWAPDEV
#define NSWAPDEV	4
#endif
static struct swdevt should_be_malloced[NSWAPDEV];
struct swdevt *swdevt = should_be_malloced;	/* exported to pstat/systat */
static swblk_t nswap;		/* first block after the interleaved devs */
static struct mtx swap_mtx = MTX_INITIALIZER;
int nswdev = NSWAPDEV;				/* exported to pstat/systat */
int vm_swap_size;
int vm_swap_max;

static int swapoff_one(int index);
struct vnode *swapdev_vp;
/*
 * (struct vnode *a_vp, struct bio *b_bio)
 *
 * vn_strategy() for swapdev_vp.  Perform swap strategy interleave device
 * selection.
 *
 * No requirements.
 */
static int
swapdev_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	int sz, off, seg, index, blkno, nblkno;
	struct swdevt *sp;

	sz = howmany(bp->b_bcount, PAGE_SIZE);
	blkno = (int)(bio->bio_offset >> PAGE_SHIFT);

	/*
	 * Convert interleaved swap into per-device swap.  Note that
	 * the block size is left in PAGE_SIZE'd chunks (for the newswap)
	 * here.
	 */
	nbio = push_bio(bio);
	if (nswdev > 1) {
		off = blkno % dmmax;
		if (off + sz > dmmax) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
			biodone(bio);
			return 0;
		}
		seg = blkno / dmmax;
		index = seg % nswdev;
		seg /= nswdev;
		nbio->bio_offset = (off_t)(seg * dmmax + off) << PAGE_SHIFT;
	} else {
		index = 0;
		nbio->bio_offset = bio->bio_offset;
	}
	nblkno = (int)(nbio->bio_offset >> PAGE_SHIFT);
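
	/*
	 * Illustrative walk-through of the interleave math above, using
	 * hypothetical values not taken from this file: with dmmax = 2048
	 * and nswdev = 2, an interleaved blkno of 5000 yields
	 * off = 5000 % 2048 = 904 and seg = 5000 / 2048 = 2, so
	 * index = 2 % 2 = 0 and seg becomes 2 / 2 = 1.  The request is
	 * therefore issued at per-device page offset 1 * 2048 + 904 = 2952
	 * on swap device 0.
	 */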
	sp = &swdevt[index];
	if (nblkno + sz > sp->sw_nblks) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return 0;
	}
	if (sp->sw_vp == NULL) {
		bp->b_error = ENODEV;
		bp->b_flags |= B_ERROR;
		/* I/O was never started on nbio, must biodone(bio) */
		biodone(bio);
		return 0;
	}
	/*
	 * Issue a strategy call on the appropriate swap vnode.  Note that
	 * bp->b_vp is not modified.  Strategy code is always supposed to
	 * use the passed vp.
	 *
	 * We have to use vn_strategy() here even if we know we have a
	 * device in order to properly break up requests which exceed the
	 * device's DMA limits.
	 */
	vn_strategy(sp->sw_vp, nbio);

	return 0;
}
static int
swapdev_inactive(struct vop_inactive_args *ap)
{
	vrecycle(ap->a_vp);
	return(0);
}

static int
swapdev_reclaim(struct vop_reclaim_args *ap)
{
	return(0);
}
/*
 * Create a special vnode op vector for swapdev_vp - we only use
 * vn_strategy(), everything else returns an error.
 */
static struct vop_ops swapdev_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_strategy =		swapdev_strategy,
	.vop_inactive =		swapdev_inactive,
	.vop_reclaim =		swapdev_reclaim
};
static struct vop_ops *swapdev_vnode_vops_p = &swapdev_vnode_vops;

VNODEOP_SET(swapdev_vnode_vops);
/*
 * swapon_args(char *name)
 *
 * System call swapon(name) enables swapping on device name,
 * which must be in the swdevsw.  Return EBUSY
 * if already swapping on this device.
 *
 * No requirements.
 */
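/*
 * Userland usage sketch (the device path below is hypothetical):
 *
 *	if (swapon("/dev/da0s1b") < 0)
 *		err(1, "swapon");
 */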
int
sys_swapon(struct swapon_args *uap)
{
	struct thread *td = curthread;
	struct vattr attr;
	struct vnode *vp;
	struct nlookupdata nd;
	int error;

	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);
	mtx_lock(&swap_mtx);
	get_mplock();
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error) {
		rel_mplock();
		mtx_unlock(&swap_mtx);
		return (error);
	}
	if (vn_isdisk(vp, &error)) {
		error = swaponvp(td, vp, 0);
	} else if (vp->v_type == VREG && vp->v_tag == VT_NFS &&
		   (error = VOP_GETATTR(vp, &attr)) == 0) {
		/*
		 * Allow direct swapping to NFS regular files in the same
		 * way that nfs_mountroot() sets up diskless swapping.
		 */
		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
	}
	if (error)
		vrele(vp);
	rel_mplock();
	mtx_unlock(&swap_mtx);

	return (error);
}
/*
 * Swfree(index) frees the index'th portion of the swap map.
 * Each of the nswdev devices provides 1/nswdev'th of the swap
 * space, which is laid out with blocks of dmmax pages circularly
 * among the devices.
 *
 * The new swap code uses page-sized blocks.  The old swap code used
 * DEV_BSIZE'd chunks.
 *
 * XXX locking when multiple swapon's run in parallel
 */
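/*
 * For example (hypothetical sizes): with nswdev = 2 and dmmax = 2048,
 * interleaved pages [0, 2048) come from device 0, [2048, 4096) from
 * device 1, [4096, 6144) from device 0 again, and so on.
 */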
int
swaponvp(struct thread *td, struct vnode *vp, u_quad_t nblks)
{
	swblk_t aligned_nblks;
	int64_t dpsize;
	struct ucred *cred;
	struct swdevt *sp;
	swblk_t vsbase;
	swblk_t dvbase;
	cdev_t dev;
	int index;
	int error;
	swblk_t blk;

	cred = td->td_ucred;

	lwkt_gettoken(&vm_token);	/* needed for vm_swap_size and blist */
	mtx_lock(&swap_mtx);

	if (!swapdev_vp) {
		error = getspecialvnode(VT_NON, NULL, &swapdev_vnode_vops_p,
					&swapdev_vp, 0, 0);
		if (error)
			panic("Cannot get vnode for swapdev");
		swapdev_vp->v_type = VNON;	/* Untyped */
		vx_unlock(swapdev_vp);
	}
	for (sp = swdevt, index = 0 ; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp) {
			error = EBUSY;
			goto done;
		}
		if (!sp->sw_vp)
			goto found;
	}
	error = EINVAL;
	goto done;
found:
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_OPEN(vp, FREAD | FWRITE, cred, NULL);
	vn_unlock(vp);
	if (error)
		goto done;
	/*
	 * v_rdev is not valid until after the VOP_OPEN() call.  dev_psize()
	 * must be supported if a character device has been specified.
	 */
	if (vp->v_type == VCHR)
		dev = vp->v_rdev;
	else
		dev = NULL;

	if (nblks == 0 && dev != NULL) {
		dpsize = dev_dpsize(dev);
		if (dpsize == -1) {
			VOP_CLOSE(vp, FREAD | FWRITE);
			error = ENXIO;
			goto done;
		}
		nblks = (u_quad_t)dpsize;
	}
	if (nblks == 0) {
		VOP_CLOSE(vp, FREAD | FWRITE);
		error = ENXIO;
		goto done;
	}
	/*
	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
	 * First chop nblks off to page-align it, then convert.
	 *
	 * sw->sw_nblks is in page-sized chunks now too.
	 */
	nblks &= ~(u_quad_t)(ctodb(1) - 1);
	nblks = dbtoc(nblks);
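
	/*
	 * Worked example, assuming DEV_BSIZE = 512 and PAGE_SIZE = 4096
	 * (so ctodb(1) == 8): nblks = 1000005 is first masked down to
	 * 1000000 512-byte blocks and then converted by dbtoc() to
	 * 125000 pages.
	 */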
	/*
	 * Post-conversion nblks must not be >= BLIST_MAXBLKS, and
	 * we impose a 4-swap-device limit so we have to divide it out
	 * further.  Going beyond this will result in overflows in the
	 * blist code.
	 *
	 * Post-conversion nblks must fit within a (swblk_t), which
	 * this test also ensures.
	 */
	if (nblks > BLIST_MAXBLKS / nswdev) {
		kprintf("exceeded maximum of %d blocks per swap unit\n",
			(int)BLIST_MAXBLKS / nswdev);
		VOP_CLOSE(vp, FREAD | FWRITE);
		error = ENXIO;
		goto done;
	}
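
	/*
	 * Assuming BLIST_MAXBLKS is 0x40000000 (an assumption, not taken
	 * from this file), the divide above limits each unit to
	 * 0x10000000 pages with the default nswdev of 4, i.e. 1TB of
	 * swap per device with 4K pages.
	 */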
	sp->sw_vp = vp;
	sp->sw_dev = dev2udev(dev);
	sp->sw_device = dev;
	sp->sw_flags = SW_FREED;
	sp->sw_nused = 0;

	/*
	 * nblks, nswap, and dmmax are PAGE_SIZE'd parameters now, not
	 * DEV_BSIZE'd.  aligned_nblks is used to calculate the
	 * size of the swap bitmap, taking into account the stripe size.
	 */
	aligned_nblks = (swblk_t)((nblks + (dmmax - 1)) & ~(u_long)(dmmax - 1));
	sp->sw_nblks = aligned_nblks;

	if (aligned_nblks * nswdev > nswap)
		nswap = aligned_nblks * nswdev;
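
	/*
	 * E.g. with dmmax = 2048 (hypothetical), a 125000-page device
	 * rounds up to aligned_nblks = 126976 (62 full stripes), and with
	 * nswdev = 4 the interleaved address space grows to
	 * 4 * 126976 = 507904 pages.
	 */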
	if (swapblist == NULL)
		swapblist = blist_create(nswap);
	else
		blist_resize(&swapblist, nswap, 0);

	for (dvbase = dmmax; dvbase < aligned_nblks; dvbase += dmmax) {
		blk = min(aligned_nblks - dvbase, dmmax);
		vsbase = index * dmmax + dvbase * nswdev;
		blist_free(swapblist, vsbase, blk);
		vm_swap_size += blk;
		vm_swap_max += blk;
	}
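
	/*
	 * Example mapping (hypothetical values): for index = 1, nswdev = 4
	 * and dmmax = 2048, the device-local stripe at dvbase = 2048 is
	 * freed into the interleaved blist at
	 * vsbase = 1 * 2048 + 2048 * 4 = 10240.  Note dvbase starts at
	 * dmmax, so the first stripe of each device is never made
	 * available.
	 */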
	swap_pager_newswap();
	error = 0;
done:
	mtx_unlock(&swap_mtx);
	lwkt_reltoken(&vm_token);
	return (error);
}
/*
 * swapoff_args(char *name)
 *
 * System call swapoff(name) disables swapping on device name,
 * which must be an active swap device.  Return ENOMEM
 * if there is not enough memory to page in the contents of
 * the given device.
 *
 * No requirements.
 */
int
sys_swapoff(struct swapoff_args *uap)
{
	struct vnode *vp;
	struct nlookupdata nd;
	struct swdevt *sp;
	int error, index;

	error = priv_check(curthread, PRIV_ROOT);
	if (error)
		return (error);

	mtx_lock(&swap_mtx);
	get_mplock();
	vp = NULL;
	error = nlookup_init(&nd, uap->name, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
	nlookup_done(&nd);
	if (error)
		goto done;
	for (sp = swdevt, index = 0; index < nswdev; index++, sp++) {
		if (sp->sw_vp == vp)
			goto found;
	}
	error = EINVAL;
	goto done;
found:
	error = swapoff_one(index);

done:
	rel_mplock();
	mtx_unlock(&swap_mtx);
	return (error);
}
static int
swapoff_one(int index)
{
	swblk_t blk, aligned_nblks;
	swblk_t dvbase, vsbase;
	u_int pq_active_clean, pq_inactive_clean;
	struct swdevt *sp;
	struct vm_page marker;
	vm_page_t m;
	int q;

	mtx_lock(&swap_mtx);

	sp = &swdevt[index];
	aligned_nblks = sp->sw_nblks;
	pq_active_clean = pq_inactive_clean = 0;
	/*
	 * We can turn off this swap device safely only if the
	 * available virtual memory in the system will fit the amount
	 * of data we will have to page back in, plus an epsilon so
	 * the system doesn't become critically low on swap space.
	 */
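	/*
	 * The scan below walks each page queue with a marker page so the
	 * queue spinlock can be dropped and reacquired without losing our
	 * position, counting clean pages that could be reclaimed without
	 * any I/O.
	 */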
	for (q = 0; q < PQ_L2_SIZE; ++q) {
		bzero(&marker, sizeof(marker));
		marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
		marker.queue = PQ_ACTIVE + q;
		marker.pc = q;
		marker.wire_count = 1;

		vm_page_queues_spin_lock(marker.queue);
		TAILQ_INSERT_HEAD(&vm_page_queues[marker.queue].pl,
				  &marker, pageq);

		while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
			TAILQ_REMOVE(&vm_page_queues[marker.queue].pl,
				     &marker, pageq);
			TAILQ_INSERT_AFTER(&vm_page_queues[marker.queue].pl, m,
					   &marker, pageq);
			if (m->flags & (PG_MARKER | PG_FICTITIOUS))
				continue;

			if (vm_page_busy_try(m, FALSE) == 0) {
				vm_page_queues_spin_unlock(marker.queue);
				if (m->dirty == 0) {
					vm_page_test_dirty(m);
					if (m->dirty == 0)
						++pq_active_clean;
				}
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker.queue);
			}
		}
		TAILQ_REMOVE(&vm_page_queues[marker.queue].pl, &marker, pageq);
		vm_page_queues_spin_unlock(marker.queue);
		marker.queue = PQ_INACTIVE + q;
		marker.pc = q;
		vm_page_queues_spin_lock(marker.queue);
		TAILQ_INSERT_HEAD(&vm_page_queues[marker.queue].pl,
				  &marker, pageq);

		while ((m = TAILQ_NEXT(&marker, pageq)) != NULL) {
			TAILQ_REMOVE(
				&vm_page_queues[marker.queue].pl,
				&marker, pageq);
			TAILQ_INSERT_AFTER(
				&vm_page_queues[marker.queue].pl,
				m, &marker, pageq);
			if (m->flags & (PG_MARKER | PG_FICTITIOUS))
				continue;

			if (vm_page_busy_try(m, FALSE) == 0) {
				vm_page_queues_spin_unlock(marker.queue);
				if (m->dirty == 0) {
					vm_page_test_dirty(m);
					if (m->dirty == 0)
						++pq_inactive_clean;
				}
				vm_page_wakeup(m);
				vm_page_queues_spin_lock(marker.queue);
			}
		}
		TAILQ_REMOVE(&vm_page_queues[marker.queue].pl,
			     &marker, pageq);
		vm_page_queues_spin_unlock(marker.queue);
	}
	if (vmstats.v_free_count + vmstats.v_cache_count + pq_active_clean +
	    pq_inactive_clean + vm_swap_size < aligned_nblks + nswap_lowat) {
		mtx_unlock(&swap_mtx);
		return (ENOMEM);
	}
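
	/*
	 * E.g. (hypothetical numbers): turning off a 125000-page device
	 * with nswap_lowat = 4096 requires free + cache + clean pages
	 * plus remaining swap to total at least 129096 pages; otherwise
	 * ENOMEM is returned above.
	 */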
	/*
	 * Prevent further allocations on this device
	 */
	sp->sw_flags |= SW_CLOSING;
	for (dvbase = dmmax; dvbase < aligned_nblks; dvbase += dmmax) {
		blk = min(aligned_nblks - dvbase, dmmax);
		vsbase = index * dmmax + dvbase * nswdev;
		vm_swap_size -= blist_fill(swapblist, vsbase, blk);
		vm_swap_max -= blk;
	}
	/*
	 * Page in the contents of the device and close it.
	 */
	if (swap_pager_swapoff(index)) {
		mtx_unlock(&swap_mtx);
		return (EINTR);
	}
	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE);
	vrele(sp->sw_vp);
	bzero(swdevt + index, sizeof(struct swdevt));
	/*
	 * Resize the bitmap based on the new largest swap device,
	 * or free the bitmap if there are no more devices.
	 */
	for (sp = swdevt, aligned_nblks = 0; sp < swdevt + nswdev; sp++) {
		if (sp->sw_vp)
			aligned_nblks = max(aligned_nblks, sp->sw_nblks);
	}
	nswap = aligned_nblks * nswdev;

	if (nswap == 0) {
		blist_destroy(swapblist);
		swapblist = NULL;
		vrele(swapdev_vp);
		swapdev_vp = NULL;
	} else {
		blist_resize(&swapblist, nswap, 0);
	}
	mtx_unlock(&swap_mtx);
	return (0);
}
/*
 * Account for swap space in individual swdevt's.  The caller ensures
 * that the provided range falls into a single swdevt.
 *
 * +count	space freed
 * -count	space allocated
 */
void
swapacctspace(swblk_t base, swblk_t count)
{
	int index;
	int seg;

	vm_swap_size += count;
	seg = base / dmmax;
	index = seg % nswdev;
	swdevt[index].sw_nused -= count;
}
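
/*
 * Sign convention example: swapacctspace(base, -16) records a 16-page
 * allocation, dropping vm_swap_size by 16 and raising sw_nused on the
 * owning device by 16; a +16 count records a 16-page free.
 */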
/*
 * Retrieve swap info
 */
static int
sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
{
	struct xswdev xs;
	struct swdevt *sp;
	int error;
	int n;

	error = 0;
	for (n = 0; n < nswdev; ++n) {
		sp = &swdevt[n];

		xs.xsw_size = sizeof(xs);
		xs.xsw_version = XSWDEV_VERSION;
		xs.xsw_blksize = PAGE_SIZE;
		xs.xsw_dev = sp->sw_dev;
		xs.xsw_flags = sp->sw_flags;
		xs.xsw_nblks = sp->sw_nblks;
		xs.xsw_used = sp->sw_nused;

		error = SYSCTL_OUT(req, &xs, sizeof(xs));
		if (error)
			break;
	}
	return (error);
}
SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswdev, 0,
	   "Number of swap devices");
SYSCTL_NODE(_vm, OID_AUTO, swap_info_array, CTLFLAG_RD, sysctl_vm_swap_info,
	    "Swap statistics by device");