uts: make emu10k non-verbose
[unleashed.git] / kernel / vm / vm_swap.c
blobd71fa6326b509d4a5466675f411bd714950312a5
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2015 Joyent, Inc.
27 * Copyright (c) 1987, 2010, Oracle and/or its affiliates. All rights reserved.
30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
31 /* All Rights Reserved */
34 * University Copyright- Copyright (c) 1982, 1986, 1988
35 * The Regents of the University of California
36 * All Rights Reserved
38 * University Acknowledgment- Portions of this document are derived from
39 * software developed by the University of California, Berkeley, and its
40 * contributors.
44 * Each physical swap area has an associated bitmap representing
45 * its physical storage. The bitmap records which swap slots are
46 * currently allocated or freed. Allocation is done by searching
47 * through the bitmap for the first free slot. Thus, there's
48 * no linear relation between offset within the swap device and the
49 * address (within its segment(s)) of the page that the slot backs;
50 * instead, it's an arbitrary one-to-one mapping.
52 * Associated with each swap area is a swapinfo structure. These
53 * structures are linked into a linear list that determines the
54 * ordering of swap areas in the logical swap device. Each contains a
55 * pointer to the corresponding bitmap, the area's size, and its
56 * associated vnode.
59 #include <sys/types.h>
60 #include <sys/inttypes.h>
61 #include <sys/param.h>
62 #include <sys/t_lock.h>
63 #include <sys/sysmacros.h>
64 #include <sys/systm.h>
65 #include <sys/errno.h>
66 #include <sys/kmem.h>
67 #include <sys/vfs.h>
68 #include <sys/vnode.h>
69 #include <sys/pathname.h>
70 #include <sys/cmn_err.h>
71 #include <sys/vtrace.h>
72 #include <sys/swap.h>
73 #include <sys/dumphdr.h>
74 #include <sys/debug.h>
75 #include <sys/fs/snode.h>
76 #include <sys/fs/swapnode.h>
77 #include <sys/policy.h>
78 #include <sys/zone.h>
80 #include <vm/as.h>
81 #include <vm/seg.h>
82 #include <vm/page.h>
83 #include <vm/seg_vn.h>
84 #include <vm/hat.h>
85 #include <vm/anon.h>
86 #include <vm/seg_map.h>
89 * To balance the load among multiple swap areas, we don't allow
90 * more than swap_maxcontig allocations to be satisfied from a
91 * single swap area before moving on to the next swap area. This
92 * effectively "interleaves" allocations among the many swap areas.
94 int swap_maxcontig; /* set by anon_init() to 1 Mb */
96 #define MINIROOTSIZE 12000 /* ~6 Meg XXX */
99 * XXX - this lock is a kludge. It serializes some aspects of swapadd() and
100 * swapdel() (namely fop_open, fop_close, VN_RELE). It protects against
101 * somebody swapadd'ing and getting swap slots from a vnode, while someone
102 * else is in the process of closing or rele'ing it.
104 static kmutex_t swap_lock;
106 kmutex_t swapinfo_lock;
109 * protected by the swapinfo_lock
111 struct swapinfo *swapinfo;
113 static struct swapinfo *silast;
114 static int nswapfiles;
116 static uoff_t swap_getoff(struct swapinfo *);
117 static int swapadd(struct vnode *, ulong_t, ulong_t, char *);
118 static int swapdel(struct vnode *, ulong_t);
119 static int swapslot_free(struct vnode *, uoff_t, struct swapinfo *);
122 * swap device bitmap allocation macros
124 #define MAPSHIFT 5
125 #define NBBW (NBPW * NBBY) /* number of bits per word */
126 #define TESTBIT(map, i) (((map)[(i) >> MAPSHIFT] & (1 << (i) % NBBW)))
127 #define SETBIT(map, i) (((map)[(i) >> MAPSHIFT] |= (1 << (i) % NBBW)))
128 #define CLEARBIT(map, i) (((map)[(i) >> MAPSHIFT] &= ~(1 << (i) % NBBW)))
130 int swap_debug = 0; /* set for debug printf's */
131 int swap_verify = 0; /* set to verify slots when freeing and allocating */
133 uint_t swapalloc_maxcontig;
136 * Allocate a range of up to *lenp contiguous slots (page) from a physical
137 * swap device. Flags are one of:
138 * SA_NOT Must have a slot from a physical swap device other than the
139 * the one containing input (*vpp, *offp).
140 * Less slots than requested may be returned. *lenp allocated slots are
141 * returned starting at *offp on *vpp.
142 * Returns 1 for a successful allocation, 0 for couldn't allocate any slots.
145 swap_phys_alloc(
146 struct vnode **vpp,
147 uoff_t *offp,
148 size_t *lenp,
149 uint_t flags)
151 struct swapinfo *sip;
152 offset_t soff, noff;
153 size_t len;
155 mutex_enter(&swapinfo_lock);
156 sip = silast;
158 /* Find a desirable physical device and allocate from it. */
159 do {
160 if (sip == NULL)
161 break;
162 if (!(sip->si_flags & ST_INDEL) &&
163 (spgcnt_t)sip->si_nfpgs > 0) {
164 /* Caller wants other than specified swap device */
165 if (flags & SA_NOT) {
166 if (*vpp != sip->si_vp ||
167 *offp < sip->si_soff ||
168 *offp >= sip->si_eoff)
169 goto found;
170 /* Caller is loose, will take anything */
171 } else
172 goto found;
173 } else if (sip->si_nfpgs == 0)
174 sip->si_allocs = 0;
175 if ((sip = sip->si_next) == NULL)
176 sip = swapinfo;
177 } while (sip != silast);
178 mutex_exit(&swapinfo_lock);
179 return (0);
180 found:
181 soff = swap_getoff(sip);
182 sip->si_nfpgs--;
183 if (soff == -1)
184 panic("swap_alloc: swap_getoff failed!");
186 for (len = PAGESIZE; len < *lenp; len += PAGESIZE) {
187 if (sip->si_nfpgs == 0)
188 break;
189 if (swapalloc_maxcontig && len >= swapalloc_maxcontig)
190 break;
191 noff = swap_getoff(sip);
192 if (noff == -1) {
193 break;
194 } else if (noff != soff + len) {
195 CLEARBIT(sip->si_swapslots, btop(noff - sip->si_soff));
196 break;
198 sip->si_nfpgs--;
200 *vpp = sip->si_vp;
201 *offp = soff;
202 *lenp = len;
203 ASSERT((spgcnt_t)sip->si_nfpgs >= 0);
204 sip->si_allocs += btop(len);
205 if (sip->si_allocs >= swap_maxcontig) {
206 sip->si_allocs = 0;
207 if ((silast = sip->si_next) == NULL)
208 silast = swapinfo;
210 mutex_exit(&swapinfo_lock);
211 return (1);
214 int swap_backsearch = 0;
217 * Get a free offset on swap device sip.
218 * Return >=0 offset if succeeded, -1 for failure.
220 static uoff_t
221 swap_getoff(struct swapinfo *sip)
223 uint_t *sp, *ep;
224 size_t aoff, boff, poff, slotnumber;
226 ASSERT(MUTEX_HELD(&swapinfo_lock));
228 sip->si_alloccnt++;
229 for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
230 ep = &sip->si_swapslots[sip->si_mapsize / NBPW]; sp < ep; sp++) {
231 if (*sp != (uint_t)0xffffffff)
232 goto foundentry;
233 else
234 sip->si_checkcnt++;
236 SWAP_PRINT(SW_ALLOC,
237 "swap_getoff: couldn't find slot from hint %ld to end\n",
238 sip->si_hint, 0, 0, 0, 0);
240 * Go backwards? Check for faster method XXX
242 if (swap_backsearch) {
243 for (sp = &sip->si_swapslots[sip->si_hint >> MAPSHIFT],
244 ep = sip->si_swapslots; sp > ep; sp--) {
245 if (*sp != (uint_t)0xffffffff)
246 goto foundentry;
247 else
248 sip->si_checkcnt++;
250 } else {
251 for (sp = sip->si_swapslots,
252 ep = &sip->si_swapslots[sip->si_hint >> MAPSHIFT];
253 sp < ep; sp++) {
254 if (*sp != (uint_t)0xffffffff)
255 goto foundentry;
256 else
257 sip->si_checkcnt++;
260 if (*sp == 0xffffffff) {
261 cmn_err(CE_WARN, "No free swap slots!");
262 return ((uoff_t)-1);
265 foundentry:
267 * aoff is the page number offset (in bytes) of the si_swapslots
268 * array element containing a free page
270 * boff is the page number offset of the free page
271 * (i.e. cleared bit) in si_swapslots[aoff].
273 aoff = ((char *)sp - (char *)sip->si_swapslots) * NBBY;
275 for (boff = (sip->si_hint % NBBW); boff < NBBW; boff++) {
276 if (!TESTBIT(sip->si_swapslots, aoff + boff))
277 goto foundslot;
278 else
279 sip->si_checkcnt++;
281 for (boff = 0; boff < (sip->si_hint % NBBW); boff++) {
282 if (!TESTBIT(sip->si_swapslots, aoff + boff))
283 goto foundslot;
284 else
285 sip->si_checkcnt++;
287 panic("swap_getoff: didn't find slot in word hint %ld", sip->si_hint);
289 foundslot:
291 * Return the offset of the free page in swap device.
292 * Convert page number of byte offset and add starting
293 * offset of swap device.
295 slotnumber = aoff + boff;
296 SWAP_PRINT(SW_ALLOC, "swap_getoff: allocating slot %ld\n",
297 slotnumber, 0, 0, 0, 0);
298 poff = ptob(slotnumber);
299 if (poff + sip->si_soff >= sip->si_eoff)
300 printf("ptob(aoff(%ld) + boff(%ld))(%ld) >= eoff(%ld)\n",
301 aoff, boff, ptob(slotnumber), (long)sip->si_eoff);
302 ASSERT(poff < sip->si_eoff);
304 * We could verify here that the slot isn't already allocated
305 * by looking through all the anon slots.
307 SETBIT(sip->si_swapslots, slotnumber);
308 sip->si_hint = slotnumber + 1; /* hint = next slot */
309 return (poff + sip->si_soff);
313 * Free a swap page.
315 void
316 swap_phys_free(struct vnode *vp, uoff_t off, size_t len)
318 struct swapinfo *sip;
319 ssize_t pagenumber, npage;
321 mutex_enter(&swapinfo_lock);
322 sip = swapinfo;
324 do {
325 if (sip->si_vp == vp &&
326 sip->si_soff <= off && off < sip->si_eoff) {
327 for (pagenumber = btop(off - sip->si_soff),
328 npage = btop(len) + pagenumber;
329 pagenumber < npage; pagenumber++) {
330 SWAP_PRINT(SW_ALLOC,
331 "swap_phys_free: freeing slot %ld on "
332 "sip %p\n",
333 pagenumber, sip, 0, 0, 0);
334 if (!TESTBIT(sip->si_swapslots, pagenumber)) {
335 panic(
336 "swap_phys_free: freeing free slot "
337 "%p,%lx\n", (void *)vp,
338 ptob(pagenumber) + sip->si_soff);
340 CLEARBIT(sip->si_swapslots, pagenumber);
341 sip->si_nfpgs++;
343 ASSERT(sip->si_nfpgs <= sip->si_npgs);
344 mutex_exit(&swapinfo_lock);
345 return;
347 } while ((sip = sip->si_next) != NULL);
348 panic("swap_phys_free");
349 /*NOTREACHED*/
353 * Return the anon struct corresponding for the given
354 * <vnode, off> if it is part of the virtual swap device.
355 * Return the anon struct if found, otherwise NULL.
357 struct anon *
358 swap_anon(struct vnode *vp, uoff_t off)
360 struct anon *ap;
362 ASSERT(MUTEX_HELD(AH_MUTEX(vp, off)));
364 for (ap = anon_hash[ANON_HASH(vp, off)]; ap != NULL; ap = ap->an_hash) {
365 if (ap->an_vp == vp && ap->an_off == off)
366 return (ap);
368 return (NULL);
373 * Determine if the vp offset range overlap a swap device.
376 swap_in_range(struct vnode *vp, uoff_t offset, size_t len)
378 struct swapinfo *sip;
379 uoff_t eoff;
381 eoff = offset + len;
382 ASSERT(eoff > offset);
384 mutex_enter(&swapinfo_lock);
385 sip = swapinfo;
386 if (vp && sip) {
387 do {
388 if (vp != sip->si_vp || eoff <= sip->si_soff ||
389 offset >= sip->si_eoff)
390 continue;
391 mutex_exit(&swapinfo_lock);
392 return (1);
393 } while ((sip = sip->si_next) != NULL);
395 mutex_exit(&swapinfo_lock);
396 return (0);
400 * See if name is one of our swap files
401 * even though lookupname failed.
402 * This can be used by swapdel to delete
403 * swap resources on remote machines
404 * where the link has gone down.
406 static struct vnode *
407 swapdel_byname(
408 char *name, /* pathname to delete */
409 ulong_t lowblk) /* Low block number of area to delete */
411 struct swapinfo **sipp, *osip;
412 uoff_t soff;
415 * Find the swap file entry for the file to
416 * be deleted. Skip any entries that are in
417 * transition.
420 soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */
422 mutex_enter(&swapinfo_lock);
423 for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
424 if ((strcmp(osip->si_pname, name) == 0) &&
425 (osip->si_soff == soff) && (osip->si_flags == 0)) {
426 struct vnode *vp = osip->si_vp;
428 VN_HOLD(vp);
429 mutex_exit(&swapinfo_lock);
430 return (vp);
433 mutex_exit(&swapinfo_lock);
434 return (NULL);
439 * New system call to manipulate swap files.
442 swapctl(int sc_cmd, void *sc_arg, int *rv)
444 struct swapinfo *sip, *csip, *tsip;
445 int error = 0;
446 struct swapent st, *ust;
447 struct swapres sr;
448 struct vnode *vp;
449 int cnt = 0;
450 int tmp_nswapfiles;
451 int nswap;
452 int length, nlen;
453 int gplen = 0, plen;
454 char *swapname;
455 char *pname;
456 char *tpname;
457 struct anoninfo ai;
458 spgcnt_t avail;
459 int global = INGLOBALZONE(curproc);
460 struct zone *zp = curproc->p_zone;
463 * When running in a zone we want to hide the details of the swap
464 * devices: we report there only being one swap device named "swap"
465 * having a size equal to the sum of the sizes of all real swap devices
466 * on the system.
468 switch (sc_cmd) {
469 case SC_GETNSWP:
470 if (global)
471 *rv = nswapfiles;
472 else
473 *rv = 1;
474 return (0);
476 case SC_AINFO:
478 * Return anoninfo information with these changes:
479 * ani_max = maximum amount of swap space
480 * (including potentially available physical memory)
481 * ani_free = amount of unallocated anonymous memory
482 * (some of which might be reserved and including
483 * potentially available physical memory)
484 * ani_resv = amount of claimed (reserved) anonymous memory
486 avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
487 ai.ani_max = (k_anoninfo.ani_max +
488 k_anoninfo.ani_mem_resv) + avail;
490 /* Update ani_free */
491 set_anoninfo();
492 ai.ani_free = k_anoninfo.ani_free + avail;
494 ai.ani_resv = k_anoninfo.ani_phys_resv +
495 k_anoninfo.ani_mem_resv;
497 if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
499 * We're in a non-global zone with a swap cap. We
500 * always report the system-wide values for the global
501 * zone, even though it too can have a swap cap.
505 * For a swap-capped zone, the numbers are contrived
506 * since we don't have a correct value of 'reserved'
507 * for the zone.
509 * The ani_max value is always the zone's swap cap.
511 * The ani_free value is always the difference between
512 * the cap and the amount of swap in use by the zone.
514 * The ani_resv value is typically set to be the amount
515 * of swap in use by the zone, but can be adjusted
516 * upwards to indicate how much swap is currently
517 * unavailable to that zone due to usage by entities
518 * outside the zone.
520 * This works as follows.
522 * In the 'swap -s' output, the data is displayed
523 * as follows:
524 * allocated = ani_max - ani_free
525 * reserved = ani_resv - allocated
526 * available = ani_max - ani_resv
528 * Taking a contrived example, if the swap cap is 100
529 * and the amount of swap used by the zone is 75, this
530 * gives:
531 * allocated = ani_max - ani_free = 100 - 25 = 75
532 * reserved = ani_resv - allocated = 75 - 75 = 0
533 * available = ani_max - ani_resv = 100 - 75 = 25
535 * In this typical case, you can see that the 'swap -s'
536 * 'reserved' will always be 0 inside a swap capped
537 * zone.
539 * However, if the system as a whole has less free
540 * swap than the zone limits allow, then we adjust
541 * the ani_resv value up so that it is the difference
542 * between the zone cap and the amount of free system
543 * swap. Taking the above example, but when the
544 * system as a whole only has 20 of swap available, we
545 * get an ani_resv of 100 - 20 = 80. This gives:
546 * allocated = ani_max - ani_free = 100 - 25 = 75
547 * reserved = ani_resv - allocated = 80 - 75 = 5
548 * available = ani_max - ani_resv = 100 - 80 = 20
550 * In this case, you can see how the ani_resv value is
551 * tweaked up to make the 'swap -s' numbers work inside
552 * the zone.
554 rctl_qty_t cap, used;
555 pgcnt_t pgcap, sys_avail;
557 mutex_enter(&zp->zone_mem_lock);
558 cap = zp->zone_max_swap_ctl;
559 used = zp->zone_max_swap;
560 mutex_exit(&zp->zone_mem_lock);
562 pgcap = MIN(btop(cap), ai.ani_max);
563 ai.ani_free = pgcap - btop(used);
565 /* Get the system-wide swap currently available. */
566 sys_avail = ai.ani_max - ai.ani_resv;
567 if (sys_avail < ai.ani_free)
568 ai.ani_resv = pgcap - sys_avail;
569 else
570 ai.ani_resv = btop(used);
572 ai.ani_max = pgcap;
575 if (copyout(&ai, sc_arg, sizeof (struct anoninfo)) != 0)
576 return (EFAULT);
577 return (0);
579 case SC_LIST:
580 if (copyin(sc_arg, &length, sizeof (int)) != 0)
581 return (EFAULT);
582 if (!global) {
583 struct swapent st;
584 char *swappath = "swap";
586 if (length < 1)
587 return (ENOMEM);
588 ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
589 if (copyin(ust, &st, sizeof (swapent_t)) != 0)
590 return (EFAULT);
591 st.ste_start = PAGESIZE >> SCTRSHFT;
592 st.ste_length = (off_t)0;
593 st.ste_pages = 0;
594 st.ste_free = 0;
595 st.ste_flags = 0;
597 mutex_enter(&swapinfo_lock);
598 for (sip = swapinfo, nswap = 0;
599 sip != NULL && nswap < nswapfiles;
600 sip = sip->si_next, nswap++) {
601 st.ste_length +=
602 (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
603 st.ste_pages += sip->si_npgs;
604 st.ste_free += sip->si_nfpgs;
606 mutex_exit(&swapinfo_lock);
608 if (zp->zone_max_swap_ctl != UINT64_MAX) {
609 rctl_qty_t cap, used;
611 mutex_enter(&zp->zone_mem_lock);
612 cap = zp->zone_max_swap_ctl;
613 used = zp->zone_max_swap;
614 mutex_exit(&zp->zone_mem_lock);
616 st.ste_length = MIN(cap, st.ste_length);
617 st.ste_pages = MIN(btop(cap), st.ste_pages);
618 st.ste_free = MIN(st.ste_pages - btop(used),
619 st.ste_free);
622 if (copyout(&st, ust, sizeof (swapent_t)) != 0 ||
623 copyout(swappath, st.ste_path,
624 strlen(swappath) + 1) != 0) {
625 return (EFAULT);
627 *rv = 1;
628 return (0);
630 beginning:
631 mutex_enter(&swapinfo_lock);
632 tmp_nswapfiles = nswapfiles;
633 mutex_exit(&swapinfo_lock);
636 * Return early if there are no swap entries to report:
638 if (tmp_nswapfiles < 1) {
639 *rv = 0;
640 return (0);
643 /* Return an error if not enough space for the whole table. */
644 if (length < tmp_nswapfiles)
645 return (ENOMEM);
647 * Get memory to hold the swap entries and their names. We'll
648 * copy the real entries into these and then copy these out.
649 * Allocating the pathname memory is only a guess so we may
650 * find that we need more and have to do it again.
651 * All this is because we have to hold the anon lock while
652 * traversing the swapinfo list, and we can't be doing copyouts
653 * and/or kmem_alloc()s during this.
655 csip = kmem_zalloc(tmp_nswapfiles * sizeof (struct swapinfo),
656 KM_SLEEP);
657 retry:
658 nlen = tmp_nswapfiles * (gplen += 100);
659 pname = kmem_zalloc(nlen, KM_SLEEP);
661 mutex_enter(&swapinfo_lock);
663 if (tmp_nswapfiles != nswapfiles) {
664 mutex_exit(&swapinfo_lock);
665 kmem_free(pname, nlen);
666 kmem_free(csip,
667 tmp_nswapfiles * sizeof (struct swapinfo));
668 gplen = 0;
669 goto beginning;
671 for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
672 sip && nswap < tmp_nswapfiles;
673 sip = sip->si_next, tsip++, tpname += plen, nswap++) {
674 plen = sip->si_pnamelen;
675 if (tpname + plen - pname > nlen) {
676 mutex_exit(&swapinfo_lock);
677 kmem_free(pname, nlen);
678 goto retry;
680 *tsip = *sip;
681 tsip->si_pname = tpname;
682 (void) strcpy(tsip->si_pname, sip->si_pname);
684 mutex_exit(&swapinfo_lock);
686 if (sip) {
687 error = ENOMEM;
688 goto lout;
690 ust = (swapent_t *)((swaptbl_t *)sc_arg)->swt_ent;
691 for (tsip = csip, cnt = 0; cnt < nswap; tsip++, ust++, cnt++) {
692 if (copyin(ust, &st, sizeof (swapent_t)) != 0) {
693 error = EFAULT;
694 goto lout;
696 st.ste_flags = tsip->si_flags;
697 st.ste_length =
698 (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
699 st.ste_start = tsip->si_soff >> SCTRSHFT;
700 st.ste_pages = tsip->si_npgs;
701 st.ste_free = tsip->si_nfpgs;
702 if (copyout(&st, ust, sizeof (swapent_t)) != 0) {
703 error = EFAULT;
704 goto lout;
706 if (!tsip->si_pnamelen)
707 continue;
708 if (copyout(tsip->si_pname, st.ste_path,
709 tsip->si_pnamelen) != 0) {
710 error = EFAULT;
711 goto lout;
714 *rv = nswap;
715 lout:
716 kmem_free(csip, tmp_nswapfiles * sizeof (struct swapinfo));
717 kmem_free(pname, nlen);
718 return (error);
720 case SC_ADD:
721 case SC_REMOVE:
722 break;
723 default:
724 return (EINVAL);
726 if ((error = secpolicy_swapctl(CRED())) != 0)
727 return (error);
729 if (copyin(sc_arg, &sr, sizeof (swapres_t)))
730 return (EFAULT);
732 /* Allocate the space to read in pathname */
733 if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
734 return (ENOMEM);
736 error = copyinstr(sr.sr_name, swapname, MAXPATHLEN, 0);
737 if (error)
738 goto out;
740 error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
741 if (error) {
742 if (sc_cmd == SC_ADD)
743 goto out;
744 /* see if we match by name */
745 vp = swapdel_byname(swapname, (size_t)sr.sr_start);
746 if (vp == NULL)
747 goto out;
750 if (vp->v_flag & (VNOMAP | VNOSWAP)) {
751 VN_RELE(vp);
752 error = ENOSYS;
753 goto out;
755 switch (vp->v_type) {
756 case VBLK:
757 break;
759 case VREG:
760 if (vp->v_vfsp && vn_is_readonly(vp))
761 error = EROFS;
762 else
763 error = fop_access(vp, VREAD|VWRITE, 0, CRED(), NULL);
764 break;
766 case VDIR:
767 error = EISDIR;
768 break;
769 default:
770 error = ENOSYS;
771 break;
773 if (error == 0) {
774 if (sc_cmd == SC_REMOVE)
775 error = swapdel(vp, sr.sr_start);
776 else
777 error = swapadd(vp, sr.sr_start,
778 sr.sr_length, swapname);
780 VN_RELE(vp);
781 out:
782 kmem_free(swapname, MAXPATHLEN);
783 return (error);
786 #if defined(_LP64) && defined(_SYSCALL32)
789 swapctl32(int sc_cmd, void *sc_arg, int *rv)
791 struct swapinfo *sip, *csip, *tsip;
792 int error = 0;
793 struct swapent32 st, *ust;
794 struct swapres32 sr;
795 struct vnode *vp;
796 int cnt = 0;
797 int tmp_nswapfiles;
798 int nswap;
799 int length, nlen;
800 int gplen = 0, plen;
801 char *swapname;
802 char *pname;
803 char *tpname;
804 struct anoninfo32 ai;
805 size_t s;
806 spgcnt_t avail;
807 int global = INGLOBALZONE(curproc);
808 struct zone *zp = curproc->p_zone;
811 * When running in a zone we want to hide the details of the swap
812 * devices: we report there only being one swap device named "swap"
813 * having a size equal to the sum of the sizes of all real swap devices
814 * on the system.
816 switch (sc_cmd) {
817 case SC_GETNSWP:
818 if (global)
819 *rv = nswapfiles;
820 else
821 *rv = 1;
822 return (0);
824 case SC_AINFO:
826 * Return anoninfo information with these changes:
827 * ani_max = maximum amount of swap space
828 * (including potentially available physical memory)
829 * ani_free = amount of unallocated anonymous memory
830 * (some of which might be reserved and including
831 * potentially available physical memory)
832 * ani_resv = amount of claimed (reserved) anonymous memory
834 avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
835 s = (k_anoninfo.ani_max + k_anoninfo.ani_mem_resv) + avail;
836 if (s > UINT32_MAX)
837 return (EOVERFLOW);
838 ai.ani_max = s;
840 /* Update ani_free */
841 set_anoninfo();
842 s = k_anoninfo.ani_free + avail;
843 if (s > UINT32_MAX)
844 return (EOVERFLOW);
845 ai.ani_free = s;
847 s = k_anoninfo.ani_phys_resv + k_anoninfo.ani_mem_resv;
848 if (s > UINT32_MAX)
849 return (EOVERFLOW);
850 ai.ani_resv = s;
852 if (!global && zp->zone_max_swap_ctl != UINT64_MAX) {
854 * We're in a non-global zone with a swap cap. We
855 * always report the system-wide values for the global
856 * zone, even though it too can have a swap cap.
857 * See the comment for the SC_AINFO case in swapctl()
858 * which explains the following logic.
860 rctl_qty_t cap, used;
861 pgcnt_t pgcap, sys_avail;
863 mutex_enter(&zp->zone_mem_lock);
864 cap = zp->zone_max_swap_ctl;
865 used = zp->zone_max_swap;
866 mutex_exit(&zp->zone_mem_lock);
868 pgcap = MIN(btop(cap), ai.ani_max);
869 ai.ani_free = pgcap - btop(used);
871 /* Get the system-wide swap currently available. */
872 sys_avail = ai.ani_max - ai.ani_resv;
873 if (sys_avail < ai.ani_free)
874 ai.ani_resv = pgcap - sys_avail;
875 else
876 ai.ani_resv = btop(used);
878 ai.ani_max = pgcap;
881 if (copyout(&ai, sc_arg, sizeof (ai)) != 0)
882 return (EFAULT);
883 return (0);
885 case SC_LIST:
886 if (copyin(sc_arg, &length, sizeof (int32_t)) != 0)
887 return (EFAULT);
888 if (!global) {
889 struct swapent32 st;
890 char *swappath = "swap";
892 if (length < 1)
893 return (ENOMEM);
894 ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
895 if (copyin(ust, &st, sizeof (swapent32_t)) != 0)
896 return (EFAULT);
897 st.ste_start = PAGESIZE >> SCTRSHFT;
898 st.ste_length = (off_t)0;
899 st.ste_pages = 0;
900 st.ste_free = 0;
901 st.ste_flags = 0;
903 mutex_enter(&swapinfo_lock);
904 for (sip = swapinfo, nswap = 0;
905 sip != NULL && nswap < nswapfiles;
906 sip = sip->si_next, nswap++) {
907 st.ste_length +=
908 (sip->si_eoff - sip->si_soff) >> SCTRSHFT;
909 st.ste_pages += sip->si_npgs;
910 st.ste_free += sip->si_nfpgs;
912 mutex_exit(&swapinfo_lock);
914 if (zp->zone_max_swap_ctl != UINT64_MAX) {
915 rctl_qty_t cap, used;
917 mutex_enter(&zp->zone_mem_lock);
918 cap = zp->zone_max_swap_ctl;
919 used = zp->zone_max_swap;
920 mutex_exit(&zp->zone_mem_lock);
922 st.ste_length = MIN(cap, st.ste_length);
923 st.ste_pages = MIN(btop(cap), st.ste_pages);
924 st.ste_free = MIN(st.ste_pages - btop(used),
925 st.ste_free);
928 if (copyout(&st, ust, sizeof (swapent32_t)) != 0 ||
929 copyout(swappath, (caddr_t)(uintptr_t)st.ste_path,
930 strlen(swappath) + 1) != 0) {
931 return (EFAULT);
933 *rv = 1;
934 return (0);
936 beginning:
937 mutex_enter(&swapinfo_lock);
938 tmp_nswapfiles = nswapfiles;
939 mutex_exit(&swapinfo_lock);
942 * Return early if there are no swap entries to report:
944 if (tmp_nswapfiles < 1) {
945 *rv = 0;
946 return (0);
949 /* Return an error if not enough space for the whole table. */
950 if (length < tmp_nswapfiles)
951 return (ENOMEM);
953 * Get memory to hold the swap entries and their names. We'll
954 * copy the real entries into these and then copy these out.
955 * Allocating the pathname memory is only a guess so we may
956 * find that we need more and have to do it again.
957 * All this is because we have to hold the anon lock while
958 * traversing the swapinfo list, and we can't be doing copyouts
959 * and/or kmem_alloc()s during this.
961 csip = kmem_zalloc(tmp_nswapfiles * sizeof (*csip), KM_SLEEP);
962 retry:
963 nlen = tmp_nswapfiles * (gplen += 100);
964 pname = kmem_zalloc(nlen, KM_SLEEP);
966 mutex_enter(&swapinfo_lock);
968 if (tmp_nswapfiles != nswapfiles) {
969 mutex_exit(&swapinfo_lock);
970 kmem_free(pname, nlen);
971 kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
972 gplen = 0;
973 goto beginning;
975 for (sip = swapinfo, tsip = csip, tpname = pname, nswap = 0;
976 (sip != NULL) && (nswap < tmp_nswapfiles);
977 sip = sip->si_next, tsip++, tpname += plen, nswap++) {
978 plen = sip->si_pnamelen;
979 if (tpname + plen - pname > nlen) {
980 mutex_exit(&swapinfo_lock);
981 kmem_free(pname, nlen);
982 goto retry;
984 *tsip = *sip;
985 tsip->si_pname = tpname;
986 (void) strcpy(tsip->si_pname, sip->si_pname);
988 mutex_exit(&swapinfo_lock);
990 if (sip != NULL) {
991 error = ENOMEM;
992 goto lout;
994 ust = (swapent32_t *)((swaptbl32_t *)sc_arg)->swt_ent;
995 for (tsip = csip, cnt = 0; cnt < nswap; tsip++, ust++, cnt++) {
996 if (copyin(ust, &st, sizeof (*ust)) != 0) {
997 error = EFAULT;
998 goto lout;
1000 st.ste_flags = tsip->si_flags;
1001 st.ste_length =
1002 (tsip->si_eoff - tsip->si_soff) >> SCTRSHFT;
1003 st.ste_start = tsip->si_soff >> SCTRSHFT;
1004 st.ste_pages = tsip->si_npgs;
1005 st.ste_free = tsip->si_nfpgs;
1006 if (copyout(&st, ust, sizeof (st)) != 0) {
1007 error = EFAULT;
1008 goto lout;
1010 if (!tsip->si_pnamelen)
1011 continue;
1012 if (copyout(tsip->si_pname,
1013 (caddr_t)(uintptr_t)st.ste_path,
1014 tsip->si_pnamelen) != 0) {
1015 error = EFAULT;
1016 goto lout;
1019 *rv = nswap;
1020 lout:
1021 kmem_free(csip, tmp_nswapfiles * sizeof (*csip));
1022 kmem_free(pname, nlen);
1023 return (error);
1025 case SC_ADD:
1026 case SC_REMOVE:
1027 break;
1028 default:
1029 return (EINVAL);
1031 if ((error = secpolicy_swapctl(CRED())) != 0)
1032 return (error);
1034 if (copyin(sc_arg, &sr, sizeof (sr)))
1035 return (EFAULT);
1037 /* Allocate the space to read in pathname */
1038 if ((swapname = kmem_alloc(MAXPATHLEN, KM_NOSLEEP)) == NULL)
1039 return (ENOMEM);
1041 error = copyinstr((caddr_t)(uintptr_t)sr.sr_name,
1042 swapname, MAXPATHLEN, NULL);
1043 if (error)
1044 goto out;
1046 error = lookupname(swapname, UIO_SYSSPACE, FOLLOW, NULLVPP, &vp);
1047 if (error) {
1048 if (sc_cmd == SC_ADD)
1049 goto out;
1050 /* see if we match by name */
1051 vp = swapdel_byname(swapname, (uint_t)sr.sr_start);
1052 if (vp == NULL)
1053 goto out;
1056 if (vp->v_flag & (VNOMAP | VNOSWAP)) {
1057 VN_RELE(vp);
1058 error = ENOSYS;
1059 goto out;
1061 switch (vp->v_type) {
1062 case VBLK:
1063 break;
1065 case VREG:
1066 if (vp->v_vfsp && vn_is_readonly(vp))
1067 error = EROFS;
1068 else
1069 error = fop_access(vp, VREAD|VWRITE, 0, CRED(), NULL);
1070 break;
1072 case VDIR:
1073 error = EISDIR;
1074 break;
1075 default:
1076 error = ENOSYS;
1077 break;
1079 if (error == 0) {
1080 if (sc_cmd == SC_REMOVE)
1081 error = swapdel(vp, sr.sr_start);
1082 else
1083 error = swapadd(vp, sr.sr_start, sr.sr_length,
1084 swapname);
1086 VN_RELE(vp);
1087 out:
1088 kmem_free(swapname, MAXPATHLEN);
1089 return (error);
1092 #endif /* _LP64 && _SYSCALL32 */
1095 * Add a new swap file.
1098 swapadd(struct vnode *vp, ulong_t lowblk, ulong_t nblks, char *swapname)
1100 struct swapinfo **sipp, *nsip = NULL, *esip = NULL;
1101 struct vnode *cvp;
1102 struct vattr vattr;
1103 pgcnt_t pages;
1104 uoff_t soff, eoff;
1105 int error;
1106 ssize_t i, start, end;
1107 ushort_t wasswap;
1108 ulong_t startblk;
1109 size_t returned_mem;
1111 SWAP_PRINT(SW_CTL, "swapadd: vp %p lowblk %ld nblks %ld swapname %s\n",
1112 vp, lowblk, nblks, swapname, 0);
1114 * Get the real vnode. (If vp is not a specnode it just returns vp, so
1115 * it does the right thing, but having this code know about specnodes
1116  * violates the spirit of having it be independent of vnode type.)
1118 cvp = common_specvp(vp);
1121 * Or in VISSWAP so file system has chance to deny swap-ons during open.
1123 mutex_enter(&cvp->v_lock);
1124 wasswap = cvp->v_flag & VISSWAP;
1125 cvp->v_flag |= VISSWAP;
1126 mutex_exit(&cvp->v_lock);
1128 mutex_enter(&swap_lock);
1129 if (error = fop_open(&cvp, FREAD|FWRITE, CRED(), NULL)) {
1130 mutex_exit(&swap_lock);
1131 /* restore state of v_flag */
1132 if (!wasswap) {
1133 mutex_enter(&cvp->v_lock);
1134 cvp->v_flag &= ~VISSWAP;
1135 mutex_exit(&cvp->v_lock);
1137 return (error);
1139 mutex_exit(&swap_lock);
1142 * Get partition size. Return error if empty partition,
1143 * or if request does not fit within the partition.
1144 * If this is the first swap device, we can reduce
1145 * the size of the swap area to match what is
1146 * available. This can happen if the system was built
1147 * on a machine with a different size swap partition.
1149 vattr.va_mask = AT_SIZE;
1150 if (error = fop_getattr(cvp, &vattr, ATTR_COMM, CRED(), NULL))
1151 goto out;
1154 * Specfs returns a va_size of MAXOFFSET_T (UNKNOWN_SIZE) when the
1155 * size of the device can't be determined.
1157 if ((vattr.va_size == 0) || (vattr.va_size == MAXOFFSET_T)) {
1158 error = EINVAL;
1159 goto out;
1162 #ifdef _ILP32
1164 * No support for large swap in 32-bit OS, if the size of the swap is
1165 * bigger than MAXOFF32_T then the size used by swapfs must be limited.
1166 * This limitation is imposed by the swap subsystem itself, a D_64BIT
1167 * driver as the target of swap operation should be able to field
1168 * the IO.
1170 if (vattr.va_size > MAXOFF32_T) {
1171 cmn_err(CE_NOTE,
1172 "!swap device %s truncated from 0x%llx to 0x%x bytes",
1173 swapname, vattr.va_size, MAXOFF32_T);
1174 vattr.va_size = MAXOFF32_T;
1176 #endif /* _ILP32 */
1178 /* Fail if file not writeable (try to set size to current size) */
1179 vattr.va_mask = AT_SIZE;
1180 if (error = fop_setattr(cvp, &vattr, 0, CRED(), NULL))
1181 goto out;
1183 /* Fail if fs does not support fop_pageio */
1184 error = fop_pageio(cvp, NULL, 0, 0, 0, CRED(),
1185 NULL);
1187 if (error == ENOSYS)
1188 goto out;
1189 else
1190 error = 0;
1192 * If swapping on the root filesystem don't put swap blocks that
1193 * correspond to the miniroot filesystem on the swap free list.
1195 if (cvp == rootdir)
1196 startblk = roundup(MINIROOTSIZE<<SCTRSHFT, klustsize)>>SCTRSHFT;
1197 else /* Skip 1st page (disk label) */
1198 startblk = (ulong_t)(lowblk ? lowblk : 1);
1200 soff = startblk << SCTRSHFT;
1201 if (soff >= vattr.va_size) {
1202 error = EINVAL;
1203 goto out;
1207 * If user specified 0 blks, use the size of the device
1209 eoff = nblks ? soff + (nblks - (startblk - lowblk) << SCTRSHFT) :
1210 vattr.va_size;
1212 SWAP_PRINT(SW_CTL, "swapadd: va_size %ld soff %ld eoff %ld\n",
1213 vattr.va_size, soff, eoff, 0, 0);
1215 if (eoff > vattr.va_size) {
1216 error = EINVAL;
1217 goto out;
1221 * The starting and ending offsets must be page aligned.
1222 * Round soff up to next page boundary, round eoff
1223 * down to previous page boundary.
1225 soff = ptob(btopr(soff));
1226 eoff = ptob(btop(eoff));
1227 if (soff >= eoff) {
1228 SWAP_PRINT(SW_CTL, "swapadd: soff %ld >= eoff %ld\n",
1229 soff, eoff, 0, 0, 0);
1230 error = EINVAL;
1231 goto out;
1234 pages = btop(eoff - soff);
1236 /* Allocate and partially set up the new swapinfo */
1237 nsip = kmem_zalloc(sizeof (struct swapinfo), KM_SLEEP);
1238 nsip->si_vp = cvp;
1240 nsip->si_soff = soff;
1241 nsip->si_eoff = eoff;
1242 nsip->si_hint = 0;
1243 nsip->si_checkcnt = nsip->si_alloccnt = 0;
1245 nsip->si_pnamelen = (int)strlen(swapname) + 1;
1246 nsip->si_pname = kmem_zalloc(nsip->si_pnamelen, KM_SLEEP);
1247 bcopy(swapname, nsip->si_pname, nsip->si_pnamelen - 1);
1248 SWAP_PRINT(SW_CTL, "swapadd: allocating swapinfo for %s, %ld pages\n",
1249 swapname, pages, 0, 0, 0);
1251 * Size of swapslots map in bytes
1253 nsip->si_mapsize = P2ROUNDUP(pages, NBBW) / NBBY;
1254 nsip->si_swapslots = kmem_zalloc(nsip->si_mapsize, KM_SLEEP);
1257 * Permanently set the bits that can't ever be allocated,
1258 * i.e. those from the ending offset to the round up slot for the
1259 * swapslots bit map.
1261 start = pages;
1262 end = P2ROUNDUP(pages, NBBW);
1263 for (i = start; i < end; i++) {
1264 SWAP_PRINT(SW_CTL, "swapadd: set bit for page %ld\n", i,
1265 0, 0, 0, 0);
1266 SETBIT(nsip->si_swapslots, i);
1268 nsip->si_npgs = nsip->si_nfpgs = pages;
1270 * Now check to see if we can add it. We wait til now to check because
1271 * we need the swapinfo_lock and we don't want sleep with it (e.g.,
1272 * during kmem_alloc()) while we're setting up the swapinfo.
1274 mutex_enter(&swapinfo_lock);
1275 for (sipp = &swapinfo; (esip = *sipp) != NULL; sipp = &esip->si_next) {
1276 if (esip->si_vp == cvp) {
1277 if (esip->si_soff == soff && esip->si_npgs == pages &&
1278 (esip->si_flags & ST_DOINGDEL)) {
1280 * We are adding a device that we are in the
1281 * middle of deleting. Just clear the
1282 * ST_DOINGDEL flag to signal this and
1283 * the deletion routine will eventually notice
1284 * it and add it back.
1286 esip->si_flags &= ~ST_DOINGDEL;
1287 mutex_exit(&swapinfo_lock);
1288 goto out;
1290 /* disallow overlapping swap files */
1291 if ((soff < esip->si_eoff) && (eoff > esip->si_soff)) {
1292 error = EEXIST;
1293 mutex_exit(&swapinfo_lock);
1294 goto out;
1299 nswapfiles++;
1302 * add new swap device to list and shift allocations to it
1303 * before updating the anoninfo counters
1305 *sipp = nsip;
1306 silast = nsip;
1309 * Update the total amount of reservable swap space
1310 * accounting properly for swap space from physical memory
1312 /* New swap device soaks up currently reserved memory swap */
1313 mutex_enter(&anoninfo_lock);
1315 ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1316 ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1318 k_anoninfo.ani_max += pages;
1319 ANI_ADD(pages);
1320 if (k_anoninfo.ani_mem_resv > k_anoninfo.ani_locked_swap) {
1321 returned_mem = MIN(k_anoninfo.ani_mem_resv -
1322 k_anoninfo.ani_locked_swap,
1323 k_anoninfo.ani_max - k_anoninfo.ani_phys_resv);
1325 ANI_ADD(-returned_mem);
1326 k_anoninfo.ani_free -= returned_mem;
1327 k_anoninfo.ani_mem_resv -= returned_mem;
1328 k_anoninfo.ani_phys_resv += returned_mem;
1330 mutex_enter(&freemem_lock);
1331 availrmem += returned_mem;
1332 mutex_exit(&freemem_lock);
1335 * At boot time, to permit booting small memory machines using
1336 * only physical memory as swap space, we allowed a dangerously
1337 * large amount of memory to be used as swap space; now that
1338 * more physical backing store is available bump down the amount
1339 * we can get from memory to a safer size.
1341 if (swapfs_minfree < swapfs_desfree) {
1342 mutex_enter(&freemem_lock);
1343 if (availrmem > swapfs_desfree || !k_anoninfo.ani_mem_resv)
1344 swapfs_minfree = swapfs_desfree;
1345 mutex_exit(&freemem_lock);
1348 SWAP_PRINT(SW_CTL, "swapadd: ani_max %ld ani_free %ld\n",
1349 k_anoninfo.ani_free, k_anoninfo.ani_free, 0, 0, 0);
1351 mutex_exit(&anoninfo_lock);
1353 mutex_exit(&swapinfo_lock);
1355 /* Initialize the dump device */
1356 mutex_enter(&dump_lock);
1357 if (dumpvp == NULL)
1358 (void) dumpinit(vp, swapname, 0);
1359 mutex_exit(&dump_lock);
1361 VN_HOLD(cvp);
1362 out:
1363 if (error || esip) {
1364 SWAP_PRINT(SW_CTL, "swapadd: error (%d)\n", error, 0, 0, 0, 0);
1366 if (!wasswap) {
1367 mutex_enter(&cvp->v_lock);
1368 cvp->v_flag &= ~VISSWAP;
1369 mutex_exit(&cvp->v_lock);
1371 if (nsip) {
1372 kmem_free(nsip->si_swapslots, (size_t)nsip->si_mapsize);
1373 kmem_free(nsip->si_pname, nsip->si_pnamelen);
1374 kmem_free(nsip, sizeof (*nsip));
1376 mutex_enter(&swap_lock);
1377 (void) fop_close(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(),
1378 NULL);
1379 mutex_exit(&swap_lock);
1381 return (error);
1385 * Delete a swap file.
1387 static int
1388 swapdel(
1389 struct vnode *vp,
1390 ulong_t lowblk) /* Low block number of area to delete. */
1392 struct swapinfo **sipp, *osip = NULL;
1393 struct vnode *cvp;
1394 uoff_t soff;
1395 int error = 0;
1396 uoff_t toff = 0;
1397 struct vnode *tvp = NULL;
1398 spgcnt_t pages;
1399 struct anon **app, *ap;
1400 kmutex_t *ahm;
1401 pgcnt_t adjust_swap = 0;
1403 /* Find the swap file entry for the file to be deleted */
1404 cvp = common_specvp(vp);
1407 lowblk = lowblk ? lowblk : 1; /* Skip first page (disk label) */
1408 soff = ptob(btopr(lowblk << SCTRSHFT)); /* must be page aligned */
1410 mutex_enter(&swapinfo_lock);
1411 for (sipp = &swapinfo; (osip = *sipp) != NULL; sipp = &osip->si_next) {
1412 if ((osip->si_vp == cvp) &&
1413 (osip->si_soff == soff) && (osip->si_flags == 0))
1414 break;
1417 /* If the file was not found, error. */
1418 if (osip == NULL) {
1419 error = EINVAL;
1420 mutex_exit(&swapinfo_lock);
1421 goto out;
1424 pages = osip->si_npgs;
1427 * Do not delete if we will be low on swap pages.
1429 mutex_enter(&anoninfo_lock);
1431 ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1432 ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1434 mutex_enter(&freemem_lock);
1435 if (((k_anoninfo.ani_max - k_anoninfo.ani_phys_resv) +
1436 MAX((spgcnt_t)(availrmem - swapfs_minfree), 0)) < pages) {
1437 mutex_exit(&freemem_lock);
1438 mutex_exit(&anoninfo_lock);
1439 error = ENOMEM;
1440 cmn_err(CE_WARN, "swapdel - too few free pages");
1441 mutex_exit(&swapinfo_lock);
1442 goto out;
1444 mutex_exit(&freemem_lock);
1446 k_anoninfo.ani_max -= pages;
1448 /* If needed, reserve memory swap to replace old device */
1449 if (k_anoninfo.ani_phys_resv > k_anoninfo.ani_max) {
1450 adjust_swap = k_anoninfo.ani_phys_resv - k_anoninfo.ani_max;
1451 k_anoninfo.ani_phys_resv -= adjust_swap;
1452 k_anoninfo.ani_mem_resv += adjust_swap;
1453 mutex_enter(&freemem_lock);
1454 availrmem -= adjust_swap;
1455 mutex_exit(&freemem_lock);
1456 ANI_ADD(adjust_swap);
1458 ASSERT(k_anoninfo.ani_mem_resv >= k_anoninfo.ani_locked_swap);
1459 ASSERT(k_anoninfo.ani_max >= k_anoninfo.ani_phys_resv);
1460 mutex_exit(&anoninfo_lock);
1462 ANI_ADD(-pages);
1465 * Set the delete flag. This prevents anyone from allocating more
1466 * pages from this file. Also set ST_DOINGDEL. Someone who wants to
1467 * add the file back while we're deleting it will signify by clearing
1468 * this flag.
1470 osip->si_flags |= ST_INDEL|ST_DOINGDEL;
1471 mutex_exit(&swapinfo_lock);
1474 * Free all the allocated physical slots for this file. We do this
1475 * by walking through the entire anon hash array, because we need
1476 * to update all the anon slots that have physical swap slots on
1477 * this file, and this is the only way to find them all. We go back
1478 * to the beginning of a bucket after each slot is freed because the
1479 * anonhash_lock is not held during the free and thus the hash table
1480 * may change under us.
1482 for (app = anon_hash; app < &anon_hash[ANON_HASH_SIZE]; app++) {
1483 ahm = &anonhash_lock[(app - anon_hash) &
1484 (AH_LOCK_SIZE - 1)].pad_mutex;
1485 mutex_enter(ahm);
1486 top:
1487 for (ap = *app; ap != NULL; ap = ap->an_hash) {
1488 if (ap->an_pvp == cvp &&
1489 ap->an_poff >= osip->si_soff &&
1490 ap->an_poff < osip->si_eoff) {
1491 ASSERT(TESTBIT(osip->si_swapslots,
1492 btop((size_t)(ap->an_poff -
1493 osip->si_soff))));
1494 tvp = ap->an_vp;
1495 toff = ap->an_off;
1496 VN_HOLD(tvp);
1497 mutex_exit(ahm);
1499 error = swapslot_free(tvp, toff, osip);
1501 VN_RELE(tvp);
1502 mutex_enter(ahm);
1503 if (!error && (osip->si_flags & ST_DOINGDEL)) {
1504 goto top;
1505 } else {
1506 if (error) {
1507 cmn_err(CE_WARN,
1508 "swapslot_free failed %d",
1509 error);
1513 * Add device back before making it
1514 * visible.
1516 mutex_enter(&swapinfo_lock);
1517 osip->si_flags &=
1518 ~(ST_INDEL | ST_DOINGDEL);
1519 mutex_exit(&swapinfo_lock);
1522 * Update the anon space available
1524 mutex_enter(&anoninfo_lock);
1526 k_anoninfo.ani_phys_resv += adjust_swap;
1527 k_anoninfo.ani_mem_resv -= adjust_swap;
1528 k_anoninfo.ani_max += pages;
1530 mutex_enter(&freemem_lock);
1531 availrmem += adjust_swap;
1532 mutex_exit(&freemem_lock);
1534 mutex_exit(&anoninfo_lock);
1536 ANI_ADD(pages);
1538 mutex_exit(ahm);
1539 goto out;
1543 mutex_exit(ahm);
1546 /* All done, they'd better all be free! */
1547 mutex_enter(&swapinfo_lock);
1548 ASSERT(osip->si_nfpgs == osip->si_npgs);
1550 /* Now remove it from the swapinfo list */
1551 for (sipp = &swapinfo; *sipp != NULL; sipp = &(*sipp)->si_next) {
1552 if (*sipp == osip)
1553 break;
1555 ASSERT(*sipp);
1556 *sipp = osip->si_next;
1557 if (silast == osip)
1558 if ((silast = osip->si_next) == NULL)
1559 silast = swapinfo;
1560 nswapfiles--;
1561 mutex_exit(&swapinfo_lock);
1563 kmem_free(osip->si_swapslots, osip->si_mapsize);
1564 kmem_free(osip->si_pname, osip->si_pnamelen);
1565 kmem_free(osip, sizeof (*osip));
1567 mutex_enter(&dump_lock);
1568 if (cvp == dumpvp)
1569 dumpfini();
1570 mutex_exit(&dump_lock);
1572 /* Release the vnode */
1574 mutex_enter(&swap_lock);
1575 (void) fop_close(cvp, FREAD|FWRITE, 1, (offset_t)0, CRED(), NULL);
1576 mutex_enter(&cvp->v_lock);
1577 cvp->v_flag &= ~VISSWAP;
1578 mutex_exit(&cvp->v_lock);
1579 VN_RELE(cvp);
1580 mutex_exit(&swap_lock);
1581 out:
1582 return (error);
1586 * Free up a physical swap slot on swapinfo sip, currently in use by the
1587 * anonymous page whose name is (vp, off).
1589 static int
1590 swapslot_free(
1591 struct vnode *vp,
1592 uoff_t off,
1593 struct swapinfo *sip)
1595 struct page *pp = NULL;
1596 struct anon *ap = NULL;
1597 int error = 0;
1598 kmutex_t *ahm;
1599 struct vnode *pvp = NULL;
1600 uoff_t poff;
1601 int alloc_pg = 0;
1603 ASSERT(sip->si_vp != NULL);
1605 * Get the page for the old swap slot if exists or create a new one.
1607 again:
1608 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
1609 pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
1610 segkmap, NULL);
1611 if (pp == NULL)
1612 goto again;
1613 alloc_pg = 1;
1615 error = swap_getphysname(vp, off, &pvp, &poff);
1616 if (error || pvp != sip->si_vp || poff < sip->si_soff ||
1617 poff >= sip->si_eoff) {
1618 page_io_unlock(pp);
1619 VN_DISPOSE(pp, B_INVAL, 0, kcred);
1620 return (0);
1623 error = fop_pageio(pvp, pp, poff, PAGESIZE, B_READ,
1624 CRED(), NULL);
1625 if (error) {
1626 page_io_unlock(pp);
1627 if (error == EFAULT)
1628 error = 0;
1630 VN_DISPOSE(pp, B_INVAL, 0, kcred);
1631 return (error);
1636 * The anon could have been removed by anon_decref* and/or reallocated
1637 * by anon layer (an_pvp == NULL) with the same vp, off.
1638 * In this case the page which has been allocated needs to
1639 * be freed.
1641 if (!alloc_pg)
1642 page_io_lock(pp);
1643 ahm = AH_MUTEX(vp, off);
1644 mutex_enter(ahm);
1645 ap = swap_anon(vp, off);
1646 if ((ap == NULL || ap->an_pvp == NULL) && alloc_pg) {
1647 mutex_exit(ahm);
1648 page_io_unlock(pp);
1649 VN_DISPOSE(pp, B_INVAL, 0, kcred);
1650 return (0);
1654 * Free the physical slot. It may have been freed up and replaced with
1655 * another one while we were getting the page so we have to re-verify
1656 * that this is really one we want. If we do free the slot we have
1657 * to mark the page modified, as its backing store is now gone.
1659 if ((ap != NULL) && (ap->an_pvp == sip->si_vp && ap->an_poff >=
1660 sip->si_soff && ap->an_poff < sip->si_eoff)) {
1661 swap_phys_free(ap->an_pvp, ap->an_poff, PAGESIZE);
1662 ap->an_pvp = NULL;
1663 ap->an_poff = 0;
1664 mutex_exit(ahm);
1665 hat_setmod(pp);
1666 } else {
1667 mutex_exit(ahm);
1669 page_io_unlock(pp);
1670 page_unlock(pp);
1671 return (0);
1676 * Get contig physical backing store for vp, in the range
1677 * [*offp, *offp + *lenp), May back a subrange of this, but must
1678 * always include the requested offset or fail. Returns the offsets
1679 * backed as [*offp, *offp + *lenp) and the physical offsets used to
1680 * back them from *pvpp in the range [*pstartp, *pstartp + *lenp).
1681 * Returns 0 for success
1682 * SE_NOANON -- no anon slot for requested paged
1683 * SE_NOSWAP -- no physical swap space available
1686 swap_newphysname(
1687 struct vnode *vp,
1688 uoff_t offset,
1689 uoff_t *offp,
1690 size_t *lenp,
1691 struct vnode **pvpp,
1692 uoff_t *poffp)
1694 struct anon *ap = NULL; /* anon slot for vp, off */
1695 int error = 0;
1696 struct vnode *pvp;
1697 uoff_t poff, pstart, prem;
1698 size_t plen;
1699 uoff_t off, start;
1700 kmutex_t *ahm;
1702 ASSERT(*offp <= offset && offset < *offp + *lenp);
1704 /* Get new physical swap slots. */
1705 plen = *lenp;
1706 if (!swap_phys_alloc(&pvp, &pstart, &plen, 0)) {
1708 * No swap available so return error unless requested
1709 * offset is already backed in which case return that.
1711 ahm = AH_MUTEX(vp, offset);
1712 mutex_enter(ahm);
1713 if ((ap = swap_anon(vp, offset)) == NULL) {
1714 error = SE_NOANON;
1715 mutex_exit(ahm);
1716 return (error);
1718 error = (ap->an_pvp ? 0 : SE_NOSWAP);
1719 *offp = offset;
1720 *lenp = PAGESIZE;
1721 *pvpp = ap->an_pvp;
1722 *poffp = ap->an_poff;
1723 mutex_exit(ahm);
1724 return (error);
1728 * We got plen (<= *lenp) contig slots. Use these to back a
1729 * subrange of [*offp, *offp + *lenp) which includes offset.
1730 * For now we just put offset at the end of the kluster.
1731 * Clearly there are other possible choices - which is best?
1733 start = MAX(*offp,
1734 (offset + PAGESIZE > plen) ? (offset + PAGESIZE - plen) : 0);
1735 ASSERT(start + plen <= *offp + *lenp);
1737 for (off = start, poff = pstart; poff < pstart + plen;
1738 off += PAGESIZE, poff += PAGESIZE) {
1739 ahm = AH_MUTEX(vp, off);
1740 mutex_enter(ahm);
1741 if ((ap = swap_anon(vp, off)) != NULL) {
1742 /* Free old slot if any, and assign new one */
1743 if (ap->an_pvp)
1744 swap_phys_free(ap->an_pvp, ap->an_poff,
1745 PAGESIZE);
1746 ap->an_pvp = pvp;
1747 ap->an_poff = poff;
1748 } else { /* No anon slot for a klustered page, quit. */
1749 prem = (pstart + plen) - poff;
1750 /* Already did requested page, do partial kluster */
1751 if (off > offset) {
1752 plen = poff - pstart;
1753 error = 0;
1754 /* Fail on requested page, error */
1755 } else if (off == offset) {
1756 error = SE_NOANON;
1757 /* Fail on prior page, fail on requested page, error */
1758 } else if ((ap = swap_anon(vp, offset)) == NULL) {
1759 error = SE_NOANON;
1760 /* Fail on prior page, got requested page, do only it */
1761 } else {
1762 /* Free old slot if any, and assign new one */
1763 if (ap->an_pvp)
1764 swap_phys_free(ap->an_pvp, ap->an_poff,
1765 PAGESIZE);
1766 ap->an_pvp = pvp;
1767 ap->an_poff = poff;
1768 /* One page kluster */
1769 start = offset;
1770 plen = PAGESIZE;
1771 pstart = poff;
1772 poff += PAGESIZE;
1773 prem -= PAGESIZE;
1775 /* Free unassigned slots */
1776 swap_phys_free(pvp, poff, prem);
1777 mutex_exit(ahm);
1778 break;
1780 mutex_exit(ahm);
1782 ASSERT(*offp <= start && start + plen <= *offp + *lenp);
1783 ASSERT(start <= offset && offset < start + plen);
1784 *offp = start;
1785 *lenp = plen;
1786 *pvpp = pvp;
1787 *poffp = pstart;
1788 return (error);
1793 * Get the physical swap backing store location for a given anonymous page
1794 * named (vp, off). The backing store name is returned in (*pvpp, *poffp).
1795 * Returns 0 success
1796 * EIDRM -- no anon slot (page is not allocated)
1799 swap_getphysname(
1800 struct vnode *vp,
1801 uoff_t off,
1802 struct vnode **pvpp,
1803 uoff_t *poffp)
1805 struct anon *ap;
1806 int error = 0;
1807 kmutex_t *ahm;
1809 ahm = AH_MUTEX(vp, off);
1810 mutex_enter(ahm);
1812 /* Get anon slot for vp, off */
1813 ap = swap_anon(vp, off);
1814 if (ap == NULL) {
1815 error = EIDRM;
1816 goto out;
1818 *pvpp = ap->an_pvp;
1819 *poffp = ap->an_poff;
1820 out:
1821 mutex_exit(ahm);
1822 return (error);