inet6: require RTF_ANNOUNCE to proxy NS
[dragonfly.git] / lib / libkvm / kvm_getswapinfo.c
blob796a95214f51056ff2ef57de1aeeb53ba0af5397
1 /*
2 * Copyright (c) 1999 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * $FreeBSD: src/lib/libkvm/kvm_getswapinfo.c,v 1.10.2.4 2003/01/12 09:23:13 dillon Exp $
37 #define _KERNEL_STRUCTURES
39 #include <sys/param.h>
40 #include <sys/time.h>
41 #include <sys/ucred.h>
42 #include <sys/stat.h>
43 #include <sys/conf.h>
44 #include <sys/blist.h>
45 #include <sys/sysctl.h>
46 #include <vm/vm_param.h>
48 #include <err.h>
49 #include <fcntl.h>
50 #include <nlist.h>
51 #include <paths.h>
52 #include <stdio.h>
53 #include <stdlib.h>
54 #include <string.h>
55 #include <unistd.h>
56 #include <limits.h>
58 #include "kvm.h"
59 #include "kvm_private.h"
61 static struct nlist kvm_swap_nl[] = {
62 { .n_name = "_swapblist" }, /* new radix swap list */
63 { .n_name = "_swdevt" }, /* list of swap devices and sizes */
64 { .n_name = "_nswdev" }, /* number of swap devices */
65 { .n_name = "_dmmax" }, /* maximum size of a swap block */
66 { .n_name = "" }
69 #define NL_SWAPBLIST 0
70 #define NL_SWDEVT 1
71 #define NL_NSWDEV 2
72 #define NL_DMMAX 3
74 static int kvm_swap_nl_cached = 0;
75 static int nswdev;
76 static int unswdev;
77 static int dmmax;
79 static int nlist_init(kvm_t *kd);
80 static void dump_blist(kvm_t *kd);
81 static int kvm_getswapinfo_sysctl(kvm_t *kd, struct kvm_swap *swap_ary,
82 int swap_max, int flags);
#define SVAR(var) __STRING(var)	/* to force expansion */

/*
 * KGET*: read a kernel object through the kvm handle `kd` (which must be
 * in scope at the expansion site), warning on short reads.  The KGET*N
 * variants are expressions evaluating to 1 on success, 0 on failure;
 * KGETRET warns and returns 0 from the enclosing function on failure.
 *
 * Statement-like macros are wrapped in do { } while (0) so they expand
 * safely inside unbraced if/else bodies (the bare `if` forms they replace
 * could steal a following `else`).  KGET2N stays an expression because
 * callers use its value.
 */
#define KGET(idx, var) \
	KGET1(idx, &var, sizeof(var), SVAR(var))
#define KGET1(idx, p, s, msg) \
	KGET2(kvm_swap_nl[idx].n_value, p, s, msg)
#define KGET2(addr, p, s, msg) \
	do { \
		if (kvm_read(kd, (u_long)(addr), p, s) != s) \
			warnx("cannot read %s: %s", msg, kvm_geterr(kd)); \
	} while (0)
#define KGETN(idx, var) \
	KGET1N(idx, &var, sizeof(var), SVAR(var))
#define KGET1N(idx, p, s, msg) \
	KGET2N(kvm_swap_nl[idx].n_value, p, s, msg)
#define KGET2N(addr, p, s, msg) \
	((kvm_read(kd, (u_long)(addr), p, s) == s) ? 1 : 0)
#define KGETRET(addr, p, s, msg) \
	do { \
		if (kvm_read(kd, (u_long)(addr), p, s) != s) { \
			warnx("cannot read %s: %s", msg, kvm_geterr(kd)); \
			return (0); \
		} \
	} while (0)

/*
 * Format the device name for swap device `dev` into the buffer `str`
 * (which must be a true array: sizeof is taken).  NODEV denotes NFS swap.
 * SWIF_DEV_PREFIX requests a leading _PATH_DEV ("/dev/") prefix.
 */
#define GETSWDEVNAME(dev, str, flags) \
	do { \
		if (dev == NODEV) { \
			strlcpy(str, "[NFS swap]", sizeof(str)); \
		} else { \
			snprintf( \
			    str, sizeof(str), "%s%s", \
			    (((flags) & SWIF_DEV_PREFIX) ? _PATH_DEV : ""), \
			    devname(dev, S_IFCHR) \
			); \
		} \
	} while (0)
116 kvm_getswapinfo(
117 kvm_t *kd,
118 struct kvm_swap *swap_ary,
119 int swap_max,
120 int flags
122 int i, ti, swi;
123 swblk_t ttl;
124 struct swdevt *sw;
125 struct swdevt swinfo;
128 * clear cache
130 if (kd == NULL) {
131 kvm_swap_nl_cached = 0;
132 return(0);
135 if (swap_max < 1)
136 return (-1);
139 * Use sysctl if possible
141 if (kvm_ishost(kd) && (flags & SWIF_DUMP_TREE) == 0) {
142 ti = kvm_getswapinfo_sysctl(kd, swap_ary, swap_max, flags);
143 if (ti >= 0)
144 return(ti);
148 * namelist
150 if (!nlist_init(kd))
151 return (-1);
153 swi = unswdev;
154 if (swi >= swap_max)
155 swi = swap_max - 1;
157 bzero(swap_ary, sizeof(struct kvm_swap) * (swi + 1));
159 KGET(NL_SWDEVT, sw);
160 for (i = ti = 0; i < nswdev; ++i) {
161 KGET2(&sw[i], &swinfo, sizeof(swinfo), "swinfo");
163 if (swinfo.sw_nblks == 0)
164 continue;
167 * The first dmmax is never allocated to avoid
168 * trashing the disklabels.
170 ttl = swinfo.sw_nblks - dmmax;
171 if (ttl == 0)
172 continue;
174 swap_ary[swi].ksw_total += ttl;
175 swap_ary[swi].ksw_used += swinfo.sw_nused;
177 if (ti < swi) {
178 swap_ary[ti].ksw_total = ttl;
179 swap_ary[ti].ksw_used = swinfo.sw_nused;
180 swap_ary[ti].ksw_flags = swinfo.sw_flags;
181 GETSWDEVNAME(swinfo.sw_dev, swap_ary[ti].ksw_devname,
182 flags);
183 ++ti;
187 if (flags & SWIF_DUMP_TREE)
188 dump_blist(kd);
189 return (swi);
192 static int
193 nlist_init(kvm_t *kd)
195 int i;
196 struct swdevt *sw;
197 struct swdevt swinfo;
199 if (kvm_swap_nl_cached)
200 return (1);
202 if (kvm_nlist(kd, kvm_swap_nl) < 0)
203 return (0);
206 * required entries
208 if (kvm_swap_nl[NL_SWDEVT].n_value == 0 ||
209 kvm_swap_nl[NL_NSWDEV].n_value == 0 ||
210 kvm_swap_nl[NL_DMMAX].n_value == 0 ||
211 kvm_swap_nl[NL_SWAPBLIST].n_type == 0) {
212 return (0);
216 * get globals, type of swap
218 KGET(NL_NSWDEV, nswdev);
219 KGET(NL_DMMAX, dmmax);
222 * figure out how many actual swap devices are enabled
224 KGET(NL_SWDEVT, sw);
225 for (i = unswdev = 0; i < nswdev; ++i) {
226 KGET2(&sw[i], &swinfo, sizeof(swinfo), "swinfo");
227 if (swinfo.sw_nblks)
228 ++unswdev;
232 kvm_swap_nl_cached = 1;
233 return (1);
237 * scanradix() - support routine for radix scanner
240 #define TABME tab, tab, ""
242 static int
243 scanradix(
244 blmeta_t *scan,
245 blmeta_t *scan_cache,
246 swblk_t blk,
247 int64_t radix,
248 swblk_t skip,
249 swblk_t count,
250 kvm_t *kd,
251 int dmmaxr,
252 int nswdevr,
253 int64_t *availp,
254 int tab
256 blmeta_t meta;
257 blmeta_t scan_array[BLIST_BMAP_RADIX];
258 int64_t avail_tmp = 0;
259 unsigned int iu;
260 int im;
261 int next_skip;
263 if (scan_cache) {
264 meta = *scan_cache;
265 } else if (skip == BLIST_META_RADIX) {
266 if (kvm_read(kd, (u_long)scan, scan_array, sizeof(scan_array)) != sizeof(scan_array)) {
267 warnx("cannot read %s: %s", "blmeta_t", kvm_geterr(kd));
268 bzero(scan_array, sizeof(scan_array));
270 meta = scan_array[0];
271 } else {
272 KGET2(scan, &meta, sizeof(meta), "blmeta_t");
276 * Terminator
278 if (meta.bm_bighint == (swblk_t)-1) {
279 printf("%*.*s(0x%06jx,%jd) Terminator\n",
280 TABME,
281 (intmax_t)blk,
282 (intmax_t)radix
284 return(-1);
287 if (radix == BLIST_BMAP_RADIX) {
289 * Leaf bitmap
291 printf("%*.*s(0x%06jx,%jd) Bitmap %016jx big=%jd\n",
292 TABME,
293 (intmax_t)blk,
294 (intmax_t)radix,
295 (intmax_t)meta.u.bmu_bitmap,
296 (intmax_t)meta.bm_bighint
299 if (meta.u.bmu_bitmap) {
300 for (iu = 0; iu < BLIST_BMAP_RADIX; ++iu) {
301 if (meta.u.bmu_bitmap & (1LU << iu))
302 ++*availp;
305 } else if (meta.u.bmu_avail == radix) {
307 * Meta node if all free
309 printf("%*.*s(0x%06jx,%jd) Submap ALL-FREE (big=%jd) {\n",
310 TABME,
311 (intmax_t)blk,
312 (intmax_t)radix,
313 (intmax_t)meta.bm_bighint
315 *availp += radix;
316 } else if (meta.u.bmu_avail == 0) {
318 * Meta node if all used
320 printf("%*.*s(0x%06jx,%jd) Submap ALL-ALLOCATED (big=%jd)\n",
321 TABME,
322 (intmax_t)blk,
323 (intmax_t)radix,
324 (intmax_t)meta.bm_bighint
326 } else {
328 * Meta node if not all free
330 printf("%*.*s(0x%06jx,%jd) Submap avail=%jd big=%jd {\n",
331 TABME,
332 (intmax_t)blk,
333 (intmax_t)radix,
334 (intmax_t)meta.u.bmu_avail,
335 (intmax_t)meta.bm_bighint
338 radix /= BLIST_META_RADIX;
339 next_skip = skip / BLIST_META_RADIX;
341 for (im = 1; im <= skip; im += next_skip) {
342 int r;
343 swblk_t vcount = (count > radix) ?
344 (swblk_t)radix : count;
346 r = scanradix(
347 &scan[im],
348 ((next_skip == 1) ? &scan_array[im] : NULL),
349 blk,
350 radix,
351 next_skip - 1,
352 vcount,
354 dmmaxr,
355 nswdevr,
356 &avail_tmp,
357 tab + 4
359 if (r < 0)
360 break;
361 blk += (swblk_t)radix;
363 *availp += avail_tmp;
364 if (avail_tmp == meta.u.bmu_avail)
365 printf("%*.*s}\n", TABME);
366 else
367 printf("%*.*s} (AVAIL MISMATCH %jd/%jd\n",
368 TABME,
369 (intmax_t)avail_tmp,
370 (intmax_t)meta.u.bmu_avail);
372 return(0);
375 static void
376 dump_blist(kvm_t *kd)
378 struct blist *swapblist = NULL;
379 struct blist blcopy = { 0 };
380 int64_t avail = 0;
382 KGET(NL_SWAPBLIST, swapblist);
384 if (swapblist == NULL) {
385 printf("radix tree: NULL - no swap in system\n");
386 return;
389 KGET2(swapblist, &blcopy, sizeof(blcopy), "*swapblist");
391 printf("radix tree: %jd/%jd/%jd blocks, %jdK wired\n",
392 (intmax_t)blcopy.bl_free,
393 (intmax_t)blcopy.bl_blocks,
394 (intmax_t)blcopy.bl_radix,
395 (intmax_t)((blcopy.bl_rootblks * sizeof(blmeta_t) + 1023)/
396 1024)
399 scanradix(
400 blcopy.bl_root,
401 NULL,
403 blcopy.bl_radix,
404 blcopy.bl_skip,
405 blcopy.bl_rootblks,
407 dmmax,
408 nswdev,
409 &avail,
412 printf("final availability: %jd\n", (intmax_t)avail);
415 static
417 kvm_getswapinfo_sysctl(kvm_t *kd __unused, struct kvm_swap *swap_ary,
418 int swap_max, int flags)
420 size_t bytes = 0;
421 size_t ksize;
422 int ti;
423 int swi;
424 int n;
425 int i;
426 char *xswbuf;
427 struct xswdev *xsw;
429 if (sysctlbyname("vm.swap_info_array", NULL, &bytes, NULL, 0) < 0)
430 return(-1);
431 if (bytes == 0)
432 return(-1);
434 xswbuf = malloc(bytes);
435 if (sysctlbyname("vm.swap_info_array", xswbuf, &bytes, NULL, 0) < 0) {
436 free(xswbuf);
437 return(-1);
439 if (bytes == 0) {
440 free(xswbuf);
441 return(-1);
445 * Calculate size of xsw entry returned by kernel (it can be larger
446 * than the one we have if there is a version mismatch).
448 ksize = ((struct xswdev *)xswbuf)->xsw_size;
449 n = (int)(bytes / ksize);
452 * Calculate the number of live swap devices and calculate
453 * the swap_ary[] index used for the cumulative result (swi)
455 for (i = swi = 0; i < n; ++i) {
456 xsw = (void *)((char *)xswbuf + i * ksize);
457 if ((xsw->xsw_flags & SW_FREED) == 0)
458 continue;
459 ++swi;
461 if (swi >= swap_max)
462 swi = swap_max - 1;
464 bzero(swap_ary, sizeof(struct kvm_swap) * (swi + 1));
467 * Accumulate results. If the provided swap_ary[] is too
468 * small will only populate up to the available entries,
469 * but we always populate the cumulative results entry.
471 for (i = ti = 0; i < n; ++i) {
472 xsw = (void *)((char *)xswbuf + i * ksize);
474 if ((xsw->xsw_flags & SW_FREED) == 0)
475 continue;
477 swap_ary[swi].ksw_total += xsw->xsw_nblks;
478 swap_ary[swi].ksw_used += xsw->xsw_used;
480 if (ti < swi) {
481 swap_ary[ti].ksw_total = xsw->xsw_nblks;
482 swap_ary[ti].ksw_used = xsw->xsw_used;
483 swap_ary[ti].ksw_flags = xsw->xsw_flags;
484 GETSWDEVNAME(xsw->xsw_dev, swap_ary[ti].ksw_devname,
485 flags);
486 ++ti;
490 free(xswbuf);
491 return(swi);