/* lib/libkvm/kvm_getswapinfo.c - retrieve swap usage statistics */
/*
 * Copyright (c) 1999 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libkvm/kvm_getswapinfo.c,v 1.10.2.4 2003/01/12 09:23:13 dillon Exp $
 * $DragonFly: src/lib/libkvm/kvm_getswapinfo.c,v 1.5 2006/03/18 17:15:35 dillon Exp $
 */
38 #define _KERNEL_STRUCTURES
40 #include <sys/param.h>
41 #include <sys/time.h>
42 #include <sys/ucred.h>
43 #include <sys/stat.h>
44 #include <sys/conf.h>
45 #include <sys/blist.h>
46 #include <sys/sysctl.h>
47 #include <vm/vm_param.h>
49 #include <err.h>
50 #include <fcntl.h>
51 #include <kvm.h>
52 #include <nlist.h>
53 #include <paths.h>
54 #include <stdio.h>
55 #include <stdlib.h>
56 #include <string.h>
57 #include <unistd.h>
58 #include <limits.h>
60 #include "kvm_private.h"
62 static struct nlist kvm_swap_nl[] = {
63 { "_swapblist" }, /* new radix swap list */
64 { "_swdevt" }, /* list of swap devices and sizes */
65 { "_nswdev" }, /* number of swap devices */
66 { "_dmmax" }, /* maximum size of a swap block */
67 { "" }
70 #define NL_SWAPBLIST 0
71 #define NL_SWDEVT 1
72 #define NL_NSWDEV 2
73 #define NL_DMMAX 3
75 static int kvm_swap_nl_cached = 0;
76 static int nswdev;
77 static int unswdev;
78 static int dmmax;
80 static int nlist_init(kvm_t *kd);
81 static void dump_blist(kvm_t *kd);
82 static int kvm_getswapinfo_sysctl(kvm_t *kd, struct kvm_swap *swap_ary,
83 int swap_max, int flags);
/*
 * Helper macros for reading kernel objects by symbol index (KGET*) or
 * raw address (KGET2*).  The non-N variants warn on short reads; the
 * *N variants instead evaluate to 1 on success and 0 on failure.
 * KGETRET returns 0 from the enclosing function on a short read.
 */
#define SVAR(var) __STRING(var) /* to force expansion */
#define KGET(idx, var) \
	KGET1(idx, &var, sizeof(var), SVAR(var))
#define KGET1(idx, p, s, msg) \
	KGET2(kvm_swap_nl[idx].n_value, p, s, msg)
#define KGET2(addr, p, s, msg) \
	if (kvm_read(kd, (u_long)(addr), p, s) != s) \
		warnx("cannot read %s: %s", msg, kvm_geterr(kd))
#define KGETN(idx, var) \
	KGET1N(idx, &var, sizeof(var), SVAR(var))
#define KGET1N(idx, p, s, msg) \
	KGET2N(kvm_swap_nl[idx].n_value, p, s, msg)
#define KGET2N(addr, p, s, msg) \
	((kvm_read(kd, (u_long)(addr), p, s) == s) ? 1 : 0)
#define KGETRET(addr, p, s, msg) \
	if (kvm_read(kd, (u_long)(addr), p, s) != s) { \
		warnx("cannot read %s: %s", msg, kvm_geterr(kd)); \
		return (0); \
	}

/*
 * Format a swap device name into str: "[NFS swap]" for NODEV, otherwise
 * the device name, optionally prefixed with /dev/ (SWIF_DEV_PREFIX).
 */
#define GETSWDEVNAME(dev, str, flags) \
	if (dev == NODEV) { \
		strlcpy(str, "[NFS swap]", sizeof(str)); \
	} else { \
		snprintf( \
			str, sizeof(str), "%s%s", \
			((flags & SWIF_DEV_PREFIX) ? _PATH_DEV : ""), \
			devname(dev, S_IFCHR) \
		); \
	}
117 kvm_getswapinfo(
118 kvm_t *kd,
119 struct kvm_swap *swap_ary,
120 int swap_max,
121 int flags
123 int i, ti, swi;
124 swblk_t ttl;
125 struct swdevt *sw;
126 struct swdevt swinfo;
129 * clear cache
131 if (kd == NULL) {
132 kvm_swap_nl_cached = 0;
133 return(0);
136 if (swap_max < 1)
137 return (-1);
140 * Use sysctl if possible
142 if (kvm_ishost(kd) && (flags & SWIF_DUMP_TREE) == 0) {
143 ti = kvm_getswapinfo_sysctl(kd, swap_ary, swap_max, flags);
144 if (ti >= 0)
145 return(ti);
149 * namelist
151 if (!nlist_init(kd))
152 return (-1);
154 swi = unswdev;
155 if (swi >= swap_max)
156 swi = swap_max - 1;
158 bzero(swap_ary, sizeof(struct kvm_swap) * (swi + 1));
160 KGET(NL_SWDEVT, sw);
161 for (i = ti = 0; i < nswdev; ++i) {
162 KGET2(&sw[i], &swinfo, sizeof(swinfo), "swinfo");
164 if (swinfo.sw_nblks == 0)
165 continue;
168 * The first dmmax is never allocated to avoid
169 * trashing the disklabels.
171 ttl = swinfo.sw_nblks - dmmax;
172 if (ttl == 0)
173 continue;
175 swap_ary[swi].ksw_total += ttl;
176 swap_ary[swi].ksw_used += swinfo.sw_nused;
178 if (ti < swi) {
179 swap_ary[ti].ksw_total = ttl;
180 swap_ary[ti].ksw_used = swinfo.sw_nused;
181 swap_ary[ti].ksw_flags = swinfo.sw_flags;
182 GETSWDEVNAME(swinfo.sw_dev, swap_ary[ti].ksw_devname,
183 flags);
184 ++ti;
188 if (flags & SWIF_DUMP_TREE)
189 dump_blist(kd);
190 return (swi);
193 static int
194 nlist_init(kvm_t *kd)
196 int i;
197 struct swdevt *sw;
198 struct swdevt swinfo;
200 if (kvm_swap_nl_cached)
201 return (1);
203 if (kvm_nlist(kd, kvm_swap_nl) < 0)
204 return (0);
207 * required entries
209 if (kvm_swap_nl[NL_SWDEVT].n_value == 0 ||
210 kvm_swap_nl[NL_NSWDEV].n_value == 0 ||
211 kvm_swap_nl[NL_DMMAX].n_value == 0 ||
212 kvm_swap_nl[NL_SWAPBLIST].n_type == 0) {
213 return (0);
217 * get globals, type of swap
219 KGET(NL_NSWDEV, nswdev);
220 KGET(NL_DMMAX, dmmax);
223 * figure out how many actual swap devices are enabled
225 KGET(NL_SWDEVT, sw);
226 for (i = unswdev = 0; i < nswdev; ++i) {
227 KGET2(&sw[i], &swinfo, sizeof(swinfo), "swinfo");
228 if (swinfo.sw_nblks)
229 ++unswdev;
233 kvm_swap_nl_cached = 1;
234 return (1);
238 * scanradix() - support routine for radix scanner
241 #define TABME tab, tab, ""
243 static int
244 scanradix(
245 blmeta_t *scan,
246 blmeta_t *scan_cache,
247 swblk_t blk,
248 int64_t radix,
249 swblk_t skip,
250 swblk_t count,
251 kvm_t *kd,
252 int dmmax,
253 int nswdev,
254 int64_t *availp,
255 int tab
257 blmeta_t meta;
258 blmeta_t scan_array[BLIST_BMAP_RADIX];
259 int i;
261 if (scan_cache) {
262 meta = *scan_cache;
263 } else if (skip == BLIST_META_RADIX) {
264 if (kvm_read(kd, (u_long)scan, scan_array, sizeof(scan_array)) != sizeof(scan_array)) {
265 warnx("cannot read %s: %s", "blmeta_t", kvm_geterr(kd));
266 bzero(scan_array, sizeof(scan_array));
268 meta = scan_array[0];
269 } else {
270 KGET2(scan, &meta, sizeof(meta), "blmeta_t");
274 * Terminator
276 if (meta.bm_bighint == (swblk_t)-1) {
277 printf("%*.*s(0x%06jx,%jd) Terminator\n",
278 TABME,
279 (intmax_t)blk,
280 (intmax_t)radix
282 return(-1);
285 if (radix == BLIST_BMAP_RADIX) {
287 * Leaf bitmap
289 printf("%*.*s(0x%06jx,%jd) Bitmap %016jx big=%jd\n",
290 TABME,
291 (intmax_t)blk,
292 (intmax_t)radix,
293 (intmax_t)meta.u.bmu_bitmap,
294 (intmax_t)meta.bm_bighint
297 if (meta.u.bmu_bitmap) {
298 for (i = 0; i < BLIST_BMAP_RADIX; ++i) {
299 if (meta.u.bmu_bitmap & (1 << i))
300 ++*availp;
303 } else if (meta.u.bmu_avail == radix) {
305 * Meta node if all free
307 printf("%*.*s(0x%06jx,%jd) Submap ALL-FREE (big=%jd) {\n",
308 TABME,
309 (intmax_t)blk,
310 (intmax_t)radix,
311 (intmax_t)meta.bm_bighint
313 *availp += radix;
314 } else if (meta.u.bmu_avail == 0) {
316 * Meta node if all used
318 printf("%*.*s(0x%06jx,%jd) Submap ALL-ALLOCATED (big=%jd)\n",
319 TABME,
320 (intmax_t)blk,
321 (intmax_t)radix,
322 (intmax_t)meta.bm_bighint
324 } else {
326 * Meta node if not all free
328 int i;
329 int next_skip;
330 int64_t avail_tmp = 0;
332 printf("%*.*s(0x%06jx,%jd) Submap avail=%jd big=%jd {\n",
333 TABME,
334 (intmax_t)blk,
335 (intmax_t)radix,
336 (intmax_t)meta.u.bmu_avail,
337 (intmax_t)meta.bm_bighint
340 radix /= BLIST_META_RADIX;
341 next_skip = skip / BLIST_META_RADIX;
343 for (i = 1; i <= skip; i += next_skip) {
344 int r;
345 swblk_t vcount = (count > radix) ?
346 (swblk_t)radix : count;
348 r = scanradix(
349 &scan[i],
350 ((next_skip == 1) ? &scan_array[i] : NULL),
351 blk,
352 radix,
353 next_skip - 1,
354 vcount,
356 dmmax,
357 nswdev,
358 &avail_tmp,
359 tab + 4
361 if (r < 0)
362 break;
363 blk += (swblk_t)radix;
365 *availp += avail_tmp;
366 if (avail_tmp == meta.u.bmu_avail)
367 printf("%*.*s}\n", TABME);
368 else
369 printf("%*.*s} (AVAIL MISMATCH %jd/%jd\n",
370 TABME,
371 (intmax_t)avail_tmp,
372 (intmax_t)meta.u.bmu_avail);
374 return(0);
377 static void
378 dump_blist(kvm_t *kd)
380 struct blist *swapblist = NULL;
381 struct blist blcopy = { 0 };
382 int64_t avail = 0;
384 KGET(NL_SWAPBLIST, swapblist);
386 if (swapblist == NULL) {
387 printf("radix tree: NULL - no swap in system\n");
388 return;
391 KGET2(swapblist, &blcopy, sizeof(blcopy), "*swapblist");
393 printf("radix tree: %jd/%jd/%jd blocks, %jdK wired\n",
394 (intmax_t)blcopy.bl_free,
395 (intmax_t)blcopy.bl_blocks,
396 (intmax_t)blcopy.bl_radix,
397 (intmax_t)((blcopy.bl_rootblks * sizeof(blmeta_t) + 1023)/
398 1024)
401 scanradix(
402 blcopy.bl_root,
403 NULL,
405 blcopy.bl_radix,
406 blcopy.bl_skip,
407 blcopy.bl_rootblks,
409 dmmax,
410 nswdev,
411 &avail,
414 printf("final availability: %jd\n", (intmax_t)avail);
417 static
419 kvm_getswapinfo_sysctl(kvm_t *kd, struct kvm_swap *swap_ary,
420 int swap_max, int flags)
422 size_t bytes = 0;
423 size_t ksize;
424 int ti;
425 int swi;
426 int n;
427 int i;
428 char *xswbuf;
429 struct xswdev *xsw;
431 if (sysctlbyname("vm.swap_info_array", NULL, &bytes, NULL, 0) < 0)
432 return(-1);
433 if (bytes == 0)
434 return(-1);
436 xswbuf = malloc(bytes);
437 if (sysctlbyname("vm.swap_info_array", xswbuf, &bytes, NULL, 0) < 0) {
438 free(xswbuf);
439 return(-1);
441 if (bytes == 0) {
442 free(xswbuf);
443 return(-1);
447 * Calculate size of xsw entry returned by kernel (it can be larger
448 * than the one we have if there is a version mismatch).
450 ksize = ((struct xswdev *)xswbuf)->xsw_size;
451 n = (int)(bytes / ksize);
454 * Calculate the number of live swap devices and calculate
455 * the swap_ary[] index used for the cumulative result (swi)
457 for (i = swi = 0; i < n; ++i) {
458 xsw = (void *)((char *)xswbuf + i * ksize);
459 if ((xsw->xsw_flags & SW_FREED) == 0)
460 continue;
461 ++swi;
463 if (swi >= swap_max)
464 swi = swap_max - 1;
466 bzero(swap_ary, sizeof(struct kvm_swap) * (swi + 1));
469 * Accumulate results. If the provided swap_ary[] is too
470 * small will only populate up to the available entries,
471 * but we always populate the cumulative results entry.
473 for (i = ti = 0; i < n; ++i) {
474 xsw = (void *)((char *)xswbuf + i * ksize);
476 if ((xsw->xsw_flags & SW_FREED) == 0)
477 continue;
479 swap_ary[swi].ksw_total += xsw->xsw_nblks;
480 swap_ary[swi].ksw_used += xsw->xsw_used;
482 if (ti < swi) {
483 swap_ary[ti].ksw_total = xsw->xsw_nblks;
484 swap_ary[ti].ksw_used = xsw->xsw_used;
485 swap_ary[ti].ksw_flags = xsw->xsw_flags;
486 GETSWDEVNAME(xsw->xsw_dev, swap_ary[ti].ksw_devname,
487 flags);
488 ++ti;
492 free(xswbuf);
493 return(swi);