/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING		32

#define ZENTRY_FREE		0x12342378

int zone_burst = 32;

static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
        globaldata_t gd = mycpu;
        void *item;
        int n;

#ifdef INVARIANTS
        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);
#endif
retry:
        /*
         * Avoid spinlock contention by allocating from a per-cpu queue
         */
        if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
                crit_enter_gd(gd);
                if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
                        item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
                        KASSERT(item != NULL,
                                ("zitems_pcpu unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
                        ((void **)item)[1] = NULL;
#endif
                        z->zitems_pcpu[gd->gd_cpuid] = ((void **)item)[0];
                        --z->zfreecnt_pcpu[gd->gd_cpuid];
                        z->znalloc++;
                        crit_exit_gd(gd);
                        return item;
                }
                crit_exit_gd(gd);
        }

        /*
         * Per-zone spinlock for the remainder.  Always load at least one
         * item.
         */
        spin_lock(&z->zlock);
        if (z->zfreecnt > z->zfreemin) {
                n = zone_burst;
                do {
                        item = z->zitems;
#ifdef INVARIANTS
                        KASSERT(item != NULL, ("zitems unexpectedly NULL"));
                        if (((void **)item)[1] != (void *)ZENTRY_FREE)
                                zerror(ZONE_ERROR_NOTFREE);
#endif
                        z->zitems = ((void **)item)[0];
                        z->zfreecnt--;
                        ((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
                        z->zitems_pcpu[gd->gd_cpuid] = item;
                        ++z->zfreecnt_pcpu[gd->gd_cpuid];
                } while (--n > 0 && z->zfreecnt > z->zfreemin);
                spin_unlock(&z->zlock);
                goto retry;
        } else {
                spin_unlock(&z->zlock);
                item = zget(z);
                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
                 * will always succeed.  If it doesn't, we panic here.
                 */
                if (item == NULL && (z->zflags & ZONE_PANICFAIL))
                        panic("zalloc(%s) failed", z->zname);
        }
        return item;
}
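
/*
 * Illustrative sketch of the free-item layout that zalloc()/zfree() rely on.
 * The structure name below is hypothetical; the code itself simply casts
 * items to (void **) as seen above:
 *
 *	struct zfreeitem {
 *		void	*zf_next;	-- ((void **)item)[0]: next free item
 *		void	*zf_magic;	-- ((void **)item)[1]: ZENTRY_FREE while
 *					   the item is free (INVARIANTS only)
 *	};
 *
 * This is why callers cannot rely on the first two longwords of an item
 * surviving a zfree()/zalloc() cycle (see the type-stability note below).
 */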
/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
        globaldata_t gd = mycpu;
        void *tail_item;
        int count;
        int zmax;

        /*
         * Avoid spinlock contention by freeing into a per-cpu queue
         */
        if ((zmax = z->zmax) != 0)
                zmax = zmax / ncpus / 16;
        if (zmax < 64)
                zmax = 64;

        /*
         * Add to pcpu cache
         */
        crit_enter_gd(gd);
        ((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
        if (((void **)item)[1] == (void *)ZENTRY_FREE)
                zerror(ZONE_ERROR_ALREADYFREE);
        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
        z->zitems_pcpu[gd->gd_cpuid] = item;
        ++z->zfreecnt_pcpu[gd->gd_cpuid];

        if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
                crit_exit_gd(gd);
                return;
        }

        /*
         * Hysteresis, move (zmax) (calculated below) items to the pool.
         */
        zmax = zmax / 2;
        if (zmax > zone_burst)
                zmax = zone_burst;
        tail_item = item;
        count = 1;

        while (count < zmax) {
                tail_item = ((void **)tail_item)[0];
                ++count;
        }

        z->zitems_pcpu[gd->gd_cpuid] = ((void **)tail_item)[0];
        z->zfreecnt_pcpu[gd->gd_cpuid] -= count;

        /*
         * Per-zone spinlock for the remainder.
         *
         * Also implement hysteresis by freeing a number of pcpu
         * entries.
         */
        spin_lock(&z->zlock);
        ((void **)tail_item)[0] = z->zitems;
        z->zitems = item;
        z->zfreecnt += count;

        spin_unlock(&z->zlock);
        crit_exit_gd(gd);
}
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
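
/*
 * Typical life cycle, as an illustrative sketch (the item type and zone
 * name below are hypothetical, not taken from an actual caller):
 *
 *	static vm_zone_t myitem_zone;
 *
 *	myitem_zone = zinit("MYITEM", sizeof(struct myitem), 0, 0, 1);
 *
 *	struct myitem *ip = zalloc(myitem_zone);
 *	...
 *	zfree(myitem_zone, ip);
 *
 * Keep in mind that the first two longwords of struct myitem are reused
 * for free-list linkage while the item sits in the zone.
 */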
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
        int nentries, int flags, int zalloc)
{
        size_t totsize;

        /*
         * Only zones created with zinit() are destroyable.
         */
        if (z->zflags & ZONE_DESTROYABLE)
                panic("zinitna: can't create destroyable zone");

        /*
         * NOTE: We can only adjust zsize if we previously did not
         *	 use zbootinit().
         */
        if ((z->zflags & ZONE_BOOT) == 0) {
                z->zsize = roundup2(size, ZONE_ROUNDING);
                spin_init(&z->zlock, "zinitna");
                z->zfreecnt = 0;
                z->ztotal = 0;
                z->zmax = 0;
                z->zname = name;
                z->znalloc = 0;
                z->zitems = NULL;

                lwkt_gettoken(&vm_token);
                LIST_INSERT_HEAD(&zlist, z, zlink);
                lwkt_reltoken(&vm_token);

                bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
                bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
        }

        z->zkmvec = NULL;
        z->zkmcur = z->zkmmax = 0;
        z->zflags |= flags;

        /*
         * If we cannot wait, allocate KVA space up front, and we will fill
         * in pages as needed.  This is particularly required when creating
         * an allocation space for map entries in kernel_map, because we
         * do not want to go into a recursion deadlock with
         * vm_map_entry_reserve().
         */
        if (z->zflags & ZONE_INTERRUPT) {
                totsize = round_page((size_t)z->zsize * nentries);
                atomic_add_long(&zone_kmem_kvaspace, totsize);

                z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
                if (z->zkva == 0) {
                        LIST_REMOVE(z, zlink);
                        return 0;
                }

                z->zpagemax = totsize / PAGE_SIZE;
                if (obj == NULL) {
                        z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
                } else {
                        z->zobj = obj;
                        _vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
                        vm_object_drop(obj);
                }
                z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
                                VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
                z->zmax += nentries;
        } else {
                z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
                z->zmax = 0;
        }

        if (z->zsize > PAGE_SIZE)
                z->zfreemin = 1;
        else
                z->zfreemin = PAGE_SIZE / z->zsize;

        z->zpagecount = 0;
        if (zalloc)
                z->zalloc = zalloc;
        else
                z->zalloc = 1;

        /*
         * Populate the interrupt zone at creation time rather than
         * on first allocation, as this is a potentially long operation.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                void *buf;

                buf = zget(z);
                zfree(z, buf);
        }

        return 1;
}
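
/*
 * Illustrative sketch only: one way an interrupt-safe zone can be brought
 * up is in two steps, zbootinit() on static storage very early and then
 * zinitna() with ZONE_INTERRUPT once the VM system is running.  The names
 * below (pvzone, pvzone_obj, NPVENTRIES) are hypothetical placeholders:
 *
 *	zinitna(pvzone, &pvzone_obj, NULL, 0, NPVENTRIES, ZONE_INTERRUPT, 1);
 *
 * With ZONE_INTERRUPT set, the zone is capped at nentries items and the
 * KVA backing all of them is reserved up front by zinitna().
 */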
/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
        vm_zone_t z;

        z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
        if (z == NULL)
                return NULL;

        z->zflags = 0;
        if (zinitna(z, NULL, name, size, nentries,
                    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
                kfree(z, M_ZONE);
                return NULL;
        }

        if (flags & ZONE_DESTROYABLE)
                z->zflags |= ZONE_DESTROYABLE;

        return z;
}
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
        int i;

        bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
        bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

        z->zname = name;
        z->zsize = size;
        z->zpagemax = 0;
        z->zobj = NULL;
        z->zflags = ZONE_BOOT;
        z->zfreemin = 0;
        z->zallocflag = 0;
        z->zpagecount = 0;
        z->zalloc = 0;
        z->znalloc = 0;
        spin_init(&z->zlock, "zbootinit");

        bzero(item, (size_t)nitems * z->zsize);
        z->zitems = NULL;
        for (i = 0; i < nitems; i++) {
                ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                z->zitems = item;
                item = (uint8_t *)item + z->zsize;
        }
        z->zfreecnt = nitems;
        z->zmax = nitems;
        z->ztotal = nitems;

        lwkt_gettoken(&vm_token);
        LIST_INSERT_HEAD(&zlist, z, zlink);
        lwkt_reltoken(&vm_token);
}
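
/*
 * Illustrative sketch only: zbootinit() takes a caller-supplied, statically
 * sized block of items so a zone can be used before kernel memory
 * allocation is available.  The names below are hypothetical:
 *
 *	static struct bootitem	bootitems[NBOOTITEMS];
 *	static struct vm_zone	bootzone_store;
 *
 *	zbootinit(&bootzone_store, "BOOTITEM", sizeof(struct bootitem),
 *		  bootitems, NBOOTITEMS);
 */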
/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
        vm_page_t m;
        int i;

        if (z == NULL)
                panic("zdestroy: null zone");
        if ((z->zflags & ZONE_DESTROYABLE) == 0)
                panic("zdestroy: undestroyable zone");

        lwkt_gettoken(&vm_token);
        LIST_REMOVE(z, zlink);
        lwkt_reltoken(&vm_token);

        /*
         * Release virtual mappings, physical memory and update sysctl stats.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Pages mapped via pmap_kenter() must be removed from the
                 * kernel_pmap() before calling kmem_free() to avoid issues
                 * with kernel_pmap.pm_stats.resident_count.
                 */
                pmap_qremove(z->zkva, z->zpagemax);
                vm_object_hold(z->zobj);
                for (i = 0; i < z->zpagecount; ++i) {
                        m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
                        vm_page_unwire(m, 0);
                        vm_page_free(m);
                }

                /*
                 * Free the mapping.
                 */
                kmem_free(&kernel_map, z->zkva,
                          (size_t)z->zpagemax * PAGE_SIZE);
                atomic_subtract_long(&zone_kmem_kvaspace,
                                     (size_t)z->zpagemax * PAGE_SIZE);

                /*
                 * Free the backing object and physical pages.
                 */
                vm_object_deallocate(z->zobj);
                vm_object_drop(z->zobj);
                atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
        } else {
                for (i = 0; i < z->zkmcur; i++) {
                        kmem_free(&kernel_map, z->zkmvec[i],
                                  (size_t)z->zalloc * PAGE_SIZE);
                        atomic_subtract_int(&zone_kern_pages, z->zalloc);
                }
                if (z->zkmvec != NULL)
                        kfree(z->zkmvec, M_ZONE);
        }

        spin_uninit(&z->zlock);
        kfree(z, M_ZONE);
}
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
        int i;
        vm_page_t m;
        int nitems;
        int npages;
        int savezpc;
        size_t nbytes;
        size_t noffset;
        void *item;

        if (z == NULL)
                panic("zget: null zone");

        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Interrupt zones do not mess with the kernel_map, they
                 * simply populate an existing mapping.
                 *
                 * First reserve the required space.
                 */
                vm_object_hold(z->zobj);
                noffset = (size_t)z->zpagecount * PAGE_SIZE;
                noffset -= noffset % z->zsize;
                savezpc = z->zpagecount;
                if (z->zpagecount + z->zalloc > z->zpagemax)
                        z->zpagecount = z->zpagemax;
                else
                        z->zpagecount += z->zalloc;
                item = (char *)z->zkva + noffset;
                npages = z->zpagecount - savezpc;
                nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
                         z->zsize;
                atomic_add_int(&zone_kmem_pages, npages);

                /*
                 * Now allocate the pages.  Note that we can block in the
                 * loop, so we've already done all the necessary calculations
                 * and reservations above.
                 */
                for (i = 0; i < npages; ++i) {
                        vm_offset_t zkva;

                        m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
                        KKASSERT(m != NULL);
                        /* note: z might be modified due to blocking */

                        KKASSERT(m->queue == PQ_NONE);
                        m->valid = VM_PAGE_BITS_ALL;
                        vm_page_wire(m);
                        vm_page_wakeup(m);

                        zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
                        pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
                        bzero((void *)zkva, PAGE_SIZE);
                }
                vm_object_drop(z->zobj);
        } else if (z->zflags & ZONE_SPECIAL) {
                /*
                 * The special zone is the one used for vm_map_entry_t's.
                 * We have to avoid an infinite recursion in
                 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
                 * instead.  The map entries are pre-reserved by the kernel
                 * by vm_map_entry_reserve_cpu_init().
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        } else {
                /*
                 * Otherwise allocate KVA from the kernel_map.
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
                        bzero(item, nbytes);

                        if (z->zflags & ZONE_DESTROYABLE) {
                                if (z->zkmcur == z->zkmmax) {
                                        z->zkmmax =
                                            z->zkmmax == 0 ? 1 : z->zkmmax * 2;
                                        z->zkmvec = krealloc(z->zkmvec,
                                            z->zkmmax * sizeof(z->zkmvec[0]),
                                            M_ZONE, M_WAITOK);
                                }
                                z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
                        }
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        }

        spin_lock(&z->zlock);
        z->ztotal += nitems;

        /*
         * Save one for immediate allocation
         */
        if (nitems != 0) {
                nitems -= 1;
                for (i = 0; i < nitems; i++) {
                        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                        z->zitems = item;
                        item = (uint8_t *)item + z->zsize;
                }
                z->zfreecnt += nitems;
                z->znalloc++;
        } else if (z->zfreecnt > 0) {
                item = z->zitems;
                z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
                if (((void **)item)[1] != (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **)item)[1] = NULL;
#endif
                z->zfreecnt--;
                z->znalloc++;
        } else {
                item = NULL;
        }
        spin_unlock(&z->zlock);

        /*
         * A special zone may have used a kernel-reserved vm_map_entry.  If
         * so we have to be sure to recover our reserve so we don't run out.
         * We will panic if we run out.
         */
        if (z->zflags & ZONE_SPECIAL)
                vm_map_entry_reserve(0);

        return item;
}
/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        vm_zone_t curzone;
        char tmpbuf[128];
        char tmpname[14];

        ksnprintf(tmpbuf, sizeof(tmpbuf),
            "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
        error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
        if (error)
                return (error);

        lwkt_gettoken(&vm_token);
        LIST_FOREACH(curzone, &zlist, zlink) {
                int i;
                int n;
                int len;
                int offset;
                int freecnt;

                len = strlen(curzone->zname);
                if (len >= (sizeof(tmpname) - 1))
                        len = (sizeof(tmpname) - 1);
                for (i = 0; i < sizeof(tmpname) - 1; i++)
                        tmpname[i] = ' ';
                tmpname[i] = 0;
                memcpy(tmpname, curzone->zname, len);
                tmpname[len] = ':';
                offset = 0;
                if (curzone == LIST_FIRST(&zlist)) {
                        offset = 1;
                        tmpbuf[0] = '\n';
                }
                freecnt = curzone->zfreecnt;
                for (n = 0; n < ncpus; ++n)
                        freecnt += curzone->zfreecnt_pcpu[n];

                ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
                        "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
                        tmpname, curzone->zsize, curzone->zmax,
                        (curzone->ztotal - freecnt),
                        freecnt, curzone->znalloc);

                len = strlen((char *)tmpbuf);
                if (LIST_NEXT(curzone, zlink) == NULL)
                        tmpbuf[len - 1] = 0;

                error = SYSCTL_OUT(req, tmpbuf, len);

                if (error)
                        break;
        }
        lwkt_reltoken(&vm_token);
        return (error);
}
#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
        char *msg;

        switch (error) {
        case ZONE_ERROR_INVALID:
                msg = "zone: invalid zone";
                break;
        case ZONE_ERROR_NOTFREE:
                msg = "zone: entry not free";
                break;
        case ZONE_ERROR_ALREADYFREE:
                msg = "zone: freeing free entry";
                break;
        default:
                msg = "zone: invalid error";
                break;
        }
        panic("%s", msg);
}

#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
           NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
           CTLFLAG_RD, &zone_kmem_pages, 0,
           "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst,
           CTLFLAG_RW, &zone_burst, 0,
           "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
           CTLFLAG_RD, &zone_kmem_kvaspace, 0,
           "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
           CTLFLAG_RD, &zone_kern_pages, 0,
           "Number of non-interrupt safe pages allocated by zone");
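
/*
 * These knobs are visible from userland via sysctl(8); for example,
 * "sysctl vm.zone" dumps the per-zone table produced by sysctl_vm_zone()
 * above, and the writable "sysctl vm.zone_burst=64" tunes the
 * depot-to-pcpu transfer burst (illustrative invocations).
 */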