/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING		32

#define	ZENTRY_FREE		0x12342378

int zone_burst = 32;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
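
/*
 * Example (illustrative sketch only, not part of the original source):
 * a caller that cannot tolerate allocation failure typically creates its
 * zone with ZONE_PANICFAIL so the NULL check can be omitted; otherwise
 * the return value must be tested.  The zone "foo_zone" and structure
 * "struct foo" below are hypothetical.
 *
 *	struct foo *fp;
 *
 *	fp = zalloc(foo_zone);
 *	if (fp == NULL)			// unnecessary with ZONE_PANICFAIL
 *		return (ENOMEM);
 *	...
 *	zfree(foo_zone, fp);
 */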
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	void *item;
	int n;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
		crit_enter_gd(gd);
		if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
			item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			z->zitems_pcpu[gd->gd_cpuid] = ((void **) item)[0];
			--z->zfreecnt_pcpu[gd->gd_cpuid];
			z->znalloc++;
			crit_exit_gd(gd);
			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			z->zfreecnt--;
			((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
			z->zitems_pcpu[gd->gd_cpuid] = item;
			++z->zfreecnt_pcpu[gd->gd_cpuid];
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zlock);
		goto retry;
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	void *tail_item;
	int count;
	int zmax;

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	if ((zmax = z->zmax) != 0)
		zmax = zmax / ncpus / 16;
	if (zmax < 64)
		zmax = 64;

	/*
	 * Add to pcpu cache
	 */
	crit_enter_gd(gd);
	((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	z->zitems_pcpu[gd->gd_cpuid] = item;
	++z->zfreecnt_pcpu[gd->gd_cpuid];

	if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Hysteresis, move (zmax) (calculated below) items to the pool.
	 */
	zmax = zmax / 2;
	if (zmax > zone_burst)
		zmax = zone_burst;
	tail_item = item;
	count = 1;

	while (count < zmax) {
		tail_item = ((void **)tail_item)[0];
		++count;
	}

	z->zitems_pcpu[gd->gd_cpuid] = ((void **)tail_item)[0];
	z->zfreecnt_pcpu[gd->gd_cpuid] -= count;

	/*
	 * Per-zone spinlock for the remainder.
	 *
	 * Also implement hysteresis by freeing a number of pcpu
	 * entries.
	 */
	spin_lock(&z->zlock);
	((void **)tail_item)[0] = z->zitems;
	z->zitems = item;
	z->zfreecnt += count;

	spin_unlock(&z->zlock);
	crit_exit_gd(gd);
}

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
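
/*
 * Illustration (sketch, not part of the original source): the "first two
 * longwords" restriction above exists because a free item's first pointer
 * is reused as the free-list link (and, under INVARIANTS, the second as
 * the ZENTRY_FREE magic).  Conceptually a push onto a free list looks
 * like the fragment below, where "head" stands for z->zitems or a
 * per-cpu z->zitems_pcpu[] slot.
 *
 *	((void **)item)[0] = head;	// link overlays the item's first word
 *	head = item;
 */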

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
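
/*
 * Example (illustrative sketch only; the zone name and sizes are
 * hypothetical): a subsystem that must allocate from interrupt context
 * would pre-size an interrupt-safe zone, while an ordinary zone simply
 * grows on demand.
 *
 *	static struct vm_zone foo_zone_store;
 *	vm_zone_t foo_zone;
 *
 *	// interrupt-safe: KVA for 1024 entries is reserved up front
 *	zinitna(&foo_zone_store, NULL, "foozone", sizeof(struct foo),
 *		1024, ZONE_INTERRUPT);
 *	foo_zone = &foo_zone_store;
 *
 *	// ordinary zone: nentries is ignored and memory is unlimited
 *	foo_zone = zinit("foozone", sizeof(struct foo), 0, 0);
 */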
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zlock, "zinitna");
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
		bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
					      VM_SUBSYS_ZALLOC);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
			vm_object_drop(obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;

	/*
	 * Reduce kernel_map spam by allocating in chunks of 4 pages.
	 */
	z->zalloc = 4;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}

/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags)
{
	vm_zone_t z;

	z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
	bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	spin_init(&z->zlock, "zbootinit");

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
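
/*
 * Example (illustrative sketch only; "bar_zone" and "struct bar" are
 * hypothetical): only zones created through zinit() with ZONE_DESTROYABLE
 * may be torn down again, which is useful for zones tied to the lifetime
 * of a transient subsystem.
 *
 *	vm_zone_t bar_zone;
 *
 *	bar_zone = zinit("barzone", sizeof(struct bar), 0, ZONE_DESTROYABLE);
 *	...
 *	zdestroy(bar_zone);	// frees KVA, pages, and the zone header
 */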
void
zdestroy(vm_zone_t z)
{
	vm_page_t m;
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap() before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);
		vm_object_hold(z->zobj);
		for (i = 0; i < z->zpagecount; ++i) {
			m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		vm_object_drop(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems;
	int npages;
	int savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First reserve the required space.
		 */
		vm_object_hold(z->zobj);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		noffset -= noffset % z->zsize;
		savezpc = z->zpagecount;
		if (z->zpagecount + z->zalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += z->zalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_int(&zone_kmem_pages, npages);

		/*
		 * Now allocate the pages.  Note that we can block in the
		 * loop, so we've already done all the necessary calculations
		 * and reservations above.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
			KKASSERT(m != NULL);
			/* note: z might be modified due to blocking */

			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_int(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_int(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax==0 ? 1 : z->zkmmax*2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;
	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = NULL;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error=0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM SIZE LIMIT USED FREE REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int n;
		int len;
		int offset;
		int freecnt;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for(i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}
		freecnt = curzone->zfreecnt;
		for (n = 0; n < ncpus; ++n)
			freecnt += curzone->zfreecnt_pcpu[n];

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - freecnt),
			freecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst,
	CTLFLAG_RW, &zone_burst, 0,
	"Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt safe pages allocated by zone");