/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING		32

#define ZENTRY_FREE		0x12342378
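
/*
 * Note on the constants above: ZONE_ROUNDING is the granularity zinitna()
 * rounds item sizes up to (e.g. roundup2(40, 32) == 64), and under
 * INVARIANTS the second pointer-sized word of every free item is stamped
 * with the ZENTRY_FREE sentinel so zalloc()/zfree() can detect corruption
 * and double frees.  The sentinel value itself is arbitrary.
 */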
int zone_burst = 32;

static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	void *item;
	int n;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
		crit_enter_gd(gd);
		if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
			item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			z->zitems_pcpu[gd->gd_cpuid] = ((void **)item)[0];
			--z->zfreecnt_pcpu[gd->gd_cpuid];
			z->znalloc++;
			crit_exit_gd(gd);
			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			z->zfreecnt--;
			((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
			z->zitems_pcpu[gd->gd_cpuid] = item;
			++z->zfreecnt_pcpu[gd->gd_cpuid];
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zlock);
		goto retry;
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}
/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	int zmax;

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	if ((zmax = z->zmax) != 0)
		zmax = zmax / ncpus / 16;
	if (zmax < 64)
		zmax = 64;
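
	/*
	 * Illustrative arithmetic (not from the original source): with
	 * z->zmax == 65536 and ncpus == 8 the per-cpu free queue is capped
	 * at 65536 / 8 / 16 == 512 items, while small or unlimited
	 * (zmax == 0) zones are clamped to a floor of 64 items per cpu.
	 */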
	if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
		crit_enter_gd(gd);
		((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
		if (((void **)item)[1] == (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_ALREADYFREE);
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems_pcpu[gd->gd_cpuid] = item;
		++z->zfreecnt_pcpu[gd->gd_cpuid];
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Per-zone spinlock for the remainder.
	 */
	spin_lock(&z->zlock);
	((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock(&z->zlock);
}
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
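
/*
 * Typical usage sketch (illustrative only; "foo_zone" and struct foo are
 * hypothetical names, not part of this file):
 *
 *	static vm_zone_t foo_zone;
 *
 *	foo_zone = zinit("FOO", sizeof(struct foo), 0, 0, 1);
 *	if (foo_zone == NULL)
 *		panic("foo_zone");
 *
 *	struct foo *fp = zalloc(foo_zone);
 *	...
 *	zfree(foo_zone, fp);
 *
 * Because zones are type stable, only the first two pointer-sized words of
 * *fp may be scribbled on by the allocator between a zfree() and the next
 * zalloc() of the same item.
 */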
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);

static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
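
/*
 * Call sketch (illustrative; the "bar" names are hypothetical): an
 * interrupt-safe zone reserves KVA for nentries items up front and only
 * populates pages on demand, so a caller embedding its own zone header
 * might do:
 *
 *	static struct vm_zone bar_zone;
 *
 *	zinitna(&bar_zone, NULL, "BAR", sizeof(struct bar),
 *		1024, ZONE_INTERRUPT, 1);
 *
 * allowing up to 1024 items to be handed out by zalloc() without blocking,
 * e.g. from interrupt-time code paths.
 */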
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zlock, "zinitna");
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
		bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
			vm_object_drop(obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;

	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}
/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit() is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
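
/*
 * Sketch of the expected boot-time usage (illustrative; the identifiers are
 * hypothetical): the caller supplies both the zone header and a statically
 * sized array of items, since nothing can be allocated this early.
 *
 *	static struct vm_zone early_zone;
 *	static struct thing early_items[NEARLY];
 *
 *	zbootinit(&early_zone, "THING", sizeof(struct thing),
 *		  early_items, NEARLY);
 *
 * Such a zone can later be upgraded with zinitna(), which detects the
 * ZONE_BOOT flag and preserves the already-established item size.
 */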
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
	bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	spin_init(&z->zlock, "zbootinit");

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}
/*
 * Release all resources owned by a zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	vm_page_t m;
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);
		vm_object_hold(z->zobj);
		for (i = 0; i < z->zpagecount; ++i) {
			m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		vm_object_drop(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems;
	int npages;
	int savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First reserve the required space.
		 */
		vm_object_hold(z->zobj);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		noffset -= noffset % z->zsize;
		savezpc = z->zpagecount;
		if (z->zpagecount + z->zalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += z->zalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_int(&zone_kmem_pages, npages);

		/*
		 * Now allocate the pages.  Note that we can block in the
		 * loop, so we've already done all the necessary calculations
		 * and reservations above.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
			KKASSERT(m != NULL);
			/* note: z might be modified due to blocking */

			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
					    z->zkmmax == 0 ? 1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = NULL;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}
/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
		  "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int n;
		int len;
		int offset;
		int freecnt;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}
		freecnt = curzone->zfreecnt;
		for (n = 0; n < ncpus; ++n)
			freecnt += curzone->zfreecnt_pcpu[n];

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			  "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			  tmpname, curzone->zsize, curzone->zmax,
			  (curzone->ztotal - freecnt),
			  freecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}
#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}

#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	   NULL, 0, sysctl_vm_zone, "A", "Zone Info");
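
/*
 * The OID above is reachable from userland as "vm.zone"; reading it (for
 * example with `sysctl vm.zone`) returns the per-zone table whose columns
 * match the ITEM/SIZE/LIMIT/USED/FREE/REQUESTS header emitted by
 * sysctl_vm_zone() above.
 */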
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	   CTLFLAG_RD, &zone_kmem_pages, 0,
	   "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst,
	   CTLFLAG_RW, &zone_burst, 0,
	   "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	   CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	   "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	   CTLFLAG_RD, &zone_kern_pages, 0,
	   "Number of non-interrupt safe pages allocated by zone");