/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.28 2008/01/23 17:35:48 nth Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <sys/mplock2.h>
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	spin_lock_wr(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = 0;
#endif
		z->zitems = ((void **)item)[0];
		z->zfreecnt--;
		z->znalloc++;
		spin_unlock_wr(&z->zlock);
	} else {
		spin_unlock_wr(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}
/*
 * Free an item to the specified zone.
 */
void
zfree(vm_zone_t z, void *item)
{
	spin_lock_wr(&z->zlock);
	((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock_wr(&z->zlock);
}
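/*
 * Free-list layout: while an item is on a zone's free list, its first
 * two pointer-sized words are overloaded by zfree()/zalloc() as
 *
 *	((void **)item)[0]	link to the next free item (or NULL)
 *	((void **)item)[1]	ZENTRY_FREE magic (INVARIANTS kernels only)
 *
 * This is why the type-stability guarantee described below excludes the
 * first two longwords of each item.
 */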
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
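/*
 * Usage sketch (illustrative; "mydata" names are hypothetical): a
 * typical consumer creates a zone once with zinit() and then allocates
 * and frees items from it.  Per the type-stability note above, fields
 * that must survive a free/realloc cycle belong after the first two
 * longwords.
 *
 *	struct mydata {
 *		void	*md_linkage[2];	(clobbered while on the free list)
 *		int	md_value;	(stable between allocations)
 *	};
 *	static vm_zone_t mydata_zone;
 *	struct mydata *md;
 *
 *	mydata_zone = zinit("MYDATA", sizeof(struct mydata), 0, 0, 1);
 *	md = zalloc(mydata_zone);
 *	...
 *	zfree(mydata_zone, md);
 */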
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		spin_init(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		LIST_INSERT_HEAD(&zlist, z, zlink);
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT,
						     z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}
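/*
 * Usage sketch (illustrative; "isrzone" and "struct isrbuf" are
 * hypothetical): for an interrupt-safe zone the caller supplies a
 * pre-allocated zone structure and a hard entry limit; KVA for all
 * nentries is reserved immediately and the zone is populated at
 * creation rather than on first allocation.
 *
 *	static struct vm_zone isrzone;
 *
 *	zinitna(&isrzone, NULL, "ISRBUF", sizeof(struct isrbuf),
 *		256, ZONE_INTERRUPT, 1);
 */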
/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}
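/*
 * Usage sketch (illustrative; "scratch" is a hypothetical zone): only
 * zones created through zinit() with ZONE_DESTROYABLE may later be torn
 * down, since zdestroy() relies on the zkmvec bookkeeping filled in by
 * zget() and releases the zone header itself.
 *
 *	vm_zone_t scratch;
 *
 *	scratch = zinit("SCRATCH", sizeof(struct scratch), 0,
 *			ZONE_DESTROYABLE, 4);
 *	...
 *	zdestroy(scratch);
 */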
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;

	spin_init(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	LIST_INSERT_HEAD(&zlist, z, zlink);
}
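/*
 * Usage sketch (illustrative; "earlyzone" and "struct early" are
 * hypothetical): early in boot, before kmalloc() and the VM system are
 * usable, a zone can be seeded from a statically allocated array.  The
 * ZONE_BOOT flag set above tells a later zinitna() call to keep the
 * zsize chosen here.
 *
 *	static struct vm_zone earlyzone;
 *	static struct early earlybufs[32];
 *
 *	zbootinit(&earlyzone, "EARLY", sizeof(struct early),
 *		  earlybufs, 32);
 */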
/*
 * Release all resources owned by zone created with zinit().
 */
void
zdestroy(vm_zone_t z)
{
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	LIST_REMOVE(z, zlink);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva, z->zpagemax * PAGE_SIZE);
		atomic_subtract_int(&zone_kmem_kvaspace,
				    z->zpagemax * PAGE_SIZE);
		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
/*
 * Internal zone routine.  Not to be called from external (non vm_zone)
 * code.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	int savezpc;
	void *item;

	if (z == NULL)
		panic("zget: null zone");
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 */
		savezpc = z->zpagecount;
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *)z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			/* note: z might be modified due to blocking */
			if (m == NULL)
				break;

			/*
			 * Unbusy page so it can be freed in zdestroy().  Make
			 * sure it is not on any queue and so can not be
			 * recycled under our feet.
			 */
			KKASSERT(m->queue == PQ_NONE);
			vm_page_flag_clear(m, PG_BUSY);

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((void *)zkva, PAGE_SIZE);
			KKASSERT(savezpc == z->zpagecount);
			savezpc++;
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL)
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
					    z->zkmmax == 0 ? 1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		}
		nitems = nbytes / z->zsize;
	}
	spin_lock_wr(&z->zlock);
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock_wr(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}
/*
 * Sysctl handler: report per-zone statistics (vm.zone).
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int len;
		int offset;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - curzone->zfreecnt),
			curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			return (error);
	}
	return (0);
}
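/*
 * The handler above backs the vm.zone sysctl registered below: reading
 * it from userland (e.g. "sysctl vm.zone") emits the header line
 * followed by one formatted row per registered zone, newest zones
 * first, since zinitna()/zbootinit() insert at the list head.
 */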
#if defined(INVARIANTS)

void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}

#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages, CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace, CTLFLAG_RD,
	&zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages, CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt safe pages allocated by zone");