/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.19 2005/11/08 22:40:01 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>      /* XXX */
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");
#define ZONE_ERROR_INVALID      0
#define ZONE_ERROR_NOTFREE      1
#define ZONE_ERROR_ALREADYFREE  2

#define ZONE_ROUNDING   32

#define ZENTRY_FREE     0x12342378
static void *zget(vm_zone_t z);
#if defined(INVARIANTS)
static void zerror(int error);
#endif
/*
 * Return an item from the specified zone.  This function is interrupt/MP
 * thread safe and is non-blocking for ZONE_INTERRUPT zones.
 */
void *
zalloc(vm_zone_t z)
{
        void *item;

#ifdef INVARIANTS
        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);
#endif
        spin_lock(&z->zlock);
        if (z->zfreecnt > z->zfreemin) {
                item = z->zitems;
#ifdef INVARIANTS
                KASSERT(item != NULL, ("zitems unexpectedly NULL"));
                if (((void **)item)[1] != (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **)item)[1] = 0;
#endif
                z->zitems = ((void **)item)[0];
                z->zfreecnt--;
                z->znalloc++;
                spin_unlock(&z->zlock);
        } else {
                spin_unlock(&z->zlock);
                item = zget(z);
                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
                 * will always succeed.  If it doesn't, we panic here.
                 */
                if (item == NULL && (z->zflags & ZONE_PANICFAIL))
                        panic("zalloc(%s) failed", z->zname);
        }
        return (item);
}
/*
 * Free an item to the specified zone.  This function is interrupt/MP
 * thread safe and is non-blocking.
 */
void
zfree(vm_zone_t z, void *item)
{
        spin_lock(&z->zlock);
        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
        if (((void **)item)[1] == (void *)ZENTRY_FREE)
                zerror(ZONE_ERROR_ALREADYFREE);
        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
        z->zitems = item;
        z->zfreecnt++;
        spin_unlock(&z->zlock);
}
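/*
 * Illustrative sketch (not part of the original file): the intrusive
 * free-list encoding that zfree()/zalloc() use above.  While an item sits
 * on the free list, word 0 holds the next-free pointer and word 1 holds
 * the ZENTRY_FREE magic; both are overwritten once the item is handed
 * back to a caller.  The list head and function names are hypothetical.
 */
#ifdef VM_ZONE_EXAMPLE
static void *example_freelist;          /* hypothetical list head */

static void
example_push(void *item)
{
        ((void **)item)[0] = example_freelist;          /* link to old head */
        ((void **)item)[1] = (void *)ZENTRY_FREE;       /* mark as free */
        example_freelist = item;
}

static void *
example_pop(void)
{
        void *item = example_freelist;

        if (item != NULL) {
                example_freelist = ((void **)item)[0];  /* unlink */
                ((void **)item)[1] = 0;                 /* clear free magic */
        }
        return (item);
}
#endif  /* VM_ZONE_EXAMPLE */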
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
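/*
 * Illustrative sketch (not part of the original file): the type-stability
 * rule stated above.  Fields overlapping the first two longwords are
 * clobbered by the free-list linkage while an item is free; anything that
 * must survive a free/alloc cycle belongs after them.  The structure name
 * is hypothetical.
 */
#ifdef VM_ZONE_EXAMPLE
struct stable_example {
        void    *se_scratch[2]; /* overlaps the free-list words, not stable */
        int     se_stable_id;   /* past the first two longwords, stable */
};
#endif  /* VM_ZONE_EXAMPLE */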
static struct vm_zone *zlist;
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 * z            pointer to zone structure.
 * obj          pointer to VM object (opt).
 * name         name of zone.
 * size         size of zone entries.
 * nentries     number of zone entries allocated (only ZONE_INTERRUPT.)
 * flags        ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc       number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
        int nentries, int flags, int zalloc)
{
        int totsize;

        if ((z->zflags & ZONE_BOOT) == 0) {
                z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
                spin_init(&z->zlock);
                z->zfreecnt = 0;
                z->ztotal = 0;
                z->zmax = 0;
                z->zname = name;
                z->znalloc = 0;
                z->zitems = NULL;

                z->znext = zlist;
                zlist = z;
        }

        z->zflags |= flags;

        /*
         * If we cannot wait, allocate KVA space up front, and we will fill
         * in pages as needed.  This is particularly required when creating
         * an allocation space for map entries in kernel_map, because we
         * do not want to go into a recursion deadlock with
         * vm_map_entry_reserve().
         */
        if (z->zflags & ZONE_INTERRUPT) {
                totsize = round_page(z->zsize * nentries);
                zone_kmem_kvaspace += totsize;

                z->zkva = kmem_alloc_pageable(kernel_map, totsize);
                if (z->zkva == 0) {
                        zlist = z->znext;
                        return (0);
                }

                z->zpagemax = totsize / PAGE_SIZE;
                if (obj == NULL) {
                        z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
                } else {
                        z->zobj = obj;
                        _vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
                }
                z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
                z->zmax += nentries;
        } else {
                z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
                z->zmax = 0;
        }

        if (z->zsize > PAGE_SIZE)
                z->zfreemin = 1;
        else
                z->zfreemin = PAGE_SIZE / z->zsize;

        z->zpagecount = 0;
        if (zalloc)
                z->zalloc = zalloc;
        else
                z->zalloc = 1;

        /*
         * Populate the interrupt zone at creation time rather than
         * on first allocation, as this is a potentially long operation.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                void *buf;

                buf = zget(z);
                zfree(z, buf);
        }

        return (1);
}
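/*
 * Illustrative sketch (not part of the original file): creating a
 * ZONE_INTERRUPT zone with zinitna().  With ZONE_INTERRUPT the KVA for
 * all nentries items is reserved up front, so the zone's size is capped
 * by nentries.  The zone storage, names, and item parameters below are
 * hypothetical.
 */
#ifdef VM_ZONE_EXAMPLE
static struct vm_zone isr_item_zone_store;

static void
isr_zone_example(void)
{
        /* 64-byte items, at most 1024 of them, refill 4 pages at a time */
        isr_item_zone_store.zflags = 0;         /* as zinit() does */
        zinitna(&isr_item_zone_store, NULL, "isritem", 64,
                1024, ZONE_INTERRUPT, 4);
}
#endif  /* VM_ZONE_EXAMPLE */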
/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
        vm_zone_t z;

        z = (vm_zone_t)malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
        if (z == NULL)
                return (NULL);

        z->zflags = 0;
        if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
                free(z, M_ZONE);
                z = NULL;
        }
        return (z);
}
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
        int i;

        z->zname = name;
        z->zsize = size;
        z->zpagemax = 0;
        z->zobj = NULL;
        z->zflags = ZONE_BOOT;
        z->zfreemin = 0;
        z->zallocflag = 0;
        z->zpagecount = 0;
        z->zalloc = 0;
        z->znalloc = 0;
        spin_init(&z->zlock);

        bzero(item, nitems * z->zsize);
        z->zitems = NULL;
        for (i = 0; i < nitems; i++) {
                ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                z->zitems = item;
                item = (uint8_t *)item + z->zsize;
        }
        z->zfreecnt = nitems;
        z->zmax = nitems;
        z->ztotal = nitems;

        z->znext = zlist;
        zlist = z;
}
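/*
 * Illustrative sketch (not part of the original file): zbootinit() with
 * statically reserved storage, the pattern for zones that must exist
 * before the VM system can back zget() with real pages.  The structure,
 * names, and item count are hypothetical.
 */
#ifdef VM_ZONE_EXAMPLE
#define EARLY_NITEMS    64

struct early_thing {
        void    *et_link[2];    /* room for the free-list words */
        int     et_id;
};

static struct vm_zone early_zone_store;
static struct early_thing early_items[EARLY_NITEMS];

static void
early_zone_example(void)
{
        zbootinit(&early_zone_store, "earlything", sizeof(struct early_thing),
                  early_items, EARLY_NITEMS);
        /* later, once VM is up, zinitna() can upgrade this ZONE_BOOT zone */
}
#endif  /* VM_ZONE_EXAMPLE */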
/*
 * void *zalloc(vm_zone_t zone) --
 *      Returns an item from a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *      Frees an item back to a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 */
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
        int i;
        vm_page_t m;
        int nitems, nbytes;
        void *item;

        if (z == NULL)
                panic("zget: null zone");

        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Interrupt zones do not mess with the kernel_map, they
                 * simply populate an existing mapping.
                 */
                nbytes = z->zpagecount * PAGE_SIZE;
                nbytes -= nbytes % z->zsize;
                item = (char *)z->zkva + nbytes;
                for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
                     i++) {
                        vm_offset_t zkva;

                        m = vm_page_alloc(z->zobj, z->zpagecount,
                                          z->zallocflag);
                        /* note: z might be modified due to blocking */
                        if (m == NULL)
                                break;

                        zkva = z->zkva + z->zpagecount * PAGE_SIZE;
                        pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
                        bzero((void *)zkva, PAGE_SIZE);
                        z->zpagecount++;
                        zone_kmem_pages++;
                        vmstats.v_wire_count++;
                }
                nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
        } else if (z->zflags & ZONE_SPECIAL) {
                /*
                 * The special zone is the one used for vm_map_entry_t's.
                 * We have to avoid an infinite recursion in
                 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
                 * instead.  The map entries are pre-reserved by the kernel
                 * by vm_map_entry_reserve_cpu_init().
                 */
                nbytes = z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(kernel_map, nbytes, KM_KRESERVE);

                /* note: z might be modified due to blocking */
                if (item != NULL)
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                else
                        nbytes = 0;
                nitems = nbytes / z->zsize;
        } else {
                /*
                 * Otherwise allocate KVA from the kernel_map.
                 */
                nbytes = z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(kernel_map, nbytes, 0);

                /* note: z might be modified due to blocking */
                if (item != NULL)
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                else
                        nbytes = 0;
                nitems = nbytes / z->zsize;
        }

        spin_lock(&z->zlock);
        z->ztotal += nitems;

        /*
         * Save one for immediate allocation
         */
        if (nitems != 0) {
                nitems -= 1;
                for (i = 0; i < nitems; i++) {
                        ((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
                        ((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
                        z->zitems = item;
                        item = (uint8_t *)item + z->zsize;
                }
                z->zfreecnt += nitems;
                z->znalloc++;
        } else if (z->zfreecnt > 0) {
                item = z->zitems;
                z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
                if (((void **)item)[1] != (void *)ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **)item)[1] = 0;
#endif
                z->zfreecnt--;
                z->znalloc++;
        } else {
                item = NULL;
        }
        spin_unlock(&z->zlock);

        /*
         * A special zone may have used a kernel-reserved vm_map_entry.  If
         * so we have to be sure to recover our reserve so we don't run out.
         * We will panic if we run out.
         */
        if (z->zflags & ZONE_SPECIAL)
                vm_map_entry_reserve(0);

        return (item);
}
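/*
 * Illustrative sketch (not part of the original file): the carving
 * arithmetic zget() uses for interrupt zones.  The already-populated
 * region is rounded down to a whole number of items so new items start
 * on an item boundary; e.g. 2 pages (8192 bytes) of 96-byte items gives
 * 8192 - 8192 % 96 = 8160, leaving 32 slack bytes unused.  The function
 * name is hypothetical.
 */
#ifdef VM_ZONE_EXAMPLE
static int
carve_offset_example(int zpagecount, int zsize)
{
        int nbytes = zpagecount * PAGE_SIZE;

        nbytes -= nbytes % zsize;       /* first byte not yet carved */
        return (nbytes);
}
#endif  /* VM_ZONE_EXAMPLE */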
/*
 * Report zone statistics via sysctl.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        vm_zone_t curzone, nextzone;
        char tmpbuf[128];
        char tmpname[14];

        snprintf(tmpbuf, sizeof(tmpbuf),
            "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
        error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
        if (error)
                return (error);

        for (curzone = zlist; curzone; curzone = nextzone) {
                int i;
                int len;
                int offset;

                nextzone = curzone->znext;
                len = strlen(curzone->zname);
                if (len >= (sizeof(tmpname) - 1))
                        len = (sizeof(tmpname) - 1);
                for (i = 0; i < sizeof(tmpname) - 1; i++)
                        tmpname[i] = ' ';
                tmpname[i] = 0;
                memcpy(tmpname, curzone->zname, len);
                tmpname[len] = ':';
                offset = 0;
                if (curzone == zlist) {
                        offset = 1;
                        tmpbuf[0] = '\n';
                }

                snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
                    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
                    tmpname, curzone->zsize, curzone->zmax,
                    (curzone->ztotal - curzone->zfreecnt),
                    curzone->zfreecnt, curzone->znalloc);

                len = strlen((char *)tmpbuf);
                if (nextzone == NULL)
                        tmpbuf[len - 1] = 0;

                error = SYSCTL_OUT(req, tmpbuf, len);
                if (error)
                        return (error);
        }
        return (0);
}
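/*
 * Illustrative sketch (not part of the original file): the handler above
 * backs the "vm.zone" string sysctl, so the table can be read from
 * userland with sysctlbyname(3), or "sysctl vm.zone" from the shell.
 * This is a userland program, guarded out of the kernel build.
 */
#ifdef VM_ZONE_EXAMPLE_USERLAND
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        size_t len = 0;
        char *buf;

        /* first call sizes the buffer, second call fetches the table */
        if (sysctlbyname("vm.zone", NULL, &len, NULL, 0) != 0)
                return (1);
        buf = malloc(len);
        if (buf != NULL && sysctlbyname("vm.zone", buf, &len, NULL, 0) == 0)
                printf("%s\n", buf);
        free(buf);
        return (0);
}
#endif  /* VM_ZONE_EXAMPLE_USERLAND */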
#if defined(INVARIANTS)
static void
zerror(int error)
{
        char *msg;

        switch (error) {
        case ZONE_ERROR_INVALID:
                msg = "zone: invalid zone";
                break;
        case ZONE_ERROR_NOTFREE:
                msg = "zone: entry not free";
                break;
        case ZONE_ERROR_ALREADYFREE:
                msg = "zone: freeing free entry";
                break;
        default:
                msg = "zone: invalid error";
                break;
        }
        panic(msg);
}
#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
        NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
        CTLFLAG_RD, &zone_kmem_pages, 0,
        "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
        CTLFLAG_RD, &zone_kmem_kvaspace, 0,
        "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
        CTLFLAG_RD, &zone_kern_pages, 0,
        "Number of non-interrupt safe pages allocated by zone");