/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.28 2008/01/23 17:35:48 nth Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>	/* XXX */

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING		32

#define	ZENTRY_FREE		0x12342378

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	spin_lock_wr(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zitems = ((void **) item)[0];
		z->zfreecnt--;
		z->znalloc++;
		spin_unlock_wr(&z->zlock);
	} else {
		spin_unlock_wr(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}
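
/*
 * Example of the PANICFAIL behavior described above (hypothetical zone
 * and structure names): a caller that cannot tolerate allocation
 * failure creates its zone with ZONE_PANICFAIL, after which zalloc()
 * either succeeds or panics, never returning NULL:
 *
 *	bar_zone = zinit("BAR", sizeof(struct bar), 0, ZONE_PANICFAIL, 2);
 *	bp = zalloc(bar_zone);		-- no NULL check required
 */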

/*
 * Free an item to the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 */
void
zfree(vm_zone_t z, void *item)
{
	spin_lock_wr(&z->zlock);
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock_wr(&z->zlock);
}

/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually a performance degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
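
/*
 * Illustration of the type-stability rule above, using a hypothetical
 * structure: while an item sits on the free list, its first two
 * longwords are overwritten with free-list linkage and (under
 * INVARIANTS) the ZENTRY_FREE magic, so only fields placed after them
 * survive a zfree()/zalloc() cycle:
 *
 *	struct foo {
 *		struct foo *foo_link;	-- clobbered while item is free
 *		long	    foo_magic;	-- clobbered while item is free
 *		int	    foo_state;	-- stable between allocations
 *	};
 */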

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		spin_init(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		LIST_INSERT_HEAD(&zlist, z, zlink);
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT,
						     z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}
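
/*
 * Usage sketch for the interrupt case (hypothetical names): the caller
 * supplies the zone header and, optionally, the backing VM object, and
 * nentries bounds the zone's size up front:
 *
 *	static struct vm_zone foozone;
 *	static struct vm_object foozone_obj;
 *
 *	zinitna(&foozone, &foozone_obj, "FOO", sizeof(struct foo),
 *		foo_max, ZONE_INTERRUPT, 1);
 */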

/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit() is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;

	spin_init(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	LIST_INSERT_HEAD(&zlist, z, zlink);
}
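
/*
 * Boot-time sketch (hypothetical names): a very early consumer hands
 * zbootinit() a statically allocated zone header and item array, since
 * neither kmalloc() nor kernel_map allocations are available yet:
 *
 *	static struct vm_zone mapzone;
 *	static struct vm_map map_init[MAX_KMAP];
 *
 *	zbootinit(&mapzone, "MAP", sizeof(struct vm_map),
 *		  map_init, MAX_KMAP);
 */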

/*
 * Release all resources owned by zone created with zinit().
 */
void
zdestroy(vm_zone_t z)
{
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	LIST_REMOVE(z, zlink);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva, z->zpagemax*PAGE_SIZE);
		atomic_subtract_int(&zone_kmem_kvaspace,
				    z->zpagemax*PAGE_SIZE);
		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  z->zalloc*PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}
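
/*
 * Lifecycle sketch (hypothetical zone): only zones created through
 * zinit() with ZONE_DESTROYABLE may be torn down again:
 *
 *	vm_zone_t tmp_zone;
 *
 *	tmp_zone = zinit("TMP", sizeof(struct tmp), 0, ZONE_DESTROYABLE, 4);
 *	...
 *	zdestroy(tmp_zone);
 */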

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 */
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			/* note: z might be modified due to blocking */
			if (m == NULL)
				break;

			/*
			 * Unbusy page so it can be freed in zdestroy().  Make
			 * sure it is not on any queue and so can not be
			 * recycled under our feet.
			 */
			KKASSERT(m->queue == PQ_NONE);
			vm_page_flag_clear(m, PG_BUSY);

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));	/* YYY */
			bzero((void *)zkva, PAGE_SIZE);
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL)
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
		else
			nbytes = 0;
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
					    z->zkmmax == 0 ? 1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock_wr(&z->zlock);
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock_wr(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * Report the zone list and per-zone statistics via sysctl.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int len;
		int offset;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - curzone->zfreecnt),
			curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			return (error);
	}
	return (0);
}

#if defined(INVARIANTS)

/*
 * Panic with a descriptive message on zone corruption.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic(msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt safe pages allocated by zone");