/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.19 2005/11/08 22:40:01 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <sys/spinlock2.h>      /* XXX */

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID 0
#define ZONE_ERROR_NOTFREE 1
#define ZONE_ERROR_ALREADYFREE 2

#define ZONE_ROUNDING   32

#define ZENTRY_FREE     0x12342378
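
/*
 * Each free item carries a two-pointer header: word [0] links the item
 * into the zone's singly linked free list, and under INVARIANTS word [1]
 * holds the ZENTRY_FREE magic number so zalloc()/zfree() can detect
 * double frees and corrupted entries.
 */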

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is interrupt/MP
 * thread safe and is non-blocking for ZONE_INTERRUPT zones.
 */
void *
zalloc(vm_zone_t z)
{
        void *item;

#ifdef INVARIANTS
        if (z == NULL)
                zerror(ZONE_ERROR_INVALID);
#endif
        spin_lock(&z->zlock);
        if (z->zfreecnt > z->zfreemin) {
                item = z->zitems;
#ifdef INVARIANTS
                KASSERT(item != NULL, ("zitems unexpectedly NULL"));
                if (((void **) item)[1] != (void *) ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **) item)[1] = 0;
#endif
                z->zitems = ((void **) item)[0];
                z->zfreecnt--;
                z->znalloc++;
                spin_unlock(&z->zlock);
        } else {
                spin_unlock(&z->zlock);
                item = zget(z);
                /*
                 * PANICFAIL allows the caller to assume that the zalloc()
                 * will always succeed.  If it doesn't, we panic here.
                 */
                if (item == NULL && (z->zflags & ZONE_PANICFAIL))
                        panic("zalloc(%s) failed", z->zname);
        }
        return item;
}

/*
 * Free an item to the specified zone.  This function is interrupt/MP
 * thread safe and is non-blocking.
 */
void
zfree(vm_zone_t z, void *item)
{
        spin_lock(&z->zlock);
        ((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
        if (((void **) item)[1] == (void *) ZENTRY_FREE)
                zerror(ZONE_ERROR_ALREADYFREE);
        ((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
        z->zitems = item;
        z->zfreecnt++;
        spin_unlock(&z->zlock);
}

/*
 * This file implements a very simple zone allocator.  It is used
 * in lieu of the malloc allocator where it is needed or more optimal.
 *
 * Note that the initial implementation of this allocator had coloring,
 * and absolutely no improvement (actually a performance degradation)
 * resulted from it.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure may be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
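
/*
 * Illustrative sketch only (not part of the original file): the layout a
 * zone-allocated structure must follow.  While an item sits on the free
 * list the zone overwrites its first two pointer-sized words, so any
 * fields that must survive a free/alloc cycle belong after them.  The
 * struct and field names here are hypothetical.
 */
#if 0
struct example_entry {
        void    *ee_scratch[2]; /* clobbered while on the zone free list */
        int     ee_refs;        /* type-stable data starts here */
        char    ee_name[16];
};
#endif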

static struct vm_zone *zlist;
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual space a priori, and allocate
 * only pages when needed.
 *
 * Arguments:
 *      z               pointer to zone structure.
 *      obj             pointer to VM object (optional).
 *      name            name of zone.
 *      size            size of zone entries.
 *      nentries        number of zone entries allocated (ZONE_INTERRUPT only).
 *      flags           ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *      zalloc          number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the total size of the zone is
 * limited by the nentries argument.  The amount of allocatable memory is
 * unlimited if ZONE_INTERRUPT is not set.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
        int nentries, int flags, int zalloc)
{
        int totsize;

        if ((z->zflags & ZONE_BOOT) == 0) {
                z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
                spin_init(&z->zlock);
                z->zfreecnt = 0;
                z->ztotal = 0;
                z->zmax = 0;
                z->zname = name;
                z->znalloc = 0;
                z->zitems = NULL;

                z->znext = zlist;
                zlist = z;
        }

        z->zflags |= flags;

        /*
         * If we cannot wait, allocate KVA space up front, and we will fill
         * in pages as needed.  This is particularly required when creating
         * an allocation space for map entries in kernel_map, because we
         * do not want to go into a recursion deadlock with
         * vm_map_entry_reserve().
         */
        if (z->zflags & ZONE_INTERRUPT) {
                totsize = round_page(z->zsize * nentries);
                zone_kmem_kvaspace += totsize;

                z->zkva = kmem_alloc_pageable(kernel_map, totsize);
                if (z->zkva == 0) {
                        zlist = z->znext;
                        return 0;
                }

                z->zpagemax = totsize / PAGE_SIZE;
                if (obj == NULL) {
                        z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
                } else {
                        z->zobj = obj;
                        _vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
                }
                z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
                z->zmax += nentries;
        } else {
                z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
                z->zmax = 0;
        }

        if (z->zsize > PAGE_SIZE)
                z->zfreemin = 1;
        else
                z->zfreemin = PAGE_SIZE / z->zsize;

        z->zpagecount = 0;
        if (zalloc)
                z->zalloc = zalloc;
        else
                z->zalloc = 1;

        /*
         * Populate the interrupt zone at creation time rather than
         * on first allocation, as this is a potentially long operation.
         */
        if (z->zflags & ZONE_INTERRUPT) {
                void *buf;

                buf = zget(z);
                zfree(z, buf);
        }

        return 1;
}
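
/*
 * Illustrative sketch only (not part of the original file): calling
 * zinitna() directly with a caller-provided zone structure to create an
 * interrupt-safe zone.  The struct, names, and sizes are hypothetical;
 * the caller must clear zflags first, as zinit() does.
 */
#if 0
struct mbcluster {
        void    *mcl_hdr[2];    /* zone free-list header words */
        char    mcl_buf[2048];
};

static struct vm_zone mbzone;

static void
example_interrupt_zone_init(void)
{
        mbzone.zflags = 0;
        if (zinitna(&mbzone, NULL, "mbcluster", sizeof(struct mbcluster),
                    512, ZONE_INTERRUPT, 1) == 0)
                panic("example: cannot create mbcluster zone");
}
#endif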

/*
 * Subroutine, the same as zinitna() except that the zone data structure
 * is allocated automatically by malloc.  This routine should normally be
 * used, except in certain tricky startup conditions in the VM system --
 * then zbootinit and zinitna can be used.  zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
        vm_zone_t z;

        z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
        if (z == NULL)
                return NULL;

        z->zflags = 0;
        if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
                free(z, M_ZONE);
                return NULL;
        }

        return z;
}
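
/*
 * Illustrative sketch only (not part of the original file): typical zone
 * creation and use via zinit()/zalloc()/zfree().  The zone name, struct,
 * and function names are hypothetical.
 */
#if 0
struct exampleobj {
        void    *eo_hdr[2];     /* first two longwords owned by the zone */
        int     eo_value;
};

static vm_zone_t exampleobj_zone;

static void
exampleobj_init(void)
{
        /* nentries is ignored without ZONE_INTERRUPT; growth is unbounded */
        exampleobj_zone = zinit("exampleobj", sizeof(struct exampleobj),
                                0, ZONE_PANICFAIL, 1);
}

static void
exampleobj_use(void)
{
        struct exampleobj *obj;

        obj = zalloc(exampleobj_zone); /* cannot fail with ZONE_PANICFAIL */
        obj->eo_value = 1;
        zfree(exampleobj_zone, obj);
}
#endif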

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
        int i;

        z->zname = name;
        z->zsize = size;
        z->zpagemax = 0;
        z->zobj = NULL;
        z->zflags = ZONE_BOOT;
        z->zfreemin = 0;
        z->zallocflag = 0;
        z->zpagecount = 0;
        z->zalloc = 0;
        z->znalloc = 0;
        spin_init(&z->zlock);

        bzero(item, nitems * z->zsize);
        z->zitems = NULL;
        for (i = 0; i < nitems; i++) {
                ((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
                ((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
                z->zitems = item;
                item = (uint8_t *)item + z->zsize;
        }
        z->zfreecnt = nitems;
        z->zmax = nitems;
        z->ztotal = nitems;

        if (zlist == 0) {
                zlist = z;
        } else {
                z->znext = zlist;
                zlist = z;
        }
}
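
/*
 * Illustrative sketch only (not part of the original file): bootstrapping
 * a zone from a static buffer before VM startup, then finishing setup
 * with zinitna() once the VM system is up (zinitna() sees ZONE_BOOT and
 * keeps the boot-time items).  All names and sizes are hypothetical.
 */
#if 0
#define NBOOTITEMS      64

struct bootobj {
        void    *bo_hdr[2];     /* zone free-list header words */
        long    bo_data;
};

static struct vm_zone bootzone;
static struct bootobj bootitems[NBOOTITEMS];

static void
example_early_init(void)
{
        zbootinit(&bootzone, "bootobj", sizeof(struct bootobj),
                  bootitems, NBOOTITEMS);
}

static void
example_late_init(void)
{
        zinitna(&bootzone, NULL, "bootobj", sizeof(struct bootobj),
                0, 0, 1);
}
#endif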

/*
 * void *zalloc(vm_zone_t zone) --
 *      Returns an item from a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *      Frees an item back to a specified zone.  May not be called from a
 *      FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
        int i;
        vm_page_t m;
        int nitems, nbytes;
        void *item;

        if (z == NULL)
                panic("zget: null zone");

        if (z->zflags & ZONE_INTERRUPT) {
                /*
                 * Interrupt zones do not mess with the kernel_map, they
                 * simply populate an existing mapping.
                 */
                nbytes = z->zpagecount * PAGE_SIZE;
                nbytes -= nbytes % z->zsize;
                item = (char *) z->zkva + nbytes;
                for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
                     i++) {
                        vm_offset_t zkva;

                        m = vm_page_alloc(z->zobj, z->zpagecount,
                                          z->zallocflag);
                        /* note: z might be modified due to blocking */
                        if (m == NULL)
                                break;

                        zkva = z->zkva + z->zpagecount * PAGE_SIZE;
                        pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
                        bzero((void *)zkva, PAGE_SIZE);
                        z->zpagecount++;
                        zone_kmem_pages++;
                        vmstats.v_wire_count++;
                }
                nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
        } else if (z->zflags & ZONE_SPECIAL) {
                /*
                 * The special zone is the one used for vm_map_entry_t's.
                 * We have to avoid an infinite recursion in
                 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
                 * instead.  The map entries are pre-reserved by the kernel
                 * by vm_map_entry_reserve_cpu_init().
                 */
                nbytes = z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(kernel_map, nbytes, KM_KRESERVE);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        } else {
                /*
                 * Otherwise allocate KVA from the kernel_map.
                 */
                nbytes = z->zalloc * PAGE_SIZE;

                item = (void *)kmem_alloc3(kernel_map, nbytes, 0);

                /* note: z might be modified due to blocking */
                if (item != NULL) {
                        zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
                }
                nitems = nbytes / z->zsize;
        }

        spin_lock(&z->zlock);
        z->ztotal += nitems;

        /*
         * Save one for immediate allocation
         */
        if (nitems != 0) {
                nitems -= 1;
                for (i = 0; i < nitems; i++) {
                        ((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
                        ((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
                        z->zitems = item;
                        item = (uint8_t *)item + z->zsize;
                }
                z->zfreecnt += nitems;
                z->znalloc++;
        } else if (z->zfreecnt > 0) {
                item = z->zitems;
                z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
                if (((void **) item)[1] != (void *) ZENTRY_FREE)
                        zerror(ZONE_ERROR_NOTFREE);
                ((void **) item)[1] = 0;
#endif
                z->zfreecnt--;
                z->znalloc++;
        } else {
                item = NULL;
        }
        spin_unlock(&z->zlock);

        /*
         * A special zone may have used a kernel-reserved vm_map_entry.  If
         * so we have to be sure to recover our reserve so we don't run out.
         * We will panic if we run out.
         */
        if (z->zflags & ZONE_SPECIAL)
                vm_map_entry_reserve(0);

        return item;
}

static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
        int error = 0;
        vm_zone_t curzone, nextzone;
        char tmpbuf[128];
        char tmpname[14];

        snprintf(tmpbuf, sizeof(tmpbuf),
            "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
        error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
        if (error)
                return (error);

        for (curzone = zlist; curzone; curzone = nextzone) {
                int i;
                int len;
                int offset;

                nextzone = curzone->znext;
                len = strlen(curzone->zname);
                if (len >= (sizeof(tmpname) - 1))
                        len = (sizeof(tmpname) - 1);
                for (i = 0; i < sizeof(tmpname) - 1; i++)
                        tmpname[i] = ' ';
                tmpname[i] = 0;
                memcpy(tmpname, curzone->zname, len);
                tmpname[len] = ':';
                offset = 0;
                if (curzone == zlist) {
                        offset = 1;
                        tmpbuf[0] = '\n';
                }

                snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
                    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
                    tmpname, curzone->zsize, curzone->zmax,
                    (curzone->ztotal - curzone->zfreecnt),
                    curzone->zfreecnt, curzone->znalloc);

                len = strlen((char *)tmpbuf);
                if (nextzone == NULL)
                        tmpbuf[len - 1] = 0;

                error = SYSCTL_OUT(req, tmpbuf, len);

                if (error)
                        return (error);
        }
        return (0);
}

#if defined(INVARIANTS)
void
zerror(int error)
{
        char *msg;

        switch (error) {
        case ZONE_ERROR_INVALID:
                msg = "zone: invalid zone";
                break;
        case ZONE_ERROR_NOTFREE:
                msg = "zone: entry not free";
                break;
        case ZONE_ERROR_ALREADYFREE:
                msg = "zone: freeing free entry";
                break;
        default:
                msg = "zone: invalid error";
                break;
        }
        panic(msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
           NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
           CTLFLAG_RD, &zone_kmem_pages, 0,
           "Number of interrupt-safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
           CTLFLAG_RD, &zone_kmem_kvaspace, 0,
           "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
           CTLFLAG_RD, &zone_kern_pages, 0,
           "Number of non-interrupt-safe pages allocated by zone");