/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_objcache.c,v 1.20 2007/07/02 06:34:26 dillon Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/globaldata.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/objcache.h>
#include <sys/spinlock.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");
static MALLOC_DEFINE(M_OBJMAG, "objcache magazine", "Object Cache Magazine");

#define INITIAL_MAG_CAPACITY    64

struct magazine {
        int                     rounds;
        int                     capacity;
        int                     cleaning;
        SLIST_ENTRY(magazine)   nextmagazine;
        void                    *objects[];
};

SLIST_HEAD(magazinelist, magazine);
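
/*
 * A magazine is a simple LIFO stack of object pointers: objects[0..rounds-1]
 * hold the cached objects, objcache_get() pops objects[--rounds], and
 * objcache_put() pushes objects[rounds++].  A magazine is "full" when
 * rounds == capacity and "empty" when rounds == 0 (see the MAGAZINE_*
 * macros below).
 */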
/*
 * per-cluster cache of magazines
 *
 * All fields in this structure are protected by the spinlock.
 */
struct magazinedepot {
        /*
         * The per-cpu object caches only exchange completely full or
         * completely empty magazines with the depot layer, so we only
         * have to cache these two types of magazines.
         */
        struct magazinelist     fullmagazines;
        struct magazinelist     emptymagazines;
        int                     magcapacity;

        /* protect this structure */
        struct spinlock         spin;

        /* magazines not yet allocated towards limit */
        int                     unallocated_objects;

        /* infrequently used fields */
        int                     waiting;        /* waiting for another cpu to
                                                 * return a full magazine to
                                                 * the depot */
        int                     contested;      /* depot contention count */
};

/*
 * per-cpu object cache
 * All fields in this structure are protected by crit_enter().
 */
struct percpu_objcache {
        struct magazine *loaded_magazine;       /* active magazine */
        struct magazine *previous_magazine;     /* backup magazine */

        /* statistics */
        int             gets_cumulative;        /* total calls to get */
        int             gets_null;              /* objcache_get returned NULL */
        int             puts_cumulative;        /* total calls to put */
        int             puts_othercluster;      /* returned to other cluster */

        /* infrequently used fields */
        int             waiting;        /* waiting for a thread on this cpu to
                                         * return an obj to the per-cpu cache */
};

/* only until we have NUMA cluster topology information XXX */
#define MAXCLUSTERS 1
#define myclusterid 0
#define CLUSTER_OF(obj) 0

/*
 * Two-level object cache consisting of NUMA cluster-level depots of
 * fully loaded or completely empty magazines and cpu-level caches of
 * individual objects.
 */
struct objcache {
        char                    *name;

        /* object constructor and destructor from blank storage */
        objcache_ctor_fn        *ctor;
        objcache_dtor_fn        *dtor;
        void                    *privdata;

        /* interface to underlying allocator */
        objcache_alloc_fn       *alloc;
        objcache_free_fn        *free;
        void                    *allocator_args;

        SLIST_ENTRY(objcache)   oc_next;
        int                     exhausted;      /* oops */

        /* NUMA-cluster level caches */
        struct magazinedepot    depot[MAXCLUSTERS];

        struct percpu_objcache  cache_percpu[]; /* per-cpu caches */
};

static struct spinlock objcachelist_spin;
static SLIST_HEAD(objcachelist, objcache) allobjcaches;
static struct magazine *
mag_alloc(int capacity)
{
        struct magazine *mag;

        mag = kmalloc(__offsetof(struct magazine, objects[capacity]),
                      M_OBJMAG, M_INTWAIT | M_ZERO);
        mag->capacity = capacity;
        mag->rounds = 0;
        mag->cleaning = 0;
        return (mag);
}

/*
 * Utility routine for objects that don't require any de-construction.
 */

static void
null_dtor(void *obj, void *privdata)
{
        /* do nothing */
}

static boolean_t
null_ctor(void *obj, void *privdata, int ocflags)
{
        return TRUE;
}

/*
 * Create an object cache.
 */
struct objcache *
objcache_create(const char *name, int cluster_limit, int mag_capacity,
                objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *privdata,
                objcache_alloc_fn *alloc, objcache_free_fn *free,
                void *allocator_args)
{
        struct objcache *oc;
        struct magazinedepot *depot;
        int cpuid;
        int need;
        int factor;

        /* allocate object cache structure */
        oc = kmalloc(__offsetof(struct objcache, cache_percpu[ncpus]),
                     M_OBJCACHE, M_WAITOK | M_ZERO);
        oc->name = kstrdup(name, M_TEMP);
        oc->ctor = ctor ? ctor : null_ctor;
        oc->dtor = dtor ? dtor : null_dtor;
        oc->privdata = privdata;
        oc->free = free;
        oc->allocator_args = allocator_args;

        /* initialize depots */
        depot = &oc->depot[0];

        spin_init(&depot->spin);
        SLIST_INIT(&depot->fullmagazines);
        SLIST_INIT(&depot->emptymagazines);

        if (mag_capacity == 0)
                mag_capacity = INITIAL_MAG_CAPACITY;

        /*
         * The cluster_limit must be sufficient to have three magazines per
         * cpu.  If we have a lot of cpus the mag_capacity might just be
         * too big, reduce it if necessary.
         *
         * Each cpu can hold up to two magazines, with the remainder in the
         * depot.  If many objects are allocated fewer magazines are
         * available.  We have to make sure that each cpu has access to
         * free objects until the object cache hits 75% of its limit.
         */
        if (cluster_limit == 0) {
                depot->unallocated_objects = -1;
        } else {
                factor = 8;
                need = mag_capacity * ncpus * factor;
                if (cluster_limit < need && mag_capacity > 16) {
                        kprintf("objcache(%s): too small for ncpus"
                                ", adjusting mag_capacity %d->",
                                name, mag_capacity);
                        while (need > cluster_limit && mag_capacity > 16) {
                                mag_capacity >>= 1;
                                need = mag_capacity * ncpus * factor;
                        }
                        kprintf("%d\n", mag_capacity);
                }
                if (cluster_limit < need) {
                        kprintf("objcache(%s): too small for ncpus"
                                ", adjusting cluster_limit %d->%d\n",
                                name, cluster_limit, need);
                        cluster_limit = need;
                }
                depot->unallocated_objects = cluster_limit;
        }
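
        /*
         * Worked example: with ncpus = 4 and the default mag_capacity of
         * 64, need = 64 * 4 * 8 = 2048.  A requested cluster_limit of 500
         * halves mag_capacity 64->32->16 (need = 16 * 4 * 8 = 512), then
         * raises cluster_limit 500->512 to satisfy the minimum.
         */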
        depot->magcapacity = mag_capacity;
        oc->alloc = alloc;

        /* initialize per-cpu caches */
        for (cpuid = 0; cpuid < ncpus; cpuid++) {
                struct percpu_objcache *cache_percpu = &oc->cache_percpu[cpuid];

                cache_percpu->loaded_magazine = mag_alloc(mag_capacity);
                cache_percpu->previous_magazine = mag_alloc(mag_capacity);
        }
        spin_lock_wr(&objcachelist_spin);
        SLIST_INSERT_HEAD(&allobjcaches, oc, oc_next);
        spin_unlock_wr(&objcachelist_spin);

        return (oc);
}

struct objcache *
objcache_create_simple(malloc_type_t mtype, size_t objsize)
{
        struct objcache_malloc_args *margs;
        struct objcache *oc;

        margs = kmalloc(sizeof(*margs), M_OBJCACHE, M_WAITOK|M_ZERO);
        margs->objsize = objsize;
        margs->mtype = mtype;
        oc = objcache_create(mtype->ks_shortdesc, 0, 0,
                             NULL, NULL, NULL,
                             objcache_malloc_alloc, objcache_malloc_free,
                             margs);
        return (oc);
}

struct objcache *
objcache_create_mbacked(malloc_type_t mtype, size_t objsize,
                        int cluster_limit, int mag_capacity,
                        objcache_ctor_fn *ctor, objcache_dtor_fn *dtor,
                        void *privdata)
{
        struct objcache_malloc_args *margs;
        struct objcache *oc;

        margs = kmalloc(sizeof(*margs), M_OBJCACHE, M_WAITOK|M_ZERO);
        margs->objsize = objsize;
        margs->mtype = mtype;
        oc = objcache_create(mtype->ks_shortdesc,
                             cluster_limit, mag_capacity,
                             ctor, dtor, privdata,
                             objcache_malloc_alloc, objcache_malloc_free,
                             margs);
        return(oc);
}
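
/*
 * Example construction of a malloc-backed cache (illustrative sketch only;
 * "struct foo", "foo_zone", "foo_ctor", "foo_dtor" and M_FOO are
 * hypothetical caller-side names, not part of this file):
 *
 *      static struct objcache *foo_zone;
 *
 *      static boolean_t
 *      foo_ctor(void *obj, void *privdata, int ocflags)
 *      {
 *              bzero(obj, sizeof(struct foo));
 *              return (TRUE);
 *      }
 *
 *      static void
 *      foo_dtor(void *obj, void *privdata)
 *      {
 *              (release any resources acquired by foo_ctor here)
 *      }
 *
 *      foo_zone = objcache_create_mbacked(M_FOO, sizeof(struct foo),
 *                                         0, 0, foo_ctor, foo_dtor, NULL);
 *
 * Passing 0 for cluster_limit and mag_capacity selects an unlimited cache
 * with INITIAL_MAG_CAPACITY-sized magazines, as objcache_create() shows.
 */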
#define MAGAZINE_EMPTY(mag)     (mag->rounds == 0)
#define MAGAZINE_NOTEMPTY(mag)  (mag->rounds != 0)
#define MAGAZINE_FULL(mag)      (mag->rounds == mag->capacity)

#define swap(x, y)      ({ struct magazine *t = x; x = y; y = t; })

/*
 * Get an object from the object cache.
 *
 * WARNING!  ocflags are only used when we have to go to the underlying
 * allocator, so we cannot depend on flags such as M_ZERO.
 */
void *
objcache_get(struct objcache *oc, int ocflags)
{
        struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid];
        struct magazine *loadedmag;
        struct magazine *emptymag;
        void *obj;
        struct magazinedepot *depot;

        KKASSERT((ocflags & M_ZERO) == 0);
        crit_enter();
        ++cpucache->gets_cumulative;

retry:
        /*
         * Loaded magazine has an object.  This is the hot path.
         * It is lock-free and uses a critical section to block
         * out interrupt handlers on the same processor.
         */
        loadedmag = cpucache->loaded_magazine;
        if (MAGAZINE_NOTEMPTY(loadedmag)) {
                obj = loadedmag->objects[--loadedmag->rounds];
                crit_exit();
                return (obj);
        }

        /* Previous magazine has an object. */
        if (MAGAZINE_NOTEMPTY(cpucache->previous_magazine)) {
                KKASSERT(cpucache->previous_magazine->cleaning +
                         cpucache->loaded_magazine->cleaning == 0);
                swap(cpucache->loaded_magazine, cpucache->previous_magazine);
                loadedmag = cpucache->loaded_magazine;
                obj = loadedmag->objects[--loadedmag->rounds];
                crit_exit();
                return (obj);
        }

        /*
         * Both magazines empty.  Get a full magazine from the depot and
         * move one of the empty ones to the depot.
         *
         * Obtain the depot spinlock.
         *
         * NOTE: Beyond this point, M_* flags are handled via oc->alloc()
         */
        depot = &oc->depot[myclusterid];
        spin_lock_wr(&depot->spin);

        /*
         * Recheck the cpucache after obtaining the depot spinlock.  This
         * shouldn't be necessary now but don't take any chances.
         */
        if (MAGAZINE_NOTEMPTY(cpucache->loaded_magazine) ||
            MAGAZINE_NOTEMPTY(cpucache->previous_magazine)
        ) {
                spin_unlock_wr(&depot->spin);
                goto retry;
        }

        /* Check if depot has a full magazine. */
        if (!SLIST_EMPTY(&depot->fullmagazines)) {
                emptymag = cpucache->previous_magazine;
                cpucache->previous_magazine = cpucache->loaded_magazine;
                cpucache->loaded_magazine = SLIST_FIRST(&depot->fullmagazines);
                SLIST_REMOVE_HEAD(&depot->fullmagazines, nextmagazine);

                /*
                 * Return emptymag to the depot.
                 */
                KKASSERT(MAGAZINE_EMPTY(emptymag));
                SLIST_INSERT_HEAD(&depot->emptymagazines,
                                  emptymag, nextmagazine);
                spin_unlock_wr(&depot->spin);
                goto retry;
        }

        /*
         * The depot does not have any non-empty magazines.  If we have
         * not hit our object limit we can allocate a new object using
         * the back-end allocator.
         *
         * note: unallocated_objects can be initialized to -1, which has
         * the effect of removing any allocation limits.
         */
        if (depot->unallocated_objects) {
                --depot->unallocated_objects;
                spin_unlock_wr(&depot->spin);
                crit_exit();

                obj = oc->alloc(oc->allocator_args, ocflags);
                if (obj) {
                        if (oc->ctor(obj, oc->privdata, ocflags))
                                return (obj);
                        oc->free(obj, oc->allocator_args);
                        spin_lock_wr(&depot->spin);
                        ++depot->unallocated_objects;
                        spin_unlock_wr(&depot->spin);
                        if (depot->waiting)
                                wakeup(depot);
                        obj = NULL;
                }
                if (obj == NULL) {
                        crit_enter();
                        /*
                         * makes debugging easier when gets_cumulative does
                         * not include gets_null.
                         */
                        ++cpucache->gets_null;
                        --cpucache->gets_cumulative;
                        crit_exit();
                }
                return(obj);
        }
        if (oc->exhausted == 0) {
                kprintf("Warning, objcache(%s): Exhausted!\n", oc->name);
                oc->exhausted = 1;
        }

        /*
         * Otherwise block if allowed to.
         */
        if ((ocflags & (M_WAITOK|M_NULLOK)) == M_WAITOK) {
                ++cpucache->waiting;
                ++depot->waiting;
                msleep(depot, &depot->spin, 0, "objcache_get", 0);
                --cpucache->waiting;
                --depot->waiting;
                spin_unlock_wr(&depot->spin);
                goto retry;
        }

        /*
         * Otherwise fail
         */
        ++cpucache->gets_null;
        --cpucache->gets_cumulative;
        crit_exit();
        spin_unlock_wr(&depot->spin);
        return (NULL);
}
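
/*
 * Caller-side sketch (hypothetical "foo_zone"): when the cache is
 * exhausted, a plain M_WAITOK get sleeps until another cpu returns an
 * object, whereas M_WAITOK|M_NULLOK or M_NOWAIT fails with NULL.  The
 * returned object is recycled storage, so it must not be assumed zeroed:
 *
 *      obj = objcache_get(foo_zone, M_WAITOK);
 *      if (obj == NULL)
 *              (handle failure: the ctor can fail even with M_WAITOK)
 *      ...
 *      objcache_put(foo_zone, obj);
 */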
/*
 * Wrapper for malloc allocation routines.
 */
void *
objcache_malloc_alloc(void *allocator_args, int ocflags)
{
        struct objcache_malloc_args *alloc_args = allocator_args;

        return (kmalloc(alloc_args->objsize, alloc_args->mtype,
                        ocflags & OC_MFLAGS));
}

void
objcache_malloc_free(void *obj, void *allocator_args)
{
        struct objcache_malloc_args *alloc_args = allocator_args;

        kfree(obj, alloc_args->mtype);
}
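
/*
 * A custom back-end supplies the same pair of signatures.  Minimal sketch
 * (hypothetical "frame_alloc"/"frame_free", FRAME_SIZE and M_FRAME;
 * mirrors the wrappers above):
 *
 *      static void *
 *      frame_alloc(void *allocator_args, int ocflags)
 *      {
 *              return (kmalloc(FRAME_SIZE, M_FRAME, ocflags & OC_MFLAGS));
 *      }
 *
 *      static void
 *      frame_free(void *obj, void *allocator_args)
 *      {
 *              kfree(obj, M_FRAME);
 *      }
 *
 * Note the ocflags & OC_MFLAGS masking: only malloc-compatible flag bits
 * may be passed through to kmalloc().
 */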
/*
 * Wrapper for allocation policies that pre-allocate at initialization time
 * and don't do run-time allocation.
 */
void *
objcache_nop_alloc(void *allocator_args, int ocflags)
{
        return (NULL);
}

void
objcache_nop_free(void *obj, void *allocator_args)
{
}

/*
 * Return an object to the object cache.
 */
void
objcache_put(struct objcache *oc, void *obj)
{
        struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid];
        struct magazine *loadedmag;
        struct magazinedepot *depot;

        crit_enter();
        ++cpucache->puts_cumulative;

        if (CLUSTER_OF(obj) != myclusterid) {
#ifdef notyet
                /* use lazy IPI to send object to owning cluster XXX todo */
                ++cpucache->puts_othercluster;
                crit_exit();
                return;
#endif
        }

retry:
        /*
         * Free slot available in loaded magazine.  This is the hot path.
         * It is lock-free and uses a critical section to block out interrupt
         * handlers on the same processor.
         */
        loadedmag = cpucache->loaded_magazine;
        if (!MAGAZINE_FULL(loadedmag)) {
                loadedmag->objects[loadedmag->rounds++] = obj;
                if (cpucache->waiting)
                        wakeup_mycpu(&oc->depot[myclusterid]);
                crit_exit();
                return;
        }

        /*
         * Current magazine full, but previous magazine has room.  XXX
         */
        if (!MAGAZINE_FULL(cpucache->previous_magazine)) {
                KKASSERT(cpucache->previous_magazine->cleaning +
                         cpucache->loaded_magazine->cleaning == 0);
                swap(cpucache->loaded_magazine, cpucache->previous_magazine);
                loadedmag = cpucache->loaded_magazine;
                loadedmag->objects[loadedmag->rounds++] = obj;
                if (cpucache->waiting)
                        wakeup_mycpu(&oc->depot[myclusterid]);
                crit_exit();
                return;
        }

        /*
         * Both magazines full.  Get an empty magazine from the depot and
         * move a full loaded magazine to the depot.  Even though the
         * magazine may wind up with space available after we block on
         * the spinlock, we still cycle it through to avoid the non-optimal
         * corner-case.
         *
         * Obtain the depot spinlock.
         */
        depot = &oc->depot[myclusterid];
        spin_lock_wr(&depot->spin);

        /*
         * If an empty magazine is available in the depot, cycle it
         * through and retry.
         */
        if (!SLIST_EMPTY(&depot->emptymagazines)) {
                KKASSERT(cpucache->previous_magazine->cleaning +
                         cpucache->loaded_magazine->cleaning == 0);
                loadedmag = cpucache->previous_magazine;
                cpucache->previous_magazine = cpucache->loaded_magazine;
                cpucache->loaded_magazine = SLIST_FIRST(&depot->emptymagazines);
                SLIST_REMOVE_HEAD(&depot->emptymagazines, nextmagazine);

                /*
                 * Return loadedmag to the depot.  Due to blocking it may
                 * not be entirely full and could even be empty.
                 */
                if (MAGAZINE_EMPTY(loadedmag)) {
                        SLIST_INSERT_HEAD(&depot->emptymagazines,
                                          loadedmag, nextmagazine);
                        spin_unlock_wr(&depot->spin);
                } else {
                        SLIST_INSERT_HEAD(&depot->fullmagazines,
                                          loadedmag, nextmagazine);
                        spin_unlock_wr(&depot->spin);
                        if (depot->waiting)
                                wakeup(depot);
                }
                goto retry;
        }

        /*
         * An empty mag is not available.  This is a corner case which can
         * occur due to cpus holding partially full magazines.  Do not try
         * to allocate a mag, just free the object.
         */
        ++depot->unallocated_objects;
        spin_unlock_wr(&depot->spin);
        if (depot->waiting)
                wakeup(depot);
        crit_exit();
        oc->dtor(obj, oc->privdata);
        oc->free(obj, oc->allocator_args);
}
/*
 * The object is being put back into the cache, but the caller has
 * indicated that the object is not in any shape to be reused and should
 * be dtor'd immediately.
 */
void
objcache_dtor(struct objcache *oc, void *obj)
{
        struct magazinedepot *depot;

        depot = &oc->depot[myclusterid];
        spin_lock_wr(&depot->spin);
        ++depot->unallocated_objects;
        spin_unlock_wr(&depot->spin);
        if (depot->waiting)
                wakeup(depot);
        oc->dtor(obj, oc->privdata);
        oc->free(obj, oc->allocator_args);
}

/*
 * Deallocate all objects in a magazine and free the magazine if requested.
 * The magazine must already be disassociated from the depot.
 *
 * Must be called with a critical section held when called with a per-cpu
 * magazine.  The magazine may be indirectly modified during the loop.
 *
 * The number of objects freed is returned.
 */
static int
mag_purge(struct objcache *oc, struct magazine *mag, int freeit)
{
        int count;
        void *obj;

        count = 0;
        ++mag->cleaning;
        while (mag->rounds) {
                obj = mag->objects[--mag->rounds];
                oc->dtor(obj, oc->privdata);            /* MAY BLOCK */
                oc->free(obj, oc->allocator_args);      /* MAY BLOCK */
                ++count;

                /*
                 * Cycle for interrupts
                 */
                if ((count & 15) == 0) {
                        crit_exit();
                        crit_enter();
                }
        }
        --mag->cleaning;
        if (freeit)
                kfree(mag, M_OBJMAG);
        return(count);
}
/*
 * Disassociate zero or more magazines from a magazine list associated with
 * the depot, update the depot, and move the magazines to a temporary
 * list.
 *
 * The caller must check the depot for waiters and wake it up, typically
 * after disposing of the magazines this function loads onto the temporary
 * list.
 */
static void
maglist_disassociate(struct magazinedepot *depot, struct magazinelist *maglist,
                     struct magazinelist *tmplist, boolean_t purgeall)
{
        struct magazine *mag;

        while ((mag = SLIST_FIRST(maglist)) != NULL) {
                SLIST_REMOVE_HEAD(maglist, nextmagazine);
                SLIST_INSERT_HEAD(tmplist, mag, nextmagazine);
                depot->unallocated_objects += mag->rounds;
        }
}

/*
 * Deallocate all magazines and their contents from the passed temporary
 * list.  The magazines have already been accounted for by their depots.
 *
 * The total number of rounds freed is returned.  This number is typically
 * only used to determine whether a wakeup on the depot is needed or not.
 */
static int
maglist_purge(struct objcache *oc, struct magazinelist *maglist)
{
        struct magazine *mag;
        int count = 0;

        /*
         * can't use SLIST_FOREACH because blocking releases the depot
         * spinlock
         */
        while ((mag = SLIST_FIRST(maglist)) != NULL) {
                SLIST_REMOVE_HEAD(maglist, nextmagazine);
                count += mag_purge(oc, mag, TRUE);
        }
        return(count);
}
/*
 * De-allocates all magazines on the full and empty magazine lists.
 *
 * Because this routine is called with a spinlock held, the magazines
 * can only be disassociated and moved to a temporary list, not freed.
 *
 * The caller is responsible for freeing the magazines.
 */
static void
depot_disassociate(struct magazinedepot *depot, struct magazinelist *tmplist)
{
        maglist_disassociate(depot, &depot->fullmagazines, tmplist, TRUE);
        maglist_disassociate(depot, &depot->emptymagazines, tmplist, TRUE);
}

#ifdef notneeded
void
objcache_reclaim(struct objcache *oc)
{
        struct percpu_objcache *cache_percpu = &oc->cache_percpu[myclusterid];
        struct magazinedepot *depot = &oc->depot[myclusterid];
        struct magazinelist tmplist;
        int count;

        SLIST_INIT(&tmplist);
        crit_enter();
        count = mag_purge(oc, cache_percpu->loaded_magazine, FALSE);
        count += mag_purge(oc, cache_percpu->previous_magazine, FALSE);
        crit_exit();

        spin_lock_wr(&depot->spin);
        depot->unallocated_objects += count;
        depot_disassociate(depot, &tmplist);
        spin_unlock_wr(&depot->spin);
        count += maglist_purge(oc, &tmplist);
        if (count && depot->waiting)
                wakeup(depot);
}
#endif
/*
 * Try to free up some memory.  Return as soon as some free memory is found.
 * For each object cache on the reclaim list, first try the current per-cpu
 * cache, then the full magazine depot.
 */
boolean_t
objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags)
{
        struct objcache *oc;
        struct percpu_objcache *cpucache;
        struct magazinedepot *depot;
        struct magazinelist tmplist;
        int i, count;

        SLIST_INIT(&tmplist);

        for (i = 0; i < nlist; i++) {
                oc = oclist[i];
                cpucache = &oc->cache_percpu[mycpuid];
                depot = &oc->depot[myclusterid];

                crit_enter();
                count = mag_purge(oc, cpucache->loaded_magazine, FALSE);
                if (count == 0)
                        count += mag_purge(oc, cpucache->previous_magazine, FALSE);
                crit_exit();
                if (count > 0) {
                        spin_lock_wr(&depot->spin);
                        depot->unallocated_objects += count;
                        spin_unlock_wr(&depot->spin);
                        if (depot->waiting)
                                wakeup(depot);
                        return (TRUE);
                }
                spin_lock_wr(&depot->spin);
                maglist_disassociate(depot, &depot->fullmagazines,
                                     &tmplist, FALSE);
                spin_unlock_wr(&depot->spin);
                count = maglist_purge(oc, &tmplist);
                if (count > 0) {
                        if (depot->waiting)
                                wakeup(depot);
                        return (TRUE);
                }
        }
        return (FALSE);
}
/*
 * Destroy an object cache.  Must have no existing references.
 */
void
objcache_destroy(struct objcache *oc)
{
        struct percpu_objcache *cache_percpu;
        struct magazinedepot *depot;
        int clusterid, cpuid;
        struct magazinelist tmplist;

        SLIST_INIT(&tmplist);
        for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++) {
                depot = &oc->depot[clusterid];
                spin_lock_wr(&depot->spin);
                depot_disassociate(depot, &tmplist);
                spin_unlock_wr(&depot->spin);
        }
        maglist_purge(oc, &tmplist);

        for (cpuid = 0; cpuid < ncpus; cpuid++) {
                cache_percpu = &oc->cache_percpu[cpuid];

                mag_purge(oc, cache_percpu->loaded_magazine, TRUE);
                mag_purge(oc, cache_percpu->previous_magazine, TRUE);
                cache_percpu->loaded_magazine = NULL;
                cache_percpu->previous_magazine = NULL;
                /* don't bother adjusting depot->unallocated_objects */
        }

        kfree(oc->name, M_TEMP);
        kfree(oc, M_OBJCACHE);
}
#if 0
/*
 * Populate the per-cluster depot with elements from a linear block
 * of memory.  Must be called individually for each cluster.
 * Populated depots should not be destroyed.
 */
void
objcache_populate_linear(struct objcache *oc, void *base, int nelts, int size)
{
        char *p = base;
        char *end = (char *)base + (nelts * size);
        struct magazinedepot *depot = &oc->depot[myclusterid];
        struct magazine *emptymag = mag_alloc(depot->magcapacity);

        while (p < end) {
                emptymag->objects[emptymag->rounds++] = p;
                if (MAGAZINE_FULL(emptymag)) {
                        spin_lock_wr(&depot->spin);
                        SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag,
                                          nextmagazine);
                        depot->unallocated_objects += emptymag->rounds;
                        spin_unlock_wr(&depot->spin);
                        if (depot->waiting)
                                wakeup(depot);
                        emptymag = mag_alloc(depot->magcapacity);
                }
                p += size;
        }
        if (MAGAZINE_EMPTY(emptymag)) {
                mag_purge(oc, emptymag, TRUE);
        } else {
                spin_lock_wr(&depot->spin);
                SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag,
                                  nextmagazine);
                depot->unallocated_objects += emptymag->rounds;
                spin_unlock_wr(&depot->spin);
                if (depot->waiting)
                        wakeup(depot);
        }
}
#endif
#if 0
/*
 * Check depot contention once a minute.
 * 2 contested locks per second allowed.
 */
static int objcache_rebalance_period;
static const int objcache_contention_rate = 120;
static struct callout objcache_callout;

#define MAXMAGSIZE 512

/*
 * Check depot contention and increase magazine size if necessary.
 */
static void
objcache_timer(void *dummy)
{
        struct objcache *oc;
        struct magazinedepot *depot;
        struct magazinelist tmplist;

        /*
         * XXX we need to detect when an objcache is destroyed out from
         * under us XXX
         */

        SLIST_INIT(&tmplist);

        spin_lock_wr(&objcachelist_spin);
        SLIST_FOREACH(oc, &allobjcaches, oc_next) {
                depot = &oc->depot[myclusterid];
                if (depot->magcapacity < MAXMAGSIZE) {
                        if (depot->contested > objcache_contention_rate) {
                                spin_lock_wr(&depot->spin);
                                depot_disassociate(depot, &tmplist);
                                depot->magcapacity *= 2;
                                spin_unlock_wr(&depot->spin);
                                kprintf("objcache_timer: increasing cache %s"
                                        " magsize to %d, contested %d times\n",
                                        oc->name, depot->magcapacity,
                                        depot->contested);
                        }
                        depot->contested = 0;
                }
                spin_unlock_wr(&objcachelist_spin);
                if (maglist_purge(oc, &tmplist) > 0 && depot->waiting)
                        wakeup(depot);
                spin_lock_wr(&objcachelist_spin);
        }
        spin_unlock_wr(&objcachelist_spin);

        callout_reset(&objcache_callout, objcache_rebalance_period,
                      objcache_timer, NULL);
}
#endif
static void
objcache_init(void)
{
        spin_init(&objcachelist_spin);
#if 0
        callout_init(&objcache_callout);
        objcache_rebalance_period = 60 * hz;
        callout_reset(&objcache_callout, objcache_rebalance_period,
                      objcache_timer, NULL);
#endif
}
SYSINIT(objcache, SI_BOOT2_OBJCACHE, SI_ORDER_FIRST, objcache_init, 0);