2 * Copyright (c) 2005 Jeffrey M. Hsu. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of The DragonFly Project nor the names of its
16 * contributors may be used to endorse or promote products derived
17 * from this software without specific, prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
29 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * $DragonFly: src/sys/kern/kern_objcache.c,v 1.23 2008/10/26 04:29:19 sephe Exp $
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/systm.h>
38 #include <sys/callout.h>
39 #include <sys/globaldata.h>
40 #include <sys/malloc.h>
41 #include <sys/queue.h>
42 #include <sys/objcache.h>
43 #include <sys/spinlock.h>
44 #include <sys/thread.h>
45 #include <sys/thread2.h>
46 #include <sys/spinlock2.h>
48 static MALLOC_DEFINE(M_OBJCACHE
, "objcache", "Object Cache");
49 static MALLOC_DEFINE(M_OBJMAG
, "objcache magazine", "Object Cache Magazine");
51 #define INITIAL_MAG_CAPACITY 64
56 SLIST_ENTRY(magazine
) nextmagazine
;
60 SLIST_HEAD(magazinelist
, magazine
);
62 #define MAGAZINE_HDRSIZE __offsetof(struct magazine, objects[0])
63 #define MAGAZINE_CAPACITY_MAX 128
64 #define MAGAZINE_CAPACITY_MIN 4
67 * per-cluster cache of magazines
69 * All fields in this structure are protected by the spinlock.
71 struct magazinedepot
{
73 * The per-cpu object caches only exchanges completely full or
74 * completely empty magazines with the depot layer, so only have
75 * to cache these two types of magazines.
77 struct magazinelist fullmagazines
;
78 struct magazinelist emptymagazines
;
81 /* protect this structure */
84 /* magazines not yet allocated towards limit */
85 int unallocated_objects
;
87 /* infrequently used fields */
88 int waiting
; /* waiting for another cpu to
89 * return a full magazine to
91 int contested
; /* depot contention count */
/*
 * per-cpu object cache
 * All fields in this structure are protected by crit_enter().
 */
struct percpu_objcache {
	struct magazine	*loaded_magazine;	/* active magazine */
	struct magazine	*previous_magazine;	/* backup magazine */

	/* statistics */
	int		gets_cumulative;	/* total calls to get */
	int		gets_null;		/* objcache_get returned NULL */
	int		puts_cumulative;	/* total calls to put */
	int		puts_othercluster;	/* returned to other cluster */

	/* infrequently used fields */
	int		waiting;	/* waiting for a thread on this cpu to
					 * return an obj to the per-cpu cache */
};
113 /* only until we have NUMA cluster topology information XXX */
114 #define MAXCLUSTERS 1
115 #define myclusterid 0
116 #define CLUSTER_OF(obj) 0
119 * Two-level object cache consisting of NUMA cluster-level depots of
120 * fully loaded or completely empty magazines and cpu-level caches of
121 * individual objects.
126 /* object constructor and destructor from blank storage */
127 objcache_ctor_fn
*ctor
;
128 objcache_dtor_fn
*dtor
;
131 /* interface to underlying allocator */
132 objcache_alloc_fn
*alloc
;
133 objcache_free_fn
*free
;
134 void *allocator_args
;
136 LIST_ENTRY(objcache
) oc_next
;
137 int exhausted
; /* oops */
139 /* NUMA-cluster level caches */
140 struct magazinedepot depot
[MAXCLUSTERS
];
142 struct percpu_objcache cache_percpu
[]; /* per-cpu caches */
145 static struct spinlock objcachelist_spin
;
146 static LIST_HEAD(objcachelist
, objcache
) allobjcaches
;
147 static int magazine_capmin
;
148 static int magazine_capmax
;
150 static struct magazine
*
151 mag_alloc(int capacity
)
153 struct magazine
*mag
;
156 size
= __offsetof(struct magazine
, objects
[capacity
]);
157 KASSERT(size
> 0 && (size
& __VM_CACHELINE_MASK
) == 0,
158 ("magazine size is not multiple cache line size"));
160 mag
= kmalloc_cachealign(size
, M_OBJMAG
, M_INTWAIT
| M_ZERO
);
161 mag
->capacity
= capacity
;
167 mag_capacity_align(int mag_capacity
)
171 mag_size
= __VM_CACHELINE_ALIGN(
172 __offsetof(struct magazine
, objects
[mag_capacity
]));
173 mag_capacity
= (mag_size
- MAGAZINE_HDRSIZE
) / sizeof(void *);
179 * Utility routine for objects that don't require any de-construction.
183 null_dtor(void *obj
, void *privdata
)
189 null_ctor(void *obj
, void *privdata
, int ocflags
)
195 * Create an object cache.
198 objcache_create(const char *name
, int cluster_limit
, int nom_cache
,
199 objcache_ctor_fn
*ctor
, objcache_dtor_fn
*dtor
, void *privdata
,
200 objcache_alloc_fn
*alloc
, objcache_free_fn
*free
,
201 void *allocator_args
)
204 struct magazinedepot
*depot
;
211 * Allocate object cache structure
213 oc
= kmalloc_cachealign(
214 __offsetof(struct objcache
, cache_percpu
[ncpus
]),
215 M_OBJCACHE
, M_WAITOK
| M_ZERO
);
216 oc
->name
= kstrdup(name
, M_TEMP
);
217 oc
->ctor
= ctor
? ctor
: null_ctor
;
218 oc
->dtor
= dtor
? dtor
: null_dtor
;
219 oc
->privdata
= privdata
;
222 oc
->allocator_args
= allocator_args
;
225 * Initialize depot list(s).
227 depot
= &oc
->depot
[0];
229 spin_init(&depot
->spin
);
230 SLIST_INIT(&depot
->fullmagazines
);
231 SLIST_INIT(&depot
->emptymagazines
);
234 * Figure out the nominal number of free objects to cache and
235 * the magazine capacity. By default we want to cache up to
236 * half the cluster_limit. If there is no cluster_limit then
237 * we want to cache up to 128 objects.
240 nom_cache
= cluster_limit
/ 2;
241 if (cluster_limit
&& nom_cache
> cluster_limit
)
242 nom_cache
= cluster_limit
;
244 nom_cache
= INITIAL_MAG_CAPACITY
* 2;
247 * Magazine capacity for 2 active magazines per cpu plus 2
248 * magazines in the depot.
250 mag_capacity
= mag_capacity_align(nom_cache
/ (ncpus
+ 1) / 2 + 1);
251 if (mag_capacity
> magazine_capmax
)
252 mag_capacity
= magazine_capmax
;
253 else if (mag_capacity
< magazine_capmin
)
254 mag_capacity
= magazine_capmin
;
255 depot
->magcapacity
= mag_capacity
;
258 * The cluster_limit must be sufficient to have two magazines per
259 * cpu plus at least two magazines in the depot. However, because
260 * partial magazines can stay on the cpus what we really need here
261 * is to specify the number of extra magazines we allocate for the
264 if (cluster_limit
== 0) {
265 depot
->unallocated_objects
= -1;
267 depot
->unallocated_objects
= ncpus
* mag_capacity
* 2 +
272 * Initialize per-cpu caches
274 for (cpuid
= 0; cpuid
< ncpus
; cpuid
++) {
275 struct percpu_objcache
*cache_percpu
= &oc
->cache_percpu
[cpuid
];
277 cache_percpu
->loaded_magazine
= mag_alloc(mag_capacity
);
278 cache_percpu
->previous_magazine
= mag_alloc(mag_capacity
);
282 * Compute how many empty magazines to place in the depot. This
283 * determines the retained cache size and is based on nom_cache.
285 * The actual cache size is larger because there are two magazines
286 * for each cpu as well but those can be in any fill state so we
287 * just can't count them.
289 * There is a minimum of two magazines in the depot.
291 nmagdepot
= nom_cache
/ mag_capacity
+ 1;
295 kprintf("ndepotmags=%-3d x mag_cap=%-3d for %s\n",
296 nmagdepot
, mag_capacity
, name
);
300 * Put empty magazines in depot
302 for (i
= 0; i
< nmagdepot
; i
++) {
303 struct magazine
*mag
= mag_alloc(mag_capacity
);
304 SLIST_INSERT_HEAD(&depot
->emptymagazines
, mag
, nextmagazine
);
307 spin_lock(&objcachelist_spin
);
308 LIST_INSERT_HEAD(&allobjcaches
, oc
, oc_next
);
309 spin_unlock(&objcachelist_spin
);
315 objcache_create_simple(malloc_type_t mtype
, size_t objsize
)
317 struct objcache_malloc_args
*margs
;
320 margs
= kmalloc(sizeof(*margs
), M_OBJCACHE
, M_WAITOK
|M_ZERO
);
321 margs
->objsize
= objsize
;
322 margs
->mtype
= mtype
;
323 oc
= objcache_create(mtype
->ks_shortdesc
, 0, 0,
325 objcache_malloc_alloc
, objcache_malloc_free
,
331 objcache_create_mbacked(malloc_type_t mtype
, size_t objsize
,
332 int cluster_limit
, int nom_cache
,
333 objcache_ctor_fn
*ctor
, objcache_dtor_fn
*dtor
,
336 struct objcache_malloc_args
*margs
;
339 margs
= kmalloc(sizeof(*margs
), M_OBJCACHE
, M_WAITOK
|M_ZERO
);
340 margs
->objsize
= objsize
;
341 margs
->mtype
= mtype
;
342 oc
= objcache_create(mtype
->ks_shortdesc
,
343 cluster_limit
, nom_cache
,
344 ctor
, dtor
, privdata
,
345 objcache_malloc_alloc
, objcache_malloc_free
,
/*
 * Magazine fill-state tests.  Arguments are parenthesized so the macros
 * remain correct for any pointer-valued expression (e.g. *magp).
 */
#define MAGAZINE_EMPTY(mag)	((mag)->rounds == 0)
#define MAGAZINE_NOTEMPTY(mag)	((mag)->rounds != 0)
#define MAGAZINE_FULL(mag)	((mag)->rounds == (mag)->capacity)

/* Exchange two magazine pointers (GCC statement expression). */
#define swap(x, y)	({ struct magazine *t = x; x = y; y = t; })
358 * Get an object from the object cache.
360 * WARNING! ocflags are only used when we have to go to the underlying
361 * allocator, so we cannot depend on flags such as M_ZERO.
364 objcache_get(struct objcache
*oc
, int ocflags
)
366 struct percpu_objcache
*cpucache
= &oc
->cache_percpu
[mycpuid
];
367 struct magazine
*loadedmag
;
368 struct magazine
*emptymag
;
370 struct magazinedepot
*depot
;
372 KKASSERT((ocflags
& M_ZERO
) == 0);
374 ++cpucache
->gets_cumulative
;
378 * Loaded magazine has an object. This is the hot path.
379 * It is lock-free and uses a critical section to block
380 * out interrupt handlers on the same processor.
382 loadedmag
= cpucache
->loaded_magazine
;
383 if (MAGAZINE_NOTEMPTY(loadedmag
)) {
384 obj
= loadedmag
->objects
[--loadedmag
->rounds
];
389 /* Previous magazine has an object. */
390 if (MAGAZINE_NOTEMPTY(cpucache
->previous_magazine
)) {
391 swap(cpucache
->loaded_magazine
, cpucache
->previous_magazine
);
392 loadedmag
= cpucache
->loaded_magazine
;
393 obj
= loadedmag
->objects
[--loadedmag
->rounds
];
399 * Both magazines empty. Get a full magazine from the depot and
400 * move one of the empty ones to the depot.
402 * Obtain the depot spinlock.
404 * NOTE: Beyond this point, M_* flags are handled via oc->alloc()
406 depot
= &oc
->depot
[myclusterid
];
407 spin_lock(&depot
->spin
);
410 * Recheck the cpucache after obtaining the depot spinlock. This
411 * shouldn't be necessary now but don't take any chances.
413 if (MAGAZINE_NOTEMPTY(cpucache
->loaded_magazine
) ||
414 MAGAZINE_NOTEMPTY(cpucache
->previous_magazine
)
416 spin_unlock(&depot
->spin
);
420 /* Check if depot has a full magazine. */
421 if (!SLIST_EMPTY(&depot
->fullmagazines
)) {
422 emptymag
= cpucache
->previous_magazine
;
423 cpucache
->previous_magazine
= cpucache
->loaded_magazine
;
424 cpucache
->loaded_magazine
= SLIST_FIRST(&depot
->fullmagazines
);
425 SLIST_REMOVE_HEAD(&depot
->fullmagazines
, nextmagazine
);
428 * Return emptymag to the depot.
430 KKASSERT(MAGAZINE_EMPTY(emptymag
));
431 SLIST_INSERT_HEAD(&depot
->emptymagazines
,
432 emptymag
, nextmagazine
);
433 spin_unlock(&depot
->spin
);
438 * The depot does not have any non-empty magazines. If we have
439 * not hit our object limit we can allocate a new object using
440 * the back-end allocator.
442 * note: unallocated_objects can be initialized to -1, which has
443 * the effect of removing any allocation limits.
445 if (depot
->unallocated_objects
) {
446 --depot
->unallocated_objects
;
447 spin_unlock(&depot
->spin
);
450 obj
= oc
->alloc(oc
->allocator_args
, ocflags
);
452 if (oc
->ctor(obj
, oc
->privdata
, ocflags
))
454 oc
->free(obj
, oc
->allocator_args
);
458 spin_lock(&depot
->spin
);
459 ++depot
->unallocated_objects
;
460 spin_unlock(&depot
->spin
);
466 * makes debugging easier when gets_cumulative does
467 * not include gets_null.
469 ++cpucache
->gets_null
;
470 --cpucache
->gets_cumulative
;
475 if (oc
->exhausted
== 0) {
476 kprintf("Warning, objcache(%s): Exhausted!\n", oc
->name
);
481 * Otherwise block if allowed to.
483 if ((ocflags
& (M_WAITOK
|M_NULLOK
)) == M_WAITOK
) {
486 ssleep(depot
, &depot
->spin
, 0, "objcache_get", 0);
489 spin_unlock(&depot
->spin
);
496 ++cpucache
->gets_null
;
497 --cpucache
->gets_cumulative
;
499 spin_unlock(&depot
->spin
);
504 * Wrapper for malloc allocation routines.
507 objcache_malloc_alloc(void *allocator_args
, int ocflags
)
509 struct objcache_malloc_args
*alloc_args
= allocator_args
;
511 return (kmalloc(alloc_args
->objsize
, alloc_args
->mtype
,
512 ocflags
& OC_MFLAGS
));
516 objcache_malloc_free(void *obj
, void *allocator_args
)
518 struct objcache_malloc_args
*alloc_args
= allocator_args
;
520 kfree(obj
, alloc_args
->mtype
);
524 * Wrapper for allocation policies that pre-allocate at initialization time
525 * and don't do run-time allocation.
528 objcache_nop_alloc(void *allocator_args
, int ocflags
)
534 objcache_nop_free(void *obj
, void *allocator_args
)
539 * Return an object to the object cache.
542 objcache_put(struct objcache
*oc
, void *obj
)
544 struct percpu_objcache
*cpucache
= &oc
->cache_percpu
[mycpuid
];
545 struct magazine
*loadedmag
;
546 struct magazinedepot
*depot
;
549 ++cpucache
->puts_cumulative
;
551 if (CLUSTER_OF(obj
) != myclusterid
) {
553 /* use lazy IPI to send object to owning cluster XXX todo */
554 ++cpucache
->puts_othercluster
;
562 * Free slot available in loaded magazine. This is the hot path.
563 * It is lock-free and uses a critical section to block out interrupt
564 * handlers on the same processor.
566 loadedmag
= cpucache
->loaded_magazine
;
567 if (!MAGAZINE_FULL(loadedmag
)) {
568 loadedmag
->objects
[loadedmag
->rounds
++] = obj
;
569 if (cpucache
->waiting
)
570 wakeup_mycpu(&oc
->depot
[myclusterid
]);
576 * Current magazine full, but previous magazine has room. XXX
578 if (!MAGAZINE_FULL(cpucache
->previous_magazine
)) {
579 swap(cpucache
->loaded_magazine
, cpucache
->previous_magazine
);
580 loadedmag
= cpucache
->loaded_magazine
;
581 loadedmag
->objects
[loadedmag
->rounds
++] = obj
;
582 if (cpucache
->waiting
)
583 wakeup_mycpu(&oc
->depot
[myclusterid
]);
589 * Both magazines full. Get an empty magazine from the depot and
590 * move a full loaded magazine to the depot. Even though the
591 * magazine may wind up with space available after we block on
592 * the spinlock, we still cycle it through to avoid the non-optimal
595 * Obtain the depot spinlock.
597 depot
= &oc
->depot
[myclusterid
];
598 spin_lock(&depot
->spin
);
601 * If an empty magazine is available in the depot, cycle it
604 if (!SLIST_EMPTY(&depot
->emptymagazines
)) {
605 loadedmag
= cpucache
->previous_magazine
;
606 cpucache
->previous_magazine
= cpucache
->loaded_magazine
;
607 cpucache
->loaded_magazine
= SLIST_FIRST(&depot
->emptymagazines
);
608 SLIST_REMOVE_HEAD(&depot
->emptymagazines
, nextmagazine
);
611 * Return loadedmag to the depot. Due to blocking it may
612 * not be entirely full and could even be empty.
614 if (MAGAZINE_EMPTY(loadedmag
)) {
615 SLIST_INSERT_HEAD(&depot
->emptymagazines
,
616 loadedmag
, nextmagazine
);
617 spin_unlock(&depot
->spin
);
619 SLIST_INSERT_HEAD(&depot
->fullmagazines
,
620 loadedmag
, nextmagazine
);
621 spin_unlock(&depot
->spin
);
629 * An empty mag is not available. This is a corner case which can
630 * occur due to cpus holding partially full magazines. Do not try
631 * to allocate a mag, just free the object.
633 ++depot
->unallocated_objects
;
634 spin_unlock(&depot
->spin
);
638 oc
->dtor(obj
, oc
->privdata
);
639 oc
->free(obj
, oc
->allocator_args
);
643 * The object is being put back into the cache, but the caller has
644 * indicated that the object is not in any shape to be reused and should
645 * be dtor'd immediately.
648 objcache_dtor(struct objcache
*oc
, void *obj
)
650 struct magazinedepot
*depot
;
652 depot
= &oc
->depot
[myclusterid
];
653 spin_lock(&depot
->spin
);
654 ++depot
->unallocated_objects
;
655 spin_unlock(&depot
->spin
);
658 oc
->dtor(obj
, oc
->privdata
);
659 oc
->free(obj
, oc
->allocator_args
);
663 * Deallocate all objects in a magazine and free the magazine if requested.
664 * When freeit is TRUE the magazine must already be disassociated from the
667 * Must be called with a critical section held when called with a per-cpu
668 * magazine. The magazine may be indirectly modified during the loop.
670 * If the magazine moves during a dtor the operation is aborted. This is
671 * only allowed when freeit is FALSE.
673 * The number of objects freed is returned.
676 mag_purge(struct objcache
*oc
, struct magazine
**magp
, int freeit
)
678 struct magazine
*mag
= *magp
;
683 while (mag
->rounds
) {
684 obj
= mag
->objects
[--mag
->rounds
];
685 oc
->dtor(obj
, oc
->privdata
); /* MAY BLOCK */
686 oc
->free(obj
, oc
->allocator_args
); /* MAY BLOCK */
690 * Cycle for interrupts.
692 if ((count
& 15) == 0) {
698 * mag may have become invalid either due to dtor/free
699 * blocking or interrupt cycling, do not derefernce it
703 kprintf("mag_purge: mag ripped out\n");
708 KKASSERT(*magp
== mag
);
710 kfree(mag
, M_OBJMAG
);
716 * Disassociate zero or more magazines from a magazine list associated with
717 * the depot, update the depot, and move the magazines to a temporary
720 * The caller must check the depot for waiters and wake it up, typically
721 * after disposing of the magazines this function loads onto the temporary
725 maglist_disassociate(struct magazinedepot
*depot
, struct magazinelist
*maglist
,
726 struct magazinelist
*tmplist
, boolean_t purgeall
)
728 struct magazine
*mag
;
730 while ((mag
= SLIST_FIRST(maglist
)) != NULL
) {
731 SLIST_REMOVE_HEAD(maglist
, nextmagazine
);
732 SLIST_INSERT_HEAD(tmplist
, mag
, nextmagazine
);
733 depot
->unallocated_objects
+= mag
->rounds
;
738 * Deallocate all magazines and their contents from the passed temporary
739 * list. The magazines have already been accounted for by their depots.
741 * The total number of rounds freed is returned. This number is typically
742 * only used to determine whether a wakeup on the depot is needed or not.
745 maglist_purge(struct objcache
*oc
, struct magazinelist
*maglist
)
747 struct magazine
*mag
;
751 * can't use SLIST_FOREACH because blocking releases the depot
755 while ((mag
= SLIST_FIRST(maglist
)) != NULL
) {
756 SLIST_REMOVE_HEAD(maglist
, nextmagazine
);
757 count
+= mag_purge(oc
, &mag
, TRUE
);
764 * De-allocates all magazines on the full and empty magazine lists.
766 * Because this routine is called with a spinlock held, the magazines
767 * can only be disassociated and moved to a temporary list, not freed.
769 * The caller is responsible for freeing the magazines.
772 depot_disassociate(struct magazinedepot
*depot
, struct magazinelist
*tmplist
)
774 maglist_disassociate(depot
, &depot
->fullmagazines
, tmplist
, TRUE
);
775 maglist_disassociate(depot
, &depot
->emptymagazines
, tmplist
, TRUE
);
780 objcache_reclaim(struct objcache
*oc
)
782 struct percpu_objcache
*cache_percpu
= &oc
->cache_percpu
[myclusterid
];
783 struct magazinedepot
*depot
= &oc
->depot
[myclusterid
];
784 struct magazinelist tmplist
;
787 SLIST_INIT(&tmplist
);
789 count
= mag_purge(oc
, &cache_percpu
->loaded_magazine
, FALSE
);
790 count
+= mag_purge(oc
, &cache_percpu
->previous_magazine
, FALSE
);
793 spin_lock(&depot
->spin
);
794 depot
->unallocated_objects
+= count
;
795 depot_disassociate(depot
, &tmplist
);
796 spin_unlock(&depot
->spin
);
797 count
+= maglist_purge(oc
, &tmplist
);
798 if (count
&& depot
->waiting
)
804 * Try to free up some memory. Return as soon as some free memory is found.
805 * For each object cache on the reclaim list, first try the current per-cpu
806 * cache, then the full magazine depot.
809 objcache_reclaimlist(struct objcache
*oclist
[], int nlist
, int ocflags
)
812 struct percpu_objcache
*cpucache
;
813 struct magazinedepot
*depot
;
814 struct magazinelist tmplist
;
817 kprintf("objcache_reclaimlist\n");
819 SLIST_INIT(&tmplist
);
821 for (i
= 0; i
< nlist
; i
++) {
823 cpucache
= &oc
->cache_percpu
[mycpuid
];
824 depot
= &oc
->depot
[myclusterid
];
827 count
= mag_purge(oc
, &cpucache
->loaded_magazine
, FALSE
);
829 count
+= mag_purge(oc
, &cpucache
->previous_magazine
, FALSE
);
832 spin_lock(&depot
->spin
);
833 depot
->unallocated_objects
+= count
;
834 spin_unlock(&depot
->spin
);
839 spin_lock(&depot
->spin
);
840 maglist_disassociate(depot
, &depot
->fullmagazines
,
842 spin_unlock(&depot
->spin
);
843 count
= maglist_purge(oc
, &tmplist
);
854 * Destroy an object cache. Must have no existing references.
857 objcache_destroy(struct objcache
*oc
)
859 struct percpu_objcache
*cache_percpu
;
860 struct magazinedepot
*depot
;
861 int clusterid
, cpuid
;
862 struct magazinelist tmplist
;
864 spin_lock(&objcachelist_spin
);
865 LIST_REMOVE(oc
, oc_next
);
866 spin_unlock(&objcachelist_spin
);
868 SLIST_INIT(&tmplist
);
869 for (clusterid
= 0; clusterid
< MAXCLUSTERS
; clusterid
++) {
870 depot
= &oc
->depot
[clusterid
];
871 spin_lock(&depot
->spin
);
872 depot_disassociate(depot
, &tmplist
);
873 spin_unlock(&depot
->spin
);
875 maglist_purge(oc
, &tmplist
);
877 for (cpuid
= 0; cpuid
< ncpus
; cpuid
++) {
878 cache_percpu
= &oc
->cache_percpu
[cpuid
];
881 mag_purge(oc
, &cache_percpu
->loaded_magazine
, TRUE
);
882 mag_purge(oc
, &cache_percpu
->previous_magazine
, TRUE
);
884 cache_percpu
->loaded_magazine
= NULL
;
885 cache_percpu
->previous_magazine
= NULL
;
886 /* don't bother adjusting depot->unallocated_objects */
889 kfree(oc
->name
, M_TEMP
);
890 kfree(oc
, M_OBJCACHE
);
895 * Populate the per-cluster depot with elements from a linear block
896 * of memory. Must be called for individually for each cluster.
897 * Populated depots should not be destroyed.
900 objcache_populate_linear(struct objcache
*oc
, void *base
, int nelts
, int size
)
903 char *end
= (char *)base
+ (nelts
* size
);
904 struct magazinedepot
*depot
= &oc
->depot
[myclusterid
];
905 struct magazine
*emptymag
= mag_alloc(depot
->magcapcity
);
908 emptymag
->objects
[emptymag
->rounds
++] = p
;
909 if (MAGAZINE_FULL(emptymag
)) {
910 spin_lock_wr(&depot
->spin
);
911 SLIST_INSERT_HEAD(&depot
->fullmagazines
, emptymag
,
913 depot
->unallocated_objects
+= emptymag
->rounds
;
914 spin_unlock_wr(&depot
->spin
);
917 emptymag
= mag_alloc(depot
->magcapacity
);
921 if (MAGAZINE_EMPTY(emptymag
)) {
923 mag_purge(oc
, &emptymag
, TRUE
);
926 spin_lock_wr(&depot
->spin
);
927 SLIST_INSERT_HEAD(&depot
->fullmagazines
, emptymag
,
929 depot
->unallocated_objects
+= emptymag
->rounds
;
930 spin_unlock_wr(&depot
->spin
);
933 emptymag
= mag_alloc(depot
->magcapacity
);
940 * Check depot contention once a minute.
941 * 2 contested locks per second allowed.
943 static int objcache_rebalance_period
;
944 static const int objcache_contention_rate
= 120;
945 static struct callout objcache_callout
;
947 #define MAXMAGSIZE 512
950 * Check depot contention and increase magazine size if necessary.
953 objcache_timer(void *dummy
)
956 struct magazinedepot
*depot
;
957 struct magazinelist tmplist
;
959 XXX we need to detect when an objcache is destroyed out from under
962 SLIST_INIT(&tmplist
);
964 spin_lock_wr(&objcachelist_spin
);
965 LIST_FOREACH(oc
, &allobjcaches
, oc_next
) {
966 depot
= &oc
->depot
[myclusterid
];
967 if (depot
->magcapacity
< MAXMAGSIZE
) {
968 if (depot
->contested
> objcache_contention_rate
) {
969 spin_lock_wr(&depot
->spin
);
970 depot_disassociate(depot
, &tmplist
);
971 depot
->magcapacity
*= 2;
972 spin_unlock_wr(&depot
->spin
);
973 kprintf("objcache_timer: increasing cache %s"
974 " magsize to %d, contested %d times\n",
975 oc
->name
, depot
->magcapacity
,
978 depot
->contested
= 0;
980 spin_unlock_wr(&objcachelist_spin
);
981 if (maglist_purge(oc
, &tmplist
) > 0 && depot
->waiting
)
983 spin_lock_wr(&objcachelist_spin
);
985 spin_unlock_wr(&objcachelist_spin
);
987 callout_reset(&objcache_callout
, objcache_rebalance_period
,
988 objcache_timer
, NULL
);
996 spin_init(&objcachelist_spin
);
998 magazine_capmin
= mag_capacity_align(MAGAZINE_CAPACITY_MIN
);
999 magazine_capmax
= mag_capacity_align(MAGAZINE_CAPACITY_MAX
);
1001 kprintf("objcache: magazine cap [%d, %d]\n",
1002 magazine_capmin
, magazine_capmax
);
1006 callout_init_mp(&objcache_callout
);
1007 objcache_rebalance_period
= 60 * hz
;
1008 callout_reset(&objcache_callout
, objcache_rebalance_period
,
1009 objcache_timer
, NULL
);
1012 SYSINIT(objcache
, SI_BOOT2_OBJCACHE
, SI_ORDER_FIRST
, objcache_init
, 0);