/*
 * Copyright (c) 2005 Jeffrey M. Hsu.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_objcache.c,v 1.16 2007/01/07 04:06:51 y0netan1 Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/globaldata.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/objcache.h>
#include <sys/spinlock.h>
#include <sys/thread.h>
#include <sys/thread2.h>
#include <sys/spinlock2.h>
static MALLOC_DEFINE(M_OBJCACHE, "objcache", "Object Cache");
static MALLOC_DEFINE(M_OBJMAG, "objcache magazine", "Object Cache Magazine");
#define	INITIAL_MAG_CAPACITY	256

struct magazine {
	int			 rounds;
	int			 capacity;
	int			 cleaning;
	SLIST_ENTRY(magazine)	 nextmagazine;
	void			*objects[];
};

SLIST_HEAD(magazinelist, magazine);
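
/*
 * A magazine is a simple LIFO stack of object pointers, with "rounds"
 * counting the slots in use.  The hot-path get and put are therefore
 * single array operations, mirroring the code further below
 * (illustrative sketch, not original text):
 *
 *	mag->objects[mag->rounds++] = obj;	(put: push)
 *	obj = mag->objects[--mag->rounds];	(get: pop)
 */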
/*
 * per-cluster cache of magazines
 *
 * All fields in this structure are protected by the spinlock.
 */
struct magazinedepot {
	/*
	 * The per-cpu object caches only exchange completely full or
	 * completely empty magazines with the depot layer, so we only
	 * have to cache these two types of magazines.
	 */
	struct magazinelist	fullmagazines;
	struct magazinelist	emptymagazines;
	int			magcapacity;

	/* protect this structure */
	struct spinlock		spin;

	/* magazines not yet allocated towards limit */
	int			unallocated_objects;

	/* infrequently used fields */
	int			waiting;	/* waiting for another cpu to
						 * return a full magazine to
						 * the depot */
	int			contested;	/* depot contention count */
};
/*
 * per-cpu object cache
 * All fields in this structure are protected by crit_enter().
 */
struct percpu_objcache {
	struct magazine	*loaded_magazine;	/* active magazine */
	struct magazine	*previous_magazine;	/* backup magazine */

	/* statistics */
	int		gets_cumulative;	/* total calls to get */
	int		gets_null;		/* objcache_get returned NULL */
	int		puts_cumulative;	/* total calls to put */
	int		puts_othercluster;	/* returned to other cluster */

	/* infrequently used fields */
	int		waiting;	/* waiting for a thread on this cpu to
					 * return an obj to the per-cpu cache */
};

/* only until we have NUMA cluster topology information XXX */
#define MAXCLUSTERS	1
#define myclusterid	0
#define CLUSTER_OF(obj)	0
/*
 * Two-level object cache consisting of NUMA cluster-level depots of
 * fully loaded or completely empty magazines and cpu-level caches of
 * individual objects.
 */
struct objcache {
	char			*name;

	/* object constructor and destructor from blank storage */
	objcache_ctor_fn	*ctor;
	objcache_dtor_fn	*dtor;
	void			*private;

	/* interface to underlying allocator */
	objcache_alloc_fn	*alloc;
	objcache_free_fn	*free;
	void			*allocator_args;

	SLIST_ENTRY(objcache)	oc_next;

	/* NUMA-cluster level caches */
	struct magazinedepot	depot[MAXCLUSTERS];

	struct percpu_objcache	cache_percpu[];	/* per-cpu caches */
};

static struct spinlock objcachelist_spin;
static SLIST_HEAD(objcachelist, objcache) allobjcaches;
static struct magazine *
mag_alloc(int capacity)
{
	struct magazine *mag;

	mag = kmalloc(__offsetof(struct magazine, objects[capacity]),
		      M_OBJMAG, M_INTWAIT | M_ZERO);
	mag->capacity = capacity;
	mag->rounds = 0;
	mag->cleaning = 0;
	return (mag);
}
/*
 * Utility routines for objects that don't require any de-construction.
 */
static void
null_dtor(void *obj, void *private)
{
	/* do nothing */
}

static boolean_t
null_ctor(void *obj, void *private, int ocflags)
{
	return (TRUE);
}
/*
 * Create an object cache.
 */
struct objcache *
objcache_create(const char *name, int cluster_limit, int mag_capacity,
		objcache_ctor_fn *ctor, objcache_dtor_fn *dtor, void *private,
		objcache_alloc_fn *alloc, objcache_free_fn *free,
		void *allocator_args)
{
	struct objcache *oc;
	struct magazinedepot *depot;
	int cpuid;

	/* allocate object cache structure */
	oc = kmalloc(__offsetof(struct objcache, cache_percpu[ncpus]),
		     M_OBJCACHE, M_WAITOK | M_ZERO);
	oc->name = kstrdup(name, M_TEMP);
	oc->ctor = ctor ? ctor : null_ctor;
	oc->dtor = dtor ? dtor : null_dtor;
	oc->private = private;
	oc->free = free;
	oc->allocator_args = allocator_args;

	/* initialize depots */
	depot = &oc->depot[0];

	spin_init(&depot->spin);
	SLIST_INIT(&depot->fullmagazines);
	SLIST_INIT(&depot->emptymagazines);

	if (mag_capacity == 0)
		mag_capacity = INITIAL_MAG_CAPACITY;
	depot->magcapacity = mag_capacity;

	/*
	 * The cluster_limit must be sufficient to have three magazines per
	 * cpu.
	 */
	if (cluster_limit == 0) {
		depot->unallocated_objects = -1;
	} else {
		if (cluster_limit < mag_capacity * ncpus * 3)
			cluster_limit = mag_capacity * ncpus * 3;
		depot->unallocated_objects = cluster_limit;
	}
	oc->alloc = alloc;

	/* initialize per-cpu caches */
	for (cpuid = 0; cpuid < ncpus; cpuid++) {
		struct percpu_objcache *cache_percpu = &oc->cache_percpu[cpuid];

		cache_percpu->loaded_magazine = mag_alloc(mag_capacity);
		cache_percpu->previous_magazine = mag_alloc(mag_capacity);
	}
	spin_lock_wr(&objcachelist_spin);
	SLIST_INSERT_HEAD(&allobjcaches, oc, oc_next);
	spin_unlock_wr(&objcachelist_spin);

	return (oc);
}
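
/*
 * Illustrative usage sketch (the "mybuf" names, ctor/dtor, and
 * allocator args below are hypothetical, not part of this file):
 *
 *	static struct objcache *mybuf_cache;
 *
 *	mybuf_cache = objcache_create("mybuf", 0, 0,
 *				      mybuf_ctor, mybuf_dtor, NULL,
 *				      objcache_malloc_alloc,
 *				      objcache_malloc_free,
 *				      &mybuf_malloc_args);
 *
 * A cluster_limit of 0 disables the object limit (unallocated_objects
 * is set to -1) and a mag_capacity of 0 selects INITIAL_MAG_CAPACITY.
 */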
struct objcache *
objcache_create_simple(malloc_type_t mtype, size_t objsize)
{
	struct objcache_malloc_args *margs;
	struct objcache *oc;

	margs = kmalloc(sizeof(*margs), M_OBJCACHE, M_WAITOK|M_ZERO);
	margs->objsize = objsize;
	margs->mtype = mtype;
	oc = objcache_create(mtype->ks_shortdesc, 0, 0,
			     NULL, NULL, NULL,
			     objcache_malloc_alloc, objcache_malloc_free,
			     margs);
	return (oc);
}
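
/*
 * Illustrative sketch: the simple form needs only a malloc type and an
 * object size ("M_FOO" and "struct foo" are hypothetical):
 *
 *	struct objcache *foo_cache;
 *
 *	foo_cache = objcache_create_simple(M_FOO, sizeof(struct foo));
 */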
#define MAGAZINE_EMPTY(mag)	(mag->rounds == 0)
#define MAGAZINE_NOTEMPTY(mag)	(mag->rounds != 0)
#define MAGAZINE_FULL(mag)	(mag->rounds == mag->capacity)

#define	swap(x, y)	({ struct magazine *t = x; x = y; y = t; })
/*
 * Get an object from the object cache.
 *
 * WARNING!  ocflags are only used when we have to go to the underlying
 * allocator, so we cannot depend on flags such as M_ZERO.
 */
void *
objcache_get(struct objcache *oc, int ocflags)
{
	struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid];
	struct magazine *loadedmag;
	struct magazine *emptymag;
	void *obj;
	struct magazinedepot *depot;

	KKASSERT((ocflags & M_ZERO) == 0);
	crit_enter();
	++cpucache->gets_cumulative;

retry:
	/*
	 * Loaded magazine has an object.  This is the hot path.
	 * It is lock-free and uses a critical section to block
	 * out interrupt handlers on the same processor.
	 */
	loadedmag = cpucache->loaded_magazine;
	if (MAGAZINE_NOTEMPTY(loadedmag)) {
		obj = loadedmag->objects[--loadedmag->rounds];
		crit_exit();
		return (obj);
	}

	/* Previous magazine has an object. */
	if (MAGAZINE_NOTEMPTY(cpucache->previous_magazine)) {
		KKASSERT(cpucache->previous_magazine->cleaning +
			 cpucache->loaded_magazine->cleaning == 0);
		swap(cpucache->loaded_magazine, cpucache->previous_magazine);
		loadedmag = cpucache->loaded_magazine;
		obj = loadedmag->objects[--loadedmag->rounds];
		crit_exit();
		return (obj);
	}
	/*
	 * Both magazines empty.  Get a full magazine from the depot and
	 * move one of the empty ones to the depot.
	 *
	 * Obtain the depot spinlock.
	 *
	 * NOTE: Beyond this point, M_* flags are handled via oc->alloc()
	 */
	depot = &oc->depot[myclusterid];
	spin_lock_wr(&depot->spin);

	/*
	 * Recheck the cpucache after obtaining the depot spinlock.  This
	 * shouldn't be necessary now but don't take any chances.
	 */
	if (MAGAZINE_NOTEMPTY(cpucache->loaded_magazine) ||
	    MAGAZINE_NOTEMPTY(cpucache->previous_magazine)
	) {
		spin_unlock_wr(&depot->spin);
		goto retry;
	}
	/* Check if depot has a full magazine. */
	if (!SLIST_EMPTY(&depot->fullmagazines)) {
		emptymag = cpucache->previous_magazine;
		cpucache->previous_magazine = cpucache->loaded_magazine;
		cpucache->loaded_magazine = SLIST_FIRST(&depot->fullmagazines);
		SLIST_REMOVE_HEAD(&depot->fullmagazines, nextmagazine);

		/*
		 * Return emptymag to the depot.
		 */
		KKASSERT(MAGAZINE_EMPTY(emptymag));
		SLIST_INSERT_HEAD(&depot->emptymagazines,
				  emptymag, nextmagazine);
		spin_unlock_wr(&depot->spin);
		goto retry;
	}
	/*
	 * The depot does not have any non-empty magazines.  If we have
	 * not hit our object limit we can allocate a new object using
	 * the back-end allocator.
	 *
	 * note: unallocated_objects can be initialized to -1, which has
	 * the effect of removing any allocation limits.
	 */
	if (depot->unallocated_objects) {
		--depot->unallocated_objects;
		spin_unlock_wr(&depot->spin);
		crit_exit();

		obj = oc->alloc(oc->allocator_args, ocflags);
		if (obj) {
			if (oc->ctor(obj, oc->private, ocflags))
				return (obj);
			oc->free(obj, oc->allocator_args);
			spin_lock_wr(&depot->spin);
			++depot->unallocated_objects;
			spin_unlock_wr(&depot->spin);
			if (depot->waiting)
				wakeup(depot);
			obj = NULL;
		}
		if (obj == NULL) {
			crit_enter();
			/*
			 * makes debugging easier when gets_cumulative does
			 * not include gets_null.
			 */
			++cpucache->gets_null;
			--cpucache->gets_cumulative;
			crit_exit();
		}
		return (obj);
	}
	/*
	 * Otherwise block if allowed to.
	 */
	if ((ocflags & (M_WAITOK|M_NULLOK)) == M_WAITOK) {
		++cpucache->waiting;
		++depot->waiting;
		msleep(depot, &depot->spin, 0, "objcache_get", 0);
		--cpucache->waiting;
		--depot->waiting;
		spin_unlock_wr(&depot->spin);
		goto retry;
	}
	/*
	 * Otherwise fail.
	 */
	++cpucache->gets_null;
	--cpucache->gets_cumulative;
	crit_exit();
	spin_unlock_wr(&depot->spin);
	return (NULL);
}
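
/*
 * Illustrative get/put pattern (the hypothetical "foo_cache" from the
 * sketch above):
 *
 *	struct foo *fo;
 *
 *	fo = objcache_get(foo_cache, M_WAITOK);
 *	...
 *	objcache_put(foo_cache, fo);
 *
 * With M_WAITOK and without M_NULLOK, objcache_get() sleeps on the
 * depot once the object limit is hit instead of returning NULL.
 */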
/*
 * Wrapper for malloc allocation routines.
 */
void *
objcache_malloc_alloc(void *allocator_args, int ocflags)
{
	struct objcache_malloc_args *alloc_args = allocator_args;

	return (kmalloc(alloc_args->objsize, alloc_args->mtype,
			ocflags & OC_MFLAGS));
}
void
objcache_malloc_free(void *obj, void *allocator_args)
{
	struct objcache_malloc_args *alloc_args = allocator_args;

	kfree(obj, alloc_args->mtype);
}
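
/*
 * The allocator_args for these wrappers is a struct objcache_malloc_args
 * giving the object size and malloc type, as objcache_create_simple()
 * does above.  Illustrative sketch with a hypothetical M_FOO:
 *
 *	static struct objcache_malloc_args foo_malloc_args;
 *
 *	foo_malloc_args.objsize = sizeof(struct foo);
 *	foo_malloc_args.mtype = M_FOO;
 */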
/*
 * Wrappers for allocation policies that pre-allocate at initialization time
 * and don't do run-time allocation.
 */
void *
objcache_nop_alloc(void *allocator_args, int ocflags)
{
	return (NULL);
}

void
objcache_nop_free(void *obj, void *allocator_args)
{
	/* do nothing */
}
/*
 * Return an object to the object cache.
 */
void
objcache_put(struct objcache *oc, void *obj)
{
	struct percpu_objcache *cpucache = &oc->cache_percpu[mycpuid];
	struct magazine *loadedmag;
	struct magazinedepot *depot;

	crit_enter();
	++cpucache->puts_cumulative;

	if (CLUSTER_OF(obj) != myclusterid) {
#ifdef notyet
		/* use lazy IPI to send object to owning cluster XXX todo */
		++cpucache->puts_othercluster;
		crit_exit();
		return;
#endif
	}

retry:
	/*
	 * Free slot available in loaded magazine.  This is the hot path.
	 * It is lock-free and uses a critical section to block out interrupt
	 * handlers on the same processor.
	 */
	loadedmag = cpucache->loaded_magazine;
	if (!MAGAZINE_FULL(loadedmag)) {
		loadedmag->objects[loadedmag->rounds++] = obj;
		if (cpucache->waiting)
			wakeup_mycpu(&oc->depot[myclusterid]);
		crit_exit();
		return;
	}
	/*
	 * Current magazine full, but previous magazine has room.  XXX
	 */
	if (!MAGAZINE_FULL(cpucache->previous_magazine)) {
		KKASSERT(cpucache->previous_magazine->cleaning +
			 cpucache->loaded_magazine->cleaning == 0);
		swap(cpucache->loaded_magazine, cpucache->previous_magazine);
		loadedmag = cpucache->loaded_magazine;
		loadedmag->objects[loadedmag->rounds++] = obj;
		if (cpucache->waiting)
			wakeup_mycpu(&oc->depot[myclusterid]);
		crit_exit();
		return;
	}
	/*
	 * Both magazines full.  Get an empty magazine from the depot and
	 * move a full loaded magazine to the depot.  Even though the
	 * magazine may wind up with space available after we block on
	 * the spinlock, we still cycle it through to avoid the non-optimal
	 * corner case.
	 *
	 * Obtain the depot spinlock.
	 */
	depot = &oc->depot[myclusterid];
	spin_lock_wr(&depot->spin);
	/*
	 * If an empty magazine is available in the depot, cycle it
	 * through and retry.
	 */
	if (!SLIST_EMPTY(&depot->emptymagazines)) {
		KKASSERT(cpucache->previous_magazine->cleaning +
			 cpucache->loaded_magazine->cleaning == 0);
		loadedmag = cpucache->previous_magazine;
		cpucache->previous_magazine = cpucache->loaded_magazine;
		cpucache->loaded_magazine = SLIST_FIRST(&depot->emptymagazines);
		SLIST_REMOVE_HEAD(&depot->emptymagazines, nextmagazine);

		/*
		 * Return loadedmag to the depot.  Due to blocking it may
		 * not be entirely full and could even be empty.
		 */
		if (MAGAZINE_EMPTY(loadedmag)) {
			SLIST_INSERT_HEAD(&depot->emptymagazines,
					  loadedmag, nextmagazine);
			spin_unlock_wr(&depot->spin);
		} else {
			SLIST_INSERT_HEAD(&depot->fullmagazines,
					  loadedmag, nextmagazine);
			spin_unlock_wr(&depot->spin);
			if (depot->waiting)
				wakeup(depot);
		}
		goto retry;
	}
	/*
	 * An empty mag is not available.  This is a corner case which can
	 * occur due to cpus holding partially full magazines.  Do not try
	 * to allocate a mag, just free the object.
	 */
	++depot->unallocated_objects;
	spin_unlock_wr(&depot->spin);
	if (depot->waiting)
		wakeup(depot);
	crit_exit();
	oc->dtor(obj, oc->private);
	oc->free(obj, oc->allocator_args);
}
/*
 * The object is being put back into the cache, but the caller has
 * indicated that the object is not in any shape to be reused and should
 * be dtor'd immediately.
 */
void
objcache_dtor(struct objcache *oc, void *obj)
{
	struct magazinedepot *depot;

	depot = &oc->depot[myclusterid];
	spin_lock_wr(&depot->spin);
	++depot->unallocated_objects;
	spin_unlock_wr(&depot->spin);
	if (depot->waiting)
		wakeup(depot);
	oc->dtor(obj, oc->private);
	oc->free(obj, oc->allocator_args);
}
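
/*
 * Illustrative sketch: a caller that finds an object unfit for reuse
 * hands it to objcache_dtor() instead of objcache_put(), so it is
 * destroyed rather than recycled ("foo_is_damaged" is hypothetical):
 *
 *	if (foo_is_damaged(fo))
 *		objcache_dtor(foo_cache, fo);
 *	else
 *		objcache_put(foo_cache, fo);
 */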
/*
 * Deallocate all objects in a magazine and free the magazine if requested.
 * The magazine must already be disassociated from the depot.
 *
 * Must be called with a critical section held when called with a per-cpu
 * magazine.  The magazine may be indirectly modified during the loop.
 *
 * The number of objects freed is returned.
 */
static int
mag_purge(struct objcache *oc, struct magazine *mag, int freeit)
{
	int count;
	void *obj;

	count = 0;
	++mag->cleaning;
	while (mag->rounds) {
		obj = mag->objects[--mag->rounds];
		oc->dtor(obj, oc->private);		/* MAY BLOCK */
		oc->free(obj, oc->allocator_args);	/* MAY BLOCK */
		++count;

		/*
		 * Cycle for interrupts.
		 */
		if ((count & 15) == 0) {
			crit_exit();
			crit_enter();
		}
	}
	--mag->cleaning;
	if (freeit)
		kfree(mag, M_OBJMAG);
	return (count);
}
/*
 * Disassociate zero or more magazines from a magazine list associated with
 * the depot, update the depot, and move the magazines to a temporary
 * list.
 *
 * The caller must check the depot for waiters and wake it up, typically
 * after disposing of the magazines this function loads onto the temporary
 * list.
 */
static void
maglist_disassociate(struct magazinedepot *depot, struct magazinelist *maglist,
		     struct magazinelist *tmplist, boolean_t purgeall)
{
	struct magazine *mag;

	while ((mag = SLIST_FIRST(maglist)) != NULL) {
		SLIST_REMOVE_HEAD(maglist, nextmagazine);
		SLIST_INSERT_HEAD(tmplist, mag, nextmagazine);
		depot->unallocated_objects += mag->rounds;
	}
}
/*
 * Deallocate all magazines and their contents from the passed temporary
 * list.  The magazines have already been accounted for by their depots.
 *
 * The total number of rounds freed is returned.  This number is typically
 * only used to determine whether a wakeup on the depot is needed or not.
 */
static int
maglist_purge(struct objcache *oc, struct magazinelist *maglist)
{
	struct magazine *mag;
	int count = 0;

	/*
	 * can't use SLIST_FOREACH because blocking releases the depot
	 * spinlock
	 */
	while ((mag = SLIST_FIRST(maglist)) != NULL) {
		SLIST_REMOVE_HEAD(maglist, nextmagazine);
		count += mag_purge(oc, mag, TRUE);
	}
	return (count);
}
/*
 * De-allocates all magazines on the full and empty magazine lists.
 *
 * Because this routine is called with a spinlock held, the magazines
 * can only be disassociated and moved to a temporary list, not freed.
 *
 * The caller is responsible for freeing the magazines.
 */
static void
depot_disassociate(struct magazinedepot *depot, struct magazinelist *tmplist)
{
	maglist_disassociate(depot, &depot->fullmagazines, tmplist, TRUE);
	maglist_disassociate(depot, &depot->emptymagazines, tmplist, TRUE);
}
void
objcache_reclaim(struct objcache *oc)
{
	struct percpu_objcache *cache_percpu = &oc->cache_percpu[myclusterid];
	struct magazinedepot *depot = &oc->depot[myclusterid];
	struct magazinelist tmplist;
	int count;

	SLIST_INIT(&tmplist);
	crit_enter();
	count = mag_purge(oc, cache_percpu->loaded_magazine, FALSE);
	count += mag_purge(oc, cache_percpu->previous_magazine, FALSE);
	crit_exit();

	spin_lock_wr(&depot->spin);
	depot->unallocated_objects += count;
	depot_disassociate(depot, &tmplist);
	spin_unlock_wr(&depot->spin);
	count += maglist_purge(oc, &tmplist);
	if (count && depot->waiting)
		wakeup(depot);
}
/*
 * Try to free up some memory.  Return as soon as some free memory is found.
 * For each object cache on the reclaim list, first try the current per-cpu
 * cache, then the full magazine depot.
 */
boolean_t
objcache_reclaimlist(struct objcache *oclist[], int nlist, int ocflags)
{
	struct objcache *oc;
	struct percpu_objcache *cpucache;
	struct magazinedepot *depot;
	struct magazinelist tmplist;
	int i, count;

	SLIST_INIT(&tmplist);

	for (i = 0; i < nlist; i++) {
		oc = oclist[i];
		cpucache = &oc->cache_percpu[mycpuid];
		depot = &oc->depot[myclusterid];

		crit_enter();
		count = mag_purge(oc, cpucache->loaded_magazine, FALSE);
		if (count == 0)
			count += mag_purge(oc, cpucache->previous_magazine, FALSE);
		crit_exit();
		if (count > 0) {
			spin_lock_wr(&depot->spin);
			depot->unallocated_objects += count;
			spin_unlock_wr(&depot->spin);
			if (depot->waiting)
				wakeup(depot);
			return (TRUE);
		}

		spin_lock_wr(&depot->spin);
		maglist_disassociate(depot, &depot->fullmagazines,
				     &tmplist, FALSE);
		spin_unlock_wr(&depot->spin);
		count = maglist_purge(oc, &tmplist);
		if (count > 0) {
			if (depot->waiting)
				wakeup(depot);
			return (TRUE);
		}
	}
	return (FALSE);
}
/*
 * Destroy an object cache.  Must have no existing references.
 */
void
objcache_destroy(struct objcache *oc)
{
	struct percpu_objcache *cache_percpu;
	struct magazinedepot *depot;
	int clusterid, cpuid;
	struct magazinelist tmplist;

	SLIST_INIT(&tmplist);
	for (clusterid = 0; clusterid < MAXCLUSTERS; clusterid++) {
		depot = &oc->depot[clusterid];
		spin_lock_wr(&depot->spin);
		depot_disassociate(depot, &tmplist);
		spin_unlock_wr(&depot->spin);
	}
	maglist_purge(oc, &tmplist);

	for (cpuid = 0; cpuid < ncpus; cpuid++) {
		cache_percpu = &oc->cache_percpu[cpuid];

		mag_purge(oc, cache_percpu->loaded_magazine, TRUE);
		mag_purge(oc, cache_percpu->previous_magazine, TRUE);
		cache_percpu->loaded_magazine = NULL;
		cache_percpu->previous_magazine = NULL;
		/* don't bother adjusting depot->unallocated_objects */
	}

	kfree(oc->name, M_TEMP);
	kfree(oc, M_OBJCACHE);
}
/*
 * Populate the per-cluster depot with elements from a linear block
 * of memory.  Must be called individually for each cluster.
 * Populated depots should not be destroyed.
 */
void
objcache_populate_linear(struct objcache *oc, void *base, int nelts, int size)
{
	char *p = base;
	char *end = (char *)base + (nelts * size);
	struct magazinedepot *depot = &oc->depot[myclusterid];
	struct magazine *emptymag = mag_alloc(depot->magcapacity);

	while (p < end) {
		emptymag->objects[emptymag->rounds++] = p;
		if (MAGAZINE_FULL(emptymag)) {
			spin_lock_wr(&depot->spin);
			SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag,
					  nextmagazine);
			depot->unallocated_objects += emptymag->rounds;
			spin_unlock_wr(&depot->spin);
			if (depot->waiting)
				wakeup(depot);
			emptymag = mag_alloc(depot->magcapacity);
		}
		p += size;
	}
	if (MAGAZINE_EMPTY(emptymag)) {
		mag_purge(oc, emptymag, TRUE);
	} else {
		spin_lock_wr(&depot->spin);
		SLIST_INSERT_HEAD(&depot->fullmagazines, emptymag,
				  nextmagazine);
		depot->unallocated_objects += emptymag->rounds;
		spin_unlock_wr(&depot->spin);
		if (depot->waiting)
			wakeup(depot);
	}
}
/*
 * Check depot contention once a minute.
 * 2 contested locks per second allowed.
 */
static int objcache_rebalance_period;
static const int objcache_contention_rate = 120;
static struct callout objcache_callout;

#define MAXMAGSIZE 512

/*
 * Check depot contention and increase magazine size if necessary.
 */
static void
objcache_timer(void *dummy)
{
	struct objcache *oc;
	struct magazinedepot *depot;
	struct magazinelist tmplist;

	/*
	 * XXX we need to detect when an objcache is destroyed out from
	 * under us XXX
	 */

	SLIST_INIT(&tmplist);

	spin_lock_wr(&objcachelist_spin);
	SLIST_FOREACH(oc, &allobjcaches, oc_next) {
		depot = &oc->depot[myclusterid];
		if (depot->magcapacity < MAXMAGSIZE) {
			if (depot->contested > objcache_contention_rate) {
				spin_lock_wr(&depot->spin);
				depot_disassociate(depot, &tmplist);
				depot->magcapacity *= 2;
				spin_unlock_wr(&depot->spin);
				kprintf("objcache_timer: increasing cache %s"
					" magsize to %d, contested %d times\n",
					oc->name, depot->magcapacity,
					depot->contested);
			}
			depot->contested = 0;
		}
		spin_unlock_wr(&objcachelist_spin);
		if (maglist_purge(oc, &tmplist) > 0 && depot->waiting)
			wakeup(depot);
		spin_lock_wr(&objcachelist_spin);
	}
	spin_unlock_wr(&objcachelist_spin);

	callout_reset(&objcache_callout, objcache_rebalance_period,
		      objcache_timer, NULL);
}
static void
objcache_init(void)
{
	spin_init(&objcachelist_spin);
	callout_init(&objcache_callout);
	objcache_rebalance_period = 60 * hz;
	callout_reset(&objcache_callout, objcache_rebalance_period,
		      objcache_timer, NULL);
}
SYSINIT(objcache, SI_SUB_CPU, SI_ORDER_ANY, objcache_init, 0);