1 /*********************************************************************
5 * Description: General queue implementation
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Tue Jun 9 13:29:31 1998
9 * Modified at: Sun Dec 12 13:48:22 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * Modified at: Thu Jan 4 14:29:10 CET 2001
12 * Modified by: Marc Zyngier <mzyngier@freesurf.fr>
14 * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
15 * Copyright (C) 1998, Dag Brattli,
16 * All Rights Reserved.
18 * This code is taken from the Vortex Operating System written by Aage
19 * Kvalnes. Aage has agreed that this code can use the GPL licence,
20 * although he does not use that licence in his own code.
22 * This copyright does however _not_ include the ELF hash() function
23 * which I currently don't know which licence or copyright it
24 * has. Please inform me if you know.
26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License as
28 * published by the Free Software Foundation; either version 2 of
29 * the License, or (at your option) any later version.
31 * Neither Dag Brattli nor University of Tromsø admit liability nor
32 * provide warranty for any of this software. This material is
33 * provided "AS-IS" and at no charge.
35 ********************************************************************/
39 * There are various problems with this package :
40 * o the hash function for ints is pathetic (but could be changed)
41 * o locking is sometime suspicious (especially during enumeration)
42 * o most users have only a few elements (== overhead)
43 * o most users never use search, so don't benefit from hashing
44 * Problem already fixed :
45 * o not 64 bit compliant (most users do hashv = (int) self)
46 * o hashbin_remove() is broken => use hashbin_remove_this()
47 * I think most users would be better served by a simple linked list
48 * (like include/linux/list.h) with a global spinlock per list.
53 * Notes on the concurrent access to hashbin and other SMP issues
54 * -------------------------------------------------------------
55 * Hashbins are very often in the IrDA stack a global repository of
56 * information, and therefore used in a very asynchronous manner following
57 * various events (driver calls, timers, user calls...).
58 * Therefore, very often it is highly important to consider the
59 * management of concurrent access to the hashbin and how to guarantee the
60 * consistency of the operations on it.
62 * First, we need to define the objective of locking :
63 * 1) Protect user data (content pointed by the hashbin)
64 * 2) Protect hashbin structure itself (linked list in each bin)
69 * The previous locking strategy, either HB_LOCAL or HB_GLOBAL were
70 * both inadequate in *both* aspect.
71 * o HB_GLOBAL was using a spinlock for each bin (local locking).
72 * o HB_LOCAL was disabling irq on *all* CPUs, so use a single
75 * A) Global irq disabling is no longer supported by the kernel
76 * B) No protection for the hashbin struct global data
79 * C) No protection for user data in some cases
81 * A) HB_LOCAL use global irq disabling, so doesn't work on kernel
82 * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
83 * performance is not satisfactory on SMP setups. Most hashbins were
84 * HB_LOCAL, so (A) definitely need fixing.
85 * B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
86 * lock only the individual bins, it will never be able to lock the
87 * global data, so can't do (B).
88 * C) Some functions return pointer to data that is still in the
91 * o hashbin_get_first()
92 * o hashbin_get_next()
93 * As the data is still in the hashbin, it may be changed or free'd
94 * while the caller is examining the data. In those cases, locking can't
95 * be done within the hashbin, but must include use of the data within
97 * The caller can easily do this with HB_LOCAL (just disable irqs).
98 * However, this is impossible with HB_GLOBAL because the caller has no
99 * way to know the proper bin, so don't know which spinlock to use.
101 * Quick summary : can no longer use HB_LOCAL, and HB_GLOBAL is
102 * fundamentally broken and will never work.
107 * To fix those problems, I've introduced a few changes in the
109 * 1) New HB_LOCK scheme
110 * 2) hashbin->hb_spinlock
111 * 3) New hashbin usage policy
115 * HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
116 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
117 * of the hashbin. As it is a single spinlock, it can protect the global
118 * data of the hashbin and not only the bins themselves.
119 * HB_LOCK can only protect some of the hashbin calls, so it only lock
120 * call that can be made 100% safe and leave other call unprotected.
121 * HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin
122 * content is always small contention is not high, so it doesn't matter
123 * much. HB_LOCK is probably faster than HB_LOCAL.
125 * hashbin->hb_spinlock :
126 * --------------------
127 * The spinlock that HB_LOCK uses is available for caller, so that
128 * the caller can protect unprotected calls (see below).
129 * If the caller want to do entirely its own locking (HB_NOLOCK), he
130 * can do so and may use safely this spinlock.
131 * Locking is done like this :
132 * spin_lock_irqsave(&hashbin->hb_spinlock, flags);
133 * Releasing the lock :
134 * spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
136 * Safe & Protected calls :
137 * ----------------------
138 * The following calls are safe or protected via HB_LOCK :
139 * o hashbin_new() -> safe
142 * o hashbin_remove_first()
144 * o hashbin_remove_this()
145 * o HASHBIN_GET_SIZE() -> atomic
147 * The following calls only protect the hashbin itself :
148 * o hashbin_lock_find()
149 * o hashbin_find_next()
151 * Unprotected calls :
153 * The following calls need to be protected by the caller :
155 * o hashbin_get_first()
156 * o hashbin_get_next()
160 * If the hashbin is used only in a single thread of execution
161 * (explicitly or implicitly), you can use HB_NOLOCK
162 * If the calling module already provide concurrent access protection,
163 * you may use HB_NOLOCK.
165 * In all other cases, you need to use HB_LOCK and lock the hashbin
166 * every time before calling one of the unprotected calls. You also must
167 * use the pointer returned by the unprotected call within the locked
170 * Extra care for enumeration :
171 * --------------------------
172 * hashbin_get_first() and hashbin_get_next() use the hashbin to
173 * store the current position, in hb_current.
174 * As long as the hashbin remains locked, this is safe. If you unlock
175 * the hashbin, the current position may change if anybody else modify
176 * or enumerate the hashbin.
177 * Summary : do the full enumeration while locked.
179 * Alternatively, you may use hashbin_find_next(). But, this will
180 * be slower, is more complex to use and doesn't protect the hashbin
181 * content. So, care is needed here as well.
185 * I believe that we are overdoing it by using spin_lock_irqsave()
186 * and we should use only spin_lock_bh() or similar. But, I don't have
187 * the balls to try it out.
188 * Don't believe that because hashbin are now (somewhat) SMP safe
189 * that the rest of the code is. Higher layers tend to be safest,
190 * but LAP and LMP would need some serious dedicated love.
194 #include <linux/module.h>
195 #include <linux/slab.h>
197 #include <net/irda/irda.h>
198 #include <net/irda/irqueue.h>
200 /************************ QUEUE SUBROUTINES ************************/
205 #define GET_HASHBIN(x) ( x & HASHBIN_MASK )
228 * Function enqueue_first (queue, proc)
230 * Insert item first in queue.
233 static void enqueue_first(irda_queue_t
**queue
, irda_queue_t
* element
)
236 IRDA_DEBUG( 4, "%s()\n", __func__
);
239 * Check if queue is empty.
241 if ( *queue
== NULL
) {
243 * Queue is empty. Insert one element into the queue.
245 element
->q_next
= element
->q_prev
= *queue
= element
;
249 * Queue is not empty. Insert element into front of queue.
251 element
->q_next
= (*queue
);
252 (*queue
)->q_prev
->q_next
= element
;
253 element
->q_prev
= (*queue
)->q_prev
;
254 (*queue
)->q_prev
= element
;
261 * Function dequeue (queue)
263 * Remove first entry in queue
266 static irda_queue_t
*dequeue_first(irda_queue_t
**queue
)
270 IRDA_DEBUG( 4, "dequeue_first()\n");
277 if ( *queue
== NULL
) {
281 } else if ( (*queue
)->q_next
== *queue
) {
283 * Queue only contained a single element. It will now be
289 * Queue contained several element. Remove the first one.
291 (*queue
)->q_prev
->q_next
= (*queue
)->q_next
;
292 (*queue
)->q_next
->q_prev
= (*queue
)->q_prev
;
293 *queue
= (*queue
)->q_next
;
297 * Return the removed entry (or NULL of queue was empty).
303 * Function dequeue_general (queue, element)
307 static irda_queue_t
*dequeue_general(irda_queue_t
**queue
, irda_queue_t
* element
)
311 IRDA_DEBUG( 4, "dequeue_general()\n");
318 if ( *queue
== NULL
) {
322 } else if ( (*queue
)->q_next
== *queue
) {
324 * Queue only contained a single element. It will now be
331 * Remove specific element.
333 element
->q_prev
->q_next
= element
->q_next
;
334 element
->q_next
->q_prev
= element
->q_prev
;
335 if ( (*queue
) == element
)
336 (*queue
) = element
->q_next
;
340 * Return the removed entry (or NULL of queue was empty).
345 /************************ HASHBIN MANAGEMENT ************************/
348 * Function hashbin_create ( type, name )
353 hashbin_t
*hashbin_new(int type
)
358 * Allocate new hashbin
360 hashbin
= kzalloc(sizeof(*hashbin
), GFP_ATOMIC
);
365 * Initialize structure
367 hashbin
->hb_type
= type
;
368 hashbin
->magic
= HB_MAGIC
;
369 //hashbin->hb_current = NULL;
371 /* Make sure all spinlock's are unlocked */
372 if ( hashbin
->hb_type
& HB_LOCK
) {
373 spin_lock_init(&hashbin
->hb_spinlock
);
378 EXPORT_SYMBOL(hashbin_new
);
382 * Function hashbin_delete (hashbin, free_func)
384 * Destroy hashbin, the free_func can be a user supplied special routine
385 * for deallocating this structure if it's complex. If not the user can
386 * just supply kfree, which should take care of the job.
388 #ifdef CONFIG_LOCKDEP
389 static int hashbin_lock_depth
= 0;
391 int hashbin_delete( hashbin_t
* hashbin
, FREE_FUNC free_func
)
394 unsigned long flags
= 0;
397 IRDA_ASSERT(hashbin
!= NULL
, return -1;);
398 IRDA_ASSERT(hashbin
->magic
== HB_MAGIC
, return -1;);
401 if ( hashbin
->hb_type
& HB_LOCK
) {
402 spin_lock_irqsave_nested(&hashbin
->hb_spinlock
, flags
,
403 hashbin_lock_depth
++);
407 * Free the entries in the hashbin, TODO: use hashbin_clear when
408 * it has been shown to work
410 for (i
= 0; i
< HASHBIN_SIZE
; i
++ ) {
411 queue
= dequeue_first((irda_queue_t
**) &hashbin
->hb_queue
[i
]);
415 queue
= dequeue_first(
416 (irda_queue_t
**) &hashbin
->hb_queue
[i
]);
420 /* Cleanup local data */
421 hashbin
->hb_current
= NULL
;
422 hashbin
->magic
= ~HB_MAGIC
;
425 if ( hashbin
->hb_type
& HB_LOCK
) {
426 spin_unlock_irqrestore(&hashbin
->hb_spinlock
, flags
);
427 #ifdef CONFIG_LOCKDEP
428 hashbin_lock_depth
--;
433 * Free the hashbin structure
439 EXPORT_SYMBOL(hashbin_delete
);
441 /********************* HASHBIN LIST OPERATIONS *********************/
444 * Function hashbin_insert (hashbin, entry, name)
446 * Insert an entry into the hashbin
449 void hashbin_insert(hashbin_t
* hashbin
, irda_queue_t
* entry
, long hashv
,
452 unsigned long flags
= 0;
455 IRDA_DEBUG( 4, "%s()\n", __func__
);
457 IRDA_ASSERT( hashbin
!= NULL
, return;);
458 IRDA_ASSERT( hashbin
->magic
== HB_MAGIC
, return;);
464 hashv
= hash( name
);
465 bin
= GET_HASHBIN( hashv
);
468 if ( hashbin
->hb_type
& HB_LOCK
) {
469 spin_lock_irqsave(&hashbin
->hb_spinlock
, flags
);
470 } /* Default is no-lock */
475 entry
->q_hash
= hashv
;
477 strlcpy( entry
->q_name
, name
, sizeof(entry
->q_name
));
480 * Insert new entry first
482 enqueue_first( (irda_queue_t
**) &hashbin
->hb_queue
[ bin
],
487 if ( hashbin
->hb_type
& HB_LOCK
) {
488 spin_unlock_irqrestore(&hashbin
->hb_spinlock
, flags
);
489 } /* Default is no-lock */
491 EXPORT_SYMBOL(hashbin_insert
);
494 * Function hashbin_remove_first (hashbin)
496 * Remove first entry of the hashbin
498 * Note : this function no longer use hashbin_remove(), but does things
499 * similar to hashbin_remove_this(), so can be considered safe.
502 void *hashbin_remove_first( hashbin_t
*hashbin
)
504 unsigned long flags
= 0;
505 irda_queue_t
*entry
= NULL
;
508 if ( hashbin
->hb_type
& HB_LOCK
) {
509 spin_lock_irqsave(&hashbin
->hb_spinlock
, flags
);
510 } /* Default is no-lock */
512 entry
= hashbin_get_first( hashbin
);
513 if ( entry
!= NULL
) {
519 hashv
= entry
->q_hash
;
520 bin
= GET_HASHBIN( hashv
);
523 * Dequeue the entry...
525 dequeue_general( (irda_queue_t
**) &hashbin
->hb_queue
[ bin
],
526 (irda_queue_t
*) entry
);
528 entry
->q_next
= NULL
;
529 entry
->q_prev
= NULL
;
532 * Check if this item is the currently selected item, and in
533 * that case we must reset hb_current
535 if ( entry
== hashbin
->hb_current
)
536 hashbin
->hb_current
= NULL
;
540 if ( hashbin
->hb_type
& HB_LOCK
) {
541 spin_unlock_irqrestore(&hashbin
->hb_spinlock
, flags
);
542 } /* Default is no-lock */
549 * Function hashbin_remove (hashbin, hashv, name)
551 * Remove entry with the given name
553 * The use of this function is highly discouraged, because the whole
554 * concept behind hashbin_remove() is broken. In many cases, it's not
555 * possible to guarantee the unicity of the index (either hashv or name),
556 * leading to removing the WRONG entry.
557 * The only simple safe use is :
558 * hashbin_remove(hasbin, (int) self, NULL);
559 * In other case, you must think hard to guarantee unicity of the index.
562 void* hashbin_remove( hashbin_t
* hashbin
, long hashv
, const char* name
)
564 int bin
, found
= FALSE
;
565 unsigned long flags
= 0;
568 IRDA_DEBUG( 4, "%s()\n", __func__
);
570 IRDA_ASSERT( hashbin
!= NULL
, return NULL
;);
571 IRDA_ASSERT( hashbin
->magic
== HB_MAGIC
, return NULL
;);
577 hashv
= hash( name
);
578 bin
= GET_HASHBIN( hashv
);
581 if ( hashbin
->hb_type
& HB_LOCK
) {
582 spin_lock_irqsave(&hashbin
->hb_spinlock
, flags
);
583 } /* Default is no-lock */
588 entry
= hashbin
->hb_queue
[ bin
];
594 if ( entry
->q_hash
== hashv
) {
599 if ( strcmp( entry
->q_name
, name
) == 0)
609 entry
= entry
->q_next
;
610 } while ( entry
!= hashbin
->hb_queue
[ bin
] );
614 * If entry was found, dequeue it
617 dequeue_general( (irda_queue_t
**) &hashbin
->hb_queue
[ bin
],
618 (irda_queue_t
*) entry
);
622 * Check if this item is the currently selected item, and in
623 * that case we must reset hb_current
625 if ( entry
== hashbin
->hb_current
)
626 hashbin
->hb_current
= NULL
;
630 if ( hashbin
->hb_type
& HB_LOCK
) {
631 spin_unlock_irqrestore(&hashbin
->hb_spinlock
, flags
);
632 } /* Default is no-lock */
642 EXPORT_SYMBOL(hashbin_remove
);
645 * Function hashbin_remove_this (hashbin, entry)
647 * Remove entry with the given name
649 * In some cases, the user of hashbin can't guarantee the unicity
650 * of either the hashv or name.
651 * In those cases, using the above function is guaranteed to cause troubles,
652 * so we use this one instead...
653 * And by the way, it's also faster, because we skip the search phase ;-)
655 void* hashbin_remove_this( hashbin_t
* hashbin
, irda_queue_t
* entry
)
657 unsigned long flags
= 0;
661 IRDA_DEBUG( 4, "%s()\n", __func__
);
663 IRDA_ASSERT( hashbin
!= NULL
, return NULL
;);
664 IRDA_ASSERT( hashbin
->magic
== HB_MAGIC
, return NULL
;);
665 IRDA_ASSERT( entry
!= NULL
, return NULL
;);
668 if ( hashbin
->hb_type
& HB_LOCK
) {
669 spin_lock_irqsave(&hashbin
->hb_spinlock
, flags
);
670 } /* Default is no-lock */
672 /* Check if valid and not already removed... */
673 if((entry
->q_next
== NULL
) || (entry
->q_prev
== NULL
)) {
681 hashv
= entry
->q_hash
;
682 bin
= GET_HASHBIN( hashv
);
685 * Dequeue the entry...
687 dequeue_general( (irda_queue_t
**) &hashbin
->hb_queue
[ bin
],
688 (irda_queue_t
*) entry
);
690 entry
->q_next
= NULL
;
691 entry
->q_prev
= NULL
;
694 * Check if this item is the currently selected item, and in
695 * that case we must reset hb_current
697 if ( entry
== hashbin
->hb_current
)
698 hashbin
->hb_current
= NULL
;
701 if ( hashbin
->hb_type
& HB_LOCK
) {
702 spin_unlock_irqrestore(&hashbin
->hb_spinlock
, flags
);
703 } /* Default is no-lock */
707 EXPORT_SYMBOL(hashbin_remove_this
);
709 /*********************** HASHBIN ENUMERATION ***********************/
712 * Function hashbin_common_find (hashbin, hashv, name)
714 * Find item with the given hashv or name
717 void* hashbin_find( hashbin_t
* hashbin
, long hashv
, const char* name
)
722 IRDA_DEBUG( 4, "hashbin_find()\n");
724 IRDA_ASSERT( hashbin
!= NULL
, return NULL
;);
725 IRDA_ASSERT( hashbin
->magic
== HB_MAGIC
, return NULL
;);
731 hashv
= hash( name
);
732 bin
= GET_HASHBIN( hashv
);
737 entry
= hashbin
->hb_queue
[ bin
];
743 if ( entry
->q_hash
== hashv
) {
748 if ( strcmp( entry
->q_name
, name
) == 0 ) {
755 entry
= entry
->q_next
;
756 } while ( entry
!= hashbin
->hb_queue
[ bin
] );
761 EXPORT_SYMBOL(hashbin_find
);
764 * Function hashbin_lock_find (hashbin, hashv, name)
766 * Find item with the given hashv or name
768 * Same, but with spinlock protection...
769 * I call it safe, but it's only safe with respect to the hashbin, not its
772 void* hashbin_lock_find( hashbin_t
* hashbin
, long hashv
, const char* name
)
774 unsigned long flags
= 0;
778 spin_lock_irqsave(&hashbin
->hb_spinlock
, flags
);
783 entry
= (irda_queue_t
* ) hashbin_find( hashbin
, hashv
, name
);
786 spin_unlock_irqrestore(&hashbin
->hb_spinlock
, flags
);
790 EXPORT_SYMBOL(hashbin_lock_find
);
793 * Function hashbin_find (hashbin, hashv, name, pnext)
795 * Find an item with the given hashv or name, and its successor
797 * This function allow to do concurrent enumerations without the
798 * need to lock over the whole session, because the caller keep the
799 * context of the search. On the other hand, it might fail and return
800 * NULL if the entry is removed. - Jean II
802 void* hashbin_find_next( hashbin_t
* hashbin
, long hashv
, const char* name
,
805 unsigned long flags
= 0;
809 spin_lock_irqsave(&hashbin
->hb_spinlock
, flags
);
812 * Search for current entry
813 * This allow to check if the current item is still in the
814 * hashbin or has been removed.
816 entry
= (irda_queue_t
* ) hashbin_find( hashbin
, hashv
, name
);
819 * Trick hashbin_get_next() to return what we want
822 hashbin
->hb_current
= entry
;
823 *pnext
= hashbin_get_next( hashbin
);
828 spin_unlock_irqrestore(&hashbin
->hb_spinlock
, flags
);
834 * Function hashbin_get_first (hashbin)
836 * Get a pointer to first element in hashbin, this function must be
837 * called before any calls to hashbin_get_next()!
840 irda_queue_t
*hashbin_get_first( hashbin_t
* hashbin
)
845 IRDA_ASSERT( hashbin
!= NULL
, return NULL
;);
846 IRDA_ASSERT( hashbin
->magic
== HB_MAGIC
, return NULL
;);
848 if ( hashbin
== NULL
)
851 for ( i
= 0; i
< HASHBIN_SIZE
; i
++ ) {
852 entry
= hashbin
->hb_queue
[ i
];
854 hashbin
->hb_current
= entry
;
859 * Did not find any item in hashbin
863 EXPORT_SYMBOL(hashbin_get_first
);
866 * Function hashbin_get_next (hashbin)
868 * Get next item in hashbin. A series of hashbin_get_next() calls must
869 * be started by a call to hashbin_get_first(). The function returns
870 * NULL when all items have been traversed
872 * The context of the search is stored within the hashbin, so you must
873 * protect yourself from concurrent enumerations. - Jean II
875 irda_queue_t
*hashbin_get_next( hashbin_t
*hashbin
)
881 IRDA_ASSERT( hashbin
!= NULL
, return NULL
;);
882 IRDA_ASSERT( hashbin
->magic
== HB_MAGIC
, return NULL
;);
884 if ( hashbin
->hb_current
== NULL
) {
885 IRDA_ASSERT( hashbin
->hb_current
!= NULL
, return NULL
;);
888 entry
= hashbin
->hb_current
->q_next
;
889 bin
= GET_HASHBIN( entry
->q_hash
);
892 * Make sure that we are not back at the beginning of the queue
895 if ( entry
!= hashbin
->hb_queue
[ bin
]) {
896 hashbin
->hb_current
= entry
;
902 * Check that this is not the last queue in hashbin
904 if ( bin
>= HASHBIN_SIZE
)
908 * Move to next queue in hashbin
911 for ( i
= bin
; i
< HASHBIN_SIZE
; i
++ ) {
912 entry
= hashbin
->hb_queue
[ i
];
914 hashbin
->hb_current
= entry
;
921 EXPORT_SYMBOL(hashbin_get_next
);