include/linux/blockgroup_lock.h

#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * We want a power-of-two.  Is there a better way than this?
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS 128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS 64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS 32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS 16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS 8
#else
#define NR_BG_LOCKS 4
#endif

#else /* CONFIG_SMP */
#define NR_BG_LOCKS 1
#endif /* CONFIG_SMP */

struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
	int i;

	for (i = 0; i < NR_BG_LOCKS; i++)
		spin_lock_init(&bgl->locks[i].lock);
}

/*
 * The accessor is a macro so we can embed a blockgroup_lock into different
 * superblock types.
 */
#define sb_bgl_lock(sb, block_group) \
	(&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock)

#endif
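
For context, here is a minimal sketch of how a filesystem's private superblock
info might embed and use this header, roughly the way ext2/ext3 do via their
sb_info structures. The names myfs_sb_info, myfs_init_group_locks and
myfs_update_group_desc are hypothetical and exist only for illustration; the
one thing the header dictates is that the structure passed to sb_bgl_lock()
must have a member named s_blockgroup_lock of type struct blockgroup_lock.

#include <linux/blockgroup_lock.h>

/* Hypothetical per-mount info; only the s_blockgroup_lock member name
 * matters, because sb_bgl_lock() dereferences it by name. */
struct myfs_sb_info {
	/* ... other per-mount state ... */
	struct blockgroup_lock s_blockgroup_lock;
};

/* Called once at mount time to initialise every hashed spinlock. */
static void myfs_init_group_locks(struct myfs_sb_info *sbi)
{
	bgl_lock_init(&sbi->s_blockgroup_lock);
}

/* Serialise updates to one block group's metadata.  Distinct block
 * groups may hash to the same spinlock; that only adds contention,
 * never affects correctness. */
static void myfs_update_group_desc(struct myfs_sb_info *sbi,
				   unsigned int block_group)
{
	spin_lock(sb_bgl_lock(sbi, block_group));
	/* ... update free block / inode counters for block_group ... */
	spin_unlock(sb_bgl_lock(sbi, block_group));
}

Because NR_BG_LOCKS is always a power of two, sb_bgl_lock() can map a block
group number onto a lock with a cheap mask, (block_group) & (NR_BG_LOCKS-1),
instead of a modulo operation.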