/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include "lock_dlm.h"

static char junk_lvb[GDLM_LVB_SIZE];
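/* junk_lvb is a throwaway value block attached to the NL "hold null" locks
   created below, so the dlm always has somewhere to copy lvb data;
   gdlm_release_all_locks() tests for it so it is never passed to kfree(). */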
static void queue_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	clear_bit(LFL_ACTIVE, &lp->flags);

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->clist, &ls->complete);
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}
static inline void gdlm_ast(void *astarg)
{
	queue_complete(astarg);
}
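/* Note: gdlm_ast() and gdlm_bast() run in the dlm's callback context, so
   they only queue the lock onto a list and wake ls->thread_wait; the actual
   completion and blocking callbacks are processed later by the lock_dlm
   thread. */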
static inline void gdlm_bast(void *astarg, int mode)
{
	struct gdlm_lock *lp = astarg;
	struct gdlm_ls *ls = lp->ls;

	if (!mode) {
		printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
			lp->lockname.ln_type,
			(unsigned long long)lp->lockname.ln_number);
		return;
	}

	spin_lock(&ls->async_lock);
	if (!lp->bast_mode) {
		list_add_tail(&lp->blist, &ls->blocking);
		lp->bast_mode = mode;
	} else if (lp->bast_mode < mode)
		lp->bast_mode = mode;
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}
void gdlm_queue_delayed(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->delayed);
	spin_unlock(&ls->async_lock);
}
/* convert gfs lock-state to dlm lock-mode */

static s16 make_mode(s16 lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	gdlm_assert(0, "unknown LM state %d", lmstate);
	return -1;
}
/* convert dlm lock-mode to gfs lock-state */

s16 gdlm_make_lmstate(s16 dlmmode)
{
	switch (dlmmode) {
	case DLM_LOCK_IV:
	case DLM_LOCK_NL:
		return LM_ST_UNLOCKED;
	case DLM_LOCK_EX:
		return LM_ST_EXCLUSIVE;
	case DLM_LOCK_CW:
		return LM_ST_DEFERRED;
	case DLM_LOCK_PR:
		return LM_ST_SHARED;
	}
	gdlm_assert(0, "unknown DLM mode %d", dlmmode);
	return -1;
}
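/* For reference, the two helpers above implement this mapping (DLM_LOCK_IV,
   the mode of a lock that has never been granted, also reads back as
   unlocked):

	LM_ST_UNLOCKED   <->  DLM_LOCK_NL (and DLM_LOCK_IV)
	LM_ST_SHARED     <->  DLM_LOCK_PR
	LM_ST_DEFERRED   <->  DLM_LOCK_CW
	LM_ST_EXCLUSIVE  <->  DLM_LOCK_EX
*/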
/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
	s16 cur = make_mode(cur_state);
	if (lp->cur != DLM_LOCK_IV)
		gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
}
static inline unsigned int make_flags(struct gdlm_lock *lp,
				      unsigned int gfs_flags,
				      s16 cur, s16 req)
{
	unsigned int lkf = 0;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
	}

	if (lp->lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;

		/* Conversion deadlock avoidance by DLM */

		if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
		    !(lkf & DLM_LKF_NOQUEUE) &&
		    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
			lkf |= DLM_LKF_CONVDEADLK;
	}

	if (lp->lvb)
		lkf |= DLM_LKF_VALBLK;

	return lkf;
}
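/* A worked example of the translation above: an LM_FLAG_TRY_1CB request on
   a lock that already has a dlm lock id and an attached lvb yields

	lkf = DLM_LKF_NOQUEUE | DLM_LKF_NOQUEUEBAST |
	      DLM_LKF_CONVERT | DLM_LKF_VALBLK;

   DLM_LKF_CONVDEADLK is deliberately left out here because a NOQUEUE
   conversion fails with -EAGAIN instead of waiting, so it cannot take part
   in a conversion deadlock. */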
/* make_strname - convert GFS lock numbers to a string */

static inline void make_strname(const struct lm_lockname *lockname,
				struct gdlm_strname *str)
{
	sprintf(str->name, "%8x%16llx", lockname->ln_type,
		(unsigned long long)lockname->ln_number);
	str->namelen = GDLM_STRNAME_BYTES;
}
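/* The resource name is a fixed-width hex rendering of the lockname; for
   example ln_type 2, ln_number 0x1017 becomes "       2            1017"
   (8 + 16 space-padded characters).  namelen is always GDLM_STRNAME_BYTES
   rather than the string length, so names compare at full width. */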
static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
			  struct gdlm_lock **lpp)
{
	struct gdlm_lock *lp;

	lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;

	lp->lockname = *name;
	make_strname(name, &lp->strname);
	lp->ls = ls;
	lp->cur = DLM_LOCK_IV;
	lp->lvb = NULL;
	lp->hold_null = NULL;
	init_completion(&lp->ast_wait);
	INIT_LIST_HEAD(&lp->clist);
	INIT_LIST_HEAD(&lp->blist);
	INIT_LIST_HEAD(&lp->delay_list);

	spin_lock(&ls->async_lock);
	list_add(&lp->all_list, &ls->all_locks);
	ls->all_locks_count++;
	spin_unlock(&ls->async_lock);

	*lpp = lp;
	return 0;
}
void gdlm_delete_lp(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->clist))
		list_del_init(&lp->clist);
	if (!list_empty(&lp->blist))
		list_del_init(&lp->blist);
	if (!list_empty(&lp->delay_list))
		list_del_init(&lp->delay_list);
	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	list_del_init(&lp->all_list);
	ls->all_locks_count--;
	spin_unlock(&ls->async_lock);

	kfree(lp);
}
int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
		  void **lockp)
{
	struct gdlm_lock *lp;
	int error;

	error = gdlm_create_lp(lockspace, name, &lp);
	if (!error)
		*lockp = lp;
	return error;
}
void gdlm_put_lock(void *lock)
{
	gdlm_delete_lp(lock);
}
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	int error, bast = 1;

	/*
	 * When recovery is in progress, delay lock requests for submission
	 * once recovery is done.  Requests for recovery (NOEXP) and unlocks
	 * can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return LM_OUT_ASYNC;
	}

	/*
	 * Submit the actual lock request.
	 */

	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
			 lp, bast ? gdlm_bast : NULL);

	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		queue_complete(lp);
		error = 0;
	}

	if (error) {
		log_debug("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
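/* gdlm_do_lock() never blocks: a request is either parked on the delayed
   list for resubmission after recovery or handed to dlm_lock(), with the
   result arriving later through gdlm_ast().  The one synchronous-looking
   case, -EAGAIN on a NOQUEUE request, is folded into the same path by
   faking a completion with sb_status set to -EAGAIN. */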
static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int lkf = 0;
	int error;

	set_bit(LFL_DLM_UNLOCK, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	if (lp->lvb)
		lkf = DLM_LKF_VALBLK;

	log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number,
		  lp->lksb.sb_lkid, lp->cur, lkf);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

	if (error) {
		log_debug("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
unsigned int gdlm_lock(void *lock, unsigned int cur_state,
		       unsigned int req_state, unsigned int flags)
{
	struct gdlm_lock *lp = lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (flags & LM_FLAG_NOEXP)
		set_bit(LFL_NOBLOCK, &lp->flags);

	check_cur_state(lp, cur_state);
	lp->req = make_mode(req_state);
	lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

	return gdlm_do_lock(lp);
}
unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
{
	struct gdlm_lock *lp = lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (lp->cur == DLM_LOCK_IV)
		return 0;
	return gdlm_do_unlock(lp);
}
void gdlm_cancel(void *lock)
{
	struct gdlm_lock *lp = lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		queue_complete(lp);
		return;
	}

	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_info("gdlm_cancel skip %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number, lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
		 lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}
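/* Cancellation takes one of three paths: a request still sitting on the
   delayed list is completed locally with LFL_CANCEL set; a request that is
   not active in the dlm (or is already being unlocked) is skipped; and a
   request genuinely blocked in the dlm is cancelled with DLM_LKF_CANCEL,
   LFL_DLM_CANCEL being cleared again if the dlm reports -EBUSY. */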
static int gdlm_add_lvb(struct gdlm_lock *lp)
{
	char *lvb;

	lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL);
	if (!lvb)
		return -ENOMEM;

	lp->lvb = lvb;
	lp->lksb.sb_lvbptr = lvb;
	return 0;
}
static void gdlm_del_lvb(struct gdlm_lock *lp)
{
	kfree(lp->lvb);
	lp->lvb = NULL;
	lp->lksb.sb_lvbptr = NULL;
}
/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk(KERN_INFO "lock_dlm: lvb already held\n");
		return -EINVAL;
	}

	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);
	set_bit(LFL_INLOCK, &lpn->flags);

	init_completion(&lpn->ast_wait);
	gdlm_do_lock(lpn);
	wait_for_completion(&lpn->ast_wait);
	error = lpn->lksb.sb_status;
	if (error) {
		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
		       error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}
/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to
   get the completion) because gfs may call unhold_lvb() during a callback
   (from the context of a lock_dlm thread) which could cause a deadlock since
   the other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = lp->hold_null;

	gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	lpn->lksb.sb_lvbptr = NULL;
	lpn->lvb = NULL;
	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
	gdlm_do_unlock(lpn);
	lp->hold_null = NULL;
}
/* Acquire a NL lock because gfs requires the value block to remain
   intact on the resource while the lvb is "held" even if it's holding no
   locks on the resource. */

int gdlm_hold_lvb(void *lock, char **lvbp)
{
	struct gdlm_lock *lp = lock;
	int error;

	error = gdlm_add_lvb(lp);
	if (error)
		return error;

	*lvbp = lp->lvb;

	error = hold_null_lock(lp);
	if (error)
		gdlm_del_lvb(lp);

	return error;
}
void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *lp = lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}
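/* Holding an lvb is thus a pair of operations: gdlm_add_lvb() attaches a
   buffer to the lock's lksb and hold_null_lock() takes a NL lock on the
   same resource so the dlm keeps the resource, and with it the value block,
   alive even while gfs holds no real lock there; gdlm_unhold_lvb() undoes
   both. */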
void gdlm_submit_delayed(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
		list_del_init(&lp->delay_list);
		list_add_tail(&lp->delay_list, &ls->submit);
	}
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}
int gdlm_release_all_locks(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;
	int count = 0;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
		list_del_init(&lp->all_list);
		if (lp->lvb && lp->lvb != junk_lvb)
			kfree(lp->lvb);
		kfree(lp);
		count++;
	}
	spin_unlock(&ls->async_lock);

	return count;
}