dlm: reject messages from non-members
[linux-2.6/mini2440.git] / fs / dlm / lock.c
1 /******************************************************************************
2 *******************************************************************************
3 **
4 ** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
5 **
6 ** This copyrighted material is made available to anyone wishing to use,
7 ** modify, copy, or redistribute it subject to the terms and conditions
8 ** of the GNU General Public License v.2.
9 **
10 *******************************************************************************
11 ******************************************************************************/
13 /* Central locking logic has four stages:
15 dlm_lock()
16 dlm_unlock()
18 request_lock(ls, lkb)
19 convert_lock(ls, lkb)
20 unlock_lock(ls, lkb)
21 cancel_lock(ls, lkb)
23 _request_lock(r, lkb)
24 _convert_lock(r, lkb)
25 _unlock_lock(r, lkb)
26 _cancel_lock(r, lkb)
28 do_request(r, lkb)
29 do_convert(r, lkb)
30 do_unlock(r, lkb)
31 do_cancel(r, lkb)
33 Stage 1 (lock, unlock) is mainly about checking input args and
34 splitting into one of the four main operations:
36 dlm_lock = request_lock
37 dlm_lock+CONVERT = convert_lock
38 dlm_unlock = unlock_lock
39 dlm_unlock+CANCEL = cancel_lock
41 Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
42 provided to the next stage.
44 Stage 3, _xxxx_lock(), determines if the operation is local or remote.
45 When remote, it calls send_xxxx(), when local it calls do_xxxx().
47 Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
48 given rsb and lkb and queues callbacks.
50 For remote operations, send_xxxx() results in the corresponding do_xxxx()
51 function being executed on the remote node. The connecting send/receive
52 calls on local (L) and remote (R) nodes:
54 L: send_xxxx() -> R: receive_xxxx()
55 R: do_xxxx()
56 L: receive_xxxx_reply() <- R: send_xxxx_reply()
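   Added illustration (not part of the original comment): a caller-level
   sketch of how the four operations are reached through the kernel API.
   Prototypes are abbreviated here; see include/linux/dlm.h for the
   authoritative ones.

   dlm_lock(ls, mode, &lksb, 0, name, len, 0, ast, arg, bast)
                                                  = request_lock()
   dlm_lock(ls, mode, &lksb, DLM_LKF_CONVERT, name, len, 0, ast, arg, bast)
                                                  = convert_lock()
   dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, arg)
                                                  = unlock_lock()
   dlm_unlock(ls, lksb.sb_lkid, DLM_LKF_CANCEL, &lksb, arg)
                                                  = cancel_lock()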
58 #include <linux/types.h>
59 #include "dlm_internal.h"
60 #include <linux/dlm_device.h>
61 #include "memory.h"
62 #include "lowcomms.h"
63 #include "requestqueue.h"
64 #include "util.h"
65 #include "dir.h"
66 #include "member.h"
67 #include "lockspace.h"
68 #include "ast.h"
69 #include "lock.h"
70 #include "rcom.h"
71 #include "recover.h"
72 #include "lvb_table.h"
73 #include "user.h"
74 #include "config.h"
76 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
77 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
78 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
79 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
80 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
81 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
82 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
83 static int send_remove(struct dlm_rsb *r);
84 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
85 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
86 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
87 struct dlm_message *ms);
88 static int receive_extralen(struct dlm_message *ms);
89 static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
90 static void del_timeout(struct dlm_lkb *lkb);
93 * Lock compatibility matrix - thanks Steve
94 * UN = Unlocked state. Not really a state, used as a flag
95 * PD = Padding. Used to make the matrix a nice power of two in size
96 * Other states are the same as the VMS DLM.
97 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
100 static const int __dlm_compat_matrix[8][8] = {
101 /* UN NL CR CW PR PW EX PD */
102 {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
103 {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
104 {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
105 {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
106 {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
107 {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
108 {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
109 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
113 * This defines the direction of transfer of LVB data.
114 * Granted mode is the row; requested mode is the column.
115 * Usage: matrix[grmode+1][rqmode+1]
116 * 1 = LVB is returned to the caller
117 * 0 = LVB is written to the resource
118 * -1 = nothing happens to the LVB
121 const int dlm_lvb_operations[8][8] = {
122 /* UN NL CR CW PR PW EX PD*/
123 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
124 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
125 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
126 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
127 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
128 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
129 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
130 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
133 #define modes_compat(gr, rq) \
134 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
136 int dlm_modes_compat(int mode1, int mode2)
138 return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
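/* Usage sketch for the two matrices above (added illustration, never
   called from real paths): PR coexists with PR but not with EX, and a
   PR -> EX convert returns the LVB to the caller (lvb operation 1). */

static void __maybe_unused example_matrix_usage(void)
{
	int compat = dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR);	/* 1 */
	int clash = dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX);	/* 0 */
	int lvb_op = dlm_lvb_operations[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1];

	(void)compat; (void)clash; (void)lvb_op;	/* lvb_op == 1 */
}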
142 * Compatibility matrix for conversions with QUECVT set.
143 * Granted mode is the row; requested mode is the column.
144 * Usage: matrix[grmode+1][rqmode+1]
147 static const int __quecvt_compat_matrix[8][8] = {
148 /* UN NL CR CW PR PW EX PD */
149 {0, 0, 0, 0, 0, 0, 0, 0}, /* UN */
150 {0, 0, 1, 1, 1, 1, 1, 0}, /* NL */
151 {0, 0, 0, 1, 1, 1, 1, 0}, /* CR */
152 {0, 0, 0, 0, 1, 1, 1, 0}, /* CW */
153 {0, 0, 0, 1, 0, 1, 1, 0}, /* PR */
154 {0, 0, 0, 0, 0, 0, 1, 0}, /* PW */
155 {0, 0, 0, 0, 0, 0, 0, 0}, /* EX */
156 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
159 void dlm_print_lkb(struct dlm_lkb *lkb)
161 printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
162 " status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
163 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
164 lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
165 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
168 void dlm_print_rsb(struct dlm_rsb *r)
170 printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
171 r->res_nodeid, r->res_flags, r->res_first_lkid,
172 r->res_recover_locks_count, r->res_name);
175 void dlm_dump_rsb(struct dlm_rsb *r)
177 struct dlm_lkb *lkb;
179 dlm_print_rsb(r);
181 printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
182 list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
183 printk(KERN_ERR "rsb lookup list\n");
184 list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
185 dlm_print_lkb(lkb);
186 printk(KERN_ERR "rsb grant queue:\n");
187 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
188 dlm_print_lkb(lkb);
189 printk(KERN_ERR "rsb convert queue:\n");
190 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
191 dlm_print_lkb(lkb);
192 printk(KERN_ERR "rsb wait queue:\n");
193 list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
194 dlm_print_lkb(lkb);
197 /* Threads cannot use the lockspace while it's being recovered */
199 static inline void dlm_lock_recovery(struct dlm_ls *ls)
201 down_read(&ls->ls_in_recovery);
204 void dlm_unlock_recovery(struct dlm_ls *ls)
206 up_read(&ls->ls_in_recovery);
209 int dlm_lock_recovery_try(struct dlm_ls *ls)
211 return down_read_trylock(&ls->ls_in_recovery);
214 static inline int can_be_queued(struct dlm_lkb *lkb)
216 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
219 static inline int force_blocking_asts(struct dlm_lkb *lkb)
221 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
224 static inline int is_demoted(struct dlm_lkb *lkb)
226 return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
229 static inline int is_altmode(struct dlm_lkb *lkb)
231 return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
234 static inline int is_granted(struct dlm_lkb *lkb)
236 return (lkb->lkb_status == DLM_LKSTS_GRANTED);
239 static inline int is_remote(struct dlm_rsb *r)
241 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
242 return !!r->res_nodeid;
245 static inline int is_process_copy(struct dlm_lkb *lkb)
247 return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
250 static inline int is_master_copy(struct dlm_lkb *lkb)
252 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
253 DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
254 return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
257 static inline int middle_conversion(struct dlm_lkb *lkb)
259 if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
260 (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
261 return 1;
262 return 0;
265 static inline int down_conversion(struct dlm_lkb *lkb)
267 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
270 static inline int is_overlap_unlock(struct dlm_lkb *lkb)
272 return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
275 static inline int is_overlap_cancel(struct dlm_lkb *lkb)
277 return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
280 static inline int is_overlap(struct dlm_lkb *lkb)
282 return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
283 DLM_IFL_OVERLAP_CANCEL));
286 static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
288 if (is_master_copy(lkb))
289 return;
291 del_timeout(lkb);
293 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
295 /* if the operation was a cancel, return -DLM_ECANCEL; if a
296 timeout caused the cancel, return -ETIMEDOUT */
297 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
298 lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
299 rv = -ETIMEDOUT;
302 if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
303 lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
304 rv = -EDEADLK;
307 lkb->lkb_lksb->sb_status = rv;
308 lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
310 dlm_add_ast(lkb, AST_COMP);
313 static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
315 queue_cast(r, lkb,
316 is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
319 static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
321 if (is_master_copy(lkb))
322 send_bast(r, lkb, rqmode);
323 else {
324 lkb->lkb_bastmode = rqmode;
325 dlm_add_ast(lkb, AST_BAST);
330 * Basic operations on rsb's and lkb's
333 static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
335 struct dlm_rsb *r;
337 r = dlm_allocate_rsb(ls, len);
338 if (!r)
339 return NULL;
341 r->res_ls = ls;
342 r->res_length = len;
343 memcpy(r->res_name, name, len);
344 mutex_init(&r->res_mutex);
346 INIT_LIST_HEAD(&r->res_lookup);
347 INIT_LIST_HEAD(&r->res_grantqueue);
348 INIT_LIST_HEAD(&r->res_convertqueue);
349 INIT_LIST_HEAD(&r->res_waitqueue);
350 INIT_LIST_HEAD(&r->res_root_list);
351 INIT_LIST_HEAD(&r->res_recover_list);
353 return r;
356 static int search_rsb_list(struct list_head *head, char *name, int len,
357 unsigned int flags, struct dlm_rsb **r_ret)
359 struct dlm_rsb *r;
360 int error = 0;
362 list_for_each_entry(r, head, res_hashchain) {
363 if (len == r->res_length && !memcmp(name, r->res_name, len))
364 goto found;
366 return -EBADR;
368 found:
369 if (r->res_nodeid && (flags & R_MASTER))
370 error = -ENOTBLK;
371 *r_ret = r;
372 return error;
375 static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
376 unsigned int flags, struct dlm_rsb **r_ret)
378 struct dlm_rsb *r;
379 int error;
381 error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
382 if (!error) {
383 kref_get(&r->res_ref);
384 goto out;
386 error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
387 if (error)
388 goto out;
390 list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
392 if (dlm_no_directory(ls))
393 goto out;
395 if (r->res_nodeid == -1) {
396 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
397 r->res_first_lkid = 0;
398 } else if (r->res_nodeid > 0) {
399 rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
400 r->res_first_lkid = 0;
401 } else {
402 DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
403 DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
405 out:
406 *r_ret = r;
407 return error;
410 static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
411 unsigned int flags, struct dlm_rsb **r_ret)
413 int error;
414 write_lock(&ls->ls_rsbtbl[b].lock);
415 error = _search_rsb(ls, name, len, b, flags, r_ret);
416 write_unlock(&ls->ls_rsbtbl[b].lock);
417 return error;
421 * Find rsb in rsbtbl and potentially create/add one
423 * Delaying the release of rsb's has a similar benefit to applications keeping
424 * NL locks on an rsb, but without the guarantee that the cached master value
425 * will still be valid when the rsb is reused. Apps aren't always smart enough
426 * to keep NL locks on an rsb that they may lock again shortly; this can lead
427 * to excessive master lookups and removals if we don't delay the release.
429 * Searching for an rsb means looking through both the normal list and toss
430 * list. When found on the toss list the rsb is moved to the normal list with
431 * ref count of 1; when found on normal list the ref count is incremented.
434 static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
435 unsigned int flags, struct dlm_rsb **r_ret)
437 struct dlm_rsb *r, *tmp;
438 uint32_t hash, bucket;
439 int error = 0;
441 if (dlm_no_directory(ls))
442 flags |= R_CREATE;
444 hash = jhash(name, namelen, 0);
445 bucket = hash & (ls->ls_rsbtbl_size - 1);
447 error = search_rsb(ls, name, namelen, bucket, flags, &r);
448 if (!error)
449 goto out;
451 if (error == -EBADR && !(flags & R_CREATE))
452 goto out;
454 /* the rsb was found but wasn't a master copy */
455 if (error == -ENOTBLK)
456 goto out;
458 error = -ENOMEM;
459 r = create_rsb(ls, name, namelen);
460 if (!r)
461 goto out;
463 r->res_hash = hash;
464 r->res_bucket = bucket;
465 r->res_nodeid = -1;
466 kref_init(&r->res_ref);
468 /* With no directory, the master can be set immediately */
469 if (dlm_no_directory(ls)) {
470 int nodeid = dlm_dir_nodeid(r);
471 if (nodeid == dlm_our_nodeid())
472 nodeid = 0;
473 r->res_nodeid = nodeid;
476 write_lock(&ls->ls_rsbtbl[bucket].lock);
477 error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
478 if (!error) {
479 write_unlock(&ls->ls_rsbtbl[bucket].lock);
480 dlm_free_rsb(r);
481 r = tmp;
482 goto out;
484 list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
485 write_unlock(&ls->ls_rsbtbl[bucket].lock);
486 error = 0;
487 out:
488 *r_ret = r;
489 return error;
492 int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
493 unsigned int flags, struct dlm_rsb **r_ret)
495 return find_rsb(ls, name, namelen, flags, r_ret);
498 /* This is only called to add a reference when the code already holds
499 a valid reference to the rsb, so there's no need for locking. */
501 static inline void hold_rsb(struct dlm_rsb *r)
503 kref_get(&r->res_ref);
506 void dlm_hold_rsb(struct dlm_rsb *r)
508 hold_rsb(r);
511 static void toss_rsb(struct kref *kref)
513 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
514 struct dlm_ls *ls = r->res_ls;
516 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
517 kref_init(&r->res_ref);
518 list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
519 r->res_toss_time = jiffies;
520 if (r->res_lvbptr) {
521 dlm_free_lvb(r->res_lvbptr);
522 r->res_lvbptr = NULL;
526 /* When all references to the rsb are gone it's transferred to
527 the tossed list for later disposal. */
529 static void put_rsb(struct dlm_rsb *r)
531 struct dlm_ls *ls = r->res_ls;
532 uint32_t bucket = r->res_bucket;
534 write_lock(&ls->ls_rsbtbl[bucket].lock);
535 kref_put(&r->res_ref, toss_rsb);
536 write_unlock(&ls->ls_rsbtbl[bucket].lock);
539 void dlm_put_rsb(struct dlm_rsb *r)
541 put_rsb(r);
544 /* See comment for unhold_lkb */
546 static void unhold_rsb(struct dlm_rsb *r)
548 int rv;
549 rv = kref_put(&r->res_ref, toss_rsb);
550 DLM_ASSERT(!rv, dlm_dump_rsb(r););
553 static void kill_rsb(struct kref *kref)
555 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
557 /* All work is done after the return from kref_put() so we
558 can release the write_lock before the remove and free. */
560 DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
561 DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
562 DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
563 DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
564 DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
565 DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
568 /* Attaching/detaching lkb's from rsb's is for rsb reference counting.
569 The rsb must exist as long as any lkb's for it do. */
571 static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
573 hold_rsb(r);
574 lkb->lkb_resource = r;
577 static void detach_lkb(struct dlm_lkb *lkb)
579 if (lkb->lkb_resource) {
580 put_rsb(lkb->lkb_resource);
581 lkb->lkb_resource = NULL;
585 static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
587 struct dlm_lkb *lkb, *tmp;
588 uint32_t lkid = 0;
589 uint16_t bucket;
591 lkb = dlm_allocate_lkb(ls);
592 if (!lkb)
593 return -ENOMEM;
595 lkb->lkb_nodeid = -1;
596 lkb->lkb_grmode = DLM_LOCK_IV;
597 kref_init(&lkb->lkb_ref);
598 INIT_LIST_HEAD(&lkb->lkb_ownqueue);
599 INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
600 INIT_LIST_HEAD(&lkb->lkb_time_list);
602 get_random_bytes(&bucket, sizeof(bucket));
603 bucket &= (ls->ls_lkbtbl_size - 1);
605 write_lock(&ls->ls_lkbtbl[bucket].lock);
607 /* counter can roll over so we must verify lkid is not in use */
609 while (lkid == 0) {
610 lkid = (bucket << 16) | ls->ls_lkbtbl[bucket].counter++;
612 list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
613 lkb_idtbl_list) {
614 if (tmp->lkb_id != lkid)
615 continue;
616 lkid = 0;
617 break;
621 lkb->lkb_id = lkid;
622 list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
623 write_unlock(&ls->ls_lkbtbl[bucket].lock);
625 *lkb_ret = lkb;
626 return 0;
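/* Added illustration: the lkid built above packs the lkbtbl bucket into
   the top 16 bits and the per-bucket counter into the low 16 bits, which
   is why __find_lkb()/find_lkb() below recover the bucket as lkid >> 16,
   e.g. lkid 0x00030001 -> bucket 3, counter 1. */

static inline uint16_t __maybe_unused example_lkid_to_bucket(uint32_t lkid)
{
	return lkid >> 16;
}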
629 static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
631 struct dlm_lkb *lkb;
632 uint16_t bucket = (lkid >> 16);
634 list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
635 if (lkb->lkb_id == lkid)
636 return lkb;
638 return NULL;
641 static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
643 struct dlm_lkb *lkb;
644 uint16_t bucket = (lkid >> 16);
646 if (bucket >= ls->ls_lkbtbl_size)
647 return -EBADSLT;
649 read_lock(&ls->ls_lkbtbl[bucket].lock);
650 lkb = __find_lkb(ls, lkid);
651 if (lkb)
652 kref_get(&lkb->lkb_ref);
653 read_unlock(&ls->ls_lkbtbl[bucket].lock);
655 *lkb_ret = lkb;
656 return lkb ? 0 : -ENOENT;
659 static void kill_lkb(struct kref *kref)
661 struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
663 /* All work is done after the return from kref_put() so we
664 can release the write_lock before the detach_lkb */
666 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
669 /* __put_lkb() is used when an lkb may not have an rsb attached to
670 it so we need to provide the lockspace explicitly */
672 static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
674 uint16_t bucket = (lkb->lkb_id >> 16);
676 write_lock(&ls->ls_lkbtbl[bucket].lock);
677 if (kref_put(&lkb->lkb_ref, kill_lkb)) {
678 list_del(&lkb->lkb_idtbl_list);
679 write_unlock(&ls->ls_lkbtbl[bucket].lock);
681 detach_lkb(lkb);
683 /* for local/process lkbs, lvbptr points to caller's lksb */
684 if (lkb->lkb_lvbptr && is_master_copy(lkb))
685 dlm_free_lvb(lkb->lkb_lvbptr);
686 dlm_free_lkb(lkb);
687 return 1;
688 } else {
689 write_unlock(&ls->ls_lkbtbl[bucket].lock);
690 return 0;
694 int dlm_put_lkb(struct dlm_lkb *lkb)
696 struct dlm_ls *ls;
698 DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
699 DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
701 ls = lkb->lkb_resource->res_ls;
702 return __put_lkb(ls, lkb);
705 /* This is only called to add a reference when the code already holds
706 a valid reference to the lkb, so there's no need for locking. */
708 static inline void hold_lkb(struct dlm_lkb *lkb)
710 kref_get(&lkb->lkb_ref);
713 /* This is called when we need to remove a reference and are certain
714 it's not the last ref. e.g. del_lkb is always called between a
715 find_lkb/put_lkb and is always the inverse of a previous add_lkb.
716 put_lkb would work fine, but would involve unnecessary locking */
718 static inline void unhold_lkb(struct dlm_lkb *lkb)
720 int rv;
721 rv = kref_put(&lkb->lkb_ref, kill_lkb);
722 DLM_ASSERT(!rv, dlm_print_lkb(lkb););
725 static void lkb_add_ordered(struct list_head *new, struct list_head *head,
726 int mode)
728 struct dlm_lkb *lkb = NULL;
730 list_for_each_entry(lkb, head, lkb_statequeue)
731 if (lkb->lkb_rqmode < mode)
732 break;
734 if (!lkb)
735 list_add_tail(new, head);
736 else
737 __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
740 /* add/remove lkb to rsb's grant/convert/wait queue */
742 static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
744 kref_get(&lkb->lkb_ref);
746 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
748 lkb->lkb_status = status;
750 switch (status) {
751 case DLM_LKSTS_WAITING:
752 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
753 list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
754 else
755 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
756 break;
757 case DLM_LKSTS_GRANTED:
758 /* convention says granted locks kept in order of grmode */
759 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
760 lkb->lkb_grmode);
761 break;
762 case DLM_LKSTS_CONVERT:
763 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
764 list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
765 else
766 list_add_tail(&lkb->lkb_statequeue,
767 &r->res_convertqueue);
768 break;
769 default:
770 DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
774 static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
776 lkb->lkb_status = 0;
777 list_del(&lkb->lkb_statequeue);
778 unhold_lkb(lkb);
781 static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
783 hold_lkb(lkb);
784 del_lkb(r, lkb);
785 add_lkb(r, lkb, sts);
786 unhold_lkb(lkb);
789 static int msg_reply_type(int mstype)
791 switch (mstype) {
792 case DLM_MSG_REQUEST:
793 return DLM_MSG_REQUEST_REPLY;
794 case DLM_MSG_CONVERT:
795 return DLM_MSG_CONVERT_REPLY;
796 case DLM_MSG_UNLOCK:
797 return DLM_MSG_UNLOCK_REPLY;
798 case DLM_MSG_CANCEL:
799 return DLM_MSG_CANCEL_REPLY;
800 case DLM_MSG_LOOKUP:
801 return DLM_MSG_LOOKUP_REPLY;
803 return -1;
806 /* add/remove lkb from global waiters list of lkb's waiting for
807 a reply from a remote node */
809 static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
811 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
812 int error = 0;
814 mutex_lock(&ls->ls_waiters_mutex);
816 if (is_overlap_unlock(lkb) ||
817 (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
818 error = -EINVAL;
819 goto out;
822 if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
823 switch (mstype) {
824 case DLM_MSG_UNLOCK:
825 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
826 break;
827 case DLM_MSG_CANCEL:
828 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
829 break;
830 default:
831 error = -EBUSY;
832 goto out;
834 lkb->lkb_wait_count++;
835 hold_lkb(lkb);
837 log_debug(ls, "add overlap %x cur %d new %d count %d flags %x",
838 lkb->lkb_id, lkb->lkb_wait_type, mstype,
839 lkb->lkb_wait_count, lkb->lkb_flags);
840 goto out;
843 DLM_ASSERT(!lkb->lkb_wait_count,
844 dlm_print_lkb(lkb);
845 printk("wait_count %d\n", lkb->lkb_wait_count););
847 lkb->lkb_wait_count++;
848 lkb->lkb_wait_type = mstype;
849 hold_lkb(lkb);
850 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
851 out:
852 if (error)
853 log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s",
854 lkb->lkb_id, error, lkb->lkb_flags, mstype,
855 lkb->lkb_wait_type, lkb->lkb_resource->res_name);
856 mutex_unlock(&ls->ls_waiters_mutex);
857 return error;
860 /* We clear the RESEND flag because we might be taking an lkb off the waiters
861 list as part of process_requestqueue (e.g. a lookup that has an optimized
862 request reply on the requestqueue) between dlm_recover_waiters_pre() which
863 set RESEND and dlm_recover_waiters_post() */
865 static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
867 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
868 int overlap_done = 0;
870 if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
871 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
872 overlap_done = 1;
873 goto out_del;
876 if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
877 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
878 overlap_done = 1;
879 goto out_del;
882 /* N.B. type of reply may not always correspond to type of original
883 msg due to lookup->request optimization, verify others? */
885 if (lkb->lkb_wait_type) {
886 lkb->lkb_wait_type = 0;
887 goto out_del;
890 log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d",
891 lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type);
892 return -1;
894 out_del:
895 /* the force-unlock/cancel has completed and we haven't received a reply
896 to the op that was in progress prior to the unlock/cancel; we
897 give up on any reply to the earlier op. FIXME: not sure when/how
898 this would happen */
900 if (overlap_done && lkb->lkb_wait_type) {
901 log_error(ls, "remove_from_waiters %x reply %d give up on %d",
902 lkb->lkb_id, mstype, lkb->lkb_wait_type);
903 lkb->lkb_wait_count--;
904 lkb->lkb_wait_type = 0;
907 DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
909 lkb->lkb_flags &= ~DLM_IFL_RESEND;
910 lkb->lkb_wait_count--;
911 if (!lkb->lkb_wait_count)
912 list_del_init(&lkb->lkb_wait_reply);
913 unhold_lkb(lkb);
914 return 0;
917 static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
919 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
920 int error;
922 mutex_lock(&ls->ls_waiters_mutex);
923 error = _remove_from_waiters(lkb, mstype);
924 mutex_unlock(&ls->ls_waiters_mutex);
925 return error;
928 /* Handles situations where we might be processing a "fake" or "stub" reply, in
929 which case we must not take waiters_mutex again. */
931 static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
933 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
934 int error;
936 if (ms != &ls->ls_stub_ms)
937 mutex_lock(&ls->ls_waiters_mutex);
938 error = _remove_from_waiters(lkb, ms->m_type);
939 if (ms != &ls->ls_stub_ms)
940 mutex_unlock(&ls->ls_waiters_mutex);
941 return error;
944 static void dir_remove(struct dlm_rsb *r)
946 int to_nodeid;
948 if (dlm_no_directory(r->res_ls))
949 return;
951 to_nodeid = dlm_dir_nodeid(r);
952 if (to_nodeid != dlm_our_nodeid())
953 send_remove(r);
954 else
955 dlm_dir_remove_entry(r->res_ls, to_nodeid,
956 r->res_name, r->res_length);
959 /* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
960 found since they are in order of newest to oldest? */
962 static int shrink_bucket(struct dlm_ls *ls, int b)
964 struct dlm_rsb *r;
965 int count = 0, found;
967 for (;;) {
968 found = 0;
969 write_lock(&ls->ls_rsbtbl[b].lock);
970 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
971 res_hashchain) {
972 if (!time_after_eq(jiffies, r->res_toss_time +
973 dlm_config.ci_toss_secs * HZ))
974 continue;
975 found = 1;
976 break;
979 if (!found) {
980 write_unlock(&ls->ls_rsbtbl[b].lock);
981 break;
984 if (kref_put(&r->res_ref, kill_rsb)) {
985 list_del(&r->res_hashchain);
986 write_unlock(&ls->ls_rsbtbl[b].lock);
988 if (is_master(r))
989 dir_remove(r);
990 dlm_free_rsb(r);
991 count++;
992 } else {
993 write_unlock(&ls->ls_rsbtbl[b].lock);
994 log_error(ls, "tossed rsb in use %s", r->res_name);
998 return count;
1001 void dlm_scan_rsbs(struct dlm_ls *ls)
1003 int i;
1005 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
1006 shrink_bucket(ls, i);
1007 if (dlm_locking_stopped(ls))
1008 break;
1009 cond_resched();
1013 static void add_timeout(struct dlm_lkb *lkb)
1015 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1017 if (is_master_copy(lkb)) {
1018 lkb->lkb_timestamp = jiffies;
1019 return;
1022 if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
1023 !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1024 lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
1025 goto add_it;
1027 if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
1028 goto add_it;
1029 return;
1031 add_it:
1032 DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
1033 mutex_lock(&ls->ls_timeout_mutex);
1034 hold_lkb(lkb);
1035 lkb->lkb_timestamp = jiffies;
1036 list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
1037 mutex_unlock(&ls->ls_timeout_mutex);
1040 static void del_timeout(struct dlm_lkb *lkb)
1042 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
1044 mutex_lock(&ls->ls_timeout_mutex);
1045 if (!list_empty(&lkb->lkb_time_list)) {
1046 list_del_init(&lkb->lkb_time_list);
1047 unhold_lkb(lkb);
1049 mutex_unlock(&ls->ls_timeout_mutex);
1052 /* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
1053 lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex
1054 and then lock rsb because of lock ordering in add_timeout. We may need
1055 to specify some special timeout-related bits in the lkb that are just to
1056 be accessed under the timeout_mutex. */
1058 void dlm_scan_timeout(struct dlm_ls *ls)
1060 struct dlm_rsb *r;
1061 struct dlm_lkb *lkb;
1062 int do_cancel, do_warn;
1064 for (;;) {
1065 if (dlm_locking_stopped(ls))
1066 break;
1068 do_cancel = 0;
1069 do_warn = 0;
1070 mutex_lock(&ls->ls_timeout_mutex);
1071 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
1073 if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
1074 time_after_eq(jiffies, lkb->lkb_timestamp +
1075 lkb->lkb_timeout_cs * HZ/100))
1076 do_cancel = 1;
1078 if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
1079 time_after_eq(jiffies, lkb->lkb_timestamp +
1080 dlm_config.ci_timewarn_cs * HZ/100))
1081 do_warn = 1;
1083 if (!do_cancel && !do_warn)
1084 continue;
1085 hold_lkb(lkb);
1086 break;
1088 mutex_unlock(&ls->ls_timeout_mutex);
1090 if (!do_cancel && !do_warn)
1091 break;
1093 r = lkb->lkb_resource;
1094 hold_rsb(r);
1095 lock_rsb(r);
1097 if (do_warn) {
1098 /* clear flag so we only warn once */
1099 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1100 if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
1101 del_timeout(lkb);
1102 dlm_timeout_warn(lkb);
1105 if (do_cancel) {
1106 log_debug(ls, "timeout cancel %x node %d %s",
1107 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1108 lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
1109 lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
1110 del_timeout(lkb);
1111 _cancel_lock(r, lkb);
1114 unlock_rsb(r);
1115 unhold_rsb(r);
1116 dlm_put_lkb(lkb);
1120 /* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
1121 dlm_recoverd before checking/setting ls_recover_begin. */
1123 void dlm_adjust_timeouts(struct dlm_ls *ls)
1125 struct dlm_lkb *lkb;
1126 long adj = jiffies - ls->ls_recover_begin;
1128 ls->ls_recover_begin = 0;
1129 mutex_lock(&ls->ls_timeout_mutex);
1130 list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
1131 lkb->lkb_timestamp += adj;
1132 mutex_unlock(&ls->ls_timeout_mutex);
1135 /* lkb is master or local copy */
1137 static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1139 int b, len = r->res_ls->ls_lvblen;
1141 /* b=1 lvb returned to caller
1142 b=0 lvb written to rsb or invalidated
1143 b=-1 do nothing */
1145 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1147 if (b == 1) {
1148 if (!lkb->lkb_lvbptr)
1149 return;
1151 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1152 return;
1154 if (!r->res_lvbptr)
1155 return;
1157 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
1158 lkb->lkb_lvbseq = r->res_lvbseq;
1160 } else if (b == 0) {
1161 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1162 rsb_set_flag(r, RSB_VALNOTVALID);
1163 return;
1166 if (!lkb->lkb_lvbptr)
1167 return;
1169 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1170 return;
1172 if (!r->res_lvbptr)
1173 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1175 if (!r->res_lvbptr)
1176 return;
1178 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
1179 r->res_lvbseq++;
1180 lkb->lkb_lvbseq = r->res_lvbseq;
1181 rsb_clear_flag(r, RSB_VALNOTVALID);
1184 if (rsb_flag(r, RSB_VALNOTVALID))
1185 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
1188 static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1190 if (lkb->lkb_grmode < DLM_LOCK_PW)
1191 return;
1193 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
1194 rsb_set_flag(r, RSB_VALNOTVALID);
1195 return;
1198 if (!lkb->lkb_lvbptr)
1199 return;
1201 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1202 return;
1204 if (!r->res_lvbptr)
1205 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
1207 if (!r->res_lvbptr)
1208 return;
1210 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
1211 r->res_lvbseq++;
1212 rsb_clear_flag(r, RSB_VALNOTVALID);
1215 /* lkb is process copy (pc) */
1217 static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1218 struct dlm_message *ms)
1220 int b;
1222 if (!lkb->lkb_lvbptr)
1223 return;
1225 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
1226 return;
1228 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
1229 if (b == 1) {
1230 int len = receive_extralen(ms);
1231 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
1232 lkb->lkb_lvbseq = ms->m_lvbseq;
1236 /* Manipulate lkb's on rsb's convert/granted/waiting queues
1237 remove_lock -- used for unlock, removes lkb from granted
1238 revert_lock -- used for cancel, moves lkb from convert to granted
1239 grant_lock -- used for request and convert, adds lkb to granted or
1240 moves lkb from convert or waiting to granted
1242 Each of these is used for master or local copy lkb's. There is
1243 also a _pc() variation used to make the corresponding change on
1244 a process copy (pc) lkb. */
1246 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1248 del_lkb(r, lkb);
1249 lkb->lkb_grmode = DLM_LOCK_IV;
1250 /* this unhold undoes the original ref from create_lkb()
1251 so this leads to the lkb being freed */
1252 unhold_lkb(lkb);
1255 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1257 set_lvb_unlock(r, lkb);
1258 _remove_lock(r, lkb);
1261 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1263 _remove_lock(r, lkb);
1266 /* returns: 0 did nothing
1267 1 moved lock to granted
1268 -1 removed lock */
1270 static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1272 int rv = 0;
1274 lkb->lkb_rqmode = DLM_LOCK_IV;
1276 switch (lkb->lkb_status) {
1277 case DLM_LKSTS_GRANTED:
1278 break;
1279 case DLM_LKSTS_CONVERT:
1280 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1281 rv = 1;
1282 break;
1283 case DLM_LKSTS_WAITING:
1284 del_lkb(r, lkb);
1285 lkb->lkb_grmode = DLM_LOCK_IV;
1286 /* this unhold undoes the original ref from create_lkb()
1287 so this leads to the lkb being freed */
1288 unhold_lkb(lkb);
1289 rv = -1;
1290 break;
1291 default:
1292 log_print("invalid status for revert %d", lkb->lkb_status);
1294 return rv;
1297 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1299 return revert_lock(r, lkb);
1302 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1304 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
1305 lkb->lkb_grmode = lkb->lkb_rqmode;
1306 if (lkb->lkb_status)
1307 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
1308 else
1309 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
1312 lkb->lkb_rqmode = DLM_LOCK_IV;
1315 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1317 set_lvb_lock(r, lkb);
1318 _grant_lock(r, lkb);
1319 lkb->lkb_highbast = 0;
1322 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
1323 struct dlm_message *ms)
1325 set_lvb_lock_pc(r, lkb, ms);
1326 _grant_lock(r, lkb);
1329 /* called by grant_pending_locks() which means an async grant message must
1330 be sent to the requesting node in addition to granting the lock if the
1331 lkb belongs to a remote node. */
1333 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
1335 grant_lock(r, lkb);
1336 if (is_master_copy(lkb))
1337 send_grant(r, lkb);
1338 else
1339 queue_cast(r, lkb, 0);
1342 /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
1343 change the granted/requested modes. We're munging things accordingly in
1344 the process copy.
1345 CONVDEADLK: our grmode may have been forced down to NL to resolve a
1346 conversion deadlock
1347 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
1348 compatible with other granted locks */
1350 static void munge_demoted(struct dlm_lkb *lkb, struct dlm_message *ms)
1352 if (ms->m_type != DLM_MSG_CONVERT_REPLY) {
1353 log_print("munge_demoted %x invalid reply type %d",
1354 lkb->lkb_id, ms->m_type);
1355 return;
1358 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
1359 log_print("munge_demoted %x invalid modes gr %d rq %d",
1360 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
1361 return;
1364 lkb->lkb_grmode = DLM_LOCK_NL;
1367 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
1369 if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
1370 ms->m_type != DLM_MSG_GRANT) {
1371 log_print("munge_altmode %x invalid reply type %d",
1372 lkb->lkb_id, ms->m_type);
1373 return;
1376 if (lkb->lkb_exflags & DLM_LKF_ALTPR)
1377 lkb->lkb_rqmode = DLM_LOCK_PR;
1378 else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
1379 lkb->lkb_rqmode = DLM_LOCK_CW;
1380 else {
1381 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
1382 dlm_print_lkb(lkb);
1386 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
1388 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
1389 lkb_statequeue);
1390 if (lkb->lkb_id == first->lkb_id)
1391 return 1;
1393 return 0;
1396 /* Check if the given lkb conflicts with another lkb on the queue. */
1398 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
1400 struct dlm_lkb *this;
1402 list_for_each_entry(this, head, lkb_statequeue) {
1403 if (this == lkb)
1404 continue;
1405 if (!modes_compat(this, lkb))
1406 return 1;
1408 return 0;
1412 * "A conversion deadlock arises with a pair of lock requests in the converting
1413 * queue for one resource. The granted mode of each lock blocks the requested
1414 * mode of the other lock."
1416 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
1417 * convert queue from being granted, then deadlk/demote lkb.
1419 * Example:
1420 * Granted Queue: empty
1421 * Convert Queue: NL->EX (first lock)
1422 * PR->EX (second lock)
1424 * The first lock can't be granted because of the granted mode of the second
1425 * lock and the second lock can't be granted because it's not first in the
1426 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
1427 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
1428 * flag set and return DEMOTED in the lksb flags.
1430 * Originally, this function detected conv-deadlk in a more limited scope:
1431 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
1432 * - if lkb1 was the first entry in the queue (not just earlier), and was
1433 * blocked by the granted mode of lkb2, and there was nothing on the
1434 * granted queue preventing lkb1 from being granted immediately, i.e.
1435 * lkb2 was the only thing preventing lkb1 from being granted.
1437 * That second condition meant we'd only say there was conv-deadlk if
1438 * resolving it (by demotion) would lead to the first lock on the convert
1439 * queue being granted right away. It allowed conversion deadlocks to exist
1440 * between locks on the convert queue while they couldn't be granted anyway.
1442 * Now, we detect and take action on conversion deadlocks immediately when
1443 * they're created, even if they may not be immediately consequential. If
1444 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
1445 * mode that would prevent lkb1's conversion from being granted, we do a
1446 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
1447 * I think this means that the lkb_is_ahead condition below should always
1448 * be zero, i.e. there will never be conv-deadlk between two locks that are
1449 * both already on the convert queue.
1452 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
1454 struct dlm_lkb *lkb1;
1455 int lkb_is_ahead = 0;
1457 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
1458 if (lkb1 == lkb2) {
1459 lkb_is_ahead = 1;
1460 continue;
1463 if (!lkb_is_ahead) {
1464 if (!modes_compat(lkb2, lkb1))
1465 return 1;
1466 } else {
1467 if (!modes_compat(lkb2, lkb1) &&
1468 !modes_compat(lkb1, lkb2))
1469 return 1;
1472 return 0;
1476 * Return 1 if the lock can be granted, 0 otherwise.
1477 * Also detect and resolve conversion deadlocks.
1479 * lkb is the lock to be granted
1481 * now is 1 if the function is being called in the context of the
1482 * immediate request, it is 0 if called later, after the lock has been
1483 * queued.
1485 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
1488 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1490 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
1493 * 6-10: Version 5.4 introduced an option to address the phenomenon of
1494 * a new request for a NL mode lock being blocked.
1496 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
1497 * request, then it would be granted. In essence, the use of this flag
1498 * tells the Lock Manager to expedite this request by not considering
1499 * what may be in the CONVERTING or WAITING queues... As of this
1500 * writing, the EXPEDITE flag can be used only with new requests for NL
1501 * mode locks. This flag is not valid for conversion requests.
1503 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
1504 * conversion or used with a non-NL requested mode. We also know an
1505 * EXPEDITE request is always granted immediately, so now must always
1506 * be 1. The full condition to grant an expedite request: (now &&
1507 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
1508 * therefore be shortened to just checking the flag.
1511 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
1512 return 1;
1515 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
1516 * added to the remaining conditions.
1519 if (queue_conflict(&r->res_grantqueue, lkb))
1520 goto out;
1523 * 6-3: By default, a conversion request is immediately granted if the
1524 * requested mode is compatible with the modes of all other granted
1525 * locks
1528 if (queue_conflict(&r->res_convertqueue, lkb))
1529 goto out;
1532 * 6-5: But the default algorithm for deciding whether to grant or
1533 * queue conversion requests does not by itself guarantee that such
1534 * requests are serviced on a "first come first serve" basis. This, in
1535 * turn, can lead to a phenomenon known as "indefinite postponement".
1537 * 6-7: This issue is dealt with by using the optional QUECVT flag with
1538 * the system service employed to request a lock conversion. This flag
1539 * forces certain conversion requests to be queued, even if they are
1540 * compatible with the granted modes of other locks on the same
1541 * resource. Thus, the use of this flag results in conversion requests
1542 * being ordered on a "first come first serve" basis.
1544 * DCT: This condition is all about new conversions being able to occur
1545 * "in place" while the lock remains on the granted queue (assuming
1546 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
1547 * doesn't _have_ to go onto the convert queue where it's processed in
1548 * order. The "now" variable is necessary to distinguish converts
1549 * being received and processed for the first time now, because once a
1550 * convert is moved to the conversion queue the condition below applies
1551 * requiring FIFO granting.
1554 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
1555 return 1;
1558 * The NOORDER flag is set to avoid the standard vms rules on grant
1559 * order.
1562 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
1563 return 1;
1566 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
1567 * granted until all other conversion requests ahead of it are granted
1568 * and/or canceled.
1571 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
1572 return 1;
1575 * 6-4: By default, a new request is immediately granted only if all
1576 * three of the following conditions are satisfied when the request is
1577 * issued:
1578 * - The queue of ungranted conversion requests for the resource is
1579 * empty.
1580 * - The queue of ungranted new requests for the resource is empty.
1581 * - The mode of the new request is compatible with the most
1582 * restrictive mode of all granted locks on the resource.
1585 if (now && !conv && list_empty(&r->res_convertqueue) &&
1586 list_empty(&r->res_waitqueue))
1587 return 1;
1590 * 6-4: Once a lock request is in the queue of ungranted new requests,
1591 * it cannot be granted until the queue of ungranted conversion
1592 * requests is empty, all ungranted new requests ahead of it are
1593 * granted and/or canceled, and it is compatible with the granted mode
1594 * of the most restrictive lock granted on the resource.
1597 if (!now && !conv && list_empty(&r->res_convertqueue) &&
1598 first_in_list(lkb, &r->res_waitqueue))
1599 return 1;
1600 out:
1601 return 0;
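/* Worked example for the rules above (added illustration): on a resource
   holding one granted EX lock, a new PR request fails queue_conflict()
   against the grant queue and is queued; a new NL request carrying
   DLM_LKF_EXPEDITE is granted at once (6-11).  After the EX lock is
   unlocked, the queued PR is first_in_list() on the wait queue with an
   empty convert queue, so the !now path grants it (6-4). */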
1604 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
1605 int *err)
1607 int rv;
1608 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
1609 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
1611 if (err)
1612 *err = 0;
1614 rv = _can_be_granted(r, lkb, now);
1615 if (rv)
1616 goto out;
1619 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
1620 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
1621 * cancels one of the locks.
1624 if (is_convert && can_be_queued(lkb) &&
1625 conversion_deadlock_detect(r, lkb)) {
1626 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
1627 lkb->lkb_grmode = DLM_LOCK_NL;
1628 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
1629 } else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
1630 if (err)
1631 *err = -EDEADLK;
1632 else {
1633 log_print("can_be_granted deadlock %x now %d",
1634 lkb->lkb_id, now);
1635 dlm_dump_rsb(r);
1638 goto out;
1642 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
1643 * to grant a request in a mode other than the normal rqmode. It's a
1644 * simple way to provide a big optimization to applications that can
1645 * use them.
1648 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
1649 alt = DLM_LOCK_PR;
1650 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
1651 alt = DLM_LOCK_CW;
1653 if (alt) {
1654 lkb->lkb_rqmode = alt;
1655 rv = _can_be_granted(r, lkb, now);
1656 if (rv)
1657 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
1658 else
1659 lkb->lkb_rqmode = rqmode;
1661 out:
1662 return rv;
1665 /* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
1666 for locks pending on the convert list. Once verified (watch for these
1667 log_prints), we should be able to just call _can_be_granted() and not
1668 bother with the demote/deadlk cases here (and there's no easy way to deal
1669 with a deadlk here, we'd have to generate something like grant_lock with
1670 the deadlk error.) */
1672 /* Returns the highest requested mode of all blocked conversions; sets
1673 cw if there's a blocked conversion to DLM_LOCK_CW. */
1675 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw)
1677 struct dlm_lkb *lkb, *s;
1678 int hi, demoted, quit, grant_restart, demote_restart;
1679 int deadlk;
1681 quit = 0;
1682 restart:
1683 grant_restart = 0;
1684 demote_restart = 0;
1685 hi = DLM_LOCK_IV;
1687 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
1688 demoted = is_demoted(lkb);
1689 deadlk = 0;
1691 if (can_be_granted(r, lkb, 0, &deadlk)) {
1692 grant_lock_pending(r, lkb);
1693 grant_restart = 1;
1694 continue;
1697 if (!demoted && is_demoted(lkb)) {
1698 log_print("WARN: pending demoted %x node %d %s",
1699 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1700 demote_restart = 1;
1701 continue;
1704 if (deadlk) {
1705 log_print("WARN: pending deadlock %x node %d %s",
1706 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
1707 dlm_dump_rsb(r);
1708 continue;
1711 hi = max_t(int, lkb->lkb_rqmode, hi);
1713 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
1714 *cw = 1;
1717 if (grant_restart)
1718 goto restart;
1719 if (demote_restart && !quit) {
1720 quit = 1;
1721 goto restart;
1724 return max_t(int, high, hi);
1727 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw)
1729 struct dlm_lkb *lkb, *s;
1731 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
1732 if (can_be_granted(r, lkb, 0, NULL))
1733 grant_lock_pending(r, lkb);
1734 else {
1735 high = max_t(int, lkb->lkb_rqmode, high);
1736 if (lkb->lkb_rqmode == DLM_LOCK_CW)
1737 *cw = 1;
1741 return high;
1744 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
1745 on either the convert or waiting queue.
1746 high is the largest rqmode of all locks blocked on the convert or
1747 waiting queue. */
1749 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
1751 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
1752 if (gr->lkb_highbast < DLM_LOCK_EX)
1753 return 1;
1754 return 0;
1757 if (gr->lkb_highbast < high &&
1758 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
1759 return 1;
1760 return 0;
1763 static void grant_pending_locks(struct dlm_rsb *r)
1765 struct dlm_lkb *lkb, *s;
1766 int high = DLM_LOCK_IV;
1767 int cw = 0;
1769 DLM_ASSERT(is_master(r), dlm_dump_rsb(r););
1771 high = grant_pending_convert(r, high, &cw);
1772 high = grant_pending_wait(r, high, &cw);
1774 if (high == DLM_LOCK_IV)
1775 return;
1778 * If there are locks left on the wait/convert queue then send blocking
1779 * ASTs to granted locks based on the largest requested mode (high)
1780 * found above.
1783 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
1784 if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) {
1785 if (cw && high == DLM_LOCK_PR)
1786 queue_bast(r, lkb, DLM_LOCK_CW);
1787 else
1788 queue_bast(r, lkb, high);
1789 lkb->lkb_highbast = high;
1794 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
1796 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
1797 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
1798 if (gr->lkb_highbast < DLM_LOCK_EX)
1799 return 1;
1800 return 0;
1803 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
1804 return 1;
1805 return 0;
1808 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1809 struct dlm_lkb *lkb)
1811 struct dlm_lkb *gr;
1813 list_for_each_entry(gr, head, lkb_statequeue) {
1814 if (gr->lkb_bastaddr && modes_require_bast(gr, lkb)) {
1815 queue_bast(r, gr, lkb->lkb_rqmode);
1816 gr->lkb_highbast = lkb->lkb_rqmode;
1821 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
1823 send_bast_queue(r, &r->res_grantqueue, lkb);
1826 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
1828 send_bast_queue(r, &r->res_grantqueue, lkb);
1829 send_bast_queue(r, &r->res_convertqueue, lkb);
1832 /* set_master(r, lkb) -- set the master nodeid of a resource
1834 The purpose of this function is to set the nodeid field in the given
1835 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
1836 known, it can just be copied to the lkb and the function will return
1837 0. If the rsb's nodeid is _not_ known, it needs to be looked up
1838 before it can be copied to the lkb.
1840 When the rsb nodeid is being looked up remotely, the initial lkb
1841 causing the lookup is kept on the ls_waiters list waiting for the
1842 lookup reply. Other lkb's waiting for the same rsb lookup are kept
1843 on the rsb's res_lookup list until the master is verified.
1845 Return values:
1846 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1847 1: the rsb master is not available and the lkb has been placed on
1848 a wait queue
1851 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
1853 struct dlm_ls *ls = r->res_ls;
1854 int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
1856 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
1857 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
1858 r->res_first_lkid = lkb->lkb_id;
1859 lkb->lkb_nodeid = r->res_nodeid;
1860 return 0;
1863 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
1864 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
1865 return 1;
1868 if (r->res_nodeid == 0) {
1869 lkb->lkb_nodeid = 0;
1870 return 0;
1873 if (r->res_nodeid > 0) {
1874 lkb->lkb_nodeid = r->res_nodeid;
1875 return 0;
1878 DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
1880 dir_nodeid = dlm_dir_nodeid(r);
1882 if (dir_nodeid != our_nodeid) {
1883 r->res_first_lkid = lkb->lkb_id;
1884 send_lookup(r, lkb);
1885 return 1;
1888 for (;;) {
1889 /* It's possible for dlm_scand to remove an old rsb for
1890 this same resource from the toss list while we create
1891 a new one, look up the master locally, and find it
1892 already exists, just before dlm_scand does the
1893 dir_remove() on the previous rsb. */
1895 error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
1896 r->res_length, &ret_nodeid);
1897 if (!error)
1898 break;
1899 log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
1900 schedule();
1903 if (ret_nodeid == our_nodeid) {
1904 r->res_first_lkid = 0;
1905 r->res_nodeid = 0;
1906 lkb->lkb_nodeid = 0;
1907 } else {
1908 r->res_first_lkid = lkb->lkb_id;
1909 r->res_nodeid = ret_nodeid;
1910 lkb->lkb_nodeid = ret_nodeid;
1912 return 0;
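/* Added sketch of the caller pattern for set_master(), mirroring what
   _request_lock() does with the return value (never called itself): */

static int __maybe_unused example_set_master_caller(struct dlm_rsb *r,
						    struct dlm_lkb *lkb)
{
	int error = set_master(r, lkb);

	if (error == 1)
		return 0;	/* lkb parked until the lookup reply */

	/* error == 0: lkb->lkb_nodeid is valid; continue with do_xxxx()
	   locally or send_xxxx() to the master node */
	return error;
}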
1915 static void process_lookup_list(struct dlm_rsb *r)
1917 struct dlm_lkb *lkb, *safe;
1919 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
1920 list_del_init(&lkb->lkb_rsb_lookup);
1921 _request_lock(r, lkb);
1922 schedule();
1926 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
1928 static void confirm_master(struct dlm_rsb *r, int error)
1930 struct dlm_lkb *lkb;
1932 if (!r->res_first_lkid)
1933 return;
1935 switch (error) {
1936 case 0:
1937 case -EINPROGRESS:
1938 r->res_first_lkid = 0;
1939 process_lookup_list(r);
1940 break;
1942 case -EAGAIN:
1943 case -EBADR:
1944 case -ENOTBLK:
1945 /* the remote request failed and won't be retried (it was
1946 a NOQUEUE, or has been canceled/unlocked); make a waiting
1947 lkb the first_lkid */
1949 r->res_first_lkid = 0;
1951 if (!list_empty(&r->res_lookup)) {
1952 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
1953 lkb_rsb_lookup);
1954 list_del_init(&lkb->lkb_rsb_lookup);
1955 r->res_first_lkid = lkb->lkb_id;
1956 _request_lock(r, lkb);
1957 } else
1958 r->res_nodeid = -1;
1959 break;
1961 default:
1962 log_error(r->res_ls, "confirm_master unknown error %d", error);
1966 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
1967 int namelen, unsigned long timeout_cs, void *ast,
1968 void *astarg, void *bast, struct dlm_args *args)
1970 int rv = -EINVAL;
1972 /* check for invalid arg usage */
1974 if (mode < 0 || mode > DLM_LOCK_EX)
1975 goto out;
1977 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
1978 goto out;
1980 if (flags & DLM_LKF_CANCEL)
1981 goto out;
1983 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
1984 goto out;
1986 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
1987 goto out;
1989 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
1990 goto out;
1992 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
1993 goto out;
1995 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
1996 goto out;
1998 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
1999 goto out;
2001 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2002 goto out;
2004 if (!ast || !lksb)
2005 goto out;
2007 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2008 goto out;
2010 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2011 goto out;
2013 /* these args will be copied to the lkb in validate_lock_args,
2014 it cannot be done now because when converting locks, fields in
2015 an active lkb cannot be modified before locking the rsb */
2017 args->flags = flags;
2018 args->astaddr = ast;
2019 args->astparam = (long) astarg;
2020 args->bastaddr = bast;
2021 args->timeout = timeout_cs;
2022 args->mode = mode;
2023 args->lksb = lksb;
2024 rv = 0;
2025 out:
2026 return rv;
2029 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2031 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2032 DLM_LKF_FORCEUNLOCK))
2033 return -EINVAL;
2035 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2036 return -EINVAL;
2038 args->flags = flags;
2039 args->astparam = (long) astarg;
2040 return 0;
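/* Added illustration of the flag combinations validated above:
   DLM_LKF_CANCEL and DLM_LKF_FORCEUNLOCK are each valid alone but
   rejected together. */

static void __maybe_unused example_unlock_args(struct dlm_args *args)
{
	int rv;

	rv = set_unlock_args(0, NULL, args);			/* 0 */
	rv = set_unlock_args(DLM_LKF_CANCEL, NULL, args);	/* 0 */
	rv = set_unlock_args(DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK,
			     NULL, args);			/* -EINVAL */
	(void)rv;
}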
2043 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2044 struct dlm_args *args)
2046 int rv = -EINVAL;
2048 if (args->flags & DLM_LKF_CONVERT) {
2049 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2050 goto out;
2052 if (args->flags & DLM_LKF_QUECVT &&
2053 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2054 goto out;
2056 rv = -EBUSY;
2057 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2058 goto out;
2060 if (lkb->lkb_wait_type)
2061 goto out;
2063 if (is_overlap(lkb))
2064 goto out;
2067 lkb->lkb_exflags = args->flags;
2068 lkb->lkb_sbflags = 0;
2069 lkb->lkb_astaddr = args->astaddr;
2070 lkb->lkb_astparam = args->astparam;
2071 lkb->lkb_bastaddr = args->bastaddr;
2072 lkb->lkb_rqmode = args->mode;
2073 lkb->lkb_lksb = args->lksb;
2074 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2075 lkb->lkb_ownpid = (int) current->pid;
2076 lkb->lkb_timeout_cs = args->timeout;
2077 rv = 0;
2078 out:
2079 return rv;
2082 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
2083 for success */
2085 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2086 because there may be a lookup in progress and it's valid to do
2087 cancel/force-unlock on it */
2089 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2091 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2092 int rv = -EINVAL;
2094 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2095 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2096 dlm_print_lkb(lkb);
2097 goto out;
2100 /* an lkb may still exist even though the lock is EOL'ed due to a
2101 cancel, unlock or failed noqueue request; an app can't use these
2102 locks; return the same error as if the lkid had not been found at all */
2104 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2105 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2106 rv = -ENOENT;
2107 goto out;
2110 /* an lkb may be waiting for an rsb lookup to complete where the
2111 lookup was initiated by another lock */
2113 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2114 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2115 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2116 list_del_init(&lkb->lkb_rsb_lookup);
2117 queue_cast(lkb->lkb_resource, lkb,
2118 args->flags & DLM_LKF_CANCEL ?
2119 -DLM_ECANCEL : -DLM_EUNLOCK);
2120 unhold_lkb(lkb); /* undoes create_lkb() */
2121 rv = -EBUSY;
2122 goto out;
2126 /* cancel not allowed with another cancel/unlock in progress */
2128 if (args->flags & DLM_LKF_CANCEL) {
2129 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2130 goto out;
2132 if (is_overlap(lkb))
2133 goto out;
2135 /* don't let scand try to do a cancel */
2136 del_timeout(lkb);
2138 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2139 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2140 rv = -EBUSY;
2141 goto out;
2144 switch (lkb->lkb_wait_type) {
2145 case DLM_MSG_LOOKUP:
2146 case DLM_MSG_REQUEST:
2147 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2148 rv = -EBUSY;
2149 goto out;
2150 case DLM_MSG_UNLOCK:
2151 case DLM_MSG_CANCEL:
2152 goto out;
2154 /* add_to_waiters() will set OVERLAP_CANCEL */
2155 goto out_ok;
2158 /* do we need to allow a force-unlock if there's a normal unlock
2159 already in progress? in what conditions could the normal unlock
2160 fail such that we'd want to send a force-unlock to be sure? */
2162 if (args->flags & DLM_LKF_FORCEUNLOCK) {
2163 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2164 goto out;
2166 if (is_overlap_unlock(lkb))
2167 goto out;
2169 /* don't let scand try to do a cancel */
2170 del_timeout(lkb);
2172 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2173 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2174 rv = -EBUSY;
2175 goto out;
2178 switch (lkb->lkb_wait_type) {
2179 case DLM_MSG_LOOKUP:
2180 case DLM_MSG_REQUEST:
2181 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2182 rv = -EBUSY;
2183 goto out;
2184 case DLM_MSG_UNLOCK:
2185 goto out;
2187 /* add_to_waiters() will set OVERLAP_UNLOCK */
2188 goto out_ok;
2191 /* normal unlock not allowed if there's any op in progress */
2192 rv = -EBUSY;
2193 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2194 goto out;
2196 out_ok:
2197 /* an overlapping op shouldn't blow away exflags from the other op */
2198 lkb->lkb_exflags |= args->flags;
2199 lkb->lkb_sbflags = 0;
2200 lkb->lkb_astparam = args->astparam;
2201 rv = 0;
2202 out:
2203 if (rv)
2204 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
2205 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
2206 args->flags, lkb->lkb_wait_type,
2207 lkb->lkb_resource->res_name);
2208 return rv;
2212 * Four stage 4 varieties:
2213 * do_request(), do_convert(), do_unlock(), do_cancel()
2214 * These are called on the master node for the given lock and
2215 * from the central locking logic.
2218 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2220 int error = 0;
2222 if (can_be_granted(r, lkb, 1, NULL)) {
2223 grant_lock(r, lkb);
2224 queue_cast(r, lkb, 0);
2225 goto out;
2228 if (can_be_queued(lkb)) {
2229 error = -EINPROGRESS;
2230 add_lkb(r, lkb, DLM_LKSTS_WAITING);
2231 send_blocking_asts(r, lkb);
2232 add_timeout(lkb);
2233 goto out;
2236 error = -EAGAIN;
2237 if (force_blocking_asts(lkb))
2238 send_blocking_asts_all(r, lkb);
2239 queue_cast(r, lkb, -EAGAIN);
2241 out:
2242 return error;
2245 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2247 int error = 0;
2248 int deadlk = 0;
2250 /* changing an existing lock may allow others to be granted */
2252 if (can_be_granted(r, lkb, 1, &deadlk)) {
2253 grant_lock(r, lkb);
2254 queue_cast(r, lkb, 0);
2255 grant_pending_locks(r);
2256 goto out;
2259 /* can_be_granted() detected that this lock would block in a conversion
2260 deadlock, so we leave it on the granted queue and return EDEADLK in
2261 the ast for the convert. */
2263 if (deadlk) {
2264 /* it's left on the granted queue */
2265 log_debug(r->res_ls, "deadlock %x node %d sts%d g%d r%d %s",
2266 lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_status,
2267 lkb->lkb_grmode, lkb->lkb_rqmode, r->res_name);
2268 revert_lock(r, lkb);
2269 queue_cast(r, lkb, -EDEADLK);
2270 error = -EDEADLK;
2271 goto out;
2274 /* is_demoted() means the can_be_granted() above set the grmode
2275 to NL, and left us on the granted queue. This auto-demotion
2276 (due to CONVDEADLK) might mean other locks, and/or this lock, are
2277 now grantable. We have to try to grant other converting locks
2278 before we try again to grant this one. */
2280 if (is_demoted(lkb)) {
2281 grant_pending_convert(r, DLM_LOCK_IV, NULL);
2282 if (_can_be_granted(r, lkb, 1)) {
2283 grant_lock(r, lkb);
2284 queue_cast(r, lkb, 0);
2285 grant_pending_locks(r);
2286 goto out;
2288 /* else fall through and move to convert queue */
2291 if (can_be_queued(lkb)) {
2292 error = -EINPROGRESS;
2293 del_lkb(r, lkb);
2294 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2295 send_blocking_asts(r, lkb);
2296 add_timeout(lkb);
2297 goto out;
2300 error = -EAGAIN;
2301 if (force_blocking_asts(lkb))
2302 send_blocking_asts_all(r, lkb);
2303 queue_cast(r, lkb, -EAGAIN);
2305 out:
2306 return error;
2309 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2311 remove_lock(r, lkb);
2312 queue_cast(r, lkb, -DLM_EUNLOCK);
2313 grant_pending_locks(r);
2314 return -DLM_EUNLOCK;
2317 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
2319 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2321 int error;
2323 error = revert_lock(r, lkb);
2324 if (error) {
2325 queue_cast(r, lkb, -DLM_ECANCEL);
2326 grant_pending_locks(r);
2327 return -DLM_ECANCEL;
2329 return 0;
2333 * Four stage 3 varieties:
2334 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
2337 /* add a new lkb to a possibly new rsb, called by requesting process */
2339 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2341 int error;
2343 /* set_master: sets lkb nodeid from r */
2345 error = set_master(r, lkb);
2346 if (error < 0)
2347 goto out;
2348 if (error) {
2349 error = 0;
2350 goto out;
2353 if (is_remote(r))
2354 /* receive_request() calls do_request() on remote node */
2355 error = send_request(r, lkb);
2356 else
2357 error = do_request(r, lkb);
2358 out:
2359 return error;
2362 /* change some property of an existing lkb, e.g. mode */
2364 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2366 int error;
2368 if (is_remote(r))
2369 /* receive_convert() calls do_convert() on remote node */
2370 error = send_convert(r, lkb);
2371 else
2372 error = do_convert(r, lkb);
2374 return error;
2377 /* remove an existing lkb from the granted queue */
2379 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2381 int error;
2383 if (is_remote(r))
2384 /* receive_unlock() calls do_unlock() on remote node */
2385 error = send_unlock(r, lkb);
2386 else
2387 error = do_unlock(r, lkb);
2389 return error;
2392 /* remove an existing lkb from the convert or wait queue */
2394 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2396 int error;
2398 if (is_remote(r))
2399 /* receive_cancel() calls do_cancel() on remote node */
2400 error = send_cancel(r, lkb);
2401 else
2402 error = do_cancel(r, lkb);
2404 return error;
2408 * Four stage 2 varieties:
2409 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
2412 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
2413 int len, struct dlm_args *args)
2415 struct dlm_rsb *r;
2416 int error;
2418 error = validate_lock_args(ls, lkb, args);
2419 if (error)
2420 goto out;
2422 error = find_rsb(ls, name, len, R_CREATE, &r);
2423 if (error)
2424 goto out;
2426 lock_rsb(r);
2428 attach_lkb(r, lkb);
2429 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
2431 error = _request_lock(r, lkb);
2433 unlock_rsb(r);
2434 put_rsb(r);
2436 out:
2437 return error;
2440 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2441 struct dlm_args *args)
2443 struct dlm_rsb *r;
2444 int error;
2446 r = lkb->lkb_resource;
2448 hold_rsb(r);
2449 lock_rsb(r);
2451 error = validate_lock_args(ls, lkb, args);
2452 if (error)
2453 goto out;
2455 error = _convert_lock(r, lkb);
2456 out:
2457 unlock_rsb(r);
2458 put_rsb(r);
2459 return error;
2462 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2463 struct dlm_args *args)
2465 struct dlm_rsb *r;
2466 int error;
2468 r = lkb->lkb_resource;
2470 hold_rsb(r);
2471 lock_rsb(r);
2473 error = validate_unlock_args(lkb, args);
2474 if (error)
2475 goto out;
2477 error = _unlock_lock(r, lkb);
2478 out:
2479 unlock_rsb(r);
2480 put_rsb(r);
2481 return error;
2484 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
2485 struct dlm_args *args)
2487 struct dlm_rsb *r;
2488 int error;
2490 r = lkb->lkb_resource;
2492 hold_rsb(r);
2493 lock_rsb(r);
2495 error = validate_unlock_args(lkb, args);
2496 if (error)
2497 goto out;
2499 error = _cancel_lock(r, lkb);
2500 out:
2501 unlock_rsb(r);
2502 put_rsb(r);
2503 return error;
2507 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
2510 int dlm_lock(dlm_lockspace_t *lockspace,
2511 int mode,
2512 struct dlm_lksb *lksb,
2513 uint32_t flags,
2514 void *name,
2515 unsigned int namelen,
2516 uint32_t parent_lkid,
2517 void (*ast) (void *astarg),
2518 void *astarg,
2519 void (*bast) (void *astarg, int mode))
2521 struct dlm_ls *ls;
2522 struct dlm_lkb *lkb;
2523 struct dlm_args args;
2524 int error, convert = flags & DLM_LKF_CONVERT;
2526 ls = dlm_find_lockspace_local(lockspace);
2527 if (!ls)
2528 return -EINVAL;
2530 dlm_lock_recovery(ls);
2532 if (convert)
2533 error = find_lkb(ls, lksb->sb_lkid, &lkb);
2534 else
2535 error = create_lkb(ls, &lkb);
2537 if (error)
2538 goto out;
2540 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
2541 astarg, bast, &args);
2542 if (error)
2543 goto out_put;
2545 if (convert)
2546 error = convert_lock(ls, lkb, &args);
2547 else
2548 error = request_lock(ls, lkb, name, namelen, &args);
2550 if (error == -EINPROGRESS)
2551 error = 0;
2552 out_put:
2553 if (convert || error)
2554 __put_lkb(ls, lkb);
2555 if (error == -EAGAIN || error == -EDEADLK)
2556 error = 0;
2557 out:
2558 dlm_unlock_recovery(ls);
2559 dlm_put_lockspace(ls);
2560 return error;
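/* An illustrative sketch of a hypothetical caller (my_ast and my_ctx
   are not part of this file): dlm_lock() returning 0 only means the
   request was accepted; the outcome arrives through the completion
   ast, and the new lock id is already in lksb->sb_lkid:

	static void my_ast(void *astarg)
	{
		struct my_ctx *c = astarg;
		// c->lksb.sb_status holds the result, e.g. 0 or -EAGAIN
		complete(&c->done);
	}

	error = dlm_lock(ls, DLM_LOCK_EX, &c->lksb, 0, "myres", 5,
			 0, my_ast, c, NULL);
	if (!error)
		wait_for_completion(&c->done);
 */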
2563 int dlm_unlock(dlm_lockspace_t *lockspace,
2564 uint32_t lkid,
2565 uint32_t flags,
2566 struct dlm_lksb *lksb,
2567 void *astarg)
2569 struct dlm_ls *ls;
2570 struct dlm_lkb *lkb;
2571 struct dlm_args args;
2572 int error;
2574 ls = dlm_find_lockspace_local(lockspace);
2575 if (!ls)
2576 return -EINVAL;
2578 dlm_lock_recovery(ls);
2580 error = find_lkb(ls, lkid, &lkb);
2581 if (error)
2582 goto out;
2584 error = set_unlock_args(flags, astarg, &args);
2585 if (error)
2586 goto out_put;
2588 if (flags & DLM_LKF_CANCEL)
2589 error = cancel_lock(ls, lkb, &args);
2590 else
2591 error = unlock_lock(ls, lkb, &args);
2593 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
2594 error = 0;
2595 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
2596 error = 0;
2597 out_put:
2598 dlm_put_lkb(lkb);
2599 out:
2600 dlm_unlock_recovery(ls);
2601 dlm_put_lockspace(ls);
2602 return error;
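/* An illustrative sketch: because of the normalization above, a
   cancel or force-unlock caller does not see -DLM_ECANCEL,
   -DLM_EUNLOCK or a racing -EBUSY; all come back as 0 and the
   completion ast reports what actually happened:

	error = dlm_unlock(ls, lkid, DLM_LKF_CANCEL, &lksb, ctx);
	// 0 means the cancel was accepted, not that it succeeded
 */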
2606 * send/receive routines for remote operations and replies
2608 * send_args
2609 * send_common
2610 * send_request receive_request
2611 * send_convert receive_convert
2612 * send_unlock receive_unlock
2613 * send_cancel receive_cancel
2614 * send_grant receive_grant
2615 * send_bast receive_bast
2616 * send_lookup receive_lookup
2617 * send_remove receive_remove
2619 * send_common_reply
2620 * receive_request_reply send_request_reply
2621 * receive_convert_reply send_convert_reply
2622 * receive_unlock_reply send_unlock_reply
2623 * receive_cancel_reply send_cancel_reply
2624 * receive_lookup_reply send_lookup_reply
2627 static int _create_message(struct dlm_ls *ls, int mb_len,
2628 int to_nodeid, int mstype,
2629 struct dlm_message **ms_ret,
2630 struct dlm_mhandle **mh_ret)
2632 struct dlm_message *ms;
2633 struct dlm_mhandle *mh;
2634 char *mb;
2636 /* get_buffer gives us a message handle (mh) that we need to
2637 pass into lowcomms_commit and a message buffer (mb) that we
2638 write our data into */
2640 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, ls->ls_allocation, &mb);
2641 if (!mh)
2642 return -ENOBUFS;
2644 memset(mb, 0, mb_len);
2646 ms = (struct dlm_message *) mb;
2648 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
2649 ms->m_header.h_lockspace = ls->ls_global_id;
2650 ms->m_header.h_nodeid = dlm_our_nodeid();
2651 ms->m_header.h_length = mb_len;
2652 ms->m_header.h_cmd = DLM_MSG;
2654 ms->m_type = mstype;
2656 *mh_ret = mh;
2657 *ms_ret = ms;
2658 return 0;
2661 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
2662 int to_nodeid, int mstype,
2663 struct dlm_message **ms_ret,
2664 struct dlm_mhandle **mh_ret)
2666 int mb_len = sizeof(struct dlm_message);
2668 switch (mstype) {
2669 case DLM_MSG_REQUEST:
2670 case DLM_MSG_LOOKUP:
2671 case DLM_MSG_REMOVE:
2672 mb_len += r->res_length;
2673 break;
2674 case DLM_MSG_CONVERT:
2675 case DLM_MSG_UNLOCK:
2676 case DLM_MSG_REQUEST_REPLY:
2677 case DLM_MSG_CONVERT_REPLY:
2678 case DLM_MSG_GRANT:
2679 if (lkb && lkb->lkb_lvbptr)
2680 mb_len += r->res_ls->ls_lvblen;
2681 break;
2684 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
2685 ms_ret, mh_ret);
2688 /* further lowcomms enhancements or alternate implementations may make
2689 the return value from this function useful at some point */
2691 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
2693 dlm_message_out(ms);
2694 dlm_lowcomms_commit_buffer(mh);
2695 return 0;
2698 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2699 struct dlm_message *ms)
2701 ms->m_nodeid = lkb->lkb_nodeid;
2702 ms->m_pid = lkb->lkb_ownpid;
2703 ms->m_lkid = lkb->lkb_id;
2704 ms->m_remid = lkb->lkb_remid;
2705 ms->m_exflags = lkb->lkb_exflags;
2706 ms->m_sbflags = lkb->lkb_sbflags;
2707 ms->m_flags = lkb->lkb_flags;
2708 ms->m_lvbseq = lkb->lkb_lvbseq;
2709 ms->m_status = lkb->lkb_status;
2710 ms->m_grmode = lkb->lkb_grmode;
2711 ms->m_rqmode = lkb->lkb_rqmode;
2712 ms->m_hash = r->res_hash;
2714 /* m_result and m_bastmode are set from function args,
2715 not from lkb fields */
2717 if (lkb->lkb_bastaddr)
2718 ms->m_asts |= AST_BAST;
2719 if (lkb->lkb_astaddr)
2720 ms->m_asts |= AST_COMP;
2722 /* compare with switch in create_message; send_remove() doesn't
2723 use send_args() */
2725 switch (ms->m_type) {
2726 case DLM_MSG_REQUEST:
2727 case DLM_MSG_LOOKUP:
2728 memcpy(ms->m_extra, r->res_name, r->res_length);
2729 break;
2730 case DLM_MSG_CONVERT:
2731 case DLM_MSG_UNLOCK:
2732 case DLM_MSG_REQUEST_REPLY:
2733 case DLM_MSG_CONVERT_REPLY:
2734 case DLM_MSG_GRANT:
2735 if (!lkb->lkb_lvbptr)
2736 break;
2737 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2738 break;
2742 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2744 struct dlm_message *ms;
2745 struct dlm_mhandle *mh;
2746 int to_nodeid, error;
2748 error = add_to_waiters(lkb, mstype);
2749 if (error)
2750 return error;
2752 to_nodeid = r->res_nodeid;
2754 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2755 if (error)
2756 goto fail;
2758 send_args(r, lkb, ms);
2760 error = send_message(mh, ms);
2761 if (error)
2762 goto fail;
2763 return 0;
2765 fail:
2766 remove_from_waiters(lkb, msg_reply_type(mstype));
2767 return error;
2770 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2772 return send_common(r, lkb, DLM_MSG_REQUEST);
2775 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2777 int error;
2779 error = send_common(r, lkb, DLM_MSG_CONVERT);
2781 /* down conversions go without a reply from the master */
2782 if (!error && down_conversion(lkb)) {
2783 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
2784 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
2785 r->res_ls->ls_stub_ms.m_result = 0;
2786 r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
2787 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2790 return error;
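/* The stub reply above is what makes down-conversions synchronous on
   the sending side: the master can always grant a conversion to a
   lower mode, so instead of waiting for a DLM_MSG_CONVERT_REPLY we
   synthesize one locally and run it through the normal
   __receive_convert_reply() path. */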
2793 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
2794 MASTER_UNCERTAIN to force the next request on the rsb to confirm
2795 that the master is still correct. */
2797 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2799 return send_common(r, lkb, DLM_MSG_UNLOCK);
2802 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2804 return send_common(r, lkb, DLM_MSG_CANCEL);
2807 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
2809 struct dlm_message *ms;
2810 struct dlm_mhandle *mh;
2811 int to_nodeid, error;
2813 to_nodeid = lkb->lkb_nodeid;
2815 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
2816 if (error)
2817 goto out;
2819 send_args(r, lkb, ms);
2821 ms->m_result = 0;
2823 error = send_message(mh, ms);
2824 out:
2825 return error;
2828 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
2830 struct dlm_message *ms;
2831 struct dlm_mhandle *mh;
2832 int to_nodeid, error;
2834 to_nodeid = lkb->lkb_nodeid;
2836 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
2837 if (error)
2838 goto out;
2840 send_args(r, lkb, ms);
2842 ms->m_bastmode = mode;
2844 error = send_message(mh, ms);
2845 out:
2846 return error;
2849 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
2851 struct dlm_message *ms;
2852 struct dlm_mhandle *mh;
2853 int to_nodeid, error;
2855 error = add_to_waiters(lkb, DLM_MSG_LOOKUP);
2856 if (error)
2857 return error;
2859 to_nodeid = dlm_dir_nodeid(r);
2861 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
2862 if (error)
2863 goto fail;
2865 send_args(r, lkb, ms);
2867 error = send_message(mh, ms);
2868 if (error)
2869 goto fail;
2870 return 0;
2872 fail:
2873 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
2874 return error;
2877 static int send_remove(struct dlm_rsb *r)
2879 struct dlm_message *ms;
2880 struct dlm_mhandle *mh;
2881 int to_nodeid, error;
2883 to_nodeid = dlm_dir_nodeid(r);
2885 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
2886 if (error)
2887 goto out;
2889 memcpy(ms->m_extra, r->res_name, r->res_length);
2890 ms->m_hash = r->res_hash;
2892 error = send_message(mh, ms);
2893 out:
2894 return error;
2897 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
2898 int mstype, int rv)
2900 struct dlm_message *ms;
2901 struct dlm_mhandle *mh;
2902 int to_nodeid, error;
2904 to_nodeid = lkb->lkb_nodeid;
2906 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2907 if (error)
2908 goto out;
2910 send_args(r, lkb, ms);
2912 ms->m_result = rv;
2914 error = send_message(mh, ms);
2915 out:
2916 return error;
2919 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2921 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
2924 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2926 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
2929 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2931 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
2934 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2936 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
2939 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
2940 int ret_nodeid, int rv)
2942 struct dlm_rsb *r = &ls->ls_stub_rsb;
2943 struct dlm_message *ms;
2944 struct dlm_mhandle *mh;
2945 int error, nodeid = ms_in->m_header.h_nodeid;
2947 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
2948 if (error)
2949 goto out;
2951 ms->m_lkid = ms_in->m_lkid;
2952 ms->m_result = rv;
2953 ms->m_nodeid = ret_nodeid;
2955 error = send_message(mh, ms);
2956 out:
2957 return error;
2960 /* which args we save from a received message depends heavily on the type
2961 of message, unlike the send side where we can safely send everything about
2962 the lkb for any type of message */
2964 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
2966 lkb->lkb_exflags = ms->m_exflags;
2967 lkb->lkb_sbflags = ms->m_sbflags;
2968 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2969 (ms->m_flags & 0x0000FFFF);
2972 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2974 lkb->lkb_sbflags = ms->m_sbflags;
2975 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2976 (ms->m_flags & 0x0000FFFF);
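/* Only the lower 16 bits of lkb_flags are carried in m_flags; the
   upper 16 bits are the node-local DLM_IFL_* flags (e.g. MSTCPY),
   which is why the masks above preserve them across a message. */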
2979 static int receive_extralen(struct dlm_message *ms)
2981 return (ms->m_header.h_length - sizeof(struct dlm_message));
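/* What the extra bytes hold depends on the message type, mirroring
   create_message(): a resource name for REQUEST/LOOKUP/REMOVE (so
   receive_lookup() uses this value as the name length), or ls_lvblen
   bytes of LVB for the LVB-carrying types. */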
2984 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
2985 struct dlm_message *ms)
2987 int len;
2989 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
2990 if (!lkb->lkb_lvbptr)
2991 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
2992 if (!lkb->lkb_lvbptr)
2993 return -ENOMEM;
2994 len = receive_extralen(ms);
2995 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2997 return 0;
3000 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3001 struct dlm_message *ms)
3003 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3004 lkb->lkb_ownpid = ms->m_pid;
3005 lkb->lkb_remid = ms->m_lkid;
3006 lkb->lkb_grmode = DLM_LOCK_IV;
3007 lkb->lkb_rqmode = ms->m_rqmode;
3008 lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
3009 lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);
3011 DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
3013 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3014 /* lkb was just created so there won't be an lvb yet */
3015 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3016 if (!lkb->lkb_lvbptr)
3017 return -ENOMEM;
3020 return 0;
3023 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3024 struct dlm_message *ms)
3026 if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
3027 log_error(ls, "convert_args nodeid %d %d lkid %x %x",
3028 lkb->lkb_nodeid, ms->m_header.h_nodeid,
3029 lkb->lkb_id, lkb->lkb_remid);
3030 return -EINVAL;
3033 if (!is_master_copy(lkb))
3034 return -EINVAL;
3036 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3037 return -EBUSY;
3039 if (receive_lvb(ls, lkb, ms))
3040 return -ENOMEM;
3042 lkb->lkb_rqmode = ms->m_rqmode;
3043 lkb->lkb_lvbseq = ms->m_lvbseq;
3045 return 0;
3048 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3049 struct dlm_message *ms)
3051 if (!is_master_copy(lkb))
3052 return -EINVAL;
3053 if (receive_lvb(ls, lkb, ms))
3054 return -ENOMEM;
3055 return 0;
3058 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3059 uses to send a reply and that the remote end uses to process the reply. */
3061 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3063 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3064 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3065 lkb->lkb_remid = ms->m_lkid;
3068 static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
3070 struct dlm_lkb *lkb;
3071 struct dlm_rsb *r;
3072 int error, namelen;
3074 error = create_lkb(ls, &lkb);
3075 if (error)
3076 goto fail;
3078 receive_flags(lkb, ms);
3079 lkb->lkb_flags |= DLM_IFL_MSTCPY;
3080 error = receive_request_args(ls, lkb, ms);
3081 if (error) {
3082 __put_lkb(ls, lkb);
3083 goto fail;
3086 namelen = receive_extralen(ms);
3088 error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
3089 if (error) {
3090 __put_lkb(ls, lkb);
3091 goto fail;
3094 lock_rsb(r);
3096 attach_lkb(r, lkb);
3097 error = do_request(r, lkb);
3098 send_request_reply(r, lkb, error);
3100 unlock_rsb(r);
3101 put_rsb(r);
3103 if (error == -EINPROGRESS)
3104 error = 0;
3105 if (error)
3106 dlm_put_lkb(lkb);
3107 return;
3109 fail:
3110 setup_stub_lkb(ls, ms);
3111 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3114 static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
3116 struct dlm_lkb *lkb;
3117 struct dlm_rsb *r;
3118 int error, reply = 1;
3120 error = find_lkb(ls, ms->m_remid, &lkb);
3121 if (error)
3122 goto fail;
3124 r = lkb->lkb_resource;
3126 hold_rsb(r);
3127 lock_rsb(r);
3129 receive_flags(lkb, ms);
3130 error = receive_convert_args(ls, lkb, ms);
3131 if (error)
3132 goto out;
3133 reply = !down_conversion(lkb);
3135 error = do_convert(r, lkb);
3136 out:
3137 if (reply)
3138 send_convert_reply(r, lkb, error);
3140 unlock_rsb(r);
3141 put_rsb(r);
3142 dlm_put_lkb(lkb);
3143 return;
3145 fail:
3146 setup_stub_lkb(ls, ms);
3147 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3150 static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
3152 struct dlm_lkb *lkb;
3153 struct dlm_rsb *r;
3154 int error;
3156 error = find_lkb(ls, ms->m_remid, &lkb);
3157 if (error)
3158 goto fail;
3160 r = lkb->lkb_resource;
3162 hold_rsb(r);
3163 lock_rsb(r);
3165 receive_flags(lkb, ms);
3166 error = receive_unlock_args(ls, lkb, ms);
3167 if (error)
3168 goto out;
3170 error = do_unlock(r, lkb);
3171 out:
3172 send_unlock_reply(r, lkb, error);
3174 unlock_rsb(r);
3175 put_rsb(r);
3176 dlm_put_lkb(lkb);
3177 return;
3179 fail:
3180 setup_stub_lkb(ls, ms);
3181 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3184 static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
3186 struct dlm_lkb *lkb;
3187 struct dlm_rsb *r;
3188 int error;
3190 error = find_lkb(ls, ms->m_remid, &lkb);
3191 if (error)
3192 goto fail;
3194 receive_flags(lkb, ms);
3196 r = lkb->lkb_resource;
3198 hold_rsb(r);
3199 lock_rsb(r);
3201 error = do_cancel(r, lkb);
3202 send_cancel_reply(r, lkb, error);
3204 unlock_rsb(r);
3205 put_rsb(r);
3206 dlm_put_lkb(lkb);
3207 return;
3209 fail:
3210 setup_stub_lkb(ls, ms);
3211 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
3214 static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
3216 struct dlm_lkb *lkb;
3217 struct dlm_rsb *r;
3218 int error;
3220 error = find_lkb(ls, ms->m_remid, &lkb);
3221 if (error) {
3222 log_error(ls, "receive_grant no lkb");
3223 return;
3225 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3227 r = lkb->lkb_resource;
3229 hold_rsb(r);
3230 lock_rsb(r);
3232 receive_flags_reply(lkb, ms);
3233 if (is_altmode(lkb))
3234 munge_altmode(lkb, ms);
3235 grant_lock_pc(r, lkb, ms);
3236 queue_cast(r, lkb, 0);
3238 unlock_rsb(r);
3239 put_rsb(r);
3240 dlm_put_lkb(lkb);
3243 static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
3245 struct dlm_lkb *lkb;
3246 struct dlm_rsb *r;
3247 int error;
3249 error = find_lkb(ls, ms->m_remid, &lkb);
3250 if (error) {
3251 log_error(ls, "receive_bast no lkb");
3252 return;
3254 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3256 r = lkb->lkb_resource;
3258 hold_rsb(r);
3259 lock_rsb(r);
3261 queue_bast(r, lkb, ms->m_bastmode);
3263 unlock_rsb(r);
3264 put_rsb(r);
3265 dlm_put_lkb(lkb);
3268 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
3270 int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
3272 from_nodeid = ms->m_header.h_nodeid;
3273 our_nodeid = dlm_our_nodeid();
3275 len = receive_extralen(ms);
3277 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3278 if (dir_nodeid != our_nodeid) {
3279 log_error(ls, "lookup dir_nodeid %d from %d",
3280 dir_nodeid, from_nodeid);
3281 error = -EINVAL;
3282 ret_nodeid = -1;
3283 goto out;
3286 error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
3288 /* Optimization: we're master so treat lookup as a request */
3289 if (!error && ret_nodeid == our_nodeid) {
3290 receive_request(ls, ms);
3291 return;
3293 out:
3294 send_lookup_reply(ls, ms, ret_nodeid, error);
3297 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
3299 int len, dir_nodeid, from_nodeid;
3301 from_nodeid = ms->m_header.h_nodeid;
3303 len = receive_extralen(ms);
3305 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
3306 if (dir_nodeid != dlm_our_nodeid()) {
3307 log_error(ls, "remove dir entry dir_nodeid %d from %d",
3308 dir_nodeid, from_nodeid);
3309 return;
3312 dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
3315 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
3317 do_purge(ls, ms->m_nodeid, ms->m_pid);
3320 static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
3322 struct dlm_lkb *lkb;
3323 struct dlm_rsb *r;
3324 int error, mstype, result;
3326 error = find_lkb(ls, ms->m_remid, &lkb);
3327 if (error) {
3328 log_error(ls, "receive_request_reply no lkb");
3329 return;
3331 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3333 r = lkb->lkb_resource;
3334 hold_rsb(r);
3335 lock_rsb(r);
3337 mstype = lkb->lkb_wait_type;
3338 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
3339 if (error)
3340 goto out;
3342 /* Optimization: the dir node was also the master, so it took our
3343 lookup as a request and sent request reply instead of lookup reply */
3344 if (mstype == DLM_MSG_LOOKUP) {
3345 r->res_nodeid = ms->m_header.h_nodeid;
3346 lkb->lkb_nodeid = r->res_nodeid;
3349 /* this is the value returned from do_request() on the master */
3350 result = ms->m_result;
3352 switch (result) {
3353 case -EAGAIN:
3354 /* request would block (be queued) on remote master */
3355 queue_cast(r, lkb, -EAGAIN);
3356 confirm_master(r, -EAGAIN);
3357 unhold_lkb(lkb); /* undoes create_lkb() */
3358 break;
3360 case -EINPROGRESS:
3361 case 0:
3362 /* request was queued or granted on remote master */
3363 receive_flags_reply(lkb, ms);
3364 lkb->lkb_remid = ms->m_lkid;
3365 if (is_altmode(lkb))
3366 munge_altmode(lkb, ms);
3367 if (result) {
3368 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3369 add_timeout(lkb);
3370 } else {
3371 grant_lock_pc(r, lkb, ms);
3372 queue_cast(r, lkb, 0);
3374 confirm_master(r, result);
3375 break;
3377 case -EBADR:
3378 case -ENOTBLK:
3379 /* find_rsb failed to find rsb or rsb wasn't master */
3380 log_debug(ls, "receive_request_reply %x %x master diff %d %d",
3381 lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
3382 r->res_nodeid = -1;
3383 lkb->lkb_nodeid = -1;
3385 if (is_overlap(lkb)) {
3386 /* we'll ignore error in cancel/unlock reply */
3387 queue_cast_overlap(r, lkb);
3388 confirm_master(r, result);
3389 unhold_lkb(lkb); /* undoes create_lkb() */
3390 } else
3391 _request_lock(r, lkb);
3392 break;
3394 default:
3395 log_error(ls, "receive_request_reply %x error %d",
3396 lkb->lkb_id, result);
3399 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
3400 log_debug(ls, "receive_request_reply %x result %d unlock",
3401 lkb->lkb_id, result);
3402 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3403 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3404 send_unlock(r, lkb);
3405 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
3406 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
3407 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3408 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3409 send_cancel(r, lkb);
3410 } else {
3411 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
3412 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
3414 out:
3415 unlock_rsb(r);
3416 put_rsb(r);
3417 dlm_put_lkb(lkb);
3420 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3421 struct dlm_message *ms)
3423 /* this is the value returned from do_convert() on the master */
3424 switch (ms->m_result) {
3425 case -EAGAIN:
3426 /* convert would block (be queued) on remote master */
3427 queue_cast(r, lkb, -EAGAIN);
3428 break;
3430 case -EDEADLK:
3431 receive_flags_reply(lkb, ms);
3432 revert_lock_pc(r, lkb);
3433 queue_cast(r, lkb, -EDEADLK);
3434 break;
3436 case -EINPROGRESS:
3437 /* convert was queued on remote master */
3438 receive_flags_reply(lkb, ms);
3439 if (is_demoted(lkb))
3440 munge_demoted(lkb, ms);
3441 del_lkb(r, lkb);
3442 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3443 add_timeout(lkb);
3444 break;
3446 case 0:
3447 /* convert was granted on remote master */
3448 receive_flags_reply(lkb, ms);
3449 if (is_demoted(lkb))
3450 munge_demoted(lkb, ms);
3451 grant_lock_pc(r, lkb, ms);
3452 queue_cast(r, lkb, 0);
3453 break;
3455 default:
3456 log_error(r->res_ls, "receive_convert_reply %x error %d",
3457 lkb->lkb_id, ms->m_result);
3461 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3463 struct dlm_rsb *r = lkb->lkb_resource;
3464 int error;
3466 hold_rsb(r);
3467 lock_rsb(r);
3469 /* stub reply can happen with waiters_mutex held */
3470 error = remove_from_waiters_ms(lkb, ms);
3471 if (error)
3472 goto out;
3474 __receive_convert_reply(r, lkb, ms);
3475 out:
3476 unlock_rsb(r);
3477 put_rsb(r);
3480 static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
3482 struct dlm_lkb *lkb;
3483 int error;
3485 error = find_lkb(ls, ms->m_remid, &lkb);
3486 if (error) {
3487 log_error(ls, "receive_convert_reply no lkb");
3488 return;
3490 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3492 _receive_convert_reply(lkb, ms);
3493 dlm_put_lkb(lkb);
3496 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3498 struct dlm_rsb *r = lkb->lkb_resource;
3499 int error;
3501 hold_rsb(r);
3502 lock_rsb(r);
3504 /* stub reply can happen with waiters_mutex held */
3505 error = remove_from_waiters_ms(lkb, ms);
3506 if (error)
3507 goto out;
3509 /* this is the value returned from do_unlock() on the master */
3511 switch (ms->m_result) {
3512 case -DLM_EUNLOCK:
3513 receive_flags_reply(lkb, ms);
3514 remove_lock_pc(r, lkb);
3515 queue_cast(r, lkb, -DLM_EUNLOCK);
3516 break;
3517 case -ENOENT:
3518 break;
3519 default:
3520 log_error(r->res_ls, "receive_unlock_reply %x error %d",
3521 lkb->lkb_id, ms->m_result);
3523 out:
3524 unlock_rsb(r);
3525 put_rsb(r);
3528 static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
3530 struct dlm_lkb *lkb;
3531 int error;
3533 error = find_lkb(ls, ms->m_remid, &lkb);
3534 if (error) {
3535 log_error(ls, "receive_unlock_reply no lkb");
3536 return;
3538 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3540 _receive_unlock_reply(lkb, ms);
3541 dlm_put_lkb(lkb);
3544 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3546 struct dlm_rsb *r = lkb->lkb_resource;
3547 int error;
3549 hold_rsb(r);
3550 lock_rsb(r);
3552 /* stub reply can happen with waiters_mutex held */
3553 error = remove_from_waiters_ms(lkb, ms);
3554 if (error)
3555 goto out;
3557 /* this is the value returned from do_cancel() on the master */
3559 switch (ms->m_result) {
3560 case -DLM_ECANCEL:
3561 receive_flags_reply(lkb, ms);
3562 revert_lock_pc(r, lkb);
3563 queue_cast(r, lkb, -DLM_ECANCEL);
3564 break;
3565 case 0:
3566 break;
3567 default:
3568 log_error(r->res_ls, "receive_cancel_reply %x error %d",
3569 lkb->lkb_id, ms->m_result);
3571 out:
3572 unlock_rsb(r);
3573 put_rsb(r);
3576 static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
3578 struct dlm_lkb *lkb;
3579 int error;
3581 error = find_lkb(ls, ms->m_remid, &lkb);
3582 if (error) {
3583 log_error(ls, "receive_cancel_reply no lkb");
3584 return;
3586 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3588 _receive_cancel_reply(lkb, ms);
3589 dlm_put_lkb(lkb);
3592 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
3594 struct dlm_lkb *lkb;
3595 struct dlm_rsb *r;
3596 int error, ret_nodeid;
3598 error = find_lkb(ls, ms->m_lkid, &lkb);
3599 if (error) {
3600 log_error(ls, "receive_lookup_reply no lkb");
3601 return;
3604 /* ms->m_result is the value returned by dlm_dir_lookup on the dir node.
3605 FIXME: will a non-zero error ever be returned? */
3607 r = lkb->lkb_resource;
3608 hold_rsb(r);
3609 lock_rsb(r);
3611 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3612 if (error)
3613 goto out;
3615 ret_nodeid = ms->m_nodeid;
3616 if (ret_nodeid == dlm_our_nodeid()) {
3617 r->res_nodeid = 0;
3618 ret_nodeid = 0;
3619 r->res_first_lkid = 0;
3620 } else {
3621 /* set_master() will copy res_nodeid to lkb_nodeid */
3622 r->res_nodeid = ret_nodeid;
3625 if (is_overlap(lkb)) {
3626 log_debug(ls, "receive_lookup_reply %x unlock %x",
3627 lkb->lkb_id, lkb->lkb_flags);
3628 queue_cast_overlap(r, lkb);
3629 unhold_lkb(lkb); /* undoes create_lkb() */
3630 goto out_list;
3633 _request_lock(r, lkb);
3635 out_list:
3636 if (!ret_nodeid)
3637 process_lookup_list(r);
3638 out:
3639 unlock_rsb(r);
3640 put_rsb(r);
3641 dlm_put_lkb(lkb);
3644 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
3646 if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
3647 log_debug(ls, "ignore non-member message %d from %d %x %x %d",
3648 ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
3649 ms->m_remid, ms->m_result);
3650 return;
3653 switch (ms->m_type) {
3655 /* messages sent to a master node */
3657 case DLM_MSG_REQUEST:
3658 receive_request(ls, ms);
3659 break;
3661 case DLM_MSG_CONVERT:
3662 receive_convert(ls, ms);
3663 break;
3665 case DLM_MSG_UNLOCK:
3666 receive_unlock(ls, ms);
3667 break;
3669 case DLM_MSG_CANCEL:
3670 receive_cancel(ls, ms);
3671 break;
3673 /* messages sent from a master node (replies to above) */
3675 case DLM_MSG_REQUEST_REPLY:
3676 receive_request_reply(ls, ms);
3677 break;
3679 case DLM_MSG_CONVERT_REPLY:
3680 receive_convert_reply(ls, ms);
3681 break;
3683 case DLM_MSG_UNLOCK_REPLY:
3684 receive_unlock_reply(ls, ms);
3685 break;
3687 case DLM_MSG_CANCEL_REPLY:
3688 receive_cancel_reply(ls, ms);
3689 break;
3691 /* messages sent from a master node (only two types of async msg) */
3693 case DLM_MSG_GRANT:
3694 receive_grant(ls, ms);
3695 break;
3697 case DLM_MSG_BAST:
3698 receive_bast(ls, ms);
3699 break;
3701 /* messages sent to a dir node */
3703 case DLM_MSG_LOOKUP:
3704 receive_lookup(ls, ms);
3705 break;
3707 case DLM_MSG_REMOVE:
3708 receive_remove(ls, ms);
3709 break;
3711 /* messages sent from a dir node (remove has no reply) */
3713 case DLM_MSG_LOOKUP_REPLY:
3714 receive_lookup_reply(ls, ms);
3715 break;
3717 /* other messages */
3719 case DLM_MSG_PURGE:
3720 receive_purge(ls, ms);
3721 break;
3723 default:
3724 log_error(ls, "unknown message type %d", ms->m_type);
3727 dlm_astd_wake();
3730 /* If the lockspace is in recovery mode (locking stopped), then normal
3731 messages are saved on the requestqueue for processing after recovery is
3732 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
3733 messages off the requestqueue before we process new ones. This occurs right
3734 after recovery completes when we transition from saving all messages on
3735 requestqueue, to processing all the saved messages, to processing new
3736 messages as they arrive. */
3738 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
3739 int nodeid)
3741 if (dlm_locking_stopped(ls)) {
3742 dlm_add_requestqueue(ls, nodeid, (struct dlm_header *) ms);
3743 } else {
3744 dlm_wait_requestqueue(ls);
3745 _receive_message(ls, ms);
3749 /* This is called by dlm_recoverd to process messages that were saved on
3750 the requestqueue. */
3752 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
3754 _receive_message(ls, ms);
3757 /* This is called by the midcomms layer when something is received for
3758 the lockspace. It could be either a MSG (normal message sent as part of
3759 standard locking activity) or an RCOM (recovery message sent as part of
3760 lockspace recovery). */
3762 void dlm_receive_buffer(struct dlm_header *hd, int nodeid)
3764 struct dlm_message *ms = (struct dlm_message *) hd;
3765 struct dlm_rcom *rc = (struct dlm_rcom *) hd;
3766 struct dlm_ls *ls;
3767 int type = 0;
3769 switch (hd->h_cmd) {
3770 case DLM_MSG:
3771 dlm_message_in(ms);
3772 type = ms->m_type;
3773 break;
3774 case DLM_RCOM:
3775 dlm_rcom_in(rc);
3776 type = rc->rc_type;
3777 break;
3778 default:
3779 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
3780 return;
3783 if (hd->h_nodeid != nodeid) {
3784 log_print("invalid h_nodeid %d from %d lockspace %x",
3785 hd->h_nodeid, nodeid, hd->h_lockspace);
3786 return;
3789 ls = dlm_find_lockspace_global(hd->h_lockspace);
3790 if (!ls) {
3791 log_print("invalid h_lockspace %x from %d cmd %d type %d",
3792 hd->h_lockspace, nodeid, hd->h_cmd, type);
3794 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
3795 dlm_send_ls_not_ready(nodeid, rc);
3796 return;
3799 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
3800 be inactive (in this ls) before transitioning to recovery mode */
3802 down_read(&ls->ls_recv_active);
3803 if (hd->h_cmd == DLM_MSG)
3804 dlm_receive_message(ls, ms, nodeid);
3805 else
3806 dlm_receive_rcom(ls, rc, nodeid);
3807 up_read(&ls->ls_recv_active);
3809 dlm_put_lockspace(ls);
3812 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
3814 if (middle_conversion(lkb)) {
3815 hold_lkb(lkb);
3816 ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3817 ls->ls_stub_ms.m_result = -EINPROGRESS;
3818 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3819 _receive_convert_reply(lkb, &ls->ls_stub_ms);
3821 /* Same special case as in receive_rcom_lock_args() */
3822 lkb->lkb_grmode = DLM_LOCK_IV;
3823 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
3824 unhold_lkb(lkb);
3826 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
3827 lkb->lkb_flags |= DLM_IFL_RESEND;
3830 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
3831 conversions are async; there's no reply from the remote master */
3834 /* A waiting lkb needs recovery if the master node has failed, or
3835 the master node is changing (only when no directory is used) */
3837 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
3839 if (dlm_is_removed(ls, lkb->lkb_nodeid))
3840 return 1;
3842 if (!dlm_no_directory(ls))
3843 return 0;
3845 if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
3846 return 1;
3848 return 0;
3851 /* Recovery for locks that are waiting for replies from nodes that are now
3852 gone. We can just complete unlocks and cancels by faking a reply from the
3853 dead node. Requests and up-conversions we flag to be resent after
3854 recovery. Down-conversions can just be completed with a fake reply like
3855 unlocks. Conversions between PR and CW need special attention. */
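/* Summary of the per-wait_type handling below:
	DLM_MSG_LOOKUP  - flagged RESEND regardless of destination
	DLM_MSG_REQUEST - flagged RESEND, resent by _post
	DLM_MSG_CONVERT - PR<->CW conversions get a faked -EINPROGRESS
			  reply; other conversions are flagged RESEND
	DLM_MSG_UNLOCK  - completed with a faked unlock reply
	DLM_MSG_CANCEL  - completed with a faked cancel reply */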
3857 void dlm_recover_waiters_pre(struct dlm_ls *ls)
3859 struct dlm_lkb *lkb, *safe;
3860 int wait_type, stub_unlock_result, stub_cancel_result;
3862 mutex_lock(&ls->ls_waiters_mutex);
3864 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
3865 log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
3866 lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);
3868 /* all outstanding lookups, regardless of destination, will be
3869 resent after recovery is done */
3871 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
3872 lkb->lkb_flags |= DLM_IFL_RESEND;
3873 continue;
3876 if (!waiter_needs_recovery(ls, lkb))
3877 continue;
3879 wait_type = lkb->lkb_wait_type;
3880 stub_unlock_result = -DLM_EUNLOCK;
3881 stub_cancel_result = -DLM_ECANCEL;
3883 /* Main reply may have been received leaving a zero wait_type,
3884 but a reply for the overlapping op may not have been
3885 received. In that case we need to fake the appropriate
3886 reply for the overlap op. */
3888 if (!wait_type) {
3889 if (is_overlap_cancel(lkb)) {
3890 wait_type = DLM_MSG_CANCEL;
3891 if (lkb->lkb_grmode == DLM_LOCK_IV)
3892 stub_cancel_result = 0;
3894 if (is_overlap_unlock(lkb)) {
3895 wait_type = DLM_MSG_UNLOCK;
3896 if (lkb->lkb_grmode == DLM_LOCK_IV)
3897 stub_unlock_result = -ENOENT;
3900 log_debug(ls, "rwpre overlap %x %x %d %d %d",
3901 lkb->lkb_id, lkb->lkb_flags, wait_type,
3902 stub_cancel_result, stub_unlock_result);
3905 switch (wait_type) {
3907 case DLM_MSG_REQUEST:
3908 lkb->lkb_flags |= DLM_IFL_RESEND;
3909 break;
3911 case DLM_MSG_CONVERT:
3912 recover_convert_waiter(ls, lkb);
3913 break;
3915 case DLM_MSG_UNLOCK:
3916 hold_lkb(lkb);
3917 ls->ls_stub_ms.m_type = DLM_MSG_UNLOCK_REPLY;
3918 ls->ls_stub_ms.m_result = stub_unlock_result;
3919 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3920 _receive_unlock_reply(lkb, &ls->ls_stub_ms);
3921 dlm_put_lkb(lkb);
3922 break;
3924 case DLM_MSG_CANCEL:
3925 hold_lkb(lkb);
3926 ls->ls_stub_ms.m_type = DLM_MSG_CANCEL_REPLY;
3927 ls->ls_stub_ms.m_result = stub_cancel_result;
3928 ls->ls_stub_ms.m_flags = lkb->lkb_flags;
3929 _receive_cancel_reply(lkb, &ls->ls_stub_ms);
3930 dlm_put_lkb(lkb);
3931 break;
3933 default:
3934 log_error(ls, "invalid lkb wait_type %d %d",
3935 lkb->lkb_wait_type, wait_type);
3937 schedule();
3939 mutex_unlock(&ls->ls_waiters_mutex);
3942 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
3944 struct dlm_lkb *lkb;
3945 int found = 0;
3947 mutex_lock(&ls->ls_waiters_mutex);
3948 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
3949 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3950 hold_lkb(lkb);
3951 found = 1;
3952 break;
3955 mutex_unlock(&ls->ls_waiters_mutex);
3957 if (!found)
3958 lkb = NULL;
3959 return lkb;
3962 /* Deal with lookups and lkbs marked RESEND from _pre. We may now be the
3963 master or dir-node for r. Processing the lkb may result in it being placed
3964 back on waiters. */
3966 /* We do this after normal locking has been enabled and any saved messages
3967 (in requestqueue) have been processed. We should be confident that at
3968 this point we won't get or process a reply to any of these waiting
3969 operations. But, new ops may be coming in on the rsbs/locks here from
3970 userspace or remotely. */
3972 /* there may have been an overlap unlock/cancel prior to recovery or after
3973 recovery. if before, the lkb may still have a positive wait_count; if after, the
3974 overlap flag would just have been set and nothing new sent. we can be
3975 confident here that any replies to either the initial op or overlap ops
3976 prior to recovery have been received. */
3978 int dlm_recover_waiters_post(struct dlm_ls *ls)
3980 struct dlm_lkb *lkb;
3981 struct dlm_rsb *r;
3982 int error = 0, mstype, err, oc, ou;
3984 while (1) {
3985 if (dlm_locking_stopped(ls)) {
3986 log_debug(ls, "recover_waiters_post aborted");
3987 error = -EINTR;
3988 break;
3991 lkb = find_resend_waiter(ls);
3992 if (!lkb)
3993 break;
3995 r = lkb->lkb_resource;
3996 hold_rsb(r);
3997 lock_rsb(r);
3999 mstype = lkb->lkb_wait_type;
4000 oc = is_overlap_cancel(lkb);
4001 ou = is_overlap_unlock(lkb);
4002 err = 0;
4004 log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
4005 lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);
4007 /* At this point we assume that we won't get a reply to any
4008 previous op or overlap op on this lock. First, do a big
4009 remove_from_waiters() for all previous ops. */
4011 lkb->lkb_flags &= ~DLM_IFL_RESEND;
4012 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4013 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4014 lkb->lkb_wait_type = 0;
4015 lkb->lkb_wait_count = 0;
4016 mutex_lock(&ls->ls_waiters_mutex);
4017 list_del_init(&lkb->lkb_wait_reply);
4018 mutex_unlock(&ls->ls_waiters_mutex);
4019 unhold_lkb(lkb); /* for waiters list */
4021 if (oc || ou) {
4022 /* do an unlock or cancel instead of resending */
4023 switch (mstype) {
4024 case DLM_MSG_LOOKUP:
4025 case DLM_MSG_REQUEST:
4026 queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
4027 -DLM_ECANCEL);
4028 unhold_lkb(lkb); /* undoes create_lkb() */
4029 break;
4030 case DLM_MSG_CONVERT:
4031 if (oc) {
4032 queue_cast(r, lkb, -DLM_ECANCEL);
4033 } else {
4034 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
4035 _unlock_lock(r, lkb);
4037 break;
4038 default:
4039 err = 1;
4041 } else {
4042 switch (mstype) {
4043 case DLM_MSG_LOOKUP:
4044 case DLM_MSG_REQUEST:
4045 _request_lock(r, lkb);
4046 if (is_master(r))
4047 confirm_master(r, 0);
4048 break;
4049 case DLM_MSG_CONVERT:
4050 _convert_lock(r, lkb);
4051 break;
4052 default:
4053 err = 1;
4057 if (err)
4058 log_error(ls, "recover_waiters_post %x %d %x %d %d",
4059 lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou);
4060 unlock_rsb(r);
4061 put_rsb(r);
4062 dlm_put_lkb(lkb);
4065 return error;
4068 static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
4069 int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
4071 struct dlm_ls *ls = r->res_ls;
4072 struct dlm_lkb *lkb, *safe;
4074 list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
4075 if (test(ls, lkb)) {
4076 rsb_set_flag(r, RSB_LOCKS_PURGED);
4077 del_lkb(r, lkb);
4078 /* this put should free the lkb */
4079 if (!dlm_put_lkb(lkb))
4080 log_error(ls, "purged lkb not released");
4085 static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4087 return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
4090 static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
4092 return is_master_copy(lkb);
4095 static void purge_dead_locks(struct dlm_rsb *r)
4097 purge_queue(r, &r->res_grantqueue, &purge_dead_test);
4098 purge_queue(r, &r->res_convertqueue, &purge_dead_test);
4099 purge_queue(r, &r->res_waitqueue, &purge_dead_test);
4102 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
4104 purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
4105 purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
4106 purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
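/* purge_queue() takes a predicate so both purge flavors above share
   one queue walk. A hypothetical new policy would only need another
   test function (sketch only; lkb_is_stale() does not exist):

	static int purge_stale_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
	{
		return is_master_copy(lkb) && lkb_is_stale(lkb);
	}

	purge_queue(r, &r->res_grantqueue, &purge_stale_test);
 */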
4109 /* Get rid of locks held by nodes that are gone. */
4111 int dlm_purge_locks(struct dlm_ls *ls)
4113 struct dlm_rsb *r;
4115 log_debug(ls, "dlm_purge_locks");
4117 down_write(&ls->ls_root_sem);
4118 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
4119 hold_rsb(r);
4120 lock_rsb(r);
4121 if (is_master(r))
4122 purge_dead_locks(r);
4123 unlock_rsb(r);
4124 unhold_rsb(r);
4126 schedule();
4128 up_write(&ls->ls_root_sem);
4130 return 0;
4133 static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
4135 struct dlm_rsb *r, *r_ret = NULL;
4137 read_lock(&ls->ls_rsbtbl[bucket].lock);
4138 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
4139 if (!rsb_flag(r, RSB_LOCKS_PURGED))
4140 continue;
4141 hold_rsb(r);
4142 rsb_clear_flag(r, RSB_LOCKS_PURGED);
4143 r_ret = r;
4144 break;
4146 read_unlock(&ls->ls_rsbtbl[bucket].lock);
4147 return r_ret;
4150 void dlm_grant_after_purge(struct dlm_ls *ls)
4152 struct dlm_rsb *r;
4153 int bucket = 0;
4155 while (1) {
4156 r = find_purged_rsb(ls, bucket);
4157 if (!r) {
4158 if (bucket == ls->ls_rsbtbl_size - 1)
4159 break;
4160 bucket++;
4161 continue;
4163 lock_rsb(r);
4164 if (is_master(r)) {
4165 grant_pending_locks(r);
4166 confirm_master(r, 0);
4168 unlock_rsb(r);
4169 put_rsb(r);
4170 schedule();
4174 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
4175 uint32_t remid)
4177 struct dlm_lkb *lkb;
4179 list_for_each_entry(lkb, head, lkb_statequeue) {
4180 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
4181 return lkb;
4183 return NULL;
4186 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
4187 uint32_t remid)
4189 struct dlm_lkb *lkb;
4191 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
4192 if (lkb)
4193 return lkb;
4194 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
4195 if (lkb)
4196 return lkb;
4197 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
4198 if (lkb)
4199 return lkb;
4200 return NULL;
4203 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
4204 struct dlm_rsb *r, struct dlm_rcom *rc)
4206 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4207 int lvblen;
4209 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
4210 lkb->lkb_ownpid = rl->rl_ownpid;
4211 lkb->lkb_remid = rl->rl_lkid;
4212 lkb->lkb_exflags = rl->rl_exflags;
4213 lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
4214 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4215 lkb->lkb_lvbseq = rl->rl_lvbseq;
4216 lkb->lkb_rqmode = rl->rl_rqmode;
4217 lkb->lkb_grmode = rl->rl_grmode;
4218 /* don't set lkb_status because add_lkb wants to set it itself */
4220 lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
4221 lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);
4223 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
4224 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
4225 if (!lkb->lkb_lvbptr)
4226 return -ENOMEM;
4227 lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
4228 sizeof(struct rcom_lock);
4229 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
4232 /* Conversions between PR and CW (middle modes) need special handling.
4233 The real granted mode of these converting locks cannot be determined
4234 until all locks have been rebuilt on the rsb (recover_conversion) */
4236 if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
4237 rl->rl_status = DLM_LKSTS_CONVERT;
4238 lkb->lkb_grmode = DLM_LOCK_IV;
4239 rsb_set_flag(r, RSB_RECOVER_CONVERT);
4242 return 0;
4245 /* This lkb may have been recovered in a previous aborted recovery so we need
4246 to check if the rsb already has an lkb with the given remote nodeid/lkid.
4247 If so we just send back a standard reply. If not, we create a new lkb with
4248 the given values and send back our lkid. We send back our lkid by sending
4249 back the rcom_lock struct we got but with the remid field filled in. */
4251 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4253 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4254 struct dlm_rsb *r;
4255 struct dlm_lkb *lkb;
4256 int error;
4258 if (rl->rl_parent_lkid) {
4259 error = -EOPNOTSUPP;
4260 goto out;
4263 error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
4264 if (error)
4265 goto out;
4267 lock_rsb(r);
4269 lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
4270 if (lkb) {
4271 error = -EEXIST;
4272 goto out_remid;
4275 error = create_lkb(ls, &lkb);
4276 if (error)
4277 goto out_unlock;
4279 error = receive_rcom_lock_args(ls, lkb, r, rc);
4280 if (error) {
4281 __put_lkb(ls, lkb);
4282 goto out_unlock;
4285 attach_lkb(r, lkb);
4286 add_lkb(r, lkb, rl->rl_status);
4287 error = 0;
4289 out_remid:
4290 /* this is the new value returned to the lock holder for
4291 saving in its process-copy lkb */
4292 rl->rl_remid = lkb->lkb_id;
4294 out_unlock:
4295 unlock_rsb(r);
4296 put_rsb(r);
4297 out:
4298 if (error)
4299 log_debug(ls, "recover_master_copy %d %x", error, rl->rl_lkid);
4300 rl->rl_result = error;
4301 return error;
4304 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
4306 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
4307 struct dlm_rsb *r;
4308 struct dlm_lkb *lkb;
4309 int error;
4311 error = find_lkb(ls, rl->rl_lkid, &lkb);
4312 if (error) {
4313 log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
4314 return error;
4317 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
4319 error = rl->rl_result;
4321 r = lkb->lkb_resource;
4322 hold_rsb(r);
4323 lock_rsb(r);
4325 switch (error) {
4326 case -EBADR:
4327 /* There's a chance the new master received our lock before
4328 dlm_recover_master_reply(); this wouldn't happen if we did
4329 a barrier between recover_masters and recover_locks. */
4330 log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
4331 (unsigned long)r, r->res_name);
4332 dlm_send_rcom_lock(r, lkb);
4333 goto out;
4334 case -EEXIST:
4335 log_debug(ls, "master copy exists %x", lkb->lkb_id);
4336 /* fall through */
4337 case 0:
4338 lkb->lkb_remid = rl->rl_remid;
4339 break;
4340 default:
4341 log_error(ls, "dlm_recover_process_copy unknown error %d %x",
4342 error, lkb->lkb_id);
4345 /* an ack for dlm_recover_locks() which waits for replies from
4346 all the locks it sends to new masters */
4347 dlm_recovered_lock(r);
4348 out:
4349 unlock_rsb(r);
4350 put_rsb(r);
4351 dlm_put_lkb(lkb);
4353 return 0;
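
/* The dlm_user_* functions below are the entry points for locks owned by
   userspace processes, reached through the dlm misc device (user.c)
   rather than through the kernel dlm_lock()/dlm_unlock() interface. */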
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	dlm_lock_recovery(ls);

	error = create_lkb(ls, &lkb);
	if (error) {
		kfree(ua);
		goto out;
	}

	if (flags & DLM_LKF_VALBLK) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
		if (!ua->lksb.sb_lvbptr) {
			kfree(ua);
			__put_lkb(ls, lkb);
			error = -ENOMEM;
			goto out;
		}
	}

	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
	   lock and that lkb_astparam is the dlm_user_args structure. */

	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
			      DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
	lkb->lkb_flags |= DLM_IFL_USER;
	ua->old_mode = DLM_LOCK_IV;

	if (error) {
		__put_lkb(ls, lkb);
		goto out;
	}

	error = request_lock(ls, lkb, name, namelen, &args);

	switch (error) {
	case 0:
		break;
	case -EINPROGRESS:
		error = 0;
		break;
	case -EAGAIN:
		error = 0;
		/* fall through */
	default:
		__put_lkb(ls, lkb);
		goto out;
	}

	/* add this new lkb to the per-process list of locks */
	spin_lock(&ua->proc->locks_spin);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	dlm_unlock_recovery(ls);
	return error;
}
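
/* ua_tmp carries the values passed in from userspace for this convert;
   they are copied into the ua already attached to the lkb (a convert may
   change the callbacks, lksb and lvb of an existing lock) and ua_tmp
   itself is freed before returning. */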
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	/* user can change the params on its lock when it converts it, or
	   add an lvb that didn't exist before */

	ua = (struct dlm_user_args *)lkb->lkb_astparam;

	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
		if (!ua->lksb.sb_lvbptr) {
			error = -ENOMEM;
			goto out_put;
		}
	}
	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);

	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;
	ua->old_mode = lkb->lkb_grmode;

	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
			      DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
	if (error)
		goto out_put;

	error = convert_lock(ls, lkb, &args);

	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}
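
/* -DLM_EUNLOCK means the unlock completed, which the caller sees as
   success since the result is delivered through its completion ast.
   -EBUSY from validate_unlock_args() is likewise not an error when
   DLM_LKF_FORCEUNLOCK was requested. */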
int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid, char *lvb_in)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = (struct dlm_user_args *)lkb->lkb_astparam;

	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = unlock_lock(ls, lkb, &args);

	if (error == -DLM_EUNLOCK)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
		error = 0;
	if (error)
		goto out_put;

	spin_lock(&ua->proc->locks_spin);
	/* dlm_user_add_ast() may have already taken lkb off the proc list */
	if (!list_empty(&lkb->lkb_ownqueue))
		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
	spin_unlock(&ua->proc->locks_spin);
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = (struct dlm_user_args *)lkb->lkb_astparam;
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = cancel_lock(ls, lkb, &args);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}
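
/* Cancel a conversion on behalf of deadlock resolution: the same path as
   dlm_user_cancel(), except DLM_IFL_DEADLOCK_CANCEL is set while the rsb
   is locked so later processing can tell a deadlock-driven cancel from
   one the user asked for. */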
int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	struct dlm_rsb *r;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = (struct dlm_user_args *)lkb->lkb_astparam;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	error = validate_unlock_args(lkb, &args);
	if (error)
		goto out_r;
	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;

	error = _cancel_lock(r, lkb);
 out_r:
	unlock_rsb(r);
	put_rsb(r);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	return error;
}

/* lkb's that are removed from the waiters list by revert are just left on the
   orphans list with the granted orphan locks, to be freed by purge */

static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
	struct dlm_args args;
	int error;

	hold_lkb(lkb);
	mutex_lock(&ls->ls_orphans_mutex);
	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
	mutex_unlock(&ls->ls_orphans_mutex);

	set_unlock_args(0, ua, &args);

	error = cancel_lock(ls, lkb, &args);
	if (error == -DLM_ECANCEL)
		error = 0;
	return error;
}

/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
   Regardless of what rsb queue the lock is on, it's removed and freed. */

static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
	struct dlm_args args;
	int error;

	set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args);

	error = unlock_lock(ls, lkb, &args);
	if (error == -DLM_EUNLOCK)
		error = 0;
	return error;
}

/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
   (which does lock_rsb) due to deadlock with receiving a message that does
   lock_rsb followed by dlm_user_add_ast() */

static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
				     struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb = NULL;

	mutex_lock(&ls->ls_clear_proc_locks);
	if (list_empty(&proc->locks))
		goto out;

	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
	list_del_init(&lkb->lkb_ownqueue);

	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
		lkb->lkb_flags |= DLM_IFL_ORPHAN;
	else
		lkb->lkb_flags |= DLM_IFL_DEAD;
 out:
	mutex_unlock(&ls->ls_clear_proc_locks);
	return lkb;
}

/* The ls_clear_proc_locks mutex protects against dlm_user_add_asts() which
   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
   which we clear here. */

/* proc CLOSING flag is set so no more device_reads should look at proc->asts
   list, and no more device_writes should add lkb's to proc->locks list; so we
   shouldn't need to take asts_spin or locks_spin here.  This assumes that
   device reads/writes/closes are serialized -- FIXME: we may need to
   serialize them ourselves. */

void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	dlm_lock_recovery(ls);

	while (1) {
		lkb = del_proc_lock(ls, proc);
		if (!lkb)
			break;
		del_timeout(lkb);
		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
			orphan_proc_lock(ls, lkb);
		else
			unlock_proc_lock(ls, lkb);

		/* this removes the reference for the proc->locks list
		   added by dlm_user_request, it may result in the lkb
		   being freed */

		dlm_put_lkb(lkb);
	}

	mutex_lock(&ls->ls_clear_proc_locks);

	/* in-progress unlocks */
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}

	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
		lkb->lkb_ast_type = 0;
		list_del(&lkb->lkb_astqueue);
		dlm_put_lkb(lkb);
	}

	mutex_unlock(&ls->ls_clear_proc_locks);
	dlm_unlock_recovery(ls);
}
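
/* Like dlm_clear_proc_locks(), but for a purge requested while the
   process still has its device open, so locks_spin and asts_spin must be
   taken here: the proc may be adding locks and receiving asts
   concurrently. */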
static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	while (1) {
		lkb = NULL;
		spin_lock(&proc->locks_spin);
		if (!list_empty(&proc->locks)) {
			lkb = list_entry(proc->locks.next, struct dlm_lkb,
					 lkb_ownqueue);
			list_del_init(&lkb->lkb_ownqueue);
		}
		spin_unlock(&proc->locks_spin);

		if (!lkb)
			break;

		lkb->lkb_flags |= DLM_IFL_DEAD;
		unlock_proc_lock(ls, lkb);
		dlm_put_lkb(lkb); /* ref from proc->locks list */
	}

	spin_lock(&proc->locks_spin);
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->locks_spin);

	spin_lock(&proc->asts_spin);
	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
		list_del(&lkb->lkb_astqueue);
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->asts_spin);
}

/* pid of 0 means purge all orphans */

static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_lkb *lkb, *safe;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
		if (pid && lkb->lkb_ownpid != pid)
			continue;
		unlock_proc_lock(ls, lkb);
		list_del_init(&lkb->lkb_ownqueue);
		dlm_put_lkb(lkb);
	}
	mutex_unlock(&ls->ls_orphans_mutex);
}
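
/* Ask another node to purge orphans for this pid; the DLM_MSG_PURGE
   message is handled on the remote node by receive_purge(), which calls
   do_purge() there. */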
static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error;

	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
				DLM_MSG_PURGE, &ms, &mh);
	if (error)
		return error;
	ms->m_nodeid = nodeid;
	ms->m_pid = pid;

	return send_message(mh, ms);
}

int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
		   int nodeid, int pid)
{
	int error = 0;

	if (nodeid != dlm_our_nodeid()) {
		error = send_purge(ls, nodeid, pid);
	} else {
		dlm_lock_recovery(ls);
		if (pid == current->pid)
			purge_proc_locks(ls, proc);
		else
			do_purge(ls, nodeid, pid);
		dlm_unlock_recovery(ls);
	}
	return error;
}