/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * underlying calls for lock creation
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
49 #include "dlmcommon.h"
51 #include "dlmconvert.h"
53 #define MLOG_MASK_PREFIX ML_DLM
54 #include "cluster/masklog.h"

static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;

static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res,
					struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);

/* Tell us whether we can grant a new lock request.
 *
 * caller needs: res->spinlock
 *
 * returns: 1 if the lock can be granted, 0 otherwise.
 */
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
				  struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, &res->granted) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	list_for_each(iter, &res->converting) {
		tmplock = list_entry(iter, struct dlm_lock, list);

		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
	}

	return 1;
}
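
/*
 * Editor's illustrative note (not in the original source): dlm_lock_compatible()
 * encodes the usual DLM mode-compatibility matrix.  For example, a new
 * LKM_PRMODE request is compatible with already-granted LKM_PRMODE locks and
 * can be granted immediately, while an LKM_EXMODE holder on the granted or
 * converting queue forces the new request onto the blocked queue (or returns
 * DLM_NOTQUEUED if the caller passed LKM_NOQUEUE).
 */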

/* performs lock creation at the lockres master site
 *
 * taken: takes and drops res->spinlock
 *
 * returns: DLM_NORMAL, DLM_NOTQUEUED
 */
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	int call_ast = 0, kick_thread = 0;
	enum dlm_status status = DLM_NORMAL;

	mlog_entry("type=%d\n", lock->ml.type);

	spin_lock(&res->spinlock);
	/* if called from dlm_create_lock_handler, need to
	 * ensure it will not sleep in dlm_wait_on_lockres */
	status = __dlm_lockres_state_to_status(res);
	if (status != DLM_NORMAL &&
	    lock->ml.node != dlm->node_num) {
		/* erf. state changed after lock was dropped. */
		spin_unlock(&res->spinlock);
		dlm_error(status);
		return status;
	}
	__dlm_wait_on_lockres(res);
	__dlm_lockres_reserve_ast(res);

	if (dlm_can_grant_new_lock(res, lock)) {
		mlog(0, "I can grant this lock right away\n");
		/* got it right away */
		lock->lksb->status = DLM_NORMAL;
		list_add_tail(&lock->list, &res->granted);

		/* for the recovery lock, we can't allow the ast
		 * to be queued since the dlmthread is already
		 * frozen.  but the recovery lock is always locked
		 * with LKM_NOQUEUE so we do not need the ast in
		 * this special case */
		if (!dlm_is_recovery_lock(res->lockname.name,
					  res->lockname.len)) {
			kick_thread = 1;
			call_ast = 1;
		} else {
			mlog(0, "%s: returning DLM_NORMAL to "
			     "node %u for reco lock\n", dlm->name,
			     lock->ml.node);
		}
	} else {
		/* for NOQUEUE request, unless we get the
		 * lock right away, return DLM_NOTQUEUED */
		if (flags & LKM_NOQUEUE) {
			status = DLM_NOTQUEUED;
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				mlog(0, "%s: returning NOTQUEUED to "
				     "node %u for reco lock\n", dlm->name,
				     lock->ml.node);
			}
		} else {
			list_add_tail(&lock->list, &res->blocked);
			kick_thread = 1;
		}
	}
	/* reduce the inflight count, this may result in the lockres
	 * being purged below during calc_usage */
	if (lock->ml.node == dlm->node_num)
		dlm_lockres_drop_inflight_ref(dlm, res);

	spin_unlock(&res->spinlock);

	/* either queue the ast or release it */
	if (call_ast)
		dlm_queue_ast(dlm, lock);
	else
		dlm_lockres_release_ast(dlm, res);

	dlm_lockres_calc_usage(dlm, res);
	if (kick_thread)
		dlm_kick_thread(dlm, res);

	return status;
}

void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock)
{
	/* remove from local queue if it failed */
	list_del_init(&lock->list);
	lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}

/* performs lock creation at a remote node
 *
 * taken: takes and drops res->spinlock
 *
 * returns: DLM_DENIED, DLM_RECOVERING, or net status
 */
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock, int flags)
{
	enum dlm_status status = DLM_DENIED;
	int lockres_changed = 1;

	mlog_entry("type=%d\n", lock->ml.type);
	mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
	     res->lockname.name, flags);

	spin_lock(&res->spinlock);

	/* will exit this call with spinlock held */
	__dlm_wait_on_lockres(res);
	res->state |= DLM_LOCK_RES_IN_PROGRESS;

	/* add lock to local (secondary) queue */
	list_add_tail(&lock->list, &res->blocked);
	lock->lock_pending = 1;
	spin_unlock(&res->spinlock);

	/* spec seems to say that you will get DLM_NORMAL when the lock
	 * has been queued, meaning we need to wait for a reply here. */
	status = dlm_send_remote_lock_request(dlm, res, lock, flags);

	spin_lock(&res->spinlock);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	lock->lock_pending = 0;
	if (status != DLM_NORMAL) {
		if (status == DLM_RECOVERING &&
		    dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			/* recovery lock was mastered by dead node.
			 * we need to have calc_usage shoot down this
			 * lockres and completely remaster it. */
			mlog(0, "%s: recovery lock was owned by "
			     "dead node %u, remaster it now.\n",
			     dlm->name, res->owner);
		} else if (status != DLM_NOTQUEUED) {
			/*
			 * DO NOT call calc_usage, as this would unhash
			 * the remote lockres before we ever get to use
			 * it.  treat as if we never made any change to
			 * the lockres.
			 */
			lockres_changed = 0;
			dlm_error(status);
		}
		dlm_revert_pending_lock(res, lock);
	} else if (dlm_is_recovery_lock(res->lockname.name,
					res->lockname.len)) {
		/* special case for the $RECOVERY lock.
		 * there will never be an AST delivered to put
		 * this lock on the proper secondary queue
		 * (granted), so do it manually. */
		mlog(0, "%s: $RECOVERY lock for this node (%u) is "
		     "mastered by %u; got lock, manually granting (no ast)\n",
		     dlm->name, dlm->node_num, res->owner);
		list_move_tail(&lock->list, &res->granted);
	}
	spin_unlock(&res->spinlock);

	if (lockres_changed)
		dlm_lockres_calc_usage(dlm, res);

	return status;
}
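
/*
 * Editor's illustrative note (not in the original source): on a successful
 * remote request the new lock stays on res->blocked here; it only moves to
 * the granted queue when the master later delivers the AST for it.  The
 * $RECOVERY lock handled above is the one exception, since no AST will ever
 * arrive for it, which is why it is moved to res->granted by hand.
 */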

/* for remote lock creation.
 *
 * caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
 *
 * returns: DLM_NOLOCKMGR, or net status
 */
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res,
					struct dlm_lock *lock, int flags)
{
	struct dlm_create_lock create;
	int tmpret, status = 0;
	enum dlm_status ret;

	memset(&create, 0, sizeof(create));
	create.node_idx = dlm->node_num;
	create.requested_type = lock->ml.type;
	create.cookie = lock->ml.cookie;
	create.namelen = res->lockname.len;
	create.flags = cpu_to_be32(flags);
	memcpy(create.name, res->lockname.name, create.namelen);

	tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
				    sizeof(create), res->owner, &status);
	if (tmpret >= 0) {
		// successfully sent and received
		ret = status;  // this is already a dlm_status
		if (ret == DLM_REJECTED) {
			mlog(ML_ERROR, "%s:%.*s: BUG.  this is a stale lockres "
			     "no longer owned by %u.  that node is coming back "
			     "up currently.\n", dlm->name, create.namelen,
			     create.name, res->owner);
			dlm_print_one_lock_resource(res);
			BUG();
		}
	} else {
		if (dlm_is_host_down(tmpret)) {
			ret = DLM_RECOVERING;
			mlog(0, "node %u died so returning DLM_RECOVERING "
			     "from lock message!\n", res->owner);
		} else {
			ret = dlm_err_to_dlm_status(tmpret);
		}
	}

	return ret;
}
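
/*
 * Editor's illustrative note (not in the original source): create.flags is
 * converted with cpu_to_be32() before being sent, and dlm_create_lock_handler()
 * on the master undoes it with be32_to_cpu().  create.cookie needs no
 * conversion here because lock->ml.cookie is already stored big-endian by
 * dlm_init_lock() (cpu_to_be64()), so it travels over the wire as-is.
 */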

void dlm_lock_get(struct dlm_lock *lock)
{
	kref_get(&lock->lock_refs);
}

void dlm_lock_put(struct dlm_lock *lock)
{
	kref_put(&lock->lock_refs, dlm_lock_release);
}

static void dlm_lock_release(struct kref *kref)
{
	struct dlm_lock *lock;

	lock = container_of(kref, struct dlm_lock, lock_refs);

	BUG_ON(!list_empty(&lock->list));
	BUG_ON(!list_empty(&lock->ast_list));
	BUG_ON(!list_empty(&lock->bast_list));
	BUG_ON(lock->ast_pending);
	BUG_ON(lock->bast_pending);

	dlm_lock_detach_lockres(lock);

	if (lock->lksb_kernel_allocated) {
		mlog(0, "freeing kernel-allocated lksb\n");
		kfree(lock->lksb);
	}
	kfree(lock);
}

/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res)
{
	dlm_lockres_get(res);
	lock->lockres = res;
}

/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
	struct dlm_lock_resource *res;

	res = lock->lockres;
	if (res) {
		lock->lockres = NULL;
		mlog(0, "removing lock's lockres reference\n");
		dlm_lockres_put(res);
	}
}

static void dlm_init_lock(struct dlm_lock *newlock, int type,
			  u8 node, u64 cookie)
{
	INIT_LIST_HEAD(&newlock->list);
	INIT_LIST_HEAD(&newlock->ast_list);
	INIT_LIST_HEAD(&newlock->bast_list);
	spin_lock_init(&newlock->spinlock);
	newlock->ml.type = type;
	newlock->ml.convert_type = LKM_IVMODE;
	newlock->ml.highest_blocked = LKM_IVMODE;
	newlock->ml.node = node;
	newlock->ml.pad1 = 0;
	newlock->ml.list = 0;
	newlock->ml.flags = 0;
	newlock->ast = NULL;
	newlock->bast = NULL;
	newlock->astdata = NULL;
	newlock->ml.cookie = cpu_to_be64(cookie);
	newlock->ast_pending = 0;
	newlock->bast_pending = 0;
	newlock->convert_pending = 0;
	newlock->lock_pending = 0;
	newlock->unlock_pending = 0;
	newlock->cancel_pending = 0;
	newlock->lksb_kernel_allocated = 0;

	kref_init(&newlock->lock_refs);
}

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb)
{
	struct dlm_lock *lock;
	int kernel_allocated = 0;

	lock = kzalloc(sizeof(*lock), GFP_NOFS);
	if (!lock)
		return NULL;

	if (!lksb) {
		/* zero memory only if kernel-allocated */
		lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
		if (!lksb) {
			kfree(lock);
			return NULL;
		}
		kernel_allocated = 1;
	}

	dlm_init_lock(lock, type, node, cookie);
	if (kernel_allocated)
		lock->lksb_kernel_allocated = 1;
	lock->lksb = lksb;
	lksb->lockid = lock;
	return lock;
}
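
/*
 * Editor's illustrative note (not in the original source): the two allocation
 * modes above determine lksb ownership.  A caller-supplied lksb, as passed in
 * from dlmlock(), remains owned by the caller; passing a NULL lksb makes
 * dlm_new_lock() allocate one and set lksb_kernel_allocated, so that
 * dlm_lock_release() frees it together with the lock once the last reference
 * is dropped via dlm_lock_put().
 */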

/* handler for lock creation net message
 *
 * taken: takes and drops res->spinlock
 *
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	enum dlm_status status = DLM_NORMAL;
	char *name;
	unsigned int namelen;

	name = create->name;
	namelen = create->namelen;

	status = DLM_REJECTED;
	if (!dlm_domain_fully_joined(dlm)) {
		mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
		     "sending a create_lock message for lock %.*s!\n",
		     dlm->name, create->node_idx, namelen, name);
		goto leave;
	}

	status = DLM_IVBUFLEN;
	if (namelen > DLM_LOCKID_NAME_MAX) {
		dlm_error(status);
		goto leave;
	}

	newlock = dlm_new_lock(create->requested_type,
			       create->node_idx,
			       be64_to_cpu(create->cookie), NULL);
	if (!newlock) {
		status = DLM_SYSERR;
		dlm_error(status);
		goto leave;
	}

	lksb = newlock->lksb;

	if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
		lksb->flags |= DLM_LKSB_GET_LVB;
		mlog(0, "set DLM_LKSB_GET_LVB flag\n");
	}

	status = DLM_IVLOCKID;
	res = dlm_lookup_lockres(dlm, name, namelen);
	if (!res) {
		dlm_error(status);
		goto leave;
	}

	spin_lock(&res->spinlock);
	status = __dlm_lockres_state_to_status(res);
	spin_unlock(&res->spinlock);

	if (status != DLM_NORMAL) {
		mlog(0, "lockres recovering/migrating/in-progress\n");
		goto leave;
	}

	dlm_lock_attach_lockres(newlock, res);

	status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));

leave:
	if (status != DLM_NORMAL)
		if (newlock)
			dlm_lock_put(newlock);

	if (res)
		dlm_lockres_put(res);

	return status;
}

/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
	u64 tmpnode = node_num;

	/* shift single byte of node num into top 8 bits */
	tmpnode <<= 56;

	spin_lock(&dlm_cookie_lock);
	*cookie = (dlm_next_cookie | tmpnode);
	if (++dlm_next_cookie & 0xff00000000000000ull) {
		mlog(0, "This node's cookie will now wrap!\n");
		dlm_next_cookie = 1;
	}
	spin_unlock(&dlm_cookie_lock);
}
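
/*
 * Editor's illustrative example (not in the original source): with
 * node_num = 3 and dlm_next_cookie = 0x2a, the cookie handed back is
 * 0x030000000000002a -- the node number in the top 8 bits and the per-node
 * counter in the low 56 bits -- which dlm_init_lock() then stores big-endian
 * via cpu_to_be64() in lock->ml.cookie.
 */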

enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
			struct dlm_lockstatus *lksb, int flags,
			const char *name, int namelen, dlm_astlockfunc_t *ast,
			void *data, dlm_bastlockfunc_t *bast)
{
	enum dlm_status status;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	int convert = 0, recovery = 0;

	/* yes this function is a mess.
	 * TODO: clean this up.  lots of common code in the
	 *       lock and convert paths, especially in the retry blocks */
	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	status = DLM_BADPARAM;
	if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
		dlm_error(status);
		goto error;
	}

	if (flags & ~LKM_VALID_FLAGS) {
		dlm_error(status);
		goto error;
	}

	convert = (flags & LKM_CONVERT);
	recovery = (flags & LKM_RECOVERY);

	if (recovery &&
	    (!dlm_is_recovery_lock(name, namelen) || convert) ) {
		dlm_error(status);
		goto error;
	}
	if (convert && (flags & LKM_LOCAL)) {
		mlog(ML_ERROR, "strange LOCAL convert request!\n");
		goto error;
	}

	if (convert) {
		/* CONVERT request */

		/* if converting, must pass in a valid dlm_lock */
		lock = lksb->lockid;
		if (!lock) {
			mlog(ML_ERROR, "NULL lock pointer in convert "
			     "request!\n");
			goto error;
		}

		res = lock->lockres;
		if (!res) {
			mlog(ML_ERROR, "NULL lockres pointer in convert "
			     "request!\n");
			goto error;
		}
		dlm_lockres_get(res);

		/* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
		 * static after the original lock call.  convert requests will
		 * ensure that everything is the same, or return DLM_BADARGS.
		 * this means that DLM_DENIED_NOASTS will never be returned.
		 */
		if (lock->lksb != lksb || lock->ast != ast ||
		    lock->bast != bast || lock->astdata != data) {
			status = DLM_BADARGS;
			mlog(ML_ERROR, "new args:  lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lksb, ast, bast, data);
			mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
			     "astdata=%p\n", lock->lksb, lock->ast,
			     lock->bast, lock->astdata);
			goto error;
		}
retry_convert:
		dlm_wait_for_recovery(dlm);

		if (res->owner == dlm->node_num)
			status = dlmconvert_master(dlm, res, lock, flags, mode);
		else
			status = dlmconvert_remote(dlm, res, lock, flags, mode);
		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			/* for now, see how this works without sleeping
			 * and just retry right away.  I suspect the reco
			 * or migration will complete fast enough that
			 * no waiting will be necessary */
			mlog(0, "retrying convert with migration/recovery/"
			     "in-progress\n");
			goto retry_convert;
		}
	} else {
		u64 tmpcookie;

		/* LOCK request */
		status = DLM_BADARGS;
		if (!name) {
			dlm_error(status);
			goto error;
		}

		status = DLM_IVBUFLEN;
		if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
			dlm_error(status);
			goto error;
		}

		dlm_get_next_cookie(dlm->node_num, &tmpcookie);
		lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
		if (!lock) {
			status = DLM_SYSERR;
			dlm_error(status);
			goto error;
		}

		if (!recovery)
			dlm_wait_for_recovery(dlm);

		/* find or create the lock resource */
		res = dlm_get_lock_resource(dlm, name, namelen, flags);
		if (!res) {
			status = DLM_IVLOCKID;
			dlm_error(status);
			goto error;
		}

		mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
		mlog(0, "creating lock: lock=%p res=%p\n", lock, res);

		dlm_lock_attach_lockres(lock, res);
		lock->ast = ast;
		lock->bast = bast;
		lock->astdata = data;

retry_lock:
		if (flags & LKM_VALBLK) {
			mlog(0, "LKM_VALBLK passed by caller\n");

			/* LVB requests for non PR, PW or EX locks are
			 * ignored. */
			if (mode < LKM_PRMODE)
				flags &= ~LKM_VALBLK;
			else {
				flags |= LKM_GET_LVB;
				lock->lksb->flags |= DLM_LKSB_GET_LVB;
			}
		}

		if (res->owner == dlm->node_num)
			status = dlmlock_master(dlm, res, lock, flags);
		else
			status = dlmlock_remote(dlm, res, lock, flags);

		if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
		    status == DLM_FORWARD) {
			mlog(0, "retrying lock with migration/"
			     "recovery/in progress\n");
			msleep(100);
			/* no waiting for dlm_reco_thread */
			if (recovery) {
				if (status != DLM_RECOVERING)
					goto retry_lock;

				mlog(0, "%s: got RECOVERING "
				     "for $RECOVERY lock, master "
				     "was %u\n", dlm->name,
				     res->owner);
				/* wait to see the node go down, then
				 * drop down and allow the lockres to
				 * get cleaned up.  need to remaster. */
				dlm_wait_for_node_death(dlm, res->owner,
						DLM_NODE_DEATH_WAIT_MAX);
			} else {
				dlm_wait_for_recovery(dlm);
				goto retry_lock;
			}
		}

		if (status != DLM_NORMAL) {
			lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
			if (status != DLM_NOTQUEUED)
				dlm_error(status);
			goto error;
		}
	}

error:
	if (status != DLM_NORMAL) {
		if (lock && !convert)
			dlm_lock_put(lock);
		// this is kind of unnecessary
		lksb->status = status;
	}

	/* put lockres ref from the convert path
	 * or from dlm_get_lock_resource */
	if (res)
		dlm_lockres_put(res);

	return status;
}
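
/*
 * Editor's illustrative sketch (not from the original source; the names used
 * below are hypothetical): a caller requesting a new exclusive lock supplies
 * its own dlm_lockstatus and callbacks, e.g.
 *
 *	static struct dlm_lockstatus my_lksb;
 *
 *	ret = dlmlock(dlm, LKM_EXMODE, &my_lksb, LKM_NOQUEUE,
 *		      "my_lock_name", strlen("my_lock_name"),
 *		      my_ast, my_private, my_bast);
 *
 * A later conversion of the same lock passes LKM_CONVERT with the identical
 * lksb/ast/bast/astdata, per the DLM_BADARGS check in the convert path above.
 */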
EXPORT_SYMBOL_GPL(dlmlock
);