/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);
static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;

	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
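
/*
 * Note: the counter wraps back to 1, never 0.  A zero mig_cookie is
 * meaningful on the wire: dlm_send_one_lockres() below only asks for a
 * cookie when a lockres needs more than one network packet, so 0 marks
 * the common single-message case.
 */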
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}
static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}
static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}
static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct dlm_work_item *item, *next;
	dlm_workfunc_t *workfunc;
	int tot = 0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_entry(item, &tmp_list, list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_entry_safe(item, next, &tmp_list, list) {
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}
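
/*
 * Producers feed dlm_dispatch_work() with the pattern used by the
 * handlers later in this file: dlm_grab() an extra ref for the item,
 * dlm_init_work_item(), list_add_tail() onto dlm->work_list under
 * dlm->work_lock, then queue_work(dlm->dlm_worker,
 * &dlm->dispatched_work).
 */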
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}
/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    one node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 */
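
/*
 * Mapping the steps above onto this file: step 2 is
 * dlm_pick_recovery_master(); steps 3-4 are
 * dlm_move_lockres_to_recovery_list(); steps 5-7 are driven by
 * dlm_remaster_locks() via dlm_request_all_locks() and
 * dlm_send_finalize_reco_message(); the non-master side answers through
 * dlm_request_all_locks_worker() and dlm_send_all_done_msg().
 */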
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}

	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}
/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}
/* returns true if node's recovery has completed, i.e. the node
 * has been cleared from the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}
int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(0, "%s: waiting %dms for notification of "
		     "recovery of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(0, "%s: waiting indefinitely for notification "
		     "of recovery of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}
void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
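
/*
 * Illustrative usage sketch (an assumption about callers, not code from
 * this file): a top-level entry point such as dlmlock() is expected to
 * gate itself on recovery before touching lock resources, roughly:
 *
 *	dlm_wait_for_recovery(dlm);
 *	... submit the lock/convert/unlock request ...
 *
 * dlm_begin_recovery() and dlm_end_recovery() below bracket the window
 * during which such callers are held off.
 */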
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}
static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
	     "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->node_num, dlm->reco.dead_node, dlm->name);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}
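
/*
 * Return value contract (as consumed by dlm_recovery_thread() above):
 * 0 means there was nothing to recover or another node is mastering the
 * session, so the thread sleeps; -EAGAIN means a dead node was handled
 * here and the thread should immediately rescan for more dead nodes.
 */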
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0)
				mlog_errno(status);
			if (dlm_is_host_down(status)) {
				/* node died, ignore it for recovery */
				status = 0;
				ndata->state = DLM_RECO_NODE_DATA_DEAD;
				/* wait for the domain map to catch up
				 * with the network state. */
				wait_event_timeout(dlm->dlm_reco_thread_wq,
						   dlm_is_node_dead(dlm,
							ndata->node_num),
						   msecs_to_jiffies(1000));
				mlog(0, "waited 1 sec for %u, "
				     "dead? %s\n", ndata->node_num,
				     dlm_is_node_dead(dlm, ndata->node_num) ?
				     "yes" : "no");
			} else if (status < 0) {
				/* -ENOMEM on the other node */
				mlog(0, "%s: node %u returned "
				     "%d during recovery, retrying "
				     "after a short wait\n",
				     dlm->name, ndata->node_num,
				     status);
				msleep(100);
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	/* send message */
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	// return from here, then
	// sleep until all received or error
	return ret;
}
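
/*
 * The actual lock data does not ride back on this request; the remote
 * node answers asynchronously with a stream of DLM_MIG_LOCKRES_MSG
 * messages (driven by dlm_request_all_locks_worker() and received in
 * dlm_mig_lockres_handler()) followed by DLM_RECO_DATA_DONE_MSG.
 */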
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		if (!dlm_is_host_down(ret)) {
			mlog_errno(ret);
			mlog(ML_ERROR, "%s: unknown error sending data-done "
			     "to %u\n", dlm->name, send_to);
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
		break;
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	/* the granted, converting and blocked list_heads are laid out
	 * consecutively in struct dlm_lock_resource, so bumping the
	 * queue pointer walks all three queues in order */
	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
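
/*
 * Rough sizing sketch (illustrative; the constants live in dlmcommon.h):
 * since mres occupies exactly one page, the dlm_migratable_lockres
 * header plus DLM_MAX_MIGRATABLE_LOCKS * sizeof(struct
 * dlm_migratable_lock) must fit within PAGE_SIZE.  That bound is what
 * makes the flush-at-max check in dlm_add_lock_to_array() below safe.
 */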
/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (!dlm_lvb_is_empty(mres->lvb) &&
			    (ml->type == LKM_EXMODE ||
			     memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lockres = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}
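
/*
 * The receiving side recognizes this placeholder in
 * dlm_process_recovery_data() via dlm_is_dummy_lock() below and only
 * sets the refmap bit for the sending node; no lock structure is ever
 * created for it.
 */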
static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}
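
/*
 * Packetization sketch: locks are appended with dlm_add_lock_to_array()
 * and flushed whenever the page fills; dlm_send_mig_lockres_msg() tacks
 * DLM_MRES_ALL_DONE onto whichever send carries the final lock, so the
 * receiver knows when the lockres is complete even across multiple
 * packets sharing one mig_cookie.
 */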
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len, mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				dlm_lockres_put(res);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);
	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
	}
	return ret;
}
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
			   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	dlm_put(dlm);
	kfree(data);
}
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}
/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		} else /* put.. in case we are not the master */
			dlm_lockres_put(res);
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	/* same consecutive-layout trick as dlm_num_locks_in_lockres() */
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
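
/*
 * Illustrative timeline (a sketch, not from the original file): node A
 * is migrating lockres L to node B while node C holds a lock on L.
 *
 *   C: sends a convert request for L to A
 *   A: sets MIGRATING on L, flushes pending ASTs
 *   A: either (a) C's convert was queued first -- A's reply is already
 *      on the wire and C's local lock matches what A dumps to B, or
 *      (b) the convert arrived after MIGRATING -- C is told to spin,
 *      wait for MIGRATING to clear, and re-look-up the master (now B).
 *
 * Either way C's local copy is authoritative, which is why
 * dlm_process_recovery_data() below only *moves* a local lock between
 * queues and never rewrites it to match the master's copy.
 */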
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock = NULL;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(from, res);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each_entry(lock, tmpq, list) {
					if (lock->ml.cookie != ml->cookie)
						lock = NULL;
					else
						break;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				__be64 c = ml->cookie;
				mlog(ML_ERROR, "could not find local lock "
				     "with cookie %u:%llu!\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));
				__dlm_print_one_lock_resource(res);
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			if (tmpq != queue) {
				mlog(0, "lock was on %u instead of %u for %.*s\n",
				     j, ml->list, res->lockname.len, res->lockname.name);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       preserved exactly.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       preserved.
		 *    order of locks does not affect integrity, it
		 *    just means that a lock request may get pushed
		 *    back in line as a result of the node death.
		 *    also note that for a given node the lock order
		 *    for its secondary queue locks is preserved
		 *    relative to each other, but clearly *not*
		 *    preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				__be64 c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(ml->node, res);
			added++;
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	return ret;
}
*dlm
,
1939 struct dlm_lock_resource
*res
)
1942 struct list_head
*queue
;
1943 struct dlm_lock
*lock
, *next
;
1945 res
->state
|= DLM_LOCK_RES_RECOVERING
;
1946 if (!list_empty(&res
->recovering
)) {
1948 "Recovering res %s:%.*s, is already on recovery list!\n",
1949 dlm
->name
, res
->lockname
.len
, res
->lockname
.name
);
1950 list_del_init(&res
->recovering
);
1951 dlm_lockres_put(res
);
1953 /* We need to hold a reference while on the recovery list */
1954 dlm_lockres_get(res
);
1955 list_add_tail(&res
->recovering
, &dlm
->reco
.resources
);
1957 /* find any pending locks and put them back on proper list */
1958 for (i
=DLM_BLOCKED_LIST
; i
>=DLM_GRANTED_LIST
; i
--) {
1959 queue
= dlm_list_idx_to_ptr(res
, i
);
1960 list_for_each_entry_safe(lock
, next
, queue
, list
) {
1962 if (lock
->convert_pending
) {
1963 /* move converting lock back to granted */
1964 BUG_ON(i
!= DLM_CONVERTING_LIST
);
1965 mlog(0, "node died with convert pending "
1966 "on %.*s. move back to granted list.\n",
1967 res
->lockname
.len
, res
->lockname
.name
);
1968 dlm_revert_pending_convert(res
, lock
);
1969 lock
->convert_pending
= 0;
1970 } else if (lock
->lock_pending
) {
1971 /* remove pending lock requests completely */
1972 BUG_ON(i
!= DLM_BLOCKED_LIST
);
1973 mlog(0, "node died with lock pending "
1974 "on %.*s. remove from blocked list and skip.\n",
1975 res
->lockname
.len
, res
->lockname
.name
);
1976 /* lock will be floating until ref in
1977 * dlmlock_remote is freed after the network
1978 * call returns. ok for it to not be on any
1979 * list since no ast can be called
1980 * (the master is dead). */
1981 dlm_revert_pending_lock(res
, lock
);
1982 lock
->lock_pending
= 0;
1983 } else if (lock
->unlock_pending
) {
1984 /* if an unlock was in progress, treat as
1985 * if this had completed successfully
1986 * before sending this lock state to the
1987 * new master. note that the dlm_unlock
1988 * call is still responsible for calling
1989 * the unlockast. that will happen after
1990 * the network call times out. for now,
1991 * just move lists to prepare the new
1992 * recovery master. */
1993 BUG_ON(i
!= DLM_GRANTED_LIST
);
1994 mlog(0, "node died with unlock pending "
1995 "on %.*s. remove from blocked list and skip.\n",
1996 res
->lockname
.len
, res
->lockname
.name
);
1997 dlm_commit_pending_unlock(res
, lock
);
1998 lock
->unlock_pending
= 0;
1999 } else if (lock
->cancel_pending
) {
2000 /* if a cancel was in progress, treat as
2001 * if this had completed successfully
2002 * before sending this lock state to the
2004 BUG_ON(i
!= DLM_CONVERTING_LIST
);
2005 mlog(0, "node died with cancel pending "
2006 "on %.*s. move back to granted list.\n",
2007 res
->lockname
.len
, res
->lockname
.name
);
2008 dlm_commit_pending_cancel(res
, lock
);
2009 lock
->cancel_pending
= 0;
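/* Quick reference for the pending-state handling above:
 *
 *	convert_pending	- revert to granted (must be on converting list)
 *	lock_pending	- drop the request entirely; no ast can fire
 *			  since the master is dead (blocked list)
 *	unlock_pending	- treat the unlock as already complete
 *			  (granted list)
 *	cancel_pending	- treat the cancel as already complete, back
 *			  to granted (converting list)
 */
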
/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_node *hash_iter;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				if (!list_empty(&res->recovering)) {
					mlog(0, "%s:%.*s: lockres was "
					     "marked RECOVERING, owner=%u\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, res->owner);
					list_del_init(&res->recovering);
					dlm_lockres_put(res);
				}
				spin_lock(&res->spinlock);
				/* new_master has our reference from
				 * the lock state sent during recovery */
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				if (__dlm_lockres_has_locks(res))
					__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}

static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}

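/* The test above reduces to a small truth table (local means we are
 * checking this node's own locks on a non-mastered lockres, !local
 * means the dead node's locks on a lockres mastered here):
 *
 *	local  && type is not EX or PR	-> lvb must be invalidated
 *	!local && type is EX		-> lvb must be invalidated
 *	anything else			-> lvb can still be trusted
 */
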
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i = DLM_GRANTED_LIST; i <= DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct dlm_lock *lock, *next;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* We do two dlm_lock_put(). One for removing from list and the other is
	 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_entry_safe(lock, next, &res->granted, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->converting, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->blocked, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}

	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		BUG_ON(!test_bit(dead_node, res->refmap));
		dlm_lockres_clear_refmap_bit(dead_node, res);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dead_node, res);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

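/* Note on the refmap handling above: whether or not the dead node still
 * held locks here, its refmap bit is cleared, preserving (roughly) the
 * invariant that a node's bit stays set only while that node may still
 * reference this lockres; the master will not purge a resource while a
 * live node's bit remains set. */
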
/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 *
 */

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF)
					mlog(0, "%s:%.*s: owned by "
					     "dead node %u, this node was "
					     "dropping its ref when it died. "
					     "continue, dropping the flag.\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, dead_node);

				/* the wake_up for this will happen when the
				 * RECOVERING flag is dropped later */
				res->state &= ~DLM_LOCK_RES_DROPPING_REF;

				dlm_move_lockres_to_recovery_list(dlm, res);
			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}

static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

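/* The ordering in __dlm_hb_node_down() is deliberate: local cleanup of
 * the dead node's locks runs before the heartbeat callbacks fire and
 * before the node leaves the domain map, and only then is the node
 * marked in the recovery map for the recovery thread to pick up. */
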
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

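/* These handlers only log: the $RECOVERY lockres protects no data and
 * is used only to choose a recovery master, so nothing waits on its
 * asts or basts (see also the unlock comment in
 * dlm_pick_recovery_master() below). */
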
/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}

static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;
			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     " returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		} else if (ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			/* TODO Look into replacing msleep with cond_resched() */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}

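/* Error handling in the send loop above, in brief: a node that is
 * already down is skipped (it is not involved in this recovery), a
 * transient failure such as ENOMEM in the network stack is retried
 * after a short sleep, and a positive EAGAIN means the target is still
 * finishing a previous recovery, so this node backs off and retries. */
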
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		dlm_put(dlm);
		return EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}

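/* If the recovery master noticed the death first, this node may not yet
 * have marked the dead node in its own maps; the handler above briefly
 * sets the domain and live-nodes bits so that __dlm_hb_node_down() runs
 * the full local cleanup path, after which they are cleared again. */
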
#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog_errno(ret);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}

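/* Finalize runs in two stages: stage 1 tells every node to finish its
 * local lockres recovery and set DLM_RECO_STATE_FINALIZE; stage 2
 * (DLM_FINALIZE_STAGE2) clears that state and resets the recovery
 * context.  Should the new master die between the two stages,
 * __dlm_hb_node_down() can reset recovery safely precisely because
 * finalize1 had already been reached. */
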
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
	case 1:
		dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			mlog(ML_ERROR, "%s: received finalize1 from "
			     "new master %u for dead node %u, but "
			     "this node has already received it!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		break;
	case 2:
		if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
			mlog(ML_ERROR, "%s: received finalize2 from "
			     "new master %u for dead node %u, but "
			     "this node did not have finalize1!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		dlm_reset_recovery(dlm);
		dlm_kick_recovery_thread(dlm);
		break;
	default:
		BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}