/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
                                 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
                                 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
                                         void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);
static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

static u64 dlm_get_next_mig_cookie(void)
{
        u64 c;
        spin_lock(&dlm_mig_cookie_lock);
        c = dlm_mig_cookie;
        if (dlm_mig_cookie == (~0ULL))
                dlm_mig_cookie = 1;
        else
                dlm_mig_cookie++;
        spin_unlock(&dlm_mig_cookie_lock);
        return c;
}
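
/* A mig cookie ties together the multiple network messages needed when a
 * lockres holds more locks than fit in one page; dlm_send_one_lockres()
 * only asks for one when total_locks exceeds DLM_MAX_MIGRATABLE_LOCKS.
 * A cookie of 0 means "single message", which is why the counter starts
 * at 1 and skips 0 when it wraps. */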
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
                                          u8 dead_node)
{
        assert_spin_locked(&dlm->spinlock);
        if (dlm->reco.dead_node != dead_node)
                mlog(0, "%s: changing dead_node from %u to %u\n",
                     dlm->name, dlm->reco.dead_node, dead_node);
        dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
                                       u8 master)
{
        assert_spin_locked(&dlm->spinlock);
        mlog(0, "%s: changing new_master from %u to %u\n",
             dlm->name, dlm->reco.new_master, master);
        dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        assert_spin_locked(&dlm->spinlock);
        clear_bit(dlm->reco.dead_node, dlm->recovery_map);
        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        __dlm_reset_recovery(dlm);
        spin_unlock(&dlm->spinlock);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
        struct dlm_ctxt *dlm =
                container_of(work, struct dlm_ctxt, dispatched_work);
        LIST_HEAD(tmp_list);
        struct dlm_work_item *item, *next;
        dlm_workfunc_t *workfunc;
        int tot = 0;

        spin_lock(&dlm->work_lock);
        list_splice_init(&dlm->work_list, &tmp_list);
        spin_unlock(&dlm->work_lock);

        list_for_each_entry(item, &tmp_list, list) {
                tot++;
        }
        mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

        list_for_each_entry_safe(item, next, &tmp_list, list) {
                workfunc = item->func;
                list_del_init(&item->list);

                /* already have ref on dlm to avoid having
                 * it disappear.  just double-check. */
                BUG_ON(item->dlm != dlm);

                /* this is allowed to sleep and
                 * call network stuff */
                workfunc(item, item->data);

                dlm_put(dlm);
                kfree(item);
        }
}
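
/* Note on dlm_dispatch_work() above: list_splice_init() detaches the whole
 * work list while dlm->work_lock is held, so each workfunc runs off a
 * private list and is free to sleep and send network messages without any
 * dlm locks held. */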
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
        /* wake the recovery thread
         * this will wake the reco thread in one of three places
         * 1) sleeping with no recovery happening
         * 2) sleeping with recovery mastered elsewhere
         * 3) recovery mastered here, waiting on reco data */

        wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm recovery thread...\n");

        dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
                                                "dlm_reco_thread");
        if (IS_ERR(dlm->dlm_reco_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
                dlm->dlm_reco_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_reco_thread_task) {
                mlog(0, "waiting for dlm recovery thread to exit\n");
                kthread_stop(dlm->dlm_reco_thread_task);
                dlm->dlm_reco_thread_task = NULL;
        }
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    one node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 */
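
/* In terms of the code below: step 2 is dlm_pick_recovery_master(), step 3
 * is dlm_move_lockres_to_recovery_list(), steps 5/6 are
 * dlm_request_all_locks() answered by dlm_send_one_lockres() plus a final
 * dlm_send_all_done_msg() from each node, step 7 is
 * dlm_send_finalize_reco_message(), and step 9 ends in
 * dlm_finish_local_lockres_recovery(). */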
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
        struct dlm_reco_node_data *ndata;
        struct dlm_lock_resource *res;

        mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
             dlm->reco.dead_node, dlm->reco.new_master);

        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                char *st = "unknown";
                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                                st = "init";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                st = "requesting";
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                st = "dead";
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                st = "receiving";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                st = "requested";
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                st = "done";
                                break;
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                st = "finalize-sent";
                                break;
                        default:
                                st = "bad";
                                break;
                }
                mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
                     dlm->name, ndata->node_num, st);
        }

        list_for_each_entry(res, &dlm->reco.resources, recovering) {
                mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
                     dlm->name, res->lockname.len, res->lockname.name);
        }
}
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
        int status;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                if (dlm_domain_fully_joined(dlm)) {
                        status = dlm_do_recovery(dlm);
                        if (status == -EAGAIN) {
                                /* do not sleep, recheck immediately. */
                                continue;
                        }
                        if (status < 0)
                                mlog_errno(status);
                }

                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM recovery thread\n");
        return 0;
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
        int ready;
        spin_lock(&dlm->spinlock);
        ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
        spin_unlock(&dlm->spinlock);
        return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
        int dead;
        spin_lock(&dlm->spinlock);
        dead = !test_bit(node, dlm->domain_map);
        spin_unlock(&dlm->spinlock);
        return dead;
}

/* returns true if node has been fully recovered,
 * i.e. it is no longer set in the recovery map */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
        int recovered;
        spin_lock(&dlm->spinlock);
        recovered = !test_bit(node, dlm->recovery_map);
        spin_unlock(&dlm->spinlock);
        return recovered;
}
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (dlm_is_node_dead(dlm, node))
                return;

        printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
               "domain %s\n", node, dlm->name);

        if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_dead(dlm, node),
                                   msecs_to_jiffies(timeout));
        else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
}

void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (dlm_is_node_recovered(dlm, node))
                return;

        printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
               "domain %s\n", node, dlm->name);

        if (timeout)
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_recovered(dlm, node),
                                   msecs_to_jiffies(timeout));
        else
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_recovered(dlm, node));
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
        int in_recovery;
        spin_lock(&dlm->spinlock);
        in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        spin_unlock(&dlm->spinlock);
        return in_recovery;
}

void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
        if (dlm_in_recovery(dlm)) {
                mlog(0, "%s: reco thread %d in recovery: "
                     "state=%d, master=%u, dead=%u\n",
                     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
                     dlm->reco.state, dlm->reco.new_master,
                     dlm->reco.dead_node);
        }
        wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
               dlm->name, dlm->reco.dead_node);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
        printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
        wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
        printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
               "dead node %u in domain %s\n", dlm->reco.new_master,
               (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
               dlm->reco.dead_node, dlm->name);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
        int status = 0;
        int ret;

        spin_lock(&dlm->spinlock);

        /* check to see if the new master has died */
        if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
            test_bit(dlm->reco.new_master, dlm->recovery_map)) {
                mlog(0, "new master %u died while recovering %u!\n",
                     dlm->reco.new_master, dlm->reco.dead_node);
                /* unset the new_master, leave dead_node */
                dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
        }

        /* select a target to recover */
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;

                bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
                        dlm_set_reco_dead_node(dlm, bit);
        } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
                /* BUG? */
                mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
                     dlm->reco.dead_node);
                dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        }

        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                // mlog(0, "nothing to recover!  sleeping now!\n");
                spin_unlock(&dlm->spinlock);
                /* return to main thread loop and sleep. */
                return 0;
        }
        mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
             dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
             dlm->reco.dead_node);
        spin_unlock(&dlm->spinlock);

        /* take write barrier */
        /* (stops the list reshuffling thread, proxy ast handling) */
        dlm_begin_recovery(dlm);

        if (dlm->reco.new_master == dlm->node_num)
                goto master_here;

        if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                /* choose a new master, returns 0 if this node
                 * is the master, -EEXIST if it's another node.
                 * this does not return until a new master is chosen
                 * or recovery completes entirely. */
                ret = dlm_pick_recovery_master(dlm);
                if (!ret) {
                        /* already notified everyone.  go. */
                        goto master_here;
                }
                mlog(0, "another node will master this recovery session.\n");
        }

        dlm_print_recovery_master(dlm);

        /* it is safe to start everything back up here
         * because all of the dead node's lock resources
         * have been marked as in-recovery */
        dlm_end_recovery(dlm);

        /* sleep out in main dlm_recovery_thread loop. */
        return 0;

master_here:
        dlm_print_recovery_master(dlm);

        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                /* we should never hit this anymore */
                mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
                     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
                /* yield a bit to allow any final network messages
                 * to get handled on remaining nodes */
                msleep(100);
        } else {
                /* success!  see if any other nodes need recovery */
                mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
                     dlm->name, dlm->reco.dead_node, dlm->node_num);
                dlm_reset_recovery(dlm);
        }
        dlm_end_recovery(dlm);

        /* continue and look for another dead node */
        return -EAGAIN;
}
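
/* To summarize the exits of dlm_do_recovery(): 0 with no dead node found
 * (the thread just sleeps), 0 when another node masters the session (local
 * activity restarts as soon as the dead node's resources are marked
 * RECOVERING), and -EAGAIN after this node has remastered everything so
 * the caller loops for more dead nodes. */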
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
        int status = 0;
        struct dlm_reco_node_data *ndata;
        int all_nodes_done;
        int destroy = 0;
        int pass = 0;

        do {
                /* we have become recovery master.  there is no escaping
                 * this, so just keep trying until we get it. */
                status = dlm_init_recovery_area(dlm, dead_node);
                if (status < 0) {
                        mlog(ML_ERROR, "%s: failed to alloc recovery area, "
                             "retrying\n", dlm->name);
                        msleep(1000);
                }
        } while (status != 0);

        /* safe to access the node data list without a lock, since this
         * process is the only one to change the list */
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

                mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
                     ndata->node_num);

                if (ndata->node_num == dlm->node_num) {
                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        continue;
                }

                do {
                        status = dlm_request_all_locks(dlm, ndata->node_num,
                                                       dead_node);
                        if (status < 0) {
                                mlog_errno(status);
                                if (dlm_is_host_down(status)) {
                                        /* node died, ignore it for recovery */
                                        status = 0;
                                        ndata->state = DLM_RECO_NODE_DATA_DEAD;
                                        /* wait for the domain map to catch up
                                         * with the network state. */
                                        wait_event_timeout(dlm->dlm_reco_thread_wq,
                                                           dlm_is_node_dead(dlm,
                                                                ndata->node_num),
                                                           msecs_to_jiffies(1000));
                                        mlog(0, "waited 1 sec for %u, "
                                             "dead? %s\n", ndata->node_num,
                                             dlm_is_node_dead(dlm, ndata->node_num) ?
                                             "yes" : "no");
                                } else {
                                        /* -ENOMEM on the other node */
                                        mlog(0, "%s: node %u returned "
                                             "%d during recovery, retrying "
                                             "after a short wait\n",
                                             dlm->name, ndata->node_num,
                                             status);
                                        msleep(100);
                                }
                        }
                } while (status != 0);

                spin_lock(&dlm_reco_state_lock);
                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                BUG();
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                mlog(0, "node %u died after requesting "
                                     "recovery info for node %u\n",
                                     ndata->node_num, dead_node);
                                /* fine.  don't need this node's info.
                                 * continue without it. */
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
                                mlog(0, "now receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                mlog(0, "already receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                mlog(0, "already DONE receiving recovery data "
                                     "from node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                }
                spin_unlock(&dlm_reco_state_lock);
        }

        mlog(0, "%s: Done requesting all lock info\n", dlm->name);

        /* nodes should be sending reco data now
         * just need to wait */

        while (1) {
                /* check all the nodes now to see if we are
                 * done, or if anyone died */
                all_nodes_done = 1;
                spin_lock(&dlm_reco_state_lock);
                list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                        mlog(0, "checking recovery state of node %u\n",
                             ndata->node_num);
                        switch (ndata->state) {
                                case DLM_RECO_NODE_DATA_INIT:
                                case DLM_RECO_NODE_DATA_REQUESTING:
                                        mlog(ML_ERROR, "bad ndata state for "
                                             "node %u: state=%d\n",
                                             ndata->node_num, ndata->state);
                                        BUG();
                                        break;
                                case DLM_RECO_NODE_DATA_DEAD:
                                        mlog(0, "node %u died after "
                                             "requesting recovery info for "
                                             "node %u\n", ndata->node_num,
                                             dead_node);
                                        break;
                                case DLM_RECO_NODE_DATA_RECEIVING:
                                case DLM_RECO_NODE_DATA_REQUESTED:
                                        mlog(0, "%s: node %u still in state %s\n",
                                             dlm->name, ndata->node_num,
                                             ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
                                             "receiving" : "requested");
                                        all_nodes_done = 0;
                                        break;
                                case DLM_RECO_NODE_DATA_DONE:
                                        mlog(0, "%s: node %u state is done\n",
                                             dlm->name, ndata->node_num);
                                        break;
                                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                        mlog(0, "%s: node %u state is finalize\n",
                                             dlm->name, ndata->node_num);
                                        break;
                        }
                }
                spin_unlock(&dlm_reco_state_lock);

                mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
                     all_nodes_done?"yes":"no");
                if (all_nodes_done) {
                        int ret;

                        /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
                         * just send a finalize message to everyone and
                         * clean up */
                        mlog(0, "all nodes are done! send finalize\n");
                        ret = dlm_send_finalize_reco_message(dlm);
                        if (ret < 0)
                                mlog_errno(ret);

                        spin_lock(&dlm->spinlock);
                        dlm_finish_local_lockres_recovery(dlm, dead_node,
                                                          dlm->node_num);
                        spin_unlock(&dlm->spinlock);
                        mlog(0, "should be done with recovery!\n");

                        mlog(0, "finishing recovery of %s at %lu, "
                             "dead=%u, this=%u, new=%u\n", dlm->name,
                             jiffies, dlm->reco.dead_node,
                             dlm->node_num, dlm->reco.new_master);
                        destroy = 1;
                        status = 0;
                        /* rescan everything marked dirty along the way */
                        dlm_kick_thread(dlm, NULL);
                        break;
                }
                /* wait to be signalled, with periodic timeout
                 * to check for node death */
                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
        }

        if (destroy)
                dlm_destroy_recovery_area(dlm, dead_node);

        return status;
}
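
/* dlm_remaster_locks() above runs in two passes: first it asks every live
 * node (via DLM_LOCK_REQUEST_MSG) for its lock state on the dead node,
 * then it polls the node_data list until every node has reached
 * DLM_RECO_NODE_DATA_DONE or died, at which point it broadcasts the
 * finalize message and flips ownership of the local lock resources. */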
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        int num = 0;
        struct dlm_reco_node_data *ndata;

        spin_lock(&dlm->spinlock);
        memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
        /* nodes can only be removed (by dying) after dropping
         * this lock, and death will be trapped later, so this should do */
        spin_unlock(&dlm->spinlock);

        while (1) {
                num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
                if (num >= O2NM_MAX_NODES) {
                        break;
                }
                BUG_ON(num == dead_node);

                ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
                if (!ndata) {
                        dlm_destroy_recovery_area(dlm, dead_node);
                        return -ENOMEM;
                }
                ndata->node_num = num;
                ndata->state = DLM_RECO_NODE_DATA_INIT;
                spin_lock(&dlm_reco_state_lock);
                list_add_tail(&ndata->list, &dlm->reco.node_data);
                spin_unlock(&dlm_reco_state_lock);
                num++;
        }

        return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        struct dlm_reco_node_data *ndata, *next;
        LIST_HEAD(tmplist);

        spin_lock(&dlm_reco_state_lock);
        list_splice_init(&dlm->reco.node_data, &tmplist);
        spin_unlock(&dlm_reco_state_lock);

        list_for_each_entry_safe(ndata, next, &tmplist, list) {
                list_del_init(&ndata->list);
                kfree(ndata);
        }
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
                                 u8 dead_node)
{
        struct dlm_lock_request lr;
        int ret;
        int status;

        mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
             "to %u\n", dead_node, request_from);

        memset(&lr, 0, sizeof(lr));
        lr.node_idx = dlm->node_num;
        lr.dead_node = dead_node;

        // send message
        ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
                                 &lr, sizeof(lr), request_from, &status);

        /* negative status is handled by caller */
        if (ret < 0)
                mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
                     "to recover dead node %u\n", dlm->name, ret,
                     request_from, dead_node);

        // return from here, then
        // sleep until all received or error
        return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        if (lr->dead_node != dlm->reco.dead_node) {
                mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
                     "dead_node is %u\n", dlm->name, lr->node_idx,
                     lr->dead_node, dlm->reco.dead_node);
                dlm_print_reco_node_status(dlm);
                /* this is a hack */
                dlm_put(dlm);
                return -ENOMEM;
        }
        BUG_ON(lr->dead_node != dlm->reco.dead_node);

        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!item) {
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* this will get freed by dlm_request_all_locks_worker */
        buf = (char *) __get_free_page(GFP_NOFS);
        if (!buf) {
                kfree(item);
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* queue up work for dlm_request_all_locks_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
        item->u.ral.reco_master = lr->node_idx;
        item->u.ral.dead_node = lr->dead_node;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

        dlm_put(dlm);
        return 0;
}
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_migratable_lockres *mres;
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;
        LIST_HEAD(resources);
        int ret;
        u8 dead_node, reco_master;
        int skip_all_done = 0;

        dlm = item->dlm;
        dead_node = item->u.ral.dead_node;
        reco_master = item->u.ral.reco_master;
        mres = (struct dlm_migratable_lockres *)data;

        mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
             dlm->name, dead_node, reco_master);

        if (dead_node != dlm->reco.dead_node ||
            reco_master != dlm->reco.new_master) {
                /* worker could have been created before the recovery master
                 * died.  if so, do not continue, but do not error. */
                if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                        mlog(ML_NOTICE, "%s: will not send recovery state, "
                             "recovery master %u died, thread=(dead=%u,mas=%u)"
                             " current=(dead=%u,mas=%u)\n", dlm->name,
                             reco_master, dead_node, reco_master,
                             dlm->reco.dead_node, dlm->reco.new_master);
                } else {
                        mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
                             "master=%u), request(dead=%u, master=%u)\n",
                             dlm->name, dlm->reco.dead_node,
                             dlm->reco.new_master, dead_node, reco_master);
                }
                goto leave;
        }

        /* lock resources should have already been moved to the
         * dlm->reco.resources list.  now move items from that list
         * to a temp list if the dead owner matches.  note that the
         * whole cluster recovers only one node at a time, so we
         * can safely move UNKNOWN lock resources for each recovery
         * session. */
        dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

        /* now we can begin blasting lockreses without the dlm lock */

        /* any errors returned will be due to the new_master dying,
         * the dlm_reco_thread should detect this */
        list_for_each_entry(res, &resources, recovering) {
                ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
                                           DLM_MRES_RECOVERY);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery state for dead node %u, ret=%d\n", dlm->name,
                             reco_master, dead_node, ret);
                        skip_all_done = 1;
                        break;
                }
        }

        /* move the resources back to the list */
        spin_lock(&dlm->spinlock);
        list_splice_init(&resources, &dlm->reco.resources);
        spin_unlock(&dlm->spinlock);

        if (!skip_all_done) {
                ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery all-done for dead node %u, ret=%d\n",
                             dlm->name, reco_master, dead_node, ret);
                }
        }
leave:
        free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
        int ret, tmpret;
        struct dlm_reco_data_done done_msg;

        memset(&done_msg, 0, sizeof(done_msg));
        done_msg.node_idx = dlm->node_num;
        done_msg.dead_node = dead_node;
        mlog(0, "sending DATA DONE message to %u, "
             "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
             done_msg.dead_node);

        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
                mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
                     "to recover dead node %u\n", dlm->name, ret, send_to,
                     dead_node);
                if (!dlm_is_host_down(ret)) {
                        BUG();
                }
        } else
                ret = tmpret;
        return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
        struct dlm_reco_node_data *ndata = NULL;
        int ret = -EINVAL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
             "node_idx=%u, this node=%u\n", done->dead_node,
             dlm->reco.dead_node, done->node_idx, dlm->node_num);

        mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
                        "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
                        "node_idx=%u, this node=%u\n", done->dead_node,
                        dlm->reco.dead_node, done->node_idx, dlm->node_num);

        spin_lock(&dlm_reco_state_lock);
        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                if (ndata->node_num != done->node_idx)
                        continue;

                switch (ndata->state) {
                        /* should have moved beyond INIT but not to FINALIZE yet */
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_DEAD:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                mlog(ML_ERROR, "bad ndata state for node %u:"
                                     " state=%d\n", ndata->node_num,
                                     ndata->state);
                                BUG();
                                break;
                        /* these states are possible at this point, anywhere along
                         * the line of recovery */
                        case DLM_RECO_NODE_DATA_DONE:
                        case DLM_RECO_NODE_DATA_RECEIVING:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                mlog(0, "node %u is DONE sending "
                                     "recovery data!\n",
                                     ndata->node_num);

                                ndata->state = DLM_RECO_NODE_DATA_DONE;
                                ret = 0;
                                break;
                }
        }
        spin_unlock(&dlm_reco_state_lock);

        /* wake the recovery thread, some node is done */
        if (!ret)
                dlm_kick_recovery_thread(dlm);

        if (ret < 0)
                mlog(ML_ERROR, "failed to find recovery node data for node "
                     "%u\n", done->node_idx);
        dlm_put(dlm);

        mlog(0, "leaving reco data done handler, ret=%d\n", ret);
        return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list,
                                        u8 dead_node)
{
        struct dlm_lock_resource *res, *next;
        struct dlm_lock *lock;

        spin_lock(&dlm->spinlock);
        list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
                /* always prune any $RECOVERY entries for dead nodes,
                 * otherwise hangs can occur during later recovery */
                if (dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        spin_lock(&res->spinlock);
                        list_for_each_entry(lock, &res->granted, list) {
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
                                             "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
                                        /* Can't schedule DLM_UNLOCK_FREE_LOCK
                                         * - do manually */
                                        dlm_lock_put(lock);
                                        break;
                                }
                        }
                        spin_unlock(&res->spinlock);
                        continue;
                }

                if (res->owner == dead_node) {
                        mlog(0, "found lockres owned by dead node while "
                             "doing recovery for node %u. sending it.\n",
                             dead_node);
                        list_move_tail(&res->recovering, list);
                } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "found UNKNOWN owner while doing recovery "
                             "for node %u. sending it.\n", dead_node);
                        list_move_tail(&res->recovering, list);
                }
        }
        spin_unlock(&dlm->spinlock);
}
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
        int total_locks = 0;
        struct list_head *iter, *queue = &res->granted;
        int i;

        for (i=0; i<3; i++) {
                list_for_each(iter, queue)
                        total_locks++;
                queue++;
        }
        return total_locks;
}
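
/* Note: the queue++ above steps from res->granted to res->converting to
 * res->blocked, so this (like dlm_list_num_to_pointer() further down)
 * depends on those three list_heads being laid out consecutively in
 * struct dlm_lock_resource. */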
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks)
{
        u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
        int mres_total_locks = be32_to_cpu(mres->total_locks);
        int sz, ret = 0, status = 0;
        u8 orig_flags = mres->flags,
           orig_master = mres->master;

        BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
        if (!mres->num_locks)
                return 0;

        sz = sizeof(struct dlm_migratable_lockres) +
                (mres->num_locks * sizeof(struct dlm_migratable_lock));

        /* add an all-done flag if we reached the last lock */
        orig_flags = mres->flags;
        BUG_ON(total_locks > mres_total_locks);
        if (total_locks == mres_total_locks)
                mres->flags |= DLM_MRES_ALL_DONE;

        mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
             dlm->name, res->lockname.len, res->lockname.name,
             orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
             send_to);

        /* send it */
        ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
                                 sz, send_to, &status);
        if (ret < 0) {
                /* XXX: negative status is not handled.
                 * this will end up killing this node. */
                mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
                     "node %u (%s)\n", dlm->name, mres->lockname_len,
                     mres->lockname, ret, send_to,
                     (orig_flags & DLM_MRES_MIGRATION ?
                      "migration" : "recovery"));
        } else {
                /* might get an -ENOMEM back here */
                ret = status;
                if (ret < 0) {
                        mlog_errno(ret);

                        if (ret == -EFAULT) {
                                mlog(ML_ERROR, "node %u told me to kill "
                                     "myself!\n", send_to);
                                BUG();
                        }
                }
        }

        /* zero and reinit the message buffer */
        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, mres_total_locks,
                                    mig_cookie, orig_flags, orig_master);
        return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master)
{
        /* mres here is one full page */
        clear_page(mres);
        mres->lockname_len = namelen;
        memcpy(mres->lockname, lockname, namelen);
        mres->num_locks = 0;
        mres->total_locks = cpu_to_be32(total_locks);
        mres->mig_cookie = cpu_to_be64(cookie);
        mres->flags = flags;
        mres->master = master;
}
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
                                          struct dlm_migratable_lockres *mres,
                                          int queue)
{
        if (!lock->lksb)
                return;

        /* Ignore lvb in all locks in the blocked list */
        if (queue == DLM_BLOCKED_LIST)
                return;

        /* Only consider lvbs in locks with granted EX or PR lock levels */
        if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
                return;

        if (dlm_lvb_is_empty(mres->lvb)) {
                memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
                return;
        }

        /* Ensure the lvb copied for migration matches in other valid locks */
        if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
                return;

        mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
             "node=%u\n",
             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
             lock->lockres->lockname.len, lock->lockres->lockname.name,
             lock->ml.node);
        dlm_print_one_lock_resource(lock->lockres);
        BUG();
}
/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
                                 struct dlm_migratable_lockres *mres, int queue)
{
        struct dlm_migratable_lock *ml;
        int lock_num = mres->num_locks;

        ml = &(mres->ml[lock_num]);
        ml->cookie = lock->ml.cookie;
        ml->type = lock->ml.type;
        ml->convert_type = lock->ml.convert_type;
        ml->highest_blocked = lock->ml.highest_blocked;
        ml->list = queue;
        if (lock->lksb) {
                ml->flags = lock->lksb->flags;
                dlm_prepare_lvb_for_migration(lock, mres, queue);
        }
        ml->node = lock->ml.node;
        mres->num_locks++;
        /* we reached the max, send this network message */
        if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
                return 1;
        return 0;
}
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
                               struct dlm_migratable_lockres *mres)
{
        struct dlm_lock dummy;
        memset(&dummy, 0, sizeof(dummy));
        dummy.ml.cookie = 0;
        dummy.ml.type = LKM_IVMODE;
        dummy.ml.convert_type = LKM_IVMODE;
        dummy.ml.highest_blocked = LKM_IVMODE;
        dummy.lksb = NULL;
        dummy.ml.node = dlm->node_num;
        dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lock *ml,
                                    u8 *nodenum)
{
        if (unlikely(ml->cookie == 0 &&
            ml->type == LKM_IVMODE &&
            ml->convert_type == LKM_IVMODE &&
            ml->highest_blocked == LKM_IVMODE &&
            ml->list == DLM_BLOCKED_LIST)) {
                *nodenum = ml->node;
                return 1;
        }
        return 0;
}
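
/* A dummy lock is how a node that holds no locks on a lockres still
 * reports its mastery reference: dlm_send_one_lockres() sends one when
 * total_locks == 0, and dlm_process_recovery_data() answers it by setting
 * the sender's bit in the refmap instead of creating a real lock. */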
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                         struct dlm_migratable_lockres *mres,
                         u8 send_to, u8 flags)
{
        struct list_head *queue;
        int total_locks, i;
        u64 mig_cookie = 0;
        struct dlm_lock *lock;
        int ret = 0;

        BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        mlog(0, "sending to %u\n", send_to);

        total_locks = dlm_num_locks_in_lockres(res);
        if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
                /* rare, but possible */
                mlog(0, "argh.  lockres has %d locks.  this will "
                     "require more than one network packet to "
                     "migrate\n", total_locks);
                mig_cookie = dlm_get_next_mig_cookie();
        }

        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, total_locks,
                                    mig_cookie, flags, res->owner);

        total_locks = 0;
        for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
                queue = dlm_list_idx_to_ptr(res, i);
                list_for_each_entry(lock, queue, list) {
                        /* add another lock. */
                        total_locks++;
                        if (!dlm_add_lock_to_array(lock, mres, i))
                                continue;

                        /* this filled the lock message,
                         * we must send it immediately. */
                        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
                                                       res, total_locks);
                        if (ret < 0)
                                goto error;
                }
        }
        if (total_locks == 0) {
                /* send a dummy lock to indicate a mastery reference only */
                mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
                     "migration");
                dlm_add_dummy_lock(dlm, mres);
        }
        /* flush any remaining locks */
        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
        if (ret < 0)
                goto error;
        return ret;

error:
        mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
             dlm->name, ret);
        if (!dlm_is_host_down(ret))
                BUG();
        mlog(0, "%s: node %u went down while sending %s "
             "lockres %.*s\n", dlm->name, send_to,
             flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
             res->lockname.len, res->lockname.name);
        return ret;
}
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_migratable_lockres *mres =
                (struct dlm_migratable_lockres *)msg->buf;
        int ret = 0;
        u8 real_master;
        u8 extra_refs = 0;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;
        struct dlm_lock_resource *res = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        real_master = mres->master;
        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* cannot migrate a lockres with no master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
        }

        mlog(0, "%s message received from node %u\n",
             (mres->flags & DLM_MRES_RECOVERY) ?
             "recovery" : "migration", mres->master);
        if (mres->flags & DLM_MRES_ALL_DONE)
                mlog(0, "all done flag.  all lockres data received!\n");

        ret = -ENOMEM;
        buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
        item = kzalloc(sizeof(*item), GFP_NOFS);
        if (!buf || !item)
                goto leave;

        /* lookup the lock to see if we have a secondary queue for this
         * already...  just add the locks in and this will have its owner
         * and RECOVERY flag changed when it completes. */
        res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
        if (res) {
                /* this will get a ref on res */
                /* mark it as recovering/migrating and hash it */
                spin_lock(&res->spinlock);
                if (mres->flags & DLM_MRES_RECOVERY) {
                        res->state |= DLM_LOCK_RES_RECOVERING;
                } else {
                        if (res->state & DLM_LOCK_RES_MIGRATING) {
                                /* this is at least the second
                                 * lockres message */
                                mlog(0, "lock %.*s is already migrating\n",
                                     mres->lockname_len,
                                     mres->lockname);
                        } else if (res->state & DLM_LOCK_RES_RECOVERING) {
                                /* caller should BUG */
                                mlog(ML_ERROR, "node is attempting to migrate "
                                     "lock %.*s, but marked as recovering!\n",
                                     mres->lockname_len, mres->lockname);
                                ret = -EFAULT;
                                spin_unlock(&res->spinlock);
                                dlm_lockres_put(res);
                                goto leave;
                        }
                        res->state |= DLM_LOCK_RES_MIGRATING;
                }
                spin_unlock(&res->spinlock);
        } else {
                /* need to allocate, just like if it was
                 * mastered here normally  */
                res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
                if (!res)
                        goto leave;

                /* to match the ref that we would have gotten if
                 * dlm_lookup_lockres had succeeded */
                dlm_lockres_get(res);

                /* mark it as recovering/migrating and hash it */
                if (mres->flags & DLM_MRES_RECOVERY)
                        res->state |= DLM_LOCK_RES_RECOVERING;
                else
                        res->state |= DLM_LOCK_RES_MIGRATING;

                spin_lock(&dlm->spinlock);
                __dlm_insert_lockres(dlm, res);
                spin_unlock(&dlm->spinlock);

                /* Add an extra ref for this lock-less lockres lest the
                 * dlm_thread purges it before we get the chance to add
                 * locks to it */
                dlm_lockres_get(res);
                extra_refs++;

                /* There are three refs that need to be put.
                 * 1. Taken above.
                 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
                 * 3. dlm_lookup_lockres()
                 * The first one is handled at the end of this function. The
                 * other two are handled in the worker thread after locks have
                 * been attached. Yes, we don't wait for purge time to match
                 * kref_init. The lockres will still have atleast one ref
                 * added because it is in the hash __dlm_insert_lockres() */

                /* now that the new lockres is inserted,
                 * make it usable by other processes */
                spin_lock(&res->spinlock);
                res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
                spin_unlock(&res->spinlock);
                wake_up(&res->wq);
        }

        /* at this point we have allocated everything we need,
         * and we have a hashed lockres with an extra ref and
         * the proper res->state flags. */
        ret = 0;
        spin_lock(&res->spinlock);
        /* drop this either when master requery finds a different master
         * or when a lock is added by the recovery worker */
        dlm_lockres_grab_inflight_ref(dlm, res);
        if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* migration cannot have an unknown master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
                mlog(0, "recovery has passed me a lockres with an "
                     "unknown owner.. will need to requery: "
                     "%.*s\n", mres->lockname_len, mres->lockname);
        } else {
                /* take a reference now to pin the lockres, drop it
                 * when locks are added in the worker */
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
        }
        spin_unlock(&res->spinlock);

        /* queue up work for dlm_mig_lockres_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
        dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
        item->u.ml.lockres = res; /* already have a ref */
        item->u.ml.real_master = real_master;
        item->u.ml.extra_ref = extra_refs;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
        /* One extra ref taken needs to be put here */
        if (extra_refs)
                dlm_lockres_put(res);

        dlm_put(dlm);
        if (ret < 0) {
                kfree(buf);
                kfree(item);
                mlog_errno(ret);
        }

        return ret;
}
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_ctxt *dlm;
        struct dlm_migratable_lockres *mres;
        int ret = 0;
        struct dlm_lock_resource *res;
        u8 real_master;
        u8 extra_ref;

        dlm = item->dlm;
        mres = (struct dlm_migratable_lockres *)data;

        res = item->u.ml.lockres;
        real_master = item->u.ml.real_master;
        extra_ref = item->u.ml.extra_ref;

        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* this case is super-rare. only occurs if
                 * node death happens during migration. */
again:
                ret = dlm_lockres_master_requery(dlm, res, &real_master);
                if (ret < 0) {
                        mlog(0, "dlm_lockres_master_requery ret=%d\n",
                             ret);
                        goto again;
                }
                if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lockres %.*s not claimed.  "
                             "this node will take it.\n",
                             res->lockname.len, res->lockname.name);
                } else {
                        spin_lock(&res->spinlock);
                        dlm_lockres_drop_inflight_ref(dlm, res);
                        spin_unlock(&res->spinlock);
                        mlog(0, "master needs to respond to sender "
                             "that node %u still owns %.*s\n",
                             real_master, res->lockname.len,
                             res->lockname.name);
                        /* cannot touch this lockres */
                        goto leave;
                }
        }

        ret = dlm_process_recovery_data(dlm, res, mres);
        if (ret < 0)
                mlog(0, "dlm_process_recovery_data returned %d\n", ret);
        else
                mlog(0, "dlm_process_recovery_data succeeded\n");

        if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
                           (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
                ret = dlm_finish_migration(dlm, res, mres->master);
                if (ret < 0)
                        mlog_errno(ret);
        }

leave:
        /* See comment in dlm_mig_lockres_handler() */
        if (res) {
                if (extra_ref)
                        dlm_lockres_put(res);
                dlm_lockres_put(res);
        }
        dlm_put(dlm);
        kfree(data);
}
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 *real_master)
{
        struct dlm_node_iter iter;
        int nodenum;
        int ret = 0;

        *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

        /* we only reach here if one of the two nodes in a
         * migration died while the migration was in progress.
         * at this point we need to requery the master.  we
         * know that the new_master got as far as creating
         * an mle on at least one node, but we do not know
         * if any nodes had actually cleared the mle and set
         * the master to the new_master.  the old master
         * is supposed to set the owner to UNKNOWN in the
         * event of a new_master death, so the only possible
         * responses that we can get from nodes here are
         * that the master is new_master, or that the master
         * is UNKNOWN.
         * if all nodes come back with UNKNOWN then we know
         * the lock needs remastering here.
         * if any node comes back with a valid master, check
         * to see if that master is the one that we are
         * recovering.  if so, then the new_master died and
         * we need to remaster this lock.  if not, then the
         * new_master survived and that node will respond to
         * other nodes about the owner.
         * if there is an owner, this node needs to dump this
         * lockres and alert the sender that this lockres
         * was rejected. */
        spin_lock(&dlm->spinlock);
        dlm_node_iter_init(dlm->domain_map, &iter);
        spin_unlock(&dlm->spinlock);

        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                /* do not send to self */
                if (nodenum == dlm->node_num)
                        continue;
                ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
                if (ret < 0) {
                        mlog_errno(ret);
                        if (!dlm_is_host_down(ret))
                                BUG();
                        /* host is down, so answer for that node would be
                         * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
                }
                if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lock master is %u\n", *real_master);
                        break;
                }
        }
        return ret;
}
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                          u8 nodenum, u8 *real_master)
{
        int ret = -EINVAL;
        struct dlm_master_requery req;
        int status = DLM_LOCK_RES_OWNER_UNKNOWN;

        memset(&req, 0, sizeof(req));
        req.node_idx = dlm->node_num;
        req.namelen = res->lockname.len;
        memcpy(req.name, res->lockname.name, res->lockname.len);

        ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
                                 &req, sizeof(req), nodenum, &status);
        /* XXX: negative status not handled properly here. */
        if (ret < 0)
                mlog(ML_ERROR, "Error %d when sending message %u (key "
                     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
                     dlm->key, nodenum);
        else {
                BUG_ON(status < 0);
                BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
                *real_master = (u8) (status & 0xff);
                mlog(0, "node %u responded to master requery with %u\n",
                     nodenum, *real_master);
                ret = 0;
        }
        return ret;
}
/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        unsigned int hash;
        int master = DLM_LOCK_RES_OWNER_UNKNOWN;
        u32 flags = DLM_ASSERT_MASTER_REQUERY;

        if (!dlm_grab(dlm)) {
                /* since the domain has gone away on this
                 * node, the proper response is UNKNOWN */
                return master;
        }

        hash = dlm_lockid_hash(req->name, req->namelen);

        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
        if (res) {
                spin_lock(&res->spinlock);
                master = res->owner;
                if (master == dlm->node_num) {
                        int ret = dlm_dispatch_assert_master(dlm, res,
                                                             0, 0, flags);
                        if (ret < 0) {
                                mlog_errno(-ENOMEM);
                                /* retry!? */
                                BUG();
                        }
                } else /* put.. incase we are not the master */
                        dlm_lockres_put(res);
                spin_unlock(&res->spinlock);
        }
        spin_unlock(&dlm->spinlock);

        dlm_put(dlm);
        return master;
}
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
        struct list_head *ret;
        BUG_ON(list_num < 0);
        BUG_ON(list_num > 2);
        ret = &(res->granted);
        ret += list_num;
        return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
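
/* The rule above is what the local-lock path of dlm_process_recovery_data()
 * implements: a lock owned by this node is only moved to the queue that the
 * master reported, never overwritten with the master's copy of the lock. */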
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres)
{
        struct dlm_migratable_lock *ml;
        struct list_head *queue;
        struct list_head *tmpq = NULL;
        struct dlm_lock *newlock = NULL;
        struct dlm_lockstatus *lksb = NULL;
        int ret = 0;
        int i, j, bad;
        struct dlm_lock *lock = NULL;
        u8 from = O2NM_MAX_NODES;
        unsigned int added = 0;
        __be64 c;

        mlog(0, "running %d locks for this lockres\n", mres->num_locks);
        for (i=0; i<mres->num_locks; i++) {
                ml = &(mres->ml[i]);

                if (dlm_is_dummy_lock(dlm, ml, &from)) {
                        /* placeholder, just need to set the refmap bit */
                        BUG_ON(mres->num_locks != 1);
                        mlog(0, "%s:%.*s: dummy lock for %u\n",
                             dlm->name, mres->lockname_len, mres->lockname,
                             from);
                        spin_lock(&res->spinlock);
                        dlm_lockres_set_refmap_bit(dlm, res, from);
                        spin_unlock(&res->spinlock);
                        added++;
                        break;
                }
                BUG_ON(ml->highest_blocked != LKM_IVMODE);
                newlock = NULL;
                lksb = NULL;

                queue = dlm_list_num_to_pointer(res, ml->list);
                tmpq = NULL;

                /* if the lock is for the local node it needs to
                 * be moved to the proper location within the queue.
                 * do not allocate a new lock structure. */
                if (ml->node == dlm->node_num) {
                        /* MIGRATION ONLY! */
                        BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

                        lock = NULL;
                        spin_lock(&res->spinlock);
                        for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
                                tmpq = dlm_list_idx_to_ptr(res, j);
                                list_for_each_entry(lock, tmpq, list) {
                                        if (lock->ml.cookie != ml->cookie)
                                                lock = NULL;
                                        else
                                                break;
                                }
                                if (lock)
                                        break;
                        }

                        /* lock is always created locally first, and
                         * destroyed locally last.  it must be on the list */
                        if (!lock) {
                                c = ml->cookie;
                                mlog(ML_ERROR, "Could not find local lock "
                                     "with cookie %u:%llu, node %u, "
                                     "list %u, flags 0x%x, type %d, "
                                     "conv %d, highest blocked %d\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     ml->node, ml->list, ml->flags, ml->type,
                                     ml->convert_type, ml->highest_blocked);
                                __dlm_print_one_lock_resource(res);
                                BUG();
                        }

                        if (lock->ml.node != ml->node) {
                                c = lock->ml.cookie;
                                mlog(ML_ERROR, "Mismatched node# in lock "
                                     "cookie %u:%llu, name %.*s, node %u\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     res->lockname.len, res->lockname.name,
                                     lock->ml.node);
                                c = ml->cookie;
                                mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
                                     "node %u, list %u, flags 0x%x, type %d, "
                                     "conv %d, highest blocked %d\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     ml->node, ml->list, ml->flags, ml->type,
                                     ml->convert_type, ml->highest_blocked);
                                __dlm_print_one_lock_resource(res);
                                BUG();
                        }

                        if (tmpq != queue) {
                                c = ml->cookie;
                                mlog(0, "Lock cookie %u:%llu was on list %u "
                                     "instead of list %u for %.*s\n",
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
                                     j, ml->list, res->lockname.len,
                                     res->lockname.name);
                                __dlm_print_one_lock_resource(res);
                                spin_unlock(&res->spinlock);
                                continue;
                        }

                        /* see NOTE above about why we do not update
                         * to match the master here */

                        /* move the lock to its proper place */
                        /* do not alter lock refcount.  switching lists. */
                        list_move_tail(&lock->list, queue);
                        spin_unlock(&res->spinlock);
                        added++;

                        mlog(0, "just reordered a local lock!\n");
                        continue;
                }

                /* lock is for another node. */
                newlock = dlm_new_lock(ml->type, ml->node,
                                       be64_to_cpu(ml->cookie), NULL);
                if (!newlock) {
                        ret = -ENOMEM;
                        goto leave;
                }
                lksb = newlock->lksb;
                dlm_lock_attach_lockres(newlock, res);

                if (ml->convert_type != LKM_IVMODE) {
                        BUG_ON(queue != &res->converting);
                        newlock->ml.convert_type = ml->convert_type;
                }
                lksb->flags |= (ml->flags &
                                (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

                if (ml->type == LKM_NLMODE)
                        goto skip_lvb;

                /*
                 * If the lock is in the blocked list it can't have a valid lvb,
                 * so skip it
                 */
                if (ml->list == DLM_BLOCKED_LIST)
                        goto skip_lvb;

                if (!dlm_lvb_is_empty(mres->lvb)) {
                        if (lksb->flags & DLM_LKSB_PUT_LVB) {
                                /* other node was trying to update
                                 * lvb when node died.  recreate the
                                 * lksb with the updated lvb. */
                                memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
                                /* the lock resource lvb update must happen
                                 * NOW, before the spinlock is dropped.
                                 * we no longer wait for the AST to update
                                 * the lvb. */
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        } else {
                                /* otherwise, the node is sending its
                                 * most recent valid lvb info */
                                BUG_ON(ml->type != LKM_EXMODE &&
                                       ml->type != LKM_PRMODE);
                                if (!dlm_lvb_is_empty(res->lvb) &&
                                    (ml->type == LKM_EXMODE ||
                                     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
                                        mlog(ML_ERROR, "%s:%.*s: received bad "
                                             "lvb! type=%d\n", dlm->name,
                                             res->lockname.len,
                                             res->lockname.name, ml->type);
                                        printk("lockres lvb=[");
                                        for (i=0; i<DLM_LVB_LEN; i++)
                                                printk("%02x", res->lvb[i]);
                                        printk("]\nmigrated lvb=[");
                                        for (i=0; i<DLM_LVB_LEN; i++)
                                                printk("%02x", mres->lvb[i]);
                                        printk("]\n");
                                        dlm_print_one_lock_resource(res);
                                        BUG();
                                }
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        }
                }
skip_lvb:

                /* NOTE:
                 * wrt lock queue ordering and recovery:
                 *    1. order of locks on granted queue is
                 *       preserved exactly.
                 *    2. order of locks on converting queue is
                 *       LOST with the node death.  sorry charlie.
                 *    3. order of locks on the blocked queue is
                 *       preserved.
                 * order of locks does not affect integrity, it
                 * just means that a lock request may get pushed
                 * back in line as a result of the node death.
                 * also note that for a given node the lock order
                 * for its secondary queue locks is preserved
                 * relative to each other, but clearly *not*
                 * preserved relative to locks from other nodes.
                 */
                bad = 0;
                spin_lock(&res->spinlock);
                list_for_each_entry(lock, queue, list) {
                        if (lock->ml.cookie == ml->cookie) {
                                c = lock->ml.cookie;
                                mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
                                     "exists on this lockres!\n", dlm->name,
                                     res->lockname.len, res->lockname.name,
                                     dlm_get_lock_cookie_node(be64_to_cpu(c)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

                                mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
                                     "node=%u, cookie=%u:%llu, queue=%d\n",
                                     ml->type, ml->convert_type, ml->node,
                                     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
                                     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
                                     ml->list);

                                __dlm_print_one_lock_resource(res);
                                bad = 1;
                                break;
                        }
                }
                if (!bad) {
                        dlm_lock_get(newlock);
                        list_add_tail(&newlock->list, queue);
                        mlog(0, "%s:%.*s: added lock for node %u, "
                             "setting refmap bit\n", dlm->name,
                             res->lockname.len, res->lockname.name, ml->node);
                        dlm_lockres_set_refmap_bit(dlm, res, ml->node);
                        added++;
                }
                spin_unlock(&res->spinlock);
        }
        mlog(0, "done running all the locks\n");

leave:
        /* balance the ref taken when the work was queued */
        spin_lock(&res->spinlock);
        dlm_lockres_drop_inflight_ref(dlm, res);
        spin_unlock(&res->spinlock);

        if (ret < 0) {
                mlog_errno(ret);
                if (newlock)
                        dlm_lock_put(newlock);
        }

        return ret;
}

void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
		dlm_lockres_put(res);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master.  */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}
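
/* pending-state handling in dlm_move_lockres_to_recovery_list, in brief:
 *
 *	flag			found on	action taken
 *	convert_pending		converting	revert back to granted
 *	lock_pending		blocked		remove the request entirely
 *	unlock_pending		granted		treat unlock as completed
 *	cancel_pending		converting	treat cancel as completed
 */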

/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			if (!(res->state & DLM_LOCK_RES_RECOVERING))
				continue;

			if (res->owner != dead_node &&
			    res->owner != dlm->node_num)
				continue;

			if (!list_empty(&res->recovering)) {
				list_del_init(&res->recovering);
				dlm_lockres_put(res);
			}

			/* new_master has our reference from
			 * the lock state sent during recovery */
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}
}

static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}
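
/* truth table for dlm_lvb_needs_invalidation():
 *
 *	local=1: scanning this node's own locks on a non-master copy;
 *		 invalidate unless the lock is EX or PR, since only those
 *		 modes guarantee the cached lvb is still current.
 *	local=0: the master scanning the dead node's locks; invalidate
 *		 only if the dead node held EX, since only an EX holder
 *		 could have been writing the lvb.
 */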

static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct dlm_lock *lock, *next;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* We do two dlm_lock_put(). One for removing from list and the other is
	 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_entry_safe(lock, next, &res->granted, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->converting, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->blocked, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}

	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		if(!test_bit(dead_node, res->refmap)) {
			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
			     "but ref was not set\n", dlm->name,
			     res->lockname.len, res->lockname.name, freed, dead_node);
			__dlm_print_one_lock_resource(res);
		}
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 *
 */
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(ML_NOTICE, "%s: res %.*s, Skip "
					     "recovery as it is being freed\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name);
				} else
					dlm_move_lockres_to_recovery_list(dlm,
									  res);

			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
				if (test_bit(dead_node, res->refmap)) {
					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
					     "no locks and had not purged before dying\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, dead_node);
					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
				}
			}
			spin_unlock(&res->spinlock);
		}
	}
}
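
/* ownership cases handled above, per lockres: the dead node was master ->
 * move it to the recovering list (unless it is already being freed); this
 * node is master -> free the dead node's locks immediately; owner unknown
 * (mastery still in flight) -> just clear the dead node's refmap bit. */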

static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		msleep(100);
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else
			mlog(ML_ERROR, "recovery lock not found\n");
		BUG();
	}

	return status;
}
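
/* outcome summary for dlm_pick_recovery_master(): DLM_NORMAL means this
 * node won the $RECOVERY EX and broadcasts begin_reco (status 0), unless
 * another master was announced or the dead node was already recovered
 * while we held the lock (-EEXIST / -EINVAL).  DLM_NOTQUEUED means
 * another node won; wait up to one second for it to announce itself,
 * else try again.  DLM_RECOVERING means the lock master itself died
 * mid-race; back off 100ms and let the recovery thread retry.  anything
 * else is unexpected and fatal. */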

static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(ML_NOTICE, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}

		/*
		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
		 * We are handling both for compatibility reasons.
		 */
		if (ret == -EAGAIN || ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			msleep(100);
			goto retry;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else
				mlog(ML_ERROR, "recovery lock not found\n");

			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
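
/* error policy for the begin_reco broadcast above: a target that is down
 * is skipped (it cannot take part in this recovery), an (-)EAGAIN reply
 * means the target is still finalizing the previous recovery and is
 * retried after a 100ms backoff, and any other failure (likely ENOMEM in
 * the network stack) is retried indefinitely, since recovery cannot
 * proceed until every live domain member has been told the new master. */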

int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		dlm_put(dlm);
		return -EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}

#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
			     dlm->key, nodenum);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}
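
/* the finalize broadcast runs in two stages: finalize1 tells every node
 * that the dead node's locks have been remastered (each node re-owns its
 * recovered lockres and sets DLM_RECO_STATE_FINALIZE), while finalize2
 * clears that state and resets the recovery dead_node/new_master.  a
 * node that has seen finalize1 but not finalize2 answers any new
 * begin_reco with -EAGAIN, preventing a second recovery from starting
 * while the first is still being torn down. */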

int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
		case 1:
			dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
			if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
				mlog(ML_ERROR, "%s: received finalize1 from "
				     "new master %u for dead node %u, but "
				     "this node has already received it!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			break;
		case 2:
			if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
				mlog(ML_ERROR, "%s: received finalize2 from "
				     "new master %u for dead node %u, but "
				     "this node did not have finalize1!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			dlm_reset_recovery(dlm);
			dlm_kick_recovery_thread(dlm);
			break;
		default:
			BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);