/* -*- mode: c; c-basic-offset: 8; -*-
 *
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
/* This quorum hack is only here until we transition to some more rational
 * approach that is driven from userspace.  Honest.  No foolin'.
 *
 * Imagine two nodes lose network connectivity to each other but they're still
 * up and operating in every other way.  Presumably a network timeout indicates
 * that a node is broken and should be recovered.  They can't both recover each
 * other and both carry on without serialising their access to the file system.
 * They need to decide who is authoritative.  Now extend that problem to
 * arbitrary groups of nodes losing connectivity between each other.
 *
 * So we declare that a node which has given up on connecting to a majority
 * of nodes who are still heartbeating will fence itself.
 *
 * There are huge opportunities for races here.  After we give up on a node's
 * connection we need to wait long enough to give heartbeat an opportunity
 * to declare the node as truly dead.  We also need to be careful with the
 * race between when we see a node start heartbeating and when we connect
 * to it.
 *
 * So nodes that are in this transition put a hold on the quorum decision
 * with a counter.  As they fall out of this transition they drop the count
 * and if they're the last, they fire off the decision.
 */
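
/* Concretely: o2quo_hb_up() takes a hold on a newly heartbeating node until
 * the fate of its connection is known, and o2quo_conn_err() takes one for a
 * live node whose connection was lost until heartbeat agrees the node is
 * dead (hb_down) or the caller reports it still up (hb_still_up).  Once a
 * decision is pending, dropping the last hold schedules the decision work. */
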
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/reboot.h>

#include "heartbeat.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_QUORUM
#include "masklog.h"

#include "quorum.h"
static struct o2quo_state {
	spinlock_t		qs_lock;
	struct work_struct	qs_work;
	int			qs_pending;
	int			qs_heartbeating;
	unsigned long		qs_hb_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int			qs_connected;
	unsigned long		qs_conn_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int			qs_holds;
	unsigned long		qs_hold_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
} o2quo_state;
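
/* The counters shadow their bitmaps: qs_heartbeating tracks the bits set in
 * qs_hb_bm, qs_connected tracks qs_conn_bm and qs_holds tracks qs_hold_bm.
 * qs_pending records that a quorum decision should fire once qs_holds drains
 * to zero; everything is serialised by qs_lock. */
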
/* this is horribly heavy-handed.  It should instead flip the file
 * system RO and call some userspace script. */
static void o2quo_fence_self(void)
{
	/* panic spins with interrupts enabled.  with preempt
	 * threads can still schedule, etc, etc */
	o2hb_stop_all_regions();

	printk("ocfs2 is very sorry to be fencing this system by restarting\n");
	emergency_restart();
}
/* Indicate that a timeout occurred on a heartbeat region write. The
 * other nodes in the cluster may consider us dead at that time so we
 * want to "fence" ourselves so that we don't scribble on the disk
 * after they think they've recovered us. This can't solve all
 * problems related to writeout after recovery but this hack can at
 * least close some of those gaps. When we have real fencing, this can
 * go away as our node would be fenced externally before other nodes
 * begin recovery. */
void o2quo_disk_timeout(void)
{
	o2quo_fence_self();
}
static void o2quo_make_decision(struct work_struct *work)
{
	int quorum;
	int lowest_hb, lowest_reachable = 0, fence = 0;
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);
	if (lowest_hb != O2NM_MAX_NODES)
		lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);

	mlog(0, "heartbeating: %d, connected: %d, "
	     "lowest: %d (%sreachable)\n", qs->qs_heartbeating,
	     qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");

	if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||
	    qs->qs_heartbeating == 1)
		goto out;

	if (qs->qs_heartbeating & 1) {
		/* the odd numbered cluster case is straight forward --
		 * if we can't talk to the majority we're hosed */
		quorum = (qs->qs_heartbeating + 1)/2;
		if (qs->qs_connected < quorum) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "only connected to %u nodes and %u is needed "
			     "to make a quorum out of %u heartbeating nodes\n",
			     qs->qs_connected, quorum,
			     qs->qs_heartbeating);
			fence = 1;
		}
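		/* Illustrative numbers: with 5 nodes heartbeating, quorum is
		 * (5 + 1) / 2 = 3, so a node that sees qs_connected fall
		 * below 3 fences itself. */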
	} else {
		/* the even numbered cluster adds the possibility of each half
		 * of the cluster being able to talk amongst themselves.. in
		 * that case we're hosed if we can't talk to the group that has
		 * the lowest numbered node */
		quorum = qs->qs_heartbeating / 2;
		if (qs->qs_connected < quorum) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "only connected to %u nodes and %u is needed "
			     "to make a quorum out of %u heartbeating nodes\n",
			     qs->qs_connected, quorum,
			     qs->qs_heartbeating);
			fence = 1;
		}
		else if ((qs->qs_connected == quorum) &&
			 !lowest_reachable) {
			mlog(ML_ERROR, "fencing this node because it is "
			     "connected to a half-quorum of %u out of %u "
			     "nodes which doesn't include the lowest active "
			     "node %u\n", quorum, qs->qs_heartbeating,
			     lowest_hb);
			fence = 1;
		}
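		/* Illustrative numbers: with 4 nodes heartbeating, quorum is
		 * 4 / 2 = 2.  Sitting exactly at that half-quorum is only
		 * safe when the lowest numbered heartbeating node is
		 * reachable, which is what breaks the tie when an even
		 * cluster splits down the middle. */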
	}

out:
	spin_unlock(&qs->qs_lock);

	if (fence)
		o2quo_fence_self();
}
static void o2quo_set_hold(struct o2quo_state *qs, u8 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (!test_and_set_bit(node, qs->qs_hold_bm)) {
		qs->qs_holds++;
		mlog_bug_on_msg(qs->qs_holds == O2NM_MAX_NODES,
				"node %u\n", node);
		mlog(0, "node %u, %d total\n", node, qs->qs_holds);
	}
}

static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
{
	assert_spin_locked(&qs->qs_lock);

	if (test_and_clear_bit(node, qs->qs_hold_bm)) {
		mlog(0, "node %u, %d total\n", node, qs->qs_holds - 1);
		if (--qs->qs_holds == 0) {
			if (qs->qs_pending) {
				qs->qs_pending = 0;
				schedule_work(&qs->qs_work);
			}
		}
		mlog_bug_on_msg(qs->qs_holds < 0, "node %u, holds %d\n",
				node, qs->qs_holds);
	}
}
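
/* Note that o2quo_clear_hold() is the only place a deferred decision fires:
 * o2quo_hb_still_up() merely marks qs_pending, and every path that drops a
 * hold funnels through here, so the work runs only after the last node in
 * transition has settled. */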

/* as a node comes up we delay the quorum decision until we know the fate of
 * the connection.  the hold will be dropped in conn_up or hb_down.  it might
 * be perpetuated by conn_err until hb_down.  if we already have a conn, we
 * might be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_heartbeating++;
	mlog_bug_on_msg(qs->qs_heartbeating == O2NM_MAX_NODES,
			"node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	set_bit(node, qs->qs_hb_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);

	if (!test_bit(node, qs->qs_conn_bm))
		o2quo_set_hold(qs, node);
	else
		o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* hb going down releases any holds we might have had due to this node from
 * conn_up, conn_err, or hb_up */
void o2quo_hb_down(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_heartbeating--;
	mlog_bug_on_msg(qs->qs_heartbeating < 0,
			"node %u, %d heartbeating\n",
			node, qs->qs_heartbeating);
	mlog_bug_on_msg(!test_bit(node, qs->qs_hb_bm), "node %u\n", node);
	clear_bit(node, qs->qs_hb_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_heartbeating);

	o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* this tells us that we've decided that the node is still heartbeating
 * even though we've lost its conn.  it must only be called after conn_err
 * and indicates that we must now make a quorum decision in the future,
 * though we might be doing so after waiting for holds to drain.  Here
 * we'll be dropping the hold from conn_err. */
void o2quo_hb_still_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	mlog(0, "node %u\n", node);

	qs->qs_pending = 1;
	o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* This is analogous to hb_up.  as a node's connection comes up we delay the
 * quorum decision until we see it heartbeating.  the hold will be dropped in
 * hb_up or hb_down.  it might be perpetuated by conn_err until hb_down.  if
 * it's already heartbeating we might be dropping a hold that hb_up got.
 */
void o2quo_conn_up(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	qs->qs_connected++;
	mlog_bug_on_msg(qs->qs_connected == O2NM_MAX_NODES,
			"node %u\n", node);
	mlog_bug_on_msg(test_bit(node, qs->qs_conn_bm), "node %u\n", node);
	set_bit(node, qs->qs_conn_bm);

	mlog(0, "node %u, %d total\n", node, qs->qs_connected);

	if (!test_bit(node, qs->qs_hb_bm))
		o2quo_set_hold(qs, node);
	else
		o2quo_clear_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

/* we've decided that we won't ever be connecting to the node again.  if it's
 * still heartbeating we grab a hold that will delay decisions until either the
 * node stops heartbeating from hb_down or the caller decides that the node is
 * still up and calls still_up */
void o2quo_conn_err(u8 node)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock(&qs->qs_lock);

	if (test_bit(node, qs->qs_conn_bm)) {
		qs->qs_connected--;
		mlog_bug_on_msg(qs->qs_connected < 0,
				"node %u, connected %d\n",
				node, qs->qs_connected);

		clear_bit(node, qs->qs_conn_bm);
	}

	mlog(0, "node %u, %d total\n", node, qs->qs_connected);

	if (test_bit(node, qs->qs_hb_bm))
		o2quo_set_hold(qs, node);

	spin_unlock(&qs->qs_lock);
}

void o2quo_init(void)
{
	struct o2quo_state *qs = &o2quo_state;

	spin_lock_init(&qs->qs_lock);
	INIT_WORK(&qs->qs_work, o2quo_make_decision);
}

void o2quo_exit(void)
{
	flush_scheduled_work();
}
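
/* An illustrative sequence of events as a peer node n dies, in terms of the
 * entry points above:
 *
 *	o2quo_hb_up(n);		node seen heartbeating, hold taken
 *	o2quo_conn_up(n);	connection established, hb_up's hold dropped
 *	o2quo_conn_err(n);	connection lost, hold taken while n heartbeats
 *	o2quo_hb_down(n);	heartbeat stops, conn_err's hold dropped
 *
 * If n instead keeps heartbeating, the caller invokes o2quo_hb_still_up(n),
 * which marks a decision pending and drops conn_err's hold so that
 * o2quo_make_decision() runs once all remaining holds drain. */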