/*
   monitoring links to all other nodes to detect dead nodes

   Copyright (C) Ronnie Sahlberg 2007

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/filesys.h"
#include "system/network.h"
#include "system/wait.h"

#include <talloc.h>
#include <tevent.h>

#include "lib/util/debug.h"
#include "lib/util/samba_util.h"
#include "lib/util/util_process.h"

#include "ctdb_private.h"

#include "common/system.h"
#include "common/common.h"
#include "common/logging.h"
struct ctdb_monitor_state {
        uint32_t monitoring_mode;       /* CTDB_MONITORING_ENABLED/DISABLED */
        TALLOC_CTX *monitor_context;    /* parent of all monitor/startup timers */
        uint32_t next_interval;         /* seconds until the next monitor event */
        uint32_t event_script_timeouts; /* consecutive monitor event timeouts */
};
static void ctdb_check_health(struct tevent_context *ev,
                              struct tevent_timer *te,
                              struct timeval t, void *private_data);
/*
  setup the notification script
 */
int ctdb_set_notification_script(struct ctdb_context *ctdb, const char *script)
{
        ctdb->notification_script = talloc_strdup(ctdb, script);
        CTDB_NO_MEMORY(ctdb, ctdb->notification_script);
        return 0;
}
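
/*
  Runs in the forked notification child: check that the configured script
  exists and is executable, then invoke it via system() with the event name
  ("healthy", "unhealthy", "startup") as its argument.  The script's exit
  status is returned so the caller in the child can log failures.
 */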
static int ctdb_run_notification_script_child(struct ctdb_context *ctdb, const char *event)
{
        struct stat st;
        int ret;
        char *cmd;

        if (stat(ctdb->notification_script, &st) != 0) {
                DEBUG(DEBUG_ERR,("Could not stat notification script %s. Can not send notifications.\n", ctdb->notification_script));
                return -1;
        }
        if (!(st.st_mode & S_IXUSR)) {
                DEBUG(DEBUG_ERR,("Notification script %s is not executable.\n", ctdb->notification_script));
                return -1;
        }

        cmd = talloc_asprintf(ctdb, "%s %s\n", ctdb->notification_script, event);
        CTDB_NO_MEMORY(ctdb, cmd);

        ret = system(cmd);
        /* if the system() call was successful, translate ret into the
           return code from the command
        */
        if (ret != -1) {
                ret = WEXITSTATUS(ret);
        }
        if (ret != 0) {
                DEBUG(DEBUG_ERR,("Notification script \"%s\" failed with error %d\n", cmd, ret));
        }

        return ret;
}
void ctdb_run_notification_script(struct ctdb_context *ctdb, const char *event)
{
        pid_t child;

        if (ctdb->notification_script == NULL) {
                return;
        }

        child = ctdb_fork(ctdb);
        if (child == (pid_t)-1) {
                DEBUG(DEBUG_ERR,("Failed to fork() a notification child process\n"));
                return;
        }
        if (child == 0) {
                int ret;

                prctl_set_comment("ctdb_notification");
                ret = ctdb_run_notification_script_child(ctdb, event);
                if (ret != 0) {
                        DEBUG(DEBUG_ERR,(__location__ " Notification script failed\n"));
                }
                _exit(0);
        }

        return;
}
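
/*
  Note: the parent never waits for the notification child, so a slow or
  hanging notification script cannot block the main daemon event loop.
  The child always calls _exit(0); a failing script is only logged.
 */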
/*
  called when a health monitoring event script finishes
 */
static void ctdb_health_callback(struct ctdb_context *ctdb, int status, void *p)
{
        struct ctdb_node *node = ctdb->nodes[ctdb->pnn];
        TDB_DATA data;
        struct ctdb_node_flag_change c;
        uint32_t next_interval;
        int ret;
        TDB_DATA rddata;
        struct ctdb_srvid_message rd;
        const char *state_str = NULL;

        c.pnn = ctdb->pnn;
        c.old_flags = node->flags;

        ZERO_STRUCT(rd);
        rd.pnn   = ctdb->pnn;
        rd.srvid = 0;

        rddata.dptr = (uint8_t *)&rd;
        rddata.dsize = sizeof(rd);

        if (status == -ECANCELED) {
                DEBUG(DEBUG_ERR,("Monitoring event was cancelled\n"));
                goto after_change_status;
        }

        if (status == -ETIME) {
                ctdb->monitor->event_script_timeouts++;

                if (ctdb->monitor->event_script_timeouts >=
                    ctdb->tunable.monitor_timeout_count) {
                        DEBUG(DEBUG_ERR,
                              ("Maximum monitor timeout count %u reached."
                               " Making node unhealthy\n",
                               ctdb->tunable.monitor_timeout_count));
                } else {
                        /* We pretend this is OK. */
                        goto after_change_status;
                }
        } else {
                ctdb->monitor->event_script_timeouts = 0;
        }

        if (status != 0 && !(node->flags & NODE_FLAGS_UNHEALTHY)) {
                DEBUG(DEBUG_NOTICE,("monitor event failed - disabling node\n"));
                node->flags |= NODE_FLAGS_UNHEALTHY;
                ctdb->monitor->next_interval = 5;

                ctdb_run_notification_script(ctdb, "unhealthy");
        } else if (status == 0 && (node->flags & NODE_FLAGS_UNHEALTHY)) {
                DEBUG(DEBUG_NOTICE,("monitor event OK - node re-enabled\n"));
                node->flags &= ~NODE_FLAGS_UNHEALTHY;
                ctdb->monitor->next_interval = 5;

                ctdb_run_notification_script(ctdb, "healthy");
        }

after_change_status:
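        /* Reschedule the next monitor event using the current interval,
         * then back the interval off (doubling it each pass) up to the
         * monitor_interval tunable.  A health transition above resets the
         * interval to 5 seconds so state changes are re-checked quickly.
         */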
        next_interval = ctdb->monitor->next_interval;

        ctdb->monitor->next_interval *= 2;
        if (ctdb->monitor->next_interval > ctdb->tunable.monitor_interval) {
                ctdb->monitor->next_interval = ctdb->tunable.monitor_interval;
        }

        tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                         timeval_current_ofs(next_interval, 0),
                         ctdb_check_health, ctdb);

        if (c.old_flags == node->flags) {
                return;
        }

        c.new_flags = node->flags;

        data.dptr = (uint8_t *)&c;
        data.dsize = sizeof(c);

        /* ask the recovery daemon to push these changes out to all nodes */
        ctdb_daemon_send_message(ctdb, ctdb->pnn,
                                 CTDB_SRVID_PUSH_NODE_FLAGS, data);

        if (c.new_flags & NODE_FLAGS_UNHEALTHY) {
                state_str = "UNHEALTHY";
        } else {
                state_str = "HEALTHY";
        }

        /* ask the recmaster to reallocate all addresses */
        DEBUG(DEBUG_ERR,
              ("Node became %s. Ask recovery master to reallocate IPs\n",
               state_str));
        ret = ctdb_daemon_send_message(ctdb, CTDB_BROADCAST_CONNECTED, CTDB_SRVID_TAKEOVER_RUN, rddata);
        if (ret != 0) {
                DEBUG(DEBUG_ERR,
                      (__location__
                       " Failed to send IP takeover run request\n"));
        }
}
static void ctdb_run_startup(struct tevent_context *ev,
                             struct tevent_timer *te,
                             struct timeval t, void *private_data);

/*
  called when the startup event script finishes
 */
static void ctdb_startup_callback(struct ctdb_context *ctdb, int status, void *p)
{
        if (status != 0) {
                DEBUG(DEBUG_ERR,("startup event failed\n"));
                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(5, 0),
                                 ctdb_run_startup, ctdb);
                return;
        }

        DEBUG(DEBUG_NOTICE,("startup event OK - enabling monitoring\n"));
        ctdb_set_runstate(ctdb, CTDB_RUNSTATE_RUNNING);
        ctdb->monitor->next_interval = 2;
        ctdb_run_notification_script(ctdb, "startup");

        ctdb->monitor->monitoring_mode = CTDB_MONITORING_ENABLED;

        tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                         timeval_current_ofs(ctdb->monitor->next_interval, 0),
                         ctdb_check_health, ctdb);
}
static void ctdb_run_startup(struct tevent_context *ev,
                             struct tevent_timer *te,
                             struct timeval t, void *private_data)
{
        struct ctdb_context *ctdb = talloc_get_type(private_data,
                                                    struct ctdb_context);
        int ret;

        /* This is necessary to avoid the "startup" event colliding
         * with the "ipreallocated" event from the takeover run
         * following the first recovery.  We might as well serialise
         * these things if we can.
         */
        if (ctdb->runstate < CTDB_RUNSTATE_STARTUP) {
                DEBUG(DEBUG_NOTICE,
                      ("Not yet in startup runstate. Wait one more second\n"));
                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(1, 0),
                                 ctdb_run_startup, ctdb);
                return;
        }

        /* release any IPs we hold from previous runs of the daemon */
        ctdb_release_all_ips(ctdb);

        DEBUG(DEBUG_NOTICE,("Running the \"startup\" event.\n"));
        ret = ctdb_event_script_callback(ctdb,
                                         ctdb->monitor->monitor_context,
                                         ctdb_startup_callback,
                                         ctdb, CTDB_EVENT_STARTUP, "%s", "");

        if (ret != 0) {
                DEBUG(DEBUG_ERR,("Unable to launch startup event script\n"));
                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(5, 0),
                                 ctdb_run_startup, ctdb);
        }
}
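
/*
  Every failure path above simply re-arms a timer on
  ctdb->monitor->monitor_context, so startup is retried until it succeeds
  or the context is freed by ctdb_stop_monitoring().
 */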
/*
  wait until we have finished initial recoveries before we start the
  monitoring events
 */
static void ctdb_wait_until_recovered(struct tevent_context *ev,
                                      struct tevent_timer *te,
                                      struct timeval t, void *private_data)
{
        struct ctdb_context *ctdb = talloc_get_type(private_data, struct ctdb_context);
        int ret;
        static int count = 0;

        count++;

        if (count < 60 || count%600 == 0) {
                DEBUG(DEBUG_NOTICE,("CTDB_WAIT_UNTIL_RECOVERED\n"));
                if (ctdb->nodes[ctdb->pnn]->flags & NODE_FLAGS_STOPPED) {
                        DEBUG(DEBUG_NOTICE,("Node is STOPPED. Node will NOT recover.\n"));
                }
        }

        if (ctdb->vnn_map->generation == INVALID_GENERATION) {
                ctdb->db_persistent_startup_generation = INVALID_GENERATION;

                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(1, 0),
                                 ctdb_wait_until_recovered, ctdb);
                return;
        }

        if (ctdb->recovery_mode != CTDB_RECOVERY_NORMAL) {
                ctdb->db_persistent_startup_generation = INVALID_GENERATION;

                DEBUG(DEBUG_NOTICE,(__location__ " in recovery. Wait one more second\n"));
                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(1, 0),
                                 ctdb_wait_until_recovered, ctdb);
                return;
        }

        if (!fast_start && timeval_elapsed(&ctdb->last_recovery_finished) < (ctdb->tunable.rerecovery_timeout + 3)) {
                ctdb->db_persistent_startup_generation = INVALID_GENERATION;

                DEBUG(DEBUG_NOTICE,(__location__ " wait for pending recoveries to end. Wait one more second.\n"));

                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(1, 0),
                                 ctdb_wait_until_recovered, ctdb);
                return;
        }

        if (ctdb->vnn_map->generation == ctdb->db_persistent_startup_generation) {
                DEBUG(DEBUG_INFO,(__location__ " skip ctdb_recheck_persistent_health() "
                                  "until the next recovery\n"));
                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(1, 0),
                                 ctdb_wait_until_recovered, ctdb);
                return;
        }

        ctdb->db_persistent_startup_generation = ctdb->vnn_map->generation;
        ret = ctdb_recheck_persistent_health(ctdb);
        if (ret != 0) {
                ctdb->db_persistent_check_errors++;
                if (ctdb->db_persistent_check_errors < ctdb->max_persistent_check_errors) {
                        DEBUG(DEBUG_ERR,
                              (__location__ "ctdb_recheck_persistent_health() "
                               "failed (%llu of %llu times) - retry later\n",
                               (unsigned long long)ctdb->db_persistent_check_errors,
                               (unsigned long long)ctdb->max_persistent_check_errors));
                        tevent_add_timer(ctdb->ev,
                                         ctdb->monitor->monitor_context,
                                         timeval_current_ofs(1, 0),
                                         ctdb_wait_until_recovered, ctdb);
                        return;
                }
                DEBUG(DEBUG_ALERT,(__location__
                                   "ctdb_recheck_persistent_health() failed (%llu times) - prepare shutdown\n",
                                   (unsigned long long)ctdb->db_persistent_check_errors));
                ctdb_shutdown_sequence(ctdb, 11);
                /* In case above returns due to duplicate shutdown */
                return;
        }
        ctdb->db_persistent_check_errors = 0;

        tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                         timeval_current(), ctdb_run_startup, ctdb);
}
/*
  see if the event scripts think we are healthy
 */
static void ctdb_check_health(struct tevent_context *ev,
                              struct tevent_timer *te,
                              struct timeval t, void *private_data)
{
        struct ctdb_context *ctdb = talloc_get_type(private_data, struct ctdb_context);
        bool skip_monitoring = false;
        int ret = 0;

        if (ctdb->recovery_mode != CTDB_RECOVERY_NORMAL ||
            ctdb->monitor->monitoring_mode == CTDB_MONITORING_DISABLED) {
                skip_monitoring = true;
        } else {
                if (ctdb_db_all_frozen(ctdb)) {
                        DEBUG(DEBUG_ERR,
                              ("Skip monitoring since databases are frozen\n"));
                        skip_monitoring = true;
                }
        }

        if (skip_monitoring) {
                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(ctdb->monitor->next_interval, 0),
                                 ctdb_check_health, ctdb);
                return;
        }

        ret = ctdb_event_script_callback(ctdb,
                                         ctdb->monitor->monitor_context,
                                         ctdb_health_callback,
                                         ctdb, CTDB_EVENT_MONITOR, "%s", "");
        if (ret != 0) {
                DEBUG(DEBUG_ERR,("Unable to launch monitor event script\n"));
                ctdb->monitor->next_interval = 5;
                tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                                 timeval_current_ofs(5, 0),
                                 ctdb_check_health, ctdb);
        }
}
/*
  (Temporarily) disabling monitoring will stop the monitor event scripts
  from running, but node health checks will still occur
 */
void ctdb_disable_monitoring(struct ctdb_context *ctdb)
{
        ctdb->monitor->monitoring_mode = CTDB_MONITORING_DISABLED;
        DEBUG(DEBUG_INFO,("Monitoring has been disabled\n"));
}

/*
   Re-enable running monitor events after they have been disabled
 */
void ctdb_enable_monitoring(struct ctdb_context *ctdb)
{
        ctdb->monitor->monitoring_mode = CTDB_MONITORING_ENABLED;
        ctdb->monitor->next_interval = 5;
        DEBUG(DEBUG_INFO,("Monitoring has been enabled\n"));
}
/* stop any monitoring
   this should only be done when shutting down the daemon
 */
void ctdb_stop_monitoring(struct ctdb_context *ctdb)
{
        talloc_free(ctdb->monitor->monitor_context);
        ctdb->monitor->monitor_context = NULL;

        ctdb->monitor->monitoring_mode = CTDB_MONITORING_DISABLED;
        ctdb->monitor->next_interval = 5;
        DEBUG(DEBUG_NOTICE,("Monitoring has been stopped\n"));
}
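
/*
  Note: freeing monitor_context in ctdb_stop_monitoring() also cancels every
  pending monitor and startup timer, since they are all allocated as children
  of that talloc context.
 */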
/*
  start watching for nodes that might be dead
 */
void ctdb_wait_for_first_recovery(struct ctdb_context *ctdb)
{
        ctdb_set_runstate(ctdb, CTDB_RUNSTATE_FIRST_RECOVERY);

        ctdb->monitor = talloc(ctdb, struct ctdb_monitor_state);
        CTDB_NO_MEMORY_FATAL(ctdb, ctdb->monitor);

        ctdb->monitor->monitor_context = talloc_new(ctdb->monitor);
        CTDB_NO_MEMORY_FATAL(ctdb, ctdb->monitor->monitor_context);

        tevent_add_timer(ctdb->ev, ctdb->monitor->monitor_context,
                         timeval_current_ofs(1, 0),
                         ctdb_wait_until_recovered, ctdb);
}
/*
  modify flags on a node
 */
int32_t ctdb_control_modflags(struct ctdb_context *ctdb, TDB_DATA indata)
{
        struct ctdb_node_flag_change *c = (struct ctdb_node_flag_change *)indata.dptr;
        struct ctdb_node *node;
        uint32_t old_flags;

        if (c->pnn >= ctdb->num_nodes) {
                DEBUG(DEBUG_ERR,(__location__ " Node %d is invalid, num_nodes :%d\n", c->pnn, ctdb->num_nodes));
                return -1;
        }

        node      = ctdb->nodes[c->pnn];
        old_flags = node->flags;
        if (c->pnn != ctdb->pnn) {
                c->old_flags = node->flags;
        }
        /* never take the DISCONNECTED bit from the sender's new_flags;
           carry it over from the old flags instead */
        node->flags  = c->new_flags & ~NODE_FLAGS_DISCONNECTED;
        node->flags |= (c->old_flags & NODE_FLAGS_DISCONNECTED);

        /* we don't let other nodes modify our STOPPED status */
        if (c->pnn == ctdb->pnn) {
                node->flags &= ~NODE_FLAGS_STOPPED;
                if (old_flags & NODE_FLAGS_STOPPED) {
                        node->flags |= NODE_FLAGS_STOPPED;
                }
        }

        /* we don't let other nodes modify our BANNED status */
        if (c->pnn == ctdb->pnn) {
                node->flags &= ~NODE_FLAGS_BANNED;
                if (old_flags & NODE_FLAGS_BANNED) {
                        node->flags |= NODE_FLAGS_BANNED;
                }
        }

        if (node->flags == c->old_flags) {
                DEBUG(DEBUG_INFO, ("Control modflags on node %u - Unchanged - flags 0x%x\n", c->pnn, node->flags));
                return 0;
        }

        DEBUG(DEBUG_INFO, ("Control modflags on node %u - flags now 0x%x\n", c->pnn, node->flags));

        if (node->flags == 0 && ctdb->runstate <= CTDB_RUNSTATE_STARTUP) {
                DEBUG(DEBUG_ERR, (__location__ " Node %u became healthy - force recovery for startup\n",
                                  c->pnn));
                ctdb->recovery_mode = CTDB_RECOVERY_ACTIVE;
        }

        /* tell the recovery daemon something has changed */
        c->new_flags = node->flags;
        ctdb_daemon_send_message(ctdb, ctdb->pnn,
                                 CTDB_SRVID_SET_NODE_FLAGS, indata);

        return 0;
}
/*
  return the monitoring mode
 */
int32_t ctdb_monitoring_mode(struct ctdb_context *ctdb)
{
        if (ctdb->monitor == NULL) {
                return CTDB_MONITORING_DISABLED;
        }
        return ctdb->monitor->monitoring_mode;
}
/*
 * Check if monitoring has been stopped
 */
bool ctdb_stopped_monitoring(struct ctdb_context *ctdb)
{
        return (ctdb->monitor->monitor_context == NULL ? true : false);
}