ctdb-daemon: Ignore SIGUSR1
ctdb/server/ctdb_daemon.c
1 /*
2 ctdb daemon code
4 Copyright (C) Andrew Tridgell 2006
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, see <http://www.gnu.org/licenses/>.
20 #include "includes.h"
21 #include "lib/tdb_wrap/tdb_wrap.h"
22 #include "tdb.h"
23 #include "lib/util/dlinklist.h"
24 #include "system/network.h"
25 #include "system/filesys.h"
26 #include "system/wait.h"
27 #include "../include/ctdb_version.h"
28 #include "../include/ctdb_client.h"
29 #include "../include/ctdb_private.h"
30 #include "../common/rb_tree.h"
31 #include <sys/socket.h>
33 struct ctdb_client_pid_list {
34 struct ctdb_client_pid_list *next, *prev;
35 struct ctdb_context *ctdb;
36 pid_t pid;
37 struct ctdb_client *client;
40 const char *ctdbd_pidfile = NULL;
42 static void daemon_incoming_packet(void *, struct ctdb_req_header *);
44 static void print_exit_message(void)
46 if (debug_extra != NULL && debug_extra[0] != '\0') {
47 DEBUG(DEBUG_NOTICE,("CTDB %s shutting down\n", debug_extra));
48 } else {
49 DEBUG(DEBUG_NOTICE,("CTDB daemon shutting down\n"));
51 /* Wait a second to allow pending log messages to be flushed */
52 sleep(1);
58 static void ctdb_time_tick(struct event_context *ev, struct timed_event *te,
59 struct timeval t, void *private_data)
61 struct ctdb_context *ctdb = talloc_get_type(private_data, struct ctdb_context);
63 if (getpid() != ctdb->ctdbd_pid) {
64 return;
67 event_add_timed(ctdb->ev, ctdb,
68 timeval_current_ofs(1, 0),
69 ctdb_time_tick, ctdb);
72 /* Used to trigger a dummy event once per second, to make
73 * detection of hangs more reliable.
75 static void ctdb_start_time_tickd(struct ctdb_context *ctdb)
77 event_add_timed(ctdb->ev, ctdb,
78 timeval_current_ofs(1, 0),
79 ctdb_time_tick, ctdb);
82 static void ctdb_start_periodic_events(struct ctdb_context *ctdb)
84 /* start monitoring for connected/disconnected nodes */
85 ctdb_start_keepalive(ctdb);
87 /* start periodic update of tcp tickle lists */
88 ctdb_start_tcp_tickle_update(ctdb);
90 /* start listening for recovery daemon pings */
91 ctdb_control_recd_ping(ctdb);
93 /* start listening to timer ticks */
94 ctdb_start_time_tickd(ctdb);
97 static void ignore_signal(int signum)
99 struct sigaction act;
101 memset(&act, 0, sizeof(act));
103 act.sa_handler = SIG_IGN;
104 sigemptyset(&act.sa_mask);
105 sigaddset(&act.sa_mask, signum);
106 sigaction(signum, &act, NULL);
111 send a packet to a client
113 static int daemon_queue_send(struct ctdb_client *client, struct ctdb_req_header *hdr)
115 CTDB_INCREMENT_STAT(client->ctdb, client_packets_sent);
116 if (hdr->operation == CTDB_REQ_MESSAGE) {
117 if (ctdb_queue_length(client->queue) > client->ctdb->tunable.max_queue_depth_drop_msg) {
118 DEBUG(DEBUG_ERR,("CTDB_REQ_MESSAGE queue full - killing client connection.\n"));
119 talloc_free(client);
120 return -1;
123 return ctdb_queue_send(client->queue, (uint8_t *)hdr, hdr->length);
127 message handler for when we are in daemon mode. This redirects the message
128 to the right client
130 static void daemon_message_handler(struct ctdb_context *ctdb, uint64_t srvid,
131 TDB_DATA data, void *private_data)
133 struct ctdb_client *client = talloc_get_type(private_data, struct ctdb_client);
134 struct ctdb_req_message *r;
135 int len;
137 /* construct a message to send to the client containing the data */
138 len = offsetof(struct ctdb_req_message, data) + data.dsize;
139 r = ctdbd_allocate_pkt(ctdb, ctdb, CTDB_REQ_MESSAGE,
140 len, struct ctdb_req_message);
141 CTDB_NO_MEMORY_VOID(ctdb, r);
143 talloc_set_name_const(r, "req_message packet");
145 r->srvid = srvid;
146 r->datalen = data.dsize;
147 memcpy(&r->data[0], data.dptr, data.dsize);
149 daemon_queue_send(client, &r->hdr);
151 talloc_free(r);
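/* Illustrative sketch (not part of the original file): a client receiving the
   CTDB_REQ_MESSAGE built above can recover the payload from the trailing
   data[] array, using only the fields set in this function:

     struct ctdb_req_message *m = (struct ctdb_req_message *)hdr;
     TDB_DATA payload;
     payload.dptr  = &m->data[0];
     payload.dsize = m->datalen;
     // m->srvid says which registered handler this message is addressed to
*/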
155 this is called when the ctdb daemon receives a ctdb request to
156 register a srvid for the client
158 int daemon_register_message_handler(struct ctdb_context *ctdb, uint32_t client_id, uint64_t srvid)
160 struct ctdb_client *client = ctdb_reqid_find(ctdb, client_id, struct ctdb_client);
161 int res;
162 if (client == NULL) {
163 DEBUG(DEBUG_ERR,("Bad client_id in daemon_request_register_message_handler\n"));
164 return -1;
166 res = ctdb_register_message_handler(ctdb, client, srvid, daemon_message_handler, client);
167 if (res != 0) {
168 DEBUG(DEBUG_ERR,(__location__ " Failed to register handler %llu in daemon\n",
169 (unsigned long long)srvid));
170 } else {
171 DEBUG(DEBUG_INFO,(__location__ " Registered message handler for srvid=%llu\n",
172 (unsigned long long)srvid));
175 return res;
179 this is called when the ctdb daemon receives a ctdb request to
180 remove a srvid from the client
182 int daemon_deregister_message_handler(struct ctdb_context *ctdb, uint32_t client_id, uint64_t srvid)
184 struct ctdb_client *client = ctdb_reqid_find(ctdb, client_id, struct ctdb_client);
185 if (client == NULL) {
186 DEBUG(DEBUG_ERR,("Bad client_id in daemon_request_deregister_message_handler\n"));
187 return -1;
189 return ctdb_deregister_message_handler(ctdb, srvid, client);
192 int daemon_check_srvids(struct ctdb_context *ctdb, TDB_DATA indata,
193 TDB_DATA *outdata)
195 uint64_t *ids;
196 int i, num_ids;
197 uint8_t *results;
199 if ((indata.dsize % sizeof(uint64_t)) != 0) {
200 DEBUG(DEBUG_ERR, ("Bad indata in daemon_check_srvids, "
201 "size=%d\n", (int)indata.dsize));
202 return -1;
205 ids = (uint64_t *)indata.dptr;
206 num_ids = indata.dsize / sizeof(uint64_t);
208 results = talloc_zero_array(outdata, uint8_t, (num_ids+7)/8);
209 if (results == NULL) {
210 DEBUG(DEBUG_ERR, ("talloc failed in daemon_check_srvids\n"));
211 return -1;
213 for (i=0; i<num_ids; i++) {
214 if (ctdb_check_message_handler(ctdb, ids[i])) {
215 results[i/8] |= (1 << (i%8));
218 outdata->dptr = (uint8_t *)results;
219 outdata->dsize = talloc_get_size(results);
220 return 0;
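/* Worked example (not part of the original file): the reply above packs one
   bit per queried srvid, so a caller that sent num_ids srvids can test the
   i-th answer exactly the way it was set:

     bool registered = (outdata->dptr[i / 8] & (1 << (i % 8))) != 0;

   e.g. a query for 10 srvids yields (10 + 7) / 8 == 2 bytes of result bits.
*/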
224 destroy a ctdb_client
226 static int ctdb_client_destructor(struct ctdb_client *client)
228 struct ctdb_db_context *ctdb_db;
230 ctdb_takeover_client_destructor_hook(client);
231 ctdb_reqid_remove(client->ctdb, client->client_id);
232 client->ctdb->num_clients--;
234 if (client->num_persistent_updates != 0) {
235 DEBUG(DEBUG_ERR,(__location__ " Client disconnecting with %u persistent updates in flight. Starting recovery\n", client->num_persistent_updates));
236 client->ctdb->recovery_mode = CTDB_RECOVERY_ACTIVE;
238 ctdb_db = find_ctdb_db(client->ctdb, client->db_id);
239 if (ctdb_db) {
240 DEBUG(DEBUG_ERR, (__location__ " client exit while transaction "
241 "commit active. Forcing recovery.\n"));
242 client->ctdb->recovery_mode = CTDB_RECOVERY_ACTIVE;
245 * trans3 transaction state:
247 * The destructor sets the pointer to NULL.
249 talloc_free(ctdb_db->persistent_state);
252 return 0;
257 this is called when the ctdb daemon receives a ctdb request message
258 from a local client over the unix domain socket
260 static void daemon_request_message_from_client(struct ctdb_client *client,
261 struct ctdb_req_message *c)
263 TDB_DATA data;
264 int res;
266 if (c->hdr.destnode == CTDB_CURRENT_NODE) {
267 c->hdr.destnode = ctdb_get_pnn(client->ctdb);
270 /* maybe the message is for another client on this node */
271 if (ctdb_get_pnn(client->ctdb)==c->hdr.destnode) {
272 ctdb_request_message(client->ctdb, (struct ctdb_req_header *)c);
273 return;
276 /* its for a remote node */
277 data.dptr = &c->data[0];
278 data.dsize = c->datalen;
279 res = ctdb_daemon_send_message(client->ctdb, c->hdr.destnode,
280 c->srvid, data);
281 if (res != 0) {
282 DEBUG(DEBUG_ERR,(__location__ " Failed to send message to remote node %u\n",
283 c->hdr.destnode));
288 struct daemon_call_state {
289 struct ctdb_client *client;
290 uint32_t reqid;
291 struct ctdb_call *call;
292 struct timeval start_time;
294 /* readonly request ? */
295 uint32_t readonly_fetch;
296 uint32_t client_callid;
300 complete a call from a client
302 static void daemon_call_from_client_callback(struct ctdb_call_state *state)
304 struct daemon_call_state *dstate = talloc_get_type(state->async.private_data,
305 struct daemon_call_state);
306 struct ctdb_reply_call *r;
307 int res;
308 uint32_t length;
309 struct ctdb_client *client = dstate->client;
310 struct ctdb_db_context *ctdb_db = state->ctdb_db;
312 talloc_steal(client, dstate);
313 talloc_steal(dstate, dstate->call);
315 res = ctdb_daemon_call_recv(state, dstate->call);
316 if (res != 0) {
317 DEBUG(DEBUG_ERR, (__location__ " ctdbd_call_recv() returned error\n"));
318 CTDB_DECREMENT_STAT(client->ctdb, pending_calls);
320 CTDB_UPDATE_LATENCY(client->ctdb, ctdb_db, "call_from_client_cb 1", call_latency, dstate->start_time);
321 return;
324 length = offsetof(struct ctdb_reply_call, data) + dstate->call->reply_data.dsize;
325 /* If the client asked for readonly FETCH, we remapped this to
326 FETCH_WITH_HEADER when calling the daemon. So we must
327 strip the extra header off the reply data before passing
328 it back to the client.
330 if (dstate->readonly_fetch
331 && dstate->client_callid == CTDB_FETCH_FUNC) {
332 length -= sizeof(struct ctdb_ltdb_header);
335 r = ctdbd_allocate_pkt(client->ctdb, dstate, CTDB_REPLY_CALL,
336 length, struct ctdb_reply_call);
337 if (r == NULL) {
338 DEBUG(DEBUG_ERR, (__location__ " Failed to allocate reply_call in ctdb daemon\n"));
339 CTDB_DECREMENT_STAT(client->ctdb, pending_calls);
340 CTDB_UPDATE_LATENCY(client->ctdb, ctdb_db, "call_from_client_cb 2", call_latency, dstate->start_time);
341 return;
343 r->hdr.reqid = dstate->reqid;
344 r->status = dstate->call->status;
346 if (dstate->readonly_fetch
347 && dstate->client_callid == CTDB_FETCH_FUNC) {
348 /* client only asked for a FETCH so we must strip off
349 the extra ctdb_ltdb header
351 r->datalen = dstate->call->reply_data.dsize - sizeof(struct ctdb_ltdb_header);
352 memcpy(&r->data[0], dstate->call->reply_data.dptr + sizeof(struct ctdb_ltdb_header), r->datalen);
353 } else {
354 r->datalen = dstate->call->reply_data.dsize;
355 memcpy(&r->data[0], dstate->call->reply_data.dptr, r->datalen);
358 res = daemon_queue_send(client, &r->hdr);
359 if (res == -1) {
360 /* client is dead - return immediately */
361 return;
363 if (res != 0) {
364 DEBUG(DEBUG_ERR, (__location__ " Failed to queue packet from daemon to client\n"));
366 CTDB_UPDATE_LATENCY(client->ctdb, ctdb_db, "call_from_client_cb 3", call_latency, dstate->start_time);
367 CTDB_DECREMENT_STAT(client->ctdb, pending_calls);
368 talloc_free(dstate);
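/* Worked example (not part of the original file): when the client asked for a
   plain readonly FETCH, the daemon actually ran FETCH_WITH_HEADER, so the
   remote reply_data has the layout

     [ struct ctdb_ltdb_header ][ record data ]

   and the code above hands only the record data back to the client:
   r->datalen == reply_data.dsize - sizeof(struct ctdb_ltdb_header), copied
   from reply_data.dptr + sizeof(struct ctdb_ltdb_header).
*/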
371 struct ctdb_daemon_packet_wrap {
372 struct ctdb_context *ctdb;
373 uint32_t client_id;
377 a wrapper to catch disconnected clients
379 static void daemon_incoming_packet_wrap(void *p, struct ctdb_req_header *hdr)
381 struct ctdb_client *client;
382 struct ctdb_daemon_packet_wrap *w = talloc_get_type(p,
383 struct ctdb_daemon_packet_wrap);
384 if (w == NULL) {
385 DEBUG(DEBUG_CRIT,(__location__ " Bad packet type '%s'\n", talloc_get_name(p)));
386 return;
389 client = ctdb_reqid_find(w->ctdb, w->client_id, struct ctdb_client);
390 if (client == NULL) {
391 DEBUG(DEBUG_ERR,(__location__ " Packet for disconnected client %u\n",
392 w->client_id));
393 talloc_free(w);
394 return;
396 talloc_free(w);
398 /* process it */
399 daemon_incoming_packet(client, hdr);
402 struct ctdb_deferred_fetch_call {
403 struct ctdb_deferred_fetch_call *next, *prev;
404 struct ctdb_req_call *c;
405 struct ctdb_daemon_packet_wrap *w;
408 struct ctdb_deferred_fetch_queue {
409 struct ctdb_deferred_fetch_call *deferred_calls;
412 struct ctdb_deferred_requeue {
413 struct ctdb_deferred_fetch_call *dfc;
414 struct ctdb_client *client;
417 /* called from a timer event and starts reprocessing the deferred call.*/
418 static void reprocess_deferred_call(struct event_context *ev, struct timed_event *te,
419 struct timeval t, void *private_data)
421 struct ctdb_deferred_requeue *dfr = (struct ctdb_deferred_requeue *)private_data;
422 struct ctdb_client *client = dfr->client;
424 talloc_steal(client, dfr->dfc->c);
425 daemon_incoming_packet(client, (struct ctdb_req_header *)dfr->dfc->c);
426 talloc_free(dfr);
429 /* the referral context is destroyed either after a timeout or when the initial
430 fetch-lock has finished.
431 at that point, immediately start reprocessing the queued-up deferred
432 calls (and, since we are dmaster at this stage, trigger the waiting
433 smbd processes to pick up and acquire the
434 record right away).
436 static int deferred_fetch_queue_destructor(struct ctdb_deferred_fetch_queue *dfq)
439 /* need to reprocess the packets from the queue explicitly instead of
440 just relying on a normal destructor, since we need to
441 call the clients in the same order as the requests were queued up
443 while (dfq->deferred_calls != NULL) {
444 struct ctdb_client *client;
445 struct ctdb_deferred_fetch_call *dfc = dfq->deferred_calls;
446 struct ctdb_deferred_requeue *dfr;
448 DLIST_REMOVE(dfq->deferred_calls, dfc);
450 client = ctdb_reqid_find(dfc->w->ctdb, dfc->w->client_id, struct ctdb_client);
451 if (client == NULL) {
452 DEBUG(DEBUG_ERR,(__location__ " Packet for disconnected client %u\n",
453 dfc->w->client_id));
454 continue;
457 /* process it by pushing it back onto the eventloop */
458 dfr = talloc(client, struct ctdb_deferred_requeue);
459 if (dfr == NULL) {
460 DEBUG(DEBUG_ERR,("Failed to allocate deferred fetch requeue structure\n"));
461 continue;
464 dfr->dfc = talloc_steal(dfr, dfc);
465 dfr->client = client;
467 event_add_timed(dfc->w->ctdb->ev, client, timeval_zero(), reprocess_deferred_call, dfr);
470 return 0;
473 /* insert the new deferral context into the rb tree.
474 there should never be a pre-existing context here, but check for it;
475 warn and destroy the previous context if there is already a deferral context
476 for this key.
478 static void *insert_dfq_callback(void *parm, void *data)
480 if (data) {
481 DEBUG(DEBUG_ERR,("Already have DFQ registered. Free old %p and create new %p\n", data, parm));
482 talloc_free(data);
484 return parm;
487 /* if the original fetch-lock did not complete within a reasonable time,
488 free the context; this causes all deferred requests to be
489 re-inserted into the event system.
491 static void dfq_timeout(struct event_context *ev, struct timed_event *te,
492 struct timeval t, void *private_data)
494 talloc_free(private_data);
497 /* This function is used in the local daemon to register a KEY in a database
498 for being "fetched"
499 While the remote fetch is in-flight, any further attempts to re-fetch the
500 same record will be deferred until the fetch completes.
502 static int setup_deferred_fetch_locks(struct ctdb_db_context *ctdb_db, struct ctdb_call *call)
504 uint32_t *k;
505 struct ctdb_deferred_fetch_queue *dfq;
507 k = ctdb_key_to_idkey(call, call->key);
508 if (k == NULL) {
509 DEBUG(DEBUG_ERR,("Failed to allocate key for deferred fetch\n"));
510 return -1;
513 dfq = talloc(call, struct ctdb_deferred_fetch_queue);
514 if (dfq == NULL) {
515 DEBUG(DEBUG_ERR,("Failed to allocate key for deferred fetch queue structure\n"));
516 talloc_free(k);
517 return -1;
519 dfq->deferred_calls = NULL;
521 trbt_insertarray32_callback(ctdb_db->deferred_fetch, k[0], &k[0], insert_dfq_callback, dfq);
523 talloc_set_destructor(dfq, deferred_fetch_queue_destructor);
525 /* if the fetch hasn't completed in 30 seconds, just tear it all down
526 and let it try again as the events are reissued */
527 event_add_timed(ctdb_db->ctdb->ev, dfq, timeval_current_ofs(30, 0), dfq_timeout, dfq);
529 talloc_free(k);
530 return 0;
533 /* check if this is a duplicate request for a fetch already in flight;
534 if it is, defer this call to be reprocessed later when
535 the in-flight fetch completes.
537 static int requeue_duplicate_fetch(struct ctdb_db_context *ctdb_db, struct ctdb_client *client, TDB_DATA key, struct ctdb_req_call *c)
539 uint32_t *k;
540 struct ctdb_deferred_fetch_queue *dfq;
541 struct ctdb_deferred_fetch_call *dfc;
543 k = ctdb_key_to_idkey(c, key);
544 if (k == NULL) {
545 DEBUG(DEBUG_ERR,("Failed to allocate key for deferred fetch\n"));
546 return -1;
549 dfq = trbt_lookuparray32(ctdb_db->deferred_fetch, k[0], &k[0]);
550 if (dfq == NULL) {
551 talloc_free(k);
552 return -1;
556 talloc_free(k);
558 dfc = talloc(dfq, struct ctdb_deferred_fetch_call);
559 if (dfc == NULL) {
560 DEBUG(DEBUG_ERR, ("Failed to allocate deferred fetch call structure\n"));
561 return -1;
564 dfc->w = talloc(dfc, struct ctdb_daemon_packet_wrap);
565 if (dfc->w == NULL) {
566 DEBUG(DEBUG_ERR,("Failed to allocate deferred fetch daemon packet wrap structure\n"));
567 talloc_free(dfc);
568 return -1;
571 dfc->c = talloc_steal(dfc, c);
572 dfc->w->ctdb = ctdb_db->ctdb;
573 dfc->w->client_id = client->client_id;
575 DLIST_ADD_END(dfq->deferred_calls, dfc, NULL);
577 return 0;
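/* Illustrative timeline of the fetch-collapse mechanism above (a sketch, not
   part of the original file), assuming the fetch_collapse tunable is enabled:

     t0: client A fetches key K   -> remote fetch sent, dfq for K created
     t1: client B fetches key K   -> requeue_duplicate_fetch() returns 0,
                                     B's call is parked on dfq->deferred_calls
     t2: A's fetch completes      -> dfq is freed; its destructor requeues B's
                                     call via a zero-timeout event
     t3 (no completion by 30s)    -> dfq_timeout() frees dfq instead, with the
                                     same requeue effect
*/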
582 this is called when the ctdb daemon receives a ctdb request call
583 from a local client over the unix domain socket
585 static void daemon_request_call_from_client(struct ctdb_client *client,
586 struct ctdb_req_call *c)
588 struct ctdb_call_state *state;
589 struct ctdb_db_context *ctdb_db;
590 struct daemon_call_state *dstate;
591 struct ctdb_call *call;
592 struct ctdb_ltdb_header header;
593 TDB_DATA key, data;
594 int ret;
595 struct ctdb_context *ctdb = client->ctdb;
596 struct ctdb_daemon_packet_wrap *w;
598 CTDB_INCREMENT_STAT(ctdb, total_calls);
599 CTDB_INCREMENT_STAT(ctdb, pending_calls);
601 ctdb_db = find_ctdb_db(client->ctdb, c->db_id);
602 if (!ctdb_db) {
603 DEBUG(DEBUG_ERR, (__location__ " Unknown database in request. db_id==0x%08x",
604 c->db_id));
605 CTDB_DECREMENT_STAT(ctdb, pending_calls);
606 return;
609 if (ctdb_db->unhealthy_reason) {
611 * this is just a warning, as the tdb should be empty anyway;
612 * only persistent databases can be unhealthy, and they don't
613 * use this code path
615 DEBUG(DEBUG_WARNING,("warn: db(%s) unhealthy in daemon_request_call_from_client(): %s\n",
616 ctdb_db->db_name, ctdb_db->unhealthy_reason));
619 key.dptr = c->data;
620 key.dsize = c->keylen;
622 w = talloc(ctdb, struct ctdb_daemon_packet_wrap);
623 CTDB_NO_MEMORY_VOID(ctdb, w);
625 w->ctdb = ctdb;
626 w->client_id = client->client_id;
628 ret = ctdb_ltdb_lock_fetch_requeue(ctdb_db, key, &header,
629 (struct ctdb_req_header *)c, &data,
630 daemon_incoming_packet_wrap, w, true);
631 if (ret == -2) {
632 /* will retry later */
633 CTDB_DECREMENT_STAT(ctdb, pending_calls);
634 return;
637 talloc_free(w);
639 if (ret != 0) {
640 DEBUG(DEBUG_ERR,(__location__ " Unable to fetch record\n"));
641 CTDB_DECREMENT_STAT(ctdb, pending_calls);
642 return;
646 /* check if this fetch request is a duplicate of a
647 request we already have in flight. If so, defer it until
648 the first request completes.
650 if (ctdb->tunable.fetch_collapse == 1) {
651 if (requeue_duplicate_fetch(ctdb_db, client, key, c) == 0) {
652 ret = ctdb_ltdb_unlock(ctdb_db, key);
653 if (ret != 0) {
654 DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
656 CTDB_DECREMENT_STAT(ctdb, pending_calls);
657 return;
661 /* Don't do READONLY if we don't have a tracking database */
662 if ((c->flags & CTDB_WANT_READONLY) && !ctdb_db->readonly) {
663 c->flags &= ~CTDB_WANT_READONLY;
666 if (header.flags & CTDB_REC_RO_REVOKE_COMPLETE) {
667 header.flags &= ~CTDB_REC_RO_FLAGS;
668 CTDB_INCREMENT_STAT(ctdb, total_ro_revokes);
669 CTDB_INCREMENT_DB_STAT(ctdb_db, db_ro_revokes);
670 if (ctdb_ltdb_store(ctdb_db, key, &header, data) != 0) {
671 ctdb_fatal(ctdb, "Failed to write header with cleared REVOKE flag");
673 /* and clear out the tracking data */
674 if (tdb_delete(ctdb_db->rottdb, key) != 0) {
675 DEBUG(DEBUG_ERR,(__location__ " Failed to clear out trackingdb record\n"));
679 /* if we are revoking, we must defer all other calls until the revoke
680 * has completed.
682 if (header.flags & CTDB_REC_RO_REVOKING_READONLY) {
683 talloc_free(data.dptr);
684 ret = ctdb_ltdb_unlock(ctdb_db, key);
686 if (ctdb_add_revoke_deferred_call(ctdb, ctdb_db, key, (struct ctdb_req_header *)c, daemon_incoming_packet, client) != 0) {
687 ctdb_fatal(ctdb, "Failed to add deferred call for revoke child");
689 CTDB_DECREMENT_STAT(ctdb, pending_calls);
690 return;
693 if ((header.dmaster == ctdb->pnn)
694 && (!(c->flags & CTDB_WANT_READONLY))
695 && (header.flags & (CTDB_REC_RO_HAVE_DELEGATIONS|CTDB_REC_RO_HAVE_READONLY)) ) {
696 header.flags |= CTDB_REC_RO_REVOKING_READONLY;
697 if (ctdb_ltdb_store(ctdb_db, key, &header, data) != 0) {
698 ctdb_fatal(ctdb, "Failed to store record with HAVE_DELEGATIONS set");
700 ret = ctdb_ltdb_unlock(ctdb_db, key);
702 if (ctdb_start_revoke_ro_record(ctdb, ctdb_db, key, &header, data) != 0) {
703 ctdb_fatal(ctdb, "Failed to start record revoke");
705 talloc_free(data.dptr);
707 if (ctdb_add_revoke_deferred_call(ctdb, ctdb_db, key, (struct ctdb_req_header *)c, daemon_incoming_packet, client) != 0) {
708 ctdb_fatal(ctdb, "Failed to add deferred call for revoke child");
711 CTDB_DECREMENT_STAT(ctdb, pending_calls);
712 return;
715 dstate = talloc(client, struct daemon_call_state);
716 if (dstate == NULL) {
717 ret = ctdb_ltdb_unlock(ctdb_db, key);
718 if (ret != 0) {
719 DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
722 DEBUG(DEBUG_ERR,(__location__ " Unable to allocate dstate\n"));
723 CTDB_DECREMENT_STAT(ctdb, pending_calls);
724 return;
726 dstate->start_time = timeval_current();
727 dstate->client = client;
728 dstate->reqid = c->hdr.reqid;
729 talloc_steal(dstate, data.dptr);
731 call = dstate->call = talloc_zero(dstate, struct ctdb_call);
732 if (call == NULL) {
733 ret = ctdb_ltdb_unlock(ctdb_db, key);
734 if (ret != 0) {
735 DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
738 DEBUG(DEBUG_ERR,(__location__ " Unable to allocate call\n"));
739 CTDB_DECREMENT_STAT(ctdb, pending_calls);
740 CTDB_UPDATE_LATENCY(ctdb, ctdb_db, "call_from_client 1", call_latency, dstate->start_time);
741 return;
744 dstate->readonly_fetch = 0;
745 call->call_id = c->callid;
746 call->key = key;
747 call->call_data.dptr = c->data + c->keylen;
748 call->call_data.dsize = c->calldatalen;
749 call->flags = c->flags;
751 if (c->flags & CTDB_WANT_READONLY) {
752 /* client wants a readonly record, so translate this into a
753 fetch with header. remember what the client asked for
754 so we can remap the reply back to the proper format
755 for the client
757 dstate->client_callid = call->call_id;
758 call->call_id = CTDB_FETCH_WITH_HEADER_FUNC;
759 dstate->readonly_fetch = 1;
762 if (header.dmaster == ctdb->pnn) {
763 state = ctdb_call_local_send(ctdb_db, call, &header, &data);
764 } else {
765 state = ctdb_daemon_call_send_remote(ctdb_db, call, &header);
766 if (ctdb->tunable.fetch_collapse == 1) {
767 /* This request triggered a remote fetch-lock.
768 set up a deferral for this key so any additional
769 fetch-locks are deferred until the current one
770 finishes.
772 setup_deferred_fetch_locks(ctdb_db, call);
776 ret = ctdb_ltdb_unlock(ctdb_db, key);
777 if (ret != 0) {
778 DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
781 if (state == NULL) {
782 DEBUG(DEBUG_ERR,(__location__ " Unable to setup call send\n"));
783 CTDB_DECREMENT_STAT(ctdb, pending_calls);
784 CTDB_UPDATE_LATENCY(ctdb, ctdb_db, "call_from_client 2", call_latency, dstate->start_time);
785 return;
787 talloc_steal(state, dstate);
788 talloc_steal(client, state);
790 state->async.fn = daemon_call_from_client_callback;
791 state->async.private_data = dstate;
795 static void daemon_request_control_from_client(struct ctdb_client *client,
796 struct ctdb_req_control *c);
798 /* data contains a packet from the client */
799 static void daemon_incoming_packet(void *p, struct ctdb_req_header *hdr)
801 struct ctdb_client *client = talloc_get_type(p, struct ctdb_client);
802 TALLOC_CTX *tmp_ctx;
803 struct ctdb_context *ctdb = client->ctdb;
805 /* place the packet as a child of a tmp_ctx. We then use
806 talloc_free() below to free it. If any of the calls want
807 to keep it, then they will steal it somewhere else, and the
808 talloc_free() will be a no-op */
809 tmp_ctx = talloc_new(client);
810 talloc_steal(tmp_ctx, hdr);
812 if (hdr->ctdb_magic != CTDB_MAGIC) {
813 ctdb_set_error(client->ctdb, "Non CTDB packet rejected in daemon\n");
814 goto done;
817 if (hdr->ctdb_version != CTDB_PROTOCOL) {
818 ctdb_set_error(client->ctdb, "Bad CTDB version 0x%x rejected in daemon\n", hdr->ctdb_version);
819 goto done;
822 switch (hdr->operation) {
823 case CTDB_REQ_CALL:
824 CTDB_INCREMENT_STAT(ctdb, client.req_call);
825 daemon_request_call_from_client(client, (struct ctdb_req_call *)hdr);
826 break;
828 case CTDB_REQ_MESSAGE:
829 CTDB_INCREMENT_STAT(ctdb, client.req_message);
830 daemon_request_message_from_client(client, (struct ctdb_req_message *)hdr);
831 break;
833 case CTDB_REQ_CONTROL:
834 CTDB_INCREMENT_STAT(ctdb, client.req_control);
835 daemon_request_control_from_client(client, (struct ctdb_req_control *)hdr);
836 break;
838 default:
839 DEBUG(DEBUG_CRIT,(__location__ " daemon: unrecognized operation %u\n",
840 hdr->operation));
843 done:
844 talloc_free(tmp_ctx);
848 called when the daemon gets an incoming packet
850 static void ctdb_daemon_read_cb(uint8_t *data, size_t cnt, void *args)
852 struct ctdb_client *client = talloc_get_type(args, struct ctdb_client);
853 struct ctdb_req_header *hdr;
855 if (cnt == 0) {
856 talloc_free(client);
857 return;
860 CTDB_INCREMENT_STAT(client->ctdb, client_packets_recv);
862 if (cnt < sizeof(*hdr)) {
863 ctdb_set_error(client->ctdb, "Bad packet length %u in daemon\n",
864 (unsigned)cnt);
865 return;
867 hdr = (struct ctdb_req_header *)data;
868 if (cnt != hdr->length) {
869 ctdb_set_error(client->ctdb, "Bad header length %u expected %u in daemon\n",
870 (unsigned)hdr->length, (unsigned)cnt);
871 return;
874 if (hdr->ctdb_magic != CTDB_MAGIC) {
875 ctdb_set_error(client->ctdb, "Non CTDB packet rejected\n");
876 return;
879 if (hdr->ctdb_version != CTDB_PROTOCOL) {
880 ctdb_set_error(client->ctdb, "Bad CTDB version 0x%x rejected in daemon\n", hdr->ctdb_version);
881 return;
884 DEBUG(DEBUG_DEBUG,(__location__ " client request %u of type %u length %u from "
885 "node %u to %u\n", hdr->reqid, hdr->operation, hdr->length,
886 hdr->srcnode, hdr->destnode));
888 /* it is the responsibility of the incoming packet function to free 'data' */
889 daemon_incoming_packet(client, hdr);
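/* Illustrative sketch (not part of the original file): the validation above
   only passes if the sending client filled in the generic header roughly as

     hdr->length       = total_packet_length;   // must equal the bytes sent
     hdr->ctdb_magic   = CTDB_MAGIC;
     hdr->ctdb_version = CTDB_PROTOCOL;
     hdr->operation    = CTDB_REQ_CALL, CTDB_REQ_MESSAGE or CTDB_REQ_CONTROL;

   where total_packet_length stands for whatever length the client wrote.
*/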
893 static int ctdb_clientpid_destructor(struct ctdb_client_pid_list *client_pid)
895 if (client_pid->ctdb->client_pids != NULL) {
896 DLIST_REMOVE(client_pid->ctdb->client_pids, client_pid);
899 return 0;
903 static void ctdb_accept_client(struct event_context *ev, struct fd_event *fde,
904 uint16_t flags, void *private_data)
906 struct sockaddr_un addr;
907 socklen_t len;
908 int fd;
909 struct ctdb_context *ctdb = talloc_get_type(private_data, struct ctdb_context);
910 struct ctdb_client *client;
911 struct ctdb_client_pid_list *client_pid;
912 pid_t peer_pid = 0;
914 memset(&addr, 0, sizeof(addr));
915 len = sizeof(addr);
916 fd = accept(ctdb->daemon.sd, (struct sockaddr *)&addr, &len);
917 if (fd == -1) {
918 return;
921 set_nonblocking(fd);
922 set_close_on_exec(fd);
924 DEBUG(DEBUG_DEBUG,(__location__ " Created SOCKET FD:%d to connected child\n", fd));
926 client = talloc_zero(ctdb, struct ctdb_client);
927 if (ctdb_get_peer_pid(fd, &peer_pid) == 0) {
928 DEBUG(DEBUG_INFO,("Connected client with pid:%u\n", (unsigned)peer_pid));
931 client->ctdb = ctdb;
932 client->fd = fd;
933 client->client_id = ctdb_reqid_new(ctdb, client);
934 client->pid = peer_pid;
936 client_pid = talloc(client, struct ctdb_client_pid_list);
937 if (client_pid == NULL) {
938 DEBUG(DEBUG_ERR,("Failed to allocate client pid structure\n"));
939 close(fd);
940 talloc_free(client);
941 return;
943 client_pid->ctdb = ctdb;
944 client_pid->pid = peer_pid;
945 client_pid->client = client;
947 DLIST_ADD(ctdb->client_pids, client_pid);
949 client->queue = ctdb_queue_setup(ctdb, client, fd, CTDB_DS_ALIGNMENT,
950 ctdb_daemon_read_cb, client,
951 "client-%u", client->pid);
953 talloc_set_destructor(client, ctdb_client_destructor);
954 talloc_set_destructor(client_pid, ctdb_clientpid_destructor);
955 ctdb->num_clients++;
961 create a unix domain socket and bind it
962 return a file descriptor open on the socket
964 static int ux_socket_bind(struct ctdb_context *ctdb)
966 struct sockaddr_un addr;
968 ctdb->daemon.sd = socket(AF_UNIX, SOCK_STREAM, 0);
969 if (ctdb->daemon.sd == -1) {
970 return -1;
973 memset(&addr, 0, sizeof(addr));
974 addr.sun_family = AF_UNIX;
975 strncpy(addr.sun_path, ctdb->daemon.name, sizeof(addr.sun_path)-1);
977 /* First check if an old ctdbd might be running */
978 if (connect(ctdb->daemon.sd,
979 (struct sockaddr *)&addr, sizeof(addr)) == 0) {
980 DEBUG(DEBUG_CRIT,
981 ("Something is already listening on ctdb socket '%s'\n",
982 ctdb->daemon.name));
983 goto failed;
986 /* Remove any old socket */
987 unlink(ctdb->daemon.name);
989 set_close_on_exec(ctdb->daemon.sd);
990 set_nonblocking(ctdb->daemon.sd);
992 if (bind(ctdb->daemon.sd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
993 DEBUG(DEBUG_CRIT,("Unable to bind on ctdb socket '%s'\n", ctdb->daemon.name));
994 goto failed;
997 if (chown(ctdb->daemon.name, geteuid(), getegid()) != 0 ||
998 chmod(ctdb->daemon.name, 0700) != 0) {
999 DEBUG(DEBUG_CRIT,("Unable to secure ctdb socket '%s'\n", ctdb->daemon.name));
1000 goto failed;
1004 if (listen(ctdb->daemon.sd, 100) != 0) {
1005 DEBUG(DEBUG_CRIT,("Unable to listen on ctdb socket '%s'\n", ctdb->daemon.name));
1006 goto failed;
1009 return 0;
1011 failed:
1012 close(ctdb->daemon.sd);
1013 ctdb->daemon.sd = -1;
1014 return -1;
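/* Illustrative sketch (not part of the original file): a local client reaches
   this daemon with the mirror image of the bind above, where sockname stands
   for the same path as ctdb->daemon.name:

     int fd = socket(AF_UNIX, SOCK_STREAM, 0);
     struct sockaddr_un addr;
     memset(&addr, 0, sizeof(addr));
     addr.sun_family = AF_UNIX;
     strncpy(addr.sun_path, sockname, sizeof(addr.sun_path) - 1);
     if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
             // daemon not running, or not listening on this path
     }
*/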
1017 static void initialise_node_flags (struct ctdb_context *ctdb)
1019 if (ctdb->pnn == -1) {
1020 ctdb_fatal(ctdb, "PNN is set to -1 (unknown value)");
1023 ctdb->nodes[ctdb->pnn]->flags &= ~NODE_FLAGS_DISCONNECTED;
1025 /* do we start out in DISABLED mode? */
1026 if (ctdb->start_as_disabled != 0) {
1027 DEBUG(DEBUG_NOTICE, ("This node is configured to start in DISABLED state\n"));
1028 ctdb->nodes[ctdb->pnn]->flags |= NODE_FLAGS_DISABLED;
1030 /* do we start out in STOPPED mode? */
1031 if (ctdb->start_as_stopped != 0) {
1032 DEBUG(DEBUG_NOTICE, ("This node is configured to start in STOPPED state\n"));
1033 ctdb->nodes[ctdb->pnn]->flags |= NODE_FLAGS_STOPPED;
1037 static void ctdb_setup_event_callback(struct ctdb_context *ctdb, int status,
1038 void *private_data)
1040 if (status != 0) {
1041 ctdb_die(ctdb, "Failed to run setup event");
1043 ctdb_run_notification_script(ctdb, "setup");
1045 /* tell all other nodes we've just started up */
1046 ctdb_daemon_send_control(ctdb, CTDB_BROADCAST_ALL,
1047 0, CTDB_CONTROL_STARTUP, 0,
1048 CTDB_CTRL_FLAG_NOREPLY,
1049 tdb_null, NULL, NULL);
1051 /* Start the recovery daemon */
1052 if (ctdb_start_recoverd(ctdb) != 0) {
1053 DEBUG(DEBUG_ALERT,("Failed to start recovery daemon\n"));
1054 exit(11);
1057 ctdb_start_periodic_events(ctdb);
1059 ctdb_wait_for_first_recovery(ctdb);
1062 static struct timeval tevent_before_wait_ts;
1063 static struct timeval tevent_after_wait_ts;
1065 static void ctdb_tevent_trace(enum tevent_trace_point tp,
1066 void *private_data)
1068 struct timeval diff;
1069 struct timeval now;
1070 struct ctdb_context *ctdb =
1071 talloc_get_type(private_data, struct ctdb_context);
1073 if (getpid() != ctdb->ctdbd_pid) {
1074 return;
1077 now = timeval_current();
1079 switch (tp) {
1080 case TEVENT_TRACE_BEFORE_WAIT:
1081 if (!timeval_is_zero(&tevent_after_wait_ts)) {
1082 diff = timeval_until(&tevent_after_wait_ts, &now);
1083 if (diff.tv_sec > 3) {
1084 DEBUG(DEBUG_ERR,
1085 ("Handling event took %ld seconds!\n",
1086 diff.tv_sec));
1089 tevent_before_wait_ts = now;
1090 break;
1092 case TEVENT_TRACE_AFTER_WAIT:
1093 if (!timeval_is_zero(&tevent_before_wait_ts)) {
1094 diff = timeval_until(&tevent_before_wait_ts, &now);
1095 if (diff.tv_sec > 3) {
1096 DEBUG(DEBUG_CRIT,
1097 ("No event for %ld seconds!\n",
1098 diff.tv_sec));
1101 tevent_after_wait_ts = now;
1102 break;
1104 default:
1105 /* Do nothing for future tevent trace points */ ;
1109 static void ctdb_remove_pidfile(void)
1111 /* Only the main ctdbd's PID matches the session ID (SID) */
1112 if (ctdbd_pidfile != NULL && getsid(0) == getpid()) {
1113 if (unlink(ctdbd_pidfile) == 0) {
1114 DEBUG(DEBUG_NOTICE, ("Removed PID file %s\n",
1115 ctdbd_pidfile));
1116 } else {
1117 DEBUG(DEBUG_WARNING, ("Failed to remove PID file %s\n",
1118 ctdbd_pidfile));
1123 static void ctdb_create_pidfile(pid_t pid)
1125 if (ctdbd_pidfile != NULL) {
1126 FILE *fp;
1128 fp = fopen(ctdbd_pidfile, "w");
1129 if (fp == NULL) {
1130 DEBUG(DEBUG_ALERT,
1131 ("Failed to open PID file %s\n", ctdbd_pidfile));
1132 exit(11);
1135 fprintf(fp, "%d\n", pid);
1136 fclose(fp);
1137 DEBUG(DEBUG_NOTICE, ("Created PID file %s\n", ctdbd_pidfile));
1138 atexit(ctdb_remove_pidfile);
1142 static void ctdb_initialise_vnn_map(struct ctdb_context *ctdb)
1144 int i, j, count;
1146 /* initialize the vnn mapping table, skipping any deleted nodes */
1147 ctdb->vnn_map = talloc(ctdb, struct ctdb_vnn_map);
1148 CTDB_NO_MEMORY_FATAL(ctdb, ctdb->vnn_map);
1150 count = 0;
1151 for (i = 0; i < ctdb->num_nodes; i++) {
1152 if ((ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) == 0) {
1153 count++;
1157 ctdb->vnn_map->generation = INVALID_GENERATION;
1158 ctdb->vnn_map->size = count;
1159 ctdb->vnn_map->map = talloc_array(ctdb->vnn_map, uint32_t, ctdb->vnn_map->size);
1160 CTDB_NO_MEMORY_FATAL(ctdb, ctdb->vnn_map->map);
1162 for(i=0, j=0; i < ctdb->vnn_map->size; i++) {
1163 if (ctdb->nodes[i]->flags & NODE_FLAGS_DELETED) {
1164 continue;
1166 ctdb->vnn_map->map[j] = i;
1167 j++;
1171 static void ctdb_set_my_pnn(struct ctdb_context *ctdb)
1173 int nodeid;
1175 if (ctdb->address == NULL) {
1176 ctdb_fatal(ctdb,
1177 "Can not determine PNN - node address is not set\n");
1180 nodeid = ctdb_ip_to_nodeid(ctdb, ctdb->address);
1181 if (nodeid == -1) {
1182 ctdb_fatal(ctdb,
1183 "Can not determine PNN - node address not found in node list\n");
1186 ctdb->pnn = ctdb->nodes[nodeid]->pnn;
1187 DEBUG(DEBUG_NOTICE, ("PNN is %u\n", ctdb->pnn));
1191 start the protocol going as a daemon
1193 int ctdb_start_daemon(struct ctdb_context *ctdb, bool do_fork)
1195 int res, ret = -1;
1196 struct fd_event *fde;
1198 /* create a unix domain stream socket to listen to */
1199 res = ux_socket_bind(ctdb);
1200 if (res!=0) {
1201 DEBUG(DEBUG_ALERT,("Cannot continue. Exiting!\n"));
1202 exit(10);
1205 if (do_fork && fork()) {
1206 return 0;
1209 tdb_reopen_all(false);
1211 if (do_fork) {
1212 if (setsid() == -1) {
1213 ctdb_die(ctdb, "Failed to setsid()\n");
1215 close(0);
1216 if (open("/dev/null", O_RDONLY) != 0) {
1217 DEBUG(DEBUG_ALERT,(__location__ " Failed to setup stdin on /dev/null\n"));
1218 exit(11);
1221 ignore_signal(SIGPIPE);
1222 ignore_signal(SIGUSR1);
1224 ctdb->ctdbd_pid = getpid();
1225 DEBUG(DEBUG_ERR, ("Starting CTDBD (Version %s) as PID: %u\n",
1226 CTDB_VERSION_STRING, ctdb->ctdbd_pid));
1227 ctdb_create_pidfile(ctdb->ctdbd_pid);
1229 /* Make sure we log something when the daemon terminates.
1230 * This must be the first exit handler to run (so the last to
1231 * be registered).
1233 atexit(print_exit_message);
1235 if (ctdb->do_setsched) {
1236 /* try to set us up as realtime */
1237 if (!set_scheduler()) {
1238 exit(1);
1240 DEBUG(DEBUG_NOTICE, ("Set real-time scheduler priority\n"));
1243 ctdb->ev = event_context_init(NULL);
1244 tevent_loop_allow_nesting(ctdb->ev);
1245 tevent_set_trace_callback(ctdb->ev, ctdb_tevent_trace, ctdb);
1246 ret = ctdb_init_tevent_logging(ctdb);
1247 if (ret != 0) {
1248 DEBUG(DEBUG_ALERT,("Failed to initialize TEVENT logging\n"));
1249 exit(1);
1252 /* set up a handler to pick up sigchld */
1253 if (ctdb_init_sigchld(ctdb) == NULL) {
1254 DEBUG(DEBUG_CRIT,("Failed to set up signal handler for SIGCHLD\n"));
1255 exit(1);
1258 ctdb_set_child_logging(ctdb);
1260 /* initialize statistics collection */
1261 ctdb_statistics_init(ctdb);
1263 /* force initial recovery for election */
1264 ctdb->recovery_mode = CTDB_RECOVERY_ACTIVE;
1266 ctdb_set_runstate(ctdb, CTDB_RUNSTATE_INIT);
1267 ret = ctdb_event_script(ctdb, CTDB_EVENT_INIT);
1268 if (ret != 0) {
1269 ctdb_die(ctdb, "Failed to run init event\n");
1271 ctdb_run_notification_script(ctdb, "init");
1273 if (strcmp(ctdb->transport, "tcp") == 0) {
1274 ret = ctdb_tcp_init(ctdb);
1276 #ifdef USE_INFINIBAND
1277 if (strcmp(ctdb->transport, "ib") == 0) {
1278 ret = ctdb_ibw_init(ctdb);
1280 #endif
1281 if (ret != 0) {
1282 DEBUG(DEBUG_ERR,("Failed to initialise transport '%s'\n", ctdb->transport));
1283 return -1;
1286 if (ctdb->methods == NULL) {
1287 DEBUG(DEBUG_ALERT,(__location__ " Can not initialize transport. ctdb->methods is NULL\n"));
1288 ctdb_fatal(ctdb, "transport is unavailable. can not initialize.");
1291 /* Initialise the transport. This sets the node address if it
1292 * was not set via the command-line. */
1293 if (ctdb->methods->initialise(ctdb) != 0) {
1294 ctdb_fatal(ctdb, "transport failed to initialise");
1297 ctdb_set_my_pnn(ctdb);
1299 initialise_node_flags(ctdb);
1301 if (ctdb->public_addresses_file) {
1302 ret = ctdb_set_public_addresses(ctdb, true);
1303 if (ret == -1) {
1304 DEBUG(DEBUG_ALERT,("Unable to setup public address list\n"));
1305 exit(1);
1309 ctdb_initialise_vnn_map(ctdb);
1311 /* attach to existing databases */
1312 if (ctdb_attach_databases(ctdb) != 0) {
1313 ctdb_fatal(ctdb, "Failed to attach to databases\n");
1316 /* start frozen, then let the first election sort things out */
1317 if (!ctdb_blocking_freeze(ctdb)) {
1318 ctdb_fatal(ctdb, "Failed to get initial freeze\n");
1321 /* now start accepting clients, only can do this once frozen */
1322 fde = event_add_fd(ctdb->ev, ctdb, ctdb->daemon.sd,
1323 EVENT_FD_READ,
1324 ctdb_accept_client, ctdb);
1325 if (fde == NULL) {
1326 ctdb_fatal(ctdb, "Failed to add daemon socket to event loop");
1328 tevent_fd_set_auto_close(fde);
1330 /* Start the transport */
1331 if (ctdb->methods->start(ctdb) != 0) {
1332 DEBUG(DEBUG_ALERT,("transport failed to start!\n"));
1333 ctdb_fatal(ctdb, "transport failed to start");
1336 /* Recovery daemon and timed events are started from the
1337 * callback, only after the setup event completes
1338 * successfully.
1340 ctdb_set_runstate(ctdb, CTDB_RUNSTATE_SETUP);
1341 ret = ctdb_event_script_callback(ctdb,
1342 ctdb,
1343 ctdb_setup_event_callback,
1344 ctdb,
1345 CTDB_EVENT_SETUP,
1346 "%s",
1347 "");
1348 if (ret != 0) {
1349 DEBUG(DEBUG_CRIT,("Failed to set up 'setup' event\n"));
1350 exit(1);
1353 lockdown_memory(ctdb->valgrinding);
1355 /* go into a wait loop to allow other nodes to complete */
1356 event_loop_wait(ctdb->ev);
1358 DEBUG(DEBUG_CRIT,("event_loop_wait() returned. this should not happen\n"));
1359 exit(1);
1363 allocate a packet for use in daemon<->daemon communication
1365 struct ctdb_req_header *_ctdb_transport_allocate(struct ctdb_context *ctdb,
1366 TALLOC_CTX *mem_ctx,
1367 enum ctdb_operation operation,
1368 size_t length, size_t slength,
1369 const char *type)
1371 int size;
1372 struct ctdb_req_header *hdr;
1374 length = MAX(length, slength);
1375 size = (length+(CTDB_DS_ALIGNMENT-1)) & ~(CTDB_DS_ALIGNMENT-1);
1377 if (ctdb->methods == NULL) {
1378 DEBUG(DEBUG_INFO,(__location__ " Unable to allocate transport packet for operation %u of length %u. Transport is DOWN.\n",
1379 operation, (unsigned)length));
1380 return NULL;
1383 hdr = (struct ctdb_req_header *)ctdb->methods->allocate_pkt(mem_ctx, size);
1384 if (hdr == NULL) {
1385 DEBUG(DEBUG_ERR,("Unable to allocate transport packet for operation %u of length %u\n",
1386 operation, (unsigned)length));
1387 return NULL;
1389 talloc_set_name_const(hdr, type);
1390 memset(hdr, 0, slength);
1391 hdr->length = length;
1392 hdr->operation = operation;
1393 hdr->ctdb_magic = CTDB_MAGIC;
1394 hdr->ctdb_version = CTDB_PROTOCOL;
1395 hdr->generation = ctdb->vnn_map->generation;
1396 hdr->srcnode = ctdb->pnn;
1398 return hdr;
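/* Worked example (not part of the original file), assuming CTDB_DS_ALIGNMENT
   is 8: a requested length of 21 bytes is rounded up as

     (21 + 7) & ~7 == 24

   so every packet handed to the transport is a multiple of the alignment,
   while hdr->length still records the unpadded length.
*/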
1401 struct daemon_control_state {
1402 struct daemon_control_state *next, *prev;
1403 struct ctdb_client *client;
1404 struct ctdb_req_control *c;
1405 uint32_t reqid;
1406 struct ctdb_node *node;
1410 callback when a control reply comes in
1412 static void daemon_control_callback(struct ctdb_context *ctdb,
1413 int32_t status, TDB_DATA data,
1414 const char *errormsg,
1415 void *private_data)
1417 struct daemon_control_state *state = talloc_get_type(private_data,
1418 struct daemon_control_state);
1419 struct ctdb_client *client = state->client;
1420 struct ctdb_reply_control *r;
1421 size_t len;
1422 int ret;
1424 /* construct a message to send to the client containing the data */
1425 len = offsetof(struct ctdb_reply_control, data) + data.dsize;
1426 if (errormsg) {
1427 len += strlen(errormsg);
1429 r = ctdbd_allocate_pkt(ctdb, state, CTDB_REPLY_CONTROL, len,
1430 struct ctdb_reply_control);
1431 CTDB_NO_MEMORY_VOID(ctdb, r);
1433 r->hdr.reqid = state->reqid;
1434 r->status = status;
1435 r->datalen = data.dsize;
1436 r->errorlen = 0;
1437 memcpy(&r->data[0], data.dptr, data.dsize);
1438 if (errormsg) {
1439 r->errorlen = strlen(errormsg);
1440 memcpy(&r->data[r->datalen], errormsg, r->errorlen);
1443 ret = daemon_queue_send(client, &r->hdr);
1444 if (ret != -1) {
1445 talloc_free(state);
1450 fail all pending controls to a disconnected node
1452 void ctdb_daemon_cancel_controls(struct ctdb_context *ctdb, struct ctdb_node *node)
1454 struct daemon_control_state *state;
1455 while ((state = node->pending_controls)) {
1456 DLIST_REMOVE(node->pending_controls, state);
1457 daemon_control_callback(ctdb, (uint32_t)-1, tdb_null,
1458 "node is disconnected", state);
1463 destroy a daemon_control_state
1465 static int daemon_control_destructor(struct daemon_control_state *state)
1467 if (state->node) {
1468 DLIST_REMOVE(state->node->pending_controls, state);
1470 return 0;
1474 this is called when the ctdb daemon receives a ctdb request control
1475 from a local client over the unix domain socket
1477 static void daemon_request_control_from_client(struct ctdb_client *client,
1478 struct ctdb_req_control *c)
1480 TDB_DATA data;
1481 int res;
1482 struct daemon_control_state *state;
1483 TALLOC_CTX *tmp_ctx = talloc_new(client);
1485 if (c->hdr.destnode == CTDB_CURRENT_NODE) {
1486 c->hdr.destnode = client->ctdb->pnn;
1489 state = talloc(client, struct daemon_control_state);
1490 CTDB_NO_MEMORY_VOID(client->ctdb, state);
1492 state->client = client;
1493 state->c = talloc_steal(state, c);
1494 state->reqid = c->hdr.reqid;
1495 if (ctdb_validate_pnn(client->ctdb, c->hdr.destnode)) {
1496 state->node = client->ctdb->nodes[c->hdr.destnode];
1497 DLIST_ADD(state->node->pending_controls, state);
1498 } else {
1499 state->node = NULL;
1502 talloc_set_destructor(state, daemon_control_destructor);
1504 if (c->flags & CTDB_CTRL_FLAG_NOREPLY) {
1505 talloc_steal(tmp_ctx, state);
1508 data.dptr = &c->data[0];
1509 data.dsize = c->datalen;
1510 res = ctdb_daemon_send_control(client->ctdb, c->hdr.destnode,
1511 c->srvid, c->opcode, client->client_id,
1512 c->flags,
1513 data, daemon_control_callback,
1514 state);
1515 if (res != 0) {
1516 DEBUG(DEBUG_ERR,(__location__ " Failed to send control to remote node %u\n",
1517 c->hdr.destnode));
1520 talloc_free(tmp_ctx);
1524 register a call function
1526 int ctdb_daemon_set_call(struct ctdb_context *ctdb, uint32_t db_id,
1527 ctdb_fn_t fn, int id)
1529 struct ctdb_registered_call *call;
1530 struct ctdb_db_context *ctdb_db;
1532 ctdb_db = find_ctdb_db(ctdb, db_id);
1533 if (ctdb_db == NULL) {
1534 return -1;
1537 call = talloc(ctdb_db, struct ctdb_registered_call);
1538 call->fn = fn;
1539 call->id = id;
1541 DLIST_ADD(ctdb_db->calls, call);
1542 return 0;
1548 this local messaging handler is ugly, but is needed to prevent
1549 recursion in ctdb_send_message() when the destination node is the
1550 same as the source node
1552 struct ctdb_local_message {
1553 struct ctdb_context *ctdb;
1554 uint64_t srvid;
1555 TDB_DATA data;
1558 static void ctdb_local_message_trigger(struct event_context *ev, struct timed_event *te,
1559 struct timeval t, void *private_data)
1561 struct ctdb_local_message *m = talloc_get_type(private_data,
1562 struct ctdb_local_message);
1563 int res;
1565 res = ctdb_dispatch_message(m->ctdb, m->srvid, m->data);
1566 if (res != 0) {
1567 DEBUG(DEBUG_ERR, (__location__ " Failed to dispatch message for srvid=%llu\n",
1568 (unsigned long long)m->srvid));
1570 talloc_free(m);
1573 static int ctdb_local_message(struct ctdb_context *ctdb, uint64_t srvid, TDB_DATA data)
1575 struct ctdb_local_message *m;
1576 m = talloc(ctdb, struct ctdb_local_message);
1577 CTDB_NO_MEMORY(ctdb, m);
1579 m->ctdb = ctdb;
1580 m->srvid = srvid;
1581 m->data = data;
1582 m->data.dptr = talloc_memdup(m, m->data.dptr, m->data.dsize);
1583 if (m->data.dptr == NULL) {
1584 talloc_free(m);
1585 return -1;
1588 /* this needs to be done as an event to prevent recursion */
1589 event_add_timed(ctdb->ev, m, timeval_zero(), ctdb_local_message_trigger, m);
1590 return 0;
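/* Sketch of the recursion this avoids (not part of the original file): if a
   self-addressed message were dispatched directly, a handler that itself
   sends a message to this node would re-enter the dispatcher on the same
   stack:

     handler(srvid A) -> ctdb_daemon_send_message(own pnn, srvid B)
                      -> ctdb_dispatch_message(srvid B) -> handler(srvid B) ...

   Deferring delivery through the zero-timeout event above breaks that chain.
*/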
1594 send a ctdb message
1596 int ctdb_daemon_send_message(struct ctdb_context *ctdb, uint32_t pnn,
1597 uint64_t srvid, TDB_DATA data)
1599 struct ctdb_req_message *r;
1600 int len;
1602 if (ctdb->methods == NULL) {
1603 DEBUG(DEBUG_INFO,(__location__ " Failed to send message. Transport is DOWN\n"));
1604 return -1;
1607 /* see if this is a message to ourselves */
1608 if (pnn == ctdb->pnn) {
1609 return ctdb_local_message(ctdb, srvid, data);
1612 len = offsetof(struct ctdb_req_message, data) + data.dsize;
1613 r = ctdb_transport_allocate(ctdb, ctdb, CTDB_REQ_MESSAGE, len,
1614 struct ctdb_req_message);
1615 CTDB_NO_MEMORY(ctdb, r);
1617 r->hdr.destnode = pnn;
1618 r->srvid = srvid;
1619 r->datalen = data.dsize;
1620 memcpy(&r->data[0], data.dptr, data.dsize);
1622 ctdb_queue_packet(ctdb, &r->hdr);
1624 talloc_free(r);
1625 return 0;
1630 struct ctdb_client_notify_list {
1631 struct ctdb_client_notify_list *next, *prev;
1632 struct ctdb_context *ctdb;
1633 uint64_t srvid;
1634 TDB_DATA data;
1638 static int ctdb_client_notify_destructor(struct ctdb_client_notify_list *nl)
1640 int ret;
1642 DEBUG(DEBUG_ERR,("Sending client notify message for srvid:%llu\n", (unsigned long long)nl->srvid));
1644 ret = ctdb_daemon_send_message(nl->ctdb, CTDB_BROADCAST_CONNECTED, (unsigned long long)nl->srvid, nl->data);
1645 if (ret != 0) {
1646 DEBUG(DEBUG_ERR,("Failed to send client notify message\n"));
1649 return 0;
1652 int32_t ctdb_control_register_notify(struct ctdb_context *ctdb, uint32_t client_id, TDB_DATA indata)
1654 struct ctdb_client_notify_register *notify = (struct ctdb_client_notify_register *)indata.dptr;
1655 struct ctdb_client *client = ctdb_reqid_find(ctdb, client_id, struct ctdb_client);
1656 struct ctdb_client_notify_list *nl;
1658 DEBUG(DEBUG_INFO,("Register srvid %llu for client %d\n", (unsigned long long)notify->srvid, client_id));
1660 if (indata.dsize < offsetof(struct ctdb_client_notify_register, notify_data)) {
1661 DEBUG(DEBUG_ERR,(__location__ " Too little data in control : %d\n", (int)indata.dsize));
1662 return -1;
1665 if (indata.dsize != (notify->len + offsetof(struct ctdb_client_notify_register, notify_data))) {
1666 DEBUG(DEBUG_ERR,(__location__ " Wrong amount of data in control. Got %d, expected %d\n", (int)indata.dsize, (int)(notify->len + offsetof(struct ctdb_client_notify_register, notify_data))));
1667 return -1;
1671 if (client == NULL) {
1672 DEBUG(DEBUG_ERR,(__location__ " Could not find client parent structure. You can not send this control to a remote node\n"));
1673 return -1;
1676 for(nl=client->notify; nl; nl=nl->next) {
1677 if (nl->srvid == notify->srvid) {
1678 break;
1681 if (nl != NULL) {
1682 DEBUG(DEBUG_ERR,(__location__ " Notification for srvid:%llu already exists for this client\n", (unsigned long long)notify->srvid));
1683 return -1;
1686 nl = talloc(client, struct ctdb_client_notify_list);
1687 CTDB_NO_MEMORY(ctdb, nl);
1688 nl->ctdb = ctdb;
1689 nl->srvid = notify->srvid;
1690 nl->data.dsize = notify->len;
1691 nl->data.dptr = talloc_size(nl, nl->data.dsize);
1692 CTDB_NO_MEMORY(ctdb, nl->data.dptr);
1693 memcpy(nl->data.dptr, notify->notify_data, nl->data.dsize);
1695 DLIST_ADD(client->notify, nl);
1696 talloc_set_destructor(nl, ctdb_client_notify_destructor);
1698 return 0;
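/* Illustrative sketch (not part of the original file; exact field types are
   assumed): a client registering a notify payload of len bytes builds indata
   so that the two size checks above hold:

     size_t sz = offsetof(struct ctdb_client_notify_register, notify_data) + len;
     struct ctdb_client_notify_register *notify = talloc_size(mem_ctx, sz);
     notify->srvid = my_srvid;           // my_srvid, mem_ctx: caller-provided
     notify->len   = len;
     memcpy(notify->notify_data, data, len);
     indata.dptr  = (uint8_t *)notify;
     indata.dsize = sz;
*/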
1701 int32_t ctdb_control_deregister_notify(struct ctdb_context *ctdb, uint32_t client_id, TDB_DATA indata)
1703 struct ctdb_client_notify_deregister *notify = (struct ctdb_client_notify_deregister *)indata.dptr;
1704 struct ctdb_client *client = ctdb_reqid_find(ctdb, client_id, struct ctdb_client);
1705 struct ctdb_client_notify_list *nl;
1707 DEBUG(DEBUG_INFO,("Deregister srvid %llu for client %d\n", (unsigned long long)notify->srvid, client_id));
1709 if (client == NULL) {
1710 DEBUG(DEBUG_ERR,(__location__ " Could not find client parent structure. You can not send this control to a remote node\n"));
1711 return -1;
1714 for(nl=client->notify; nl; nl=nl->next) {
1715 if (nl->srvid == notify->srvid) {
1716 break;
1719 if (nl == NULL) {
1720 DEBUG(DEBUG_ERR,(__location__ " No notification for srvid:%llu found for this client\n", (unsigned long long)notify->srvid));
1721 return -1;
1724 DLIST_REMOVE(client->notify, nl);
1725 talloc_set_destructor(nl, NULL);
1726 talloc_free(nl);
1728 return 0;
1731 struct ctdb_client *ctdb_find_client_by_pid(struct ctdb_context *ctdb, pid_t pid)
1733 struct ctdb_client_pid_list *client_pid;
1735 for (client_pid = ctdb->client_pids; client_pid; client_pid=client_pid->next) {
1736 if (client_pid->pid == pid) {
1737 return client_pid->client;
1740 return NULL;
1744 /* This control is used by samba when probing if a process (of a samba daemon)
1745 exists on the node.
1746 Samba does this when it needs to check if a subrecord in one of the
1747 databases is still valid, or if it is stale and can be removed.
1748 If the node is in an unhealthy or stopped state we just kill off the samba
1749 process holding this sub-record and return to the calling samba that
1750 the process does not exist.
1751 This allows us to forcefully recall subrecords registered by samba processes
1752 on banned and stopped nodes.
1754 int32_t ctdb_control_process_exists(struct ctdb_context *ctdb, pid_t pid)
1756 struct ctdb_client *client;
1758 if (ctdb->nodes[ctdb->pnn]->flags & (NODE_FLAGS_BANNED|NODE_FLAGS_STOPPED)) {
1759 client = ctdb_find_client_by_pid(ctdb, pid);
1760 if (client != NULL) {
1761 DEBUG(DEBUG_NOTICE,(__location__ " Killing client with pid:%d on banned/stopped node\n", (int)pid));
1762 talloc_free(client);
1764 return -1;
1767 return kill(pid, 0);
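/* Note: kill(pid, 0) sends no signal; it only performs the existence and
   permission checks, returning 0 if the process exists and -1 (e.g. ESRCH)
   if it does not. */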
1770 int ctdb_control_getnodesfile(struct ctdb_context *ctdb, uint32_t opcode, TDB_DATA indata, TDB_DATA *outdata)
1772 struct ctdb_node_map *node_map = NULL;
1774 CHECK_CONTROL_DATA_SIZE(0);
1776 node_map = ctdb_read_nodes_file(ctdb, ctdb->nodes_file);
1777 if (node_map == NULL) {
1778 DEBUG(DEBUG_ERR, ("Failed to read nodes file\n"));
1779 return -1;
1782 outdata->dptr = (unsigned char *)node_map;
1783 outdata->dsize = talloc_get_size(outdata->dptr);
1785 return 0;
1788 void ctdb_shutdown_sequence(struct ctdb_context *ctdb, int exit_code)
1790 if (ctdb->runstate == CTDB_RUNSTATE_SHUTDOWN) {
1791 DEBUG(DEBUG_NOTICE,("Already shutting down so will not proceed.\n"));
1792 return;
1795 DEBUG(DEBUG_NOTICE,("Shutdown sequence commencing.\n"));
1796 ctdb_set_runstate(ctdb, CTDB_RUNSTATE_SHUTDOWN);
1797 ctdb_stop_recoverd(ctdb);
1798 ctdb_stop_keepalive(ctdb);
1799 ctdb_stop_monitoring(ctdb);
1800 ctdb_release_all_ips(ctdb);
1801 ctdb_event_script(ctdb, CTDB_EVENT_SHUTDOWN);
1802 if (ctdb->methods != NULL) {
1803 ctdb->methods->shutdown(ctdb);
1806 DEBUG(DEBUG_NOTICE,("Shutdown sequence complete, exiting.\n"));
1807 exit(exit_code);