/*
 * source3/lib/ctdbd_conn.c
 */
1 /*
2 Unix SMB/CIFS implementation.
3 Samba internal messaging functions
4 Copyright (C) 2007 by Volker Lendecke
5 Copyright (C) 2007 by Andrew Tridgell
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "replace.h"
#include <tevent.h>
#include "util_tdb.h"
#include "serverid.h"
#include "ctdbd_conn.h"
#include "system/select.h"
#include "lib/util/sys_rw_data.h"
#include "lib/util/iov_buf.h"
#include "lib/util/select.h"
#include "lib/util/debug.h"
#include "lib/util/talloc_stack.h"
#include "lib/util/genrand.h"
#include "lib/util/fault.h"
#include "lib/util/dlinklist.h"
#include "lib/util/tevent_unix.h"
#include "lib/util/sys_rw.h"
#include "lib/util/blocking.h"
#include "ctdb/include/ctdb_protocol.h"
40 /* paths to these include files come from --with-ctdb= in configure */
/*
 * One registered message callback: incoming CTDB_REQ_MESSAGE packets
 * whose srvid matches are routed to "cb" with "private_data".
 */
struct ctdbd_srvid_cb {
	uint64_t srvid;		/* server id this callback is bound to */
	int (*cb)(struct tevent_context *ev,
		  uint32_t src_vnn, uint32_t dst_vnn,
		  uint64_t dst_srvid,
		  const uint8_t *msg, size_t msglen,
		  void *private_data);
	void *private_data;	/* opaque pointer handed back to cb */
};
52 struct ctdb_pkt_send_state;
53 struct ctdb_pkt_recv_state;
/*
 * A connection to the local ctdbd daemon. Used synchronously by
 * default; ctdbd_setup_fde() switches it to non-blocking async mode.
 */
struct ctdbd_connection {
	uint32_t reqid;		/* last request id handed out */
	uint32_t our_vnn;	/* node number of this node */
	uint64_t rand_srvid;	/* random srvid registered at connect time */
	struct ctdbd_srvid_cb *callbacks; /* talloc array of msg callbacks */
	int fd;			/* unix domain socket to ctdbd */
	int timeout;		/* poll timeout in ms, -1 == infinite */

	/* For async connections, enabled via ctdbd_setup_fde() */
	struct tevent_fd *fde;

	/* State to track in-progress read */
	struct ctdb_read_state {
		/* Receive buffer for the initial packet length */
		uint32_t msglen;

		/* iovec state for current read */
		struct iovec iov;
		struct iovec *iovs;
		int iovcnt;

		/* allocated receive buffer based on packet length */
		struct ctdb_req_header *hdr;
	} read_state;

	/* Lists of pending async reads and writes */
	struct ctdb_pkt_recv_state *recv_list;
	struct ctdb_pkt_send_state *send_list;
};
85 static void ctdbd_async_socket_handler(struct tevent_context *ev,
86 struct tevent_fd *fde,
87 uint16_t flags,
88 void *private_data);
90 static bool ctdbd_conn_has_async_sends(struct ctdbd_connection *conn)
92 return (conn->send_list != NULL);
95 static bool ctdbd_conn_has_async_reqs(struct ctdbd_connection *conn)
97 return (conn->fde != NULL);
100 static uint32_t ctdbd_next_reqid(struct ctdbd_connection *conn)
102 conn->reqid += 1;
103 if (conn->reqid == 0) {
104 conn->reqid += 1;
106 return conn->reqid;
109 static int ctdbd_control(struct ctdbd_connection *conn,
110 uint32_t vnn, uint32_t opcode,
111 uint64_t srvid, uint32_t flags,
112 TDB_DATA data,
113 TALLOC_CTX *mem_ctx, TDB_DATA *outdata,
114 int32_t *cstatus);
/*
 * exit on fatal communications errors with the ctdbd daemon
 */
static void cluster_fatal(const char *why)
{
	DEBUG(0,("cluster fatal event: %s - exiting immediately\n", why));
	/* we don't use smb_panic() as we don't want to delay to write
	   a core file. We need to release this process id immediately
	   so that someone else can take over without getting sharing
	   violations */
	_exit(1);
}
132 static void ctdb_packet_dump(struct ctdb_req_header *hdr)
134 if (DEBUGLEVEL < 11) {
135 return;
137 DEBUGADD(11, ("len=%d, magic=%x, vers=%d, gen=%d, op=%d, reqid=%d\n",
138 (int)hdr->length, (int)hdr->ctdb_magic,
139 (int)hdr->ctdb_version, (int)hdr->generation,
140 (int)hdr->operation, (int)hdr->reqid));
144 * Register a srvid with ctdbd
146 int register_with_ctdbd(struct ctdbd_connection *conn, uint64_t srvid,
147 int (*cb)(struct tevent_context *ev,
148 uint32_t src_vnn, uint32_t dst_vnn,
149 uint64_t dst_srvid,
150 const uint8_t *msg, size_t msglen,
151 void *private_data),
152 void *private_data)
155 int ret;
156 int32_t cstatus;
157 size_t num_callbacks;
158 struct ctdbd_srvid_cb *tmp;
160 ret = ctdbd_control_local(conn, CTDB_CONTROL_REGISTER_SRVID, srvid, 0,
161 tdb_null, NULL, NULL, &cstatus);
162 if (ret != 0) {
163 return ret;
166 num_callbacks = talloc_array_length(conn->callbacks);
168 tmp = talloc_realloc(conn, conn->callbacks, struct ctdbd_srvid_cb,
169 num_callbacks + 1);
170 if (tmp == NULL) {
171 return ENOMEM;
173 conn->callbacks = tmp;
175 conn->callbacks[num_callbacks] = (struct ctdbd_srvid_cb) {
176 .srvid = srvid, .cb = cb, .private_data = private_data
179 return 0;
182 static int ctdbd_msg_call_back(struct tevent_context *ev,
183 struct ctdbd_connection *conn,
184 struct ctdb_req_message_old *msg)
186 uint32_t msg_len;
187 size_t i, num_callbacks;
189 msg_len = msg->hdr.length;
190 if (msg_len < offsetof(struct ctdb_req_message_old, data)) {
191 DBG_DEBUG("len %"PRIu32" too small\n", msg_len);
192 return 0;
194 msg_len -= offsetof(struct ctdb_req_message_old, data);
196 if (msg_len < msg->datalen) {
197 DBG_DEBUG("msg_len=%"PRIu32" < msg->datalen=%"PRIu32"\n",
198 msg_len, msg->datalen);
199 return 0;
202 num_callbacks = talloc_array_length(conn->callbacks);
204 for (i=0; i<num_callbacks; i++) {
205 struct ctdbd_srvid_cb *cb = &conn->callbacks[i];
207 if ((cb->srvid == msg->srvid) && (cb->cb != NULL)) {
208 int ret;
210 ret = cb->cb(ev,
211 msg->hdr.srcnode, msg->hdr.destnode,
212 msg->srvid, msg->data, msg->datalen,
213 cb->private_data);
214 if (ret != 0) {
215 return ret;
219 return 0;
223 * get our vnn from the cluster
225 static int get_cluster_vnn(struct ctdbd_connection *conn, uint32_t *vnn)
227 int32_t cstatus=-1;
228 int ret;
229 ret = ctdbd_control_local(conn, CTDB_CONTROL_GET_PNN, 0, 0,
230 tdb_null, NULL, NULL, &cstatus);
231 if (ret != 0) {
232 DEBUG(1, ("ctdbd_control failed: %s\n", strerror(ret)));
233 return ret;
235 *vnn = (uint32_t)cstatus;
236 return ret;
240 * Are we active (i.e. not banned or stopped?)
242 static bool ctdbd_working(struct ctdbd_connection *conn, uint32_t vnn)
244 int32_t cstatus=-1;
245 TDB_DATA outdata;
246 struct ctdb_node_map_old *m;
247 bool ok = false;
248 uint32_t i;
249 int ret;
251 ret = ctdbd_control_local(conn, CTDB_CONTROL_GET_NODEMAP, 0, 0,
252 tdb_null, talloc_tos(), &outdata, &cstatus);
253 if (ret != 0) {
254 DEBUG(1, ("ctdbd_control failed: %s\n", strerror(ret)));
255 return false;
257 if ((cstatus != 0) || (outdata.dptr == NULL)) {
258 DEBUG(2, ("Received invalid ctdb data\n"));
259 return false;
262 m = (struct ctdb_node_map_old *)outdata.dptr;
264 for (i=0; i<m->num; i++) {
265 if (vnn == m->nodes[i].pnn) {
266 break;
270 if (i == m->num) {
271 DEBUG(2, ("Did not find ourselves (node %d) in nodemap\n",
272 (int)vnn));
273 goto fail;
276 if ((m->nodes[i].flags & NODE_FLAGS_INACTIVE) != 0) {
277 DEBUG(2, ("Node has status %x, not active\n",
278 (int)m->nodes[i].flags));
279 goto fail;
282 ok = true;
283 fail:
284 TALLOC_FREE(outdata.dptr);
285 return ok;
288 uint32_t ctdbd_vnn(const struct ctdbd_connection *conn)
290 return conn->our_vnn;
/*
 * Get us a ctdb connection: open the unix domain socket to ctdbd.
 * On success *pfd holds the connected fd; returns 0 or an errno value.
 */
static int ctdbd_connect(const char *sockname, int *pfd)
{
	struct sockaddr_un addr = { 0, };
	size_t namelen;
	socklen_t salen;
	int fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd == -1) {
		int err = errno;
		DEBUG(3, ("Could not create socket: %s\n", strerror(err)));
		return err;
	}

	addr.sun_family = AF_UNIX;

	/* strlcpy reports the source length; >= buffer size means truncation */
	namelen = strlcpy(addr.sun_path, sockname, sizeof(addr.sun_path));
	if (namelen >= sizeof(addr.sun_path)) {
		DEBUG(3, ("%s: Socket name too long: %s\n", __func__,
			  sockname));
		close(fd);
		return ENAMETOOLONG;
	}

	salen = sizeof(struct sockaddr_un);

	if (connect(fd, (struct sockaddr *)(void *)&addr, salen) == -1) {
		int err = errno;
		DEBUG(1, ("connect(%s) failed: %s\n", sockname,
			  strerror(err)));
		close(fd);
		return err;
	}

	*pfd = fd;
	return 0;
}
335 static int ctdb_read_packet(int fd, int timeout, TALLOC_CTX *mem_ctx,
336 struct ctdb_req_header **result)
338 struct ctdb_req_header *req;
339 uint32_t msglen;
340 ssize_t nread;
342 if (timeout != -1) {
343 struct pollfd pfd = { .fd = fd, .events = POLLIN };
344 int ret;
346 ret = sys_poll_intr(&pfd, 1, timeout);
347 if (ret == -1) {
348 return errno;
350 if (ret == 0) {
351 return ETIMEDOUT;
353 if (ret != 1) {
354 return EIO;
358 nread = read_data(fd, &msglen, sizeof(msglen));
359 if (nread == -1) {
360 return errno;
362 if (nread == 0) {
363 return EIO;
366 if (msglen < sizeof(struct ctdb_req_header)) {
367 return EIO;
370 req = talloc_size(mem_ctx, msglen);
371 if (req == NULL) {
372 return ENOMEM;
374 talloc_set_name_const(req, "struct ctdb_req_header");
376 req->length = msglen;
378 nread = read_data(fd, ((char *)req) + sizeof(msglen),
379 msglen - sizeof(msglen));
380 if (nread == -1) {
381 TALLOC_FREE(req);
382 return errno;
384 if (nread == 0) {
385 TALLOC_FREE(req);
386 return EIO;
389 *result = req;
390 return 0;
394 * Read a full ctdbd request. If we have a messaging context, defer incoming
395 * messages that might come in between.
398 static int ctdb_read_req(struct ctdbd_connection *conn, uint32_t reqid,
399 TALLOC_CTX *mem_ctx, struct ctdb_req_header **result)
401 struct ctdb_req_header *hdr;
402 int ret;
404 next_pkt:
406 ret = ctdb_read_packet(conn->fd, conn->timeout, mem_ctx, &hdr);
407 if (ret != 0) {
408 DEBUG(0, ("ctdb_read_packet failed: %s\n", strerror(ret)));
409 cluster_fatal("ctdbd died\n");
412 DEBUG(11, ("Received ctdb packet\n"));
413 ctdb_packet_dump(hdr);
415 if (hdr->operation == CTDB_REQ_MESSAGE) {
416 struct ctdb_req_message_old *msg = (struct ctdb_req_message_old *)hdr;
418 ret = ctdbd_msg_call_back(NULL, conn, msg);
419 if (ret != 0) {
420 TALLOC_FREE(hdr);
421 return ret;
424 TALLOC_FREE(hdr);
425 goto next_pkt;
428 if ((reqid != 0) && (hdr->reqid != reqid)) {
429 /* we got the wrong reply */
430 DEBUG(0,("Discarding mismatched ctdb reqid %u should have "
431 "been %u\n", hdr->reqid, reqid));
432 TALLOC_FREE(hdr);
433 goto next_pkt;
436 *result = talloc_move(mem_ctx, &hdr);
438 return 0;
442 * This prepares conn for handling async requests
444 int ctdbd_setup_fde(struct ctdbd_connection *conn, struct tevent_context *ev)
446 int ret;
448 ret = set_blocking(conn->fd, false);
449 if (ret == -1) {
450 return errno;
453 conn->fde = tevent_add_fd(ev,
454 conn,
455 conn->fd,
456 TEVENT_FD_READ,
457 ctdbd_async_socket_handler,
458 conn);
459 if (conn->fde == NULL) {
460 return ENOMEM;
463 return 0;
466 static int ctdbd_connection_destructor(struct ctdbd_connection *c);
469 * Get us a ctdbd connection
472 static int ctdbd_init_connection_internal(TALLOC_CTX *mem_ctx,
473 const char *sockname, int timeout,
474 struct ctdbd_connection *conn)
476 int ret;
478 conn->timeout = timeout;
479 if (conn->timeout == 0) {
480 conn->timeout = -1;
483 ret = ctdbd_connect(sockname, &conn->fd);
484 if (ret != 0) {
485 DEBUG(1, ("ctdbd_connect failed: %s\n", strerror(ret)));
486 return ret;
488 talloc_set_destructor(conn, ctdbd_connection_destructor);
490 ret = get_cluster_vnn(conn, &conn->our_vnn);
491 if (ret != 0) {
492 DEBUG(10, ("get_cluster_vnn failed: %s\n", strerror(ret)));
493 return ret;
496 if (!ctdbd_working(conn, conn->our_vnn)) {
497 DEBUG(2, ("Node is not working, can not connect\n"));
498 return EIO;
501 generate_random_buffer((unsigned char *)&conn->rand_srvid,
502 sizeof(conn->rand_srvid));
504 ret = register_with_ctdbd(conn, conn->rand_srvid, NULL, NULL);
505 if (ret != 0) {
506 DEBUG(5, ("Could not register random srvid: %s\n",
507 strerror(ret)));
508 return ret;
511 return 0;
514 int ctdbd_init_connection(TALLOC_CTX *mem_ctx,
515 const char *sockname, int timeout,
516 struct ctdbd_connection **pconn)
518 struct ctdbd_connection *conn;
519 int ret;
521 if (!(conn = talloc_zero(mem_ctx, struct ctdbd_connection))) {
522 DEBUG(0, ("talloc failed\n"));
523 return ENOMEM;
526 ret = ctdbd_init_connection_internal(mem_ctx,
527 sockname,
528 timeout,
529 conn);
530 if (ret != 0) {
531 DBG_ERR("ctdbd_init_connection_internal failed (%s)\n",
532 strerror(ret));
533 goto fail;
536 *pconn = conn;
537 return 0;
539 fail:
540 TALLOC_FREE(conn);
541 return ret;
544 int ctdbd_reinit_connection(TALLOC_CTX *mem_ctx,
545 const char *sockname, int timeout,
546 struct ctdbd_connection *conn)
548 int ret;
550 ret = ctdbd_connection_destructor(conn);
551 if (ret != 0) {
552 DBG_ERR("ctdbd_connection_destructor failed\n");
553 return ret;
556 ret = ctdbd_init_connection_internal(mem_ctx,
557 sockname,
558 timeout,
559 conn);
560 if (ret != 0) {
561 DBG_ERR("ctdbd_init_connection_internal failed (%s)\n",
562 strerror(ret));
563 return ret;
566 return 0;
569 int ctdbd_conn_get_fd(struct ctdbd_connection *conn)
571 return conn->fd;
575 * Packet handler to receive and handle a ctdb message
577 static int ctdb_handle_message(struct tevent_context *ev,
578 struct ctdbd_connection *conn,
579 struct ctdb_req_header *hdr)
581 struct ctdb_req_message_old *msg;
583 if (hdr->operation != CTDB_REQ_MESSAGE) {
584 DEBUG(0, ("Received async msg of type %u, discarding\n",
585 hdr->operation));
586 return EINVAL;
589 msg = (struct ctdb_req_message_old *)hdr;
591 ctdbd_msg_call_back(ev, conn, msg);
593 return 0;
596 void ctdbd_socket_readable(struct tevent_context *ev,
597 struct ctdbd_connection *conn)
599 struct ctdb_req_header *hdr = NULL;
600 int ret;
602 ret = ctdb_read_packet(conn->fd, conn->timeout, talloc_tos(), &hdr);
603 if (ret != 0) {
604 DEBUG(0, ("ctdb_read_packet failed: %s\n", strerror(ret)));
605 cluster_fatal("ctdbd died\n");
608 ret = ctdb_handle_message(ev, conn, hdr);
610 TALLOC_FREE(hdr);
612 if (ret != 0) {
613 DEBUG(10, ("could not handle incoming message: %s\n",
614 strerror(ret)));
618 static int ctdb_pkt_send_handler(struct ctdbd_connection *conn);
619 static int ctdb_pkt_recv_handler(struct ctdbd_connection *conn);
621 /* Used for async connection and async ctcb requests */
622 static void ctdbd_async_socket_handler(struct tevent_context *ev,
623 struct tevent_fd *fde,
624 uint16_t flags,
625 void *private_data)
627 struct ctdbd_connection *conn = talloc_get_type_abort(
628 private_data, struct ctdbd_connection);
629 int ret;
631 if ((flags & TEVENT_FD_READ) != 0) {
632 ret = ctdb_pkt_recv_handler(conn);
633 if (ret != 0) {
634 DBG_DEBUG("ctdb_read_iov_handler returned %s\n",
635 strerror(ret));
637 return;
640 if ((flags & TEVENT_FD_WRITE) != 0) {
641 ret = ctdb_pkt_send_handler(conn);
642 if (ret != 0) {
643 DBG_DEBUG("ctdb_write_iov_handler returned %s\n",
644 strerror(ret));
645 return;
647 return;
650 return;
653 int ctdbd_messaging_send_iov(struct ctdbd_connection *conn,
654 uint32_t dst_vnn, uint64_t dst_srvid,
655 const struct iovec *iov, int iovlen)
657 struct ctdb_req_message_old r;
658 struct iovec iov2[iovlen+1];
659 size_t buflen = iov_buflen(iov, iovlen);
660 ssize_t nwritten;
662 r.hdr.length = offsetof(struct ctdb_req_message_old, data) + buflen;
663 r.hdr.ctdb_magic = CTDB_MAGIC;
664 r.hdr.ctdb_version = CTDB_PROTOCOL;
665 r.hdr.generation = 1;
666 r.hdr.operation = CTDB_REQ_MESSAGE;
667 r.hdr.destnode = dst_vnn;
668 r.hdr.srcnode = conn->our_vnn;
669 r.hdr.reqid = 0;
670 r.srvid = dst_srvid;
671 r.datalen = buflen;
673 DEBUG(10, ("ctdbd_messaging_send: Sending ctdb packet\n"));
674 ctdb_packet_dump(&r.hdr);
676 iov2[0].iov_base = &r;
677 iov2[0].iov_len = offsetof(struct ctdb_req_message_old, data);
678 memcpy(&iov2[1], iov, iovlen * sizeof(struct iovec));
680 nwritten = write_data_iov(conn->fd, iov2, iovlen+1);
681 if (nwritten == -1) {
682 DEBUG(3, ("write_data_iov failed: %s\n", strerror(errno)));
683 cluster_fatal("cluster dispatch daemon msg write error\n");
686 return 0;
690 * send/recv a generic ctdb control message
692 static int ctdbd_control(struct ctdbd_connection *conn,
693 uint32_t vnn, uint32_t opcode,
694 uint64_t srvid, uint32_t flags,
695 TDB_DATA data,
696 TALLOC_CTX *mem_ctx, TDB_DATA *outdata,
697 int32_t *cstatus)
699 struct ctdb_req_control_old req;
700 struct ctdb_req_header *hdr;
701 struct ctdb_reply_control_old *reply = NULL;
702 struct iovec iov[2];
703 ssize_t nwritten;
704 int ret;
706 if (ctdbd_conn_has_async_reqs(conn)) {
708 * Can't use sync call while an async call is in flight. Adding
709 * this check as a safety net. We'll be using different
710 * connections for sync and async requests, so this shouldn't
711 * happen, but who knows...
713 DBG_ERR("Async ctdb req on sync connection\n");
714 return EINVAL;
717 ZERO_STRUCT(req);
718 req.hdr.length = offsetof(struct ctdb_req_control_old, data) + data.dsize;
719 req.hdr.ctdb_magic = CTDB_MAGIC;
720 req.hdr.ctdb_version = CTDB_PROTOCOL;
721 req.hdr.operation = CTDB_REQ_CONTROL;
722 req.hdr.reqid = ctdbd_next_reqid(conn);
723 req.hdr.destnode = vnn;
724 req.opcode = opcode;
725 req.srvid = srvid;
726 req.datalen = data.dsize;
727 req.flags = flags;
729 DBG_DEBUG("Sending ctdb packet reqid=%"PRIu32", vnn=%"PRIu32", "
730 "opcode=%"PRIu32", srvid=%"PRIu64"\n", req.hdr.reqid,
731 req.hdr.destnode, req.opcode, req.srvid);
732 ctdb_packet_dump(&req.hdr);
734 iov[0].iov_base = &req;
735 iov[0].iov_len = offsetof(struct ctdb_req_control_old, data);
736 iov[1].iov_base = data.dptr;
737 iov[1].iov_len = data.dsize;
739 nwritten = write_data_iov(conn->fd, iov, ARRAY_SIZE(iov));
740 if (nwritten == -1) {
741 DEBUG(3, ("write_data_iov failed: %s\n", strerror(errno)));
742 cluster_fatal("cluster dispatch daemon msg write error\n");
745 if (flags & CTDB_CTRL_FLAG_NOREPLY) {
746 if (cstatus) {
747 *cstatus = 0;
749 return 0;
752 ret = ctdb_read_req(conn, req.hdr.reqid, NULL, &hdr);
753 if (ret != 0) {
754 DEBUG(10, ("ctdb_read_req failed: %s\n", strerror(ret)));
755 return ret;
758 if (hdr->operation != CTDB_REPLY_CONTROL) {
759 DEBUG(0, ("received invalid reply\n"));
760 TALLOC_FREE(hdr);
761 return EIO;
763 reply = (struct ctdb_reply_control_old *)hdr;
765 if (outdata) {
766 if (!(outdata->dptr = (uint8_t *)talloc_memdup(
767 mem_ctx, reply->data, reply->datalen))) {
768 TALLOC_FREE(reply);
769 return ENOMEM;
771 outdata->dsize = reply->datalen;
773 if (cstatus) {
774 (*cstatus) = reply->status;
777 TALLOC_FREE(reply);
778 return ret;
782 * see if a remote process exists
784 bool ctdbd_process_exists(struct ctdbd_connection *conn, uint32_t vnn,
785 pid_t pid, uint64_t unique_id)
787 uint8_t buf[sizeof(pid)+sizeof(unique_id)];
788 int32_t cstatus = 0;
789 int ret;
791 if (unique_id == SERVERID_UNIQUE_ID_NOT_TO_VERIFY) {
792 ret = ctdbd_control(conn, vnn, CTDB_CONTROL_PROCESS_EXISTS,
793 0, 0,
794 (TDB_DATA) { .dptr = (uint8_t *)&pid,
795 .dsize = sizeof(pid) },
796 NULL, NULL, &cstatus);
797 if (ret != 0) {
798 return false;
800 return (cstatus == 0);
803 memcpy(buf, &pid, sizeof(pid));
804 memcpy(buf+sizeof(pid), &unique_id, sizeof(unique_id));
806 ret = ctdbd_control(conn, vnn, CTDB_CONTROL_CHECK_PID_SRVID, 0, 0,
807 (TDB_DATA) { .dptr = buf, .dsize = sizeof(buf) },
808 NULL, NULL, &cstatus);
809 if (ret != 0) {
810 return false;
812 return (cstatus == 0);
816 * Get a db path
818 char *ctdbd_dbpath(struct ctdbd_connection *conn,
819 TALLOC_CTX *mem_ctx, uint32_t db_id)
821 int ret;
822 TDB_DATA data;
823 TDB_DATA rdata = {0};
824 int32_t cstatus = 0;
826 data.dptr = (uint8_t*)&db_id;
827 data.dsize = sizeof(db_id);
829 ret = ctdbd_control_local(conn, CTDB_CONTROL_GETDBPATH, 0, 0, data,
830 mem_ctx, &rdata, &cstatus);
831 if ((ret != 0) || cstatus != 0) {
832 DEBUG(0, (__location__ " ctdb_control for getdbpath failed: %s\n",
833 strerror(ret)));
834 return NULL;
837 return (char *)rdata.dptr;
841 * attach to a ctdb database
843 int ctdbd_db_attach(struct ctdbd_connection *conn,
844 const char *name, uint32_t *db_id, bool persistent)
846 int ret;
847 TDB_DATA data;
848 int32_t cstatus;
850 data = string_term_tdb_data(name);
852 ret = ctdbd_control_local(conn,
853 persistent
854 ? CTDB_CONTROL_DB_ATTACH_PERSISTENT
855 : CTDB_CONTROL_DB_ATTACH,
856 0, 0, data, NULL, &data, &cstatus);
857 if (ret != 0) {
858 DEBUG(0, (__location__ " ctdb_control for db_attach "
859 "failed: %s\n", strerror(ret)));
860 return ret;
863 if (cstatus != 0 || data.dsize != sizeof(uint32_t)) {
864 DEBUG(0,(__location__ " ctdb_control for db_attach failed\n"));
865 return EIO;
868 *db_id = *(uint32_t *)data.dptr;
869 talloc_free(data.dptr);
871 return 0;
875 * force the migration of a record to this node
877 int ctdbd_migrate(struct ctdbd_connection *conn, uint32_t db_id, TDB_DATA key)
879 struct ctdb_req_call_old req;
880 struct ctdb_req_header *hdr = NULL;
881 struct iovec iov[2];
882 ssize_t nwritten;
883 int ret;
885 if (ctdbd_conn_has_async_reqs(conn)) {
887 * Can't use sync call while an async call is in flight. Adding
888 * this check as a safety net. We'll be using different
889 * connections for sync and async requests, so this shouldn't
890 * happen, but who knows...
892 DBG_ERR("Async ctdb req on sync connection\n");
893 return EINVAL;
896 ZERO_STRUCT(req);
898 req.hdr.length = offsetof(struct ctdb_req_call_old, data) + key.dsize;
899 req.hdr.ctdb_magic = CTDB_MAGIC;
900 req.hdr.ctdb_version = CTDB_PROTOCOL;
901 req.hdr.operation = CTDB_REQ_CALL;
902 req.hdr.reqid = ctdbd_next_reqid(conn);
903 req.flags = CTDB_IMMEDIATE_MIGRATION;
904 req.callid = CTDB_NULL_FUNC;
905 req.db_id = db_id;
906 req.keylen = key.dsize;
908 DEBUG(10, ("ctdbd_migrate: Sending ctdb packet\n"));
909 ctdb_packet_dump(&req.hdr);
911 iov[0].iov_base = &req;
912 iov[0].iov_len = offsetof(struct ctdb_req_call_old, data);
913 iov[1].iov_base = key.dptr;
914 iov[1].iov_len = key.dsize;
916 nwritten = write_data_iov(conn->fd, iov, ARRAY_SIZE(iov));
917 if (nwritten == -1) {
918 DEBUG(3, ("write_data_iov failed: %s\n", strerror(errno)));
919 cluster_fatal("cluster dispatch daemon msg write error\n");
922 ret = ctdb_read_req(conn, req.hdr.reqid, NULL, &hdr);
923 if (ret != 0) {
924 DEBUG(10, ("ctdb_read_req failed: %s\n", strerror(ret)));
925 goto fail;
928 if (hdr->operation != CTDB_REPLY_CALL) {
929 if (hdr->operation == CTDB_REPLY_ERROR) {
930 DBG_ERR("received error from ctdb\n");
931 } else {
932 DBG_ERR("received invalid reply\n");
934 ret = EIO;
935 goto fail;
938 fail:
940 TALLOC_FREE(hdr);
941 return ret;
945 * Fetch a record and parse it
947 int ctdbd_parse(struct ctdbd_connection *conn, uint32_t db_id,
948 TDB_DATA key, bool local_copy,
949 void (*parser)(TDB_DATA key, TDB_DATA data,
950 void *private_data),
951 void *private_data)
953 struct ctdb_req_call_old req;
954 struct ctdb_req_header *hdr = NULL;
955 struct ctdb_reply_call_old *reply;
956 struct iovec iov[2];
957 ssize_t nwritten;
958 uint32_t flags;
959 int ret;
961 if (ctdbd_conn_has_async_reqs(conn)) {
963 * Can't use sync call while an async call is in flight. Adding
964 * this check as a safety net. We'll be using different
965 * connections for sync and async requests, so this shouldn't
966 * happen, but who knows...
968 DBG_ERR("Async ctdb req on sync connection\n");
969 return EINVAL;
972 flags = local_copy ? CTDB_WANT_READONLY : 0;
974 ZERO_STRUCT(req);
976 req.hdr.length = offsetof(struct ctdb_req_call_old, data) + key.dsize;
977 req.hdr.ctdb_magic = CTDB_MAGIC;
978 req.hdr.ctdb_version = CTDB_PROTOCOL;
979 req.hdr.operation = CTDB_REQ_CALL;
980 req.hdr.reqid = ctdbd_next_reqid(conn);
981 req.flags = flags;
982 req.callid = CTDB_FETCH_FUNC;
983 req.db_id = db_id;
984 req.keylen = key.dsize;
986 iov[0].iov_base = &req;
987 iov[0].iov_len = offsetof(struct ctdb_req_call_old, data);
988 iov[1].iov_base = key.dptr;
989 iov[1].iov_len = key.dsize;
991 nwritten = write_data_iov(conn->fd, iov, ARRAY_SIZE(iov));
992 if (nwritten == -1) {
993 DEBUG(3, ("write_data_iov failed: %s\n", strerror(errno)));
994 cluster_fatal("cluster dispatch daemon msg write error\n");
997 ret = ctdb_read_req(conn, req.hdr.reqid, NULL, &hdr);
998 if (ret != 0) {
999 DEBUG(10, ("ctdb_read_req failed: %s\n", strerror(ret)));
1000 goto fail;
1003 if ((hdr == NULL) || (hdr->operation != CTDB_REPLY_CALL)) {
1004 DEBUG(0, ("received invalid reply\n"));
1005 ret = EIO;
1006 goto fail;
1008 reply = (struct ctdb_reply_call_old *)hdr;
1010 if (reply->datalen == 0) {
1012 * Treat an empty record as non-existing
1014 ret = ENOENT;
1015 goto fail;
1018 parser(key, make_tdb_data(&reply->data[0], reply->datalen),
1019 private_data);
1021 ret = 0;
1022 fail:
1023 TALLOC_FREE(hdr);
1024 return ret;
1028 Traverse a ctdb database. "conn" must be an otherwise unused
1029 ctdb_connection where no other messages but the traverse ones are
1030 expected.
1033 int ctdbd_traverse(struct ctdbd_connection *conn, uint32_t db_id,
1034 void (*fn)(TDB_DATA key, TDB_DATA data,
1035 void *private_data),
1036 void *private_data)
1038 int ret;
1039 TDB_DATA key, data;
1040 struct ctdb_traverse_start t;
1041 int32_t cstatus;
1043 if (ctdbd_conn_has_async_reqs(conn)) {
1045 * Can't use sync call while an async call is in flight. Adding
1046 * this check as a safety net. We'll be using different
1047 * connections for sync and async requests, so this shouldn't
1048 * happen, but who knows...
1050 DBG_ERR("Async ctdb req on sync connection\n");
1051 return EINVAL;
1054 t.db_id = db_id;
1055 t.srvid = conn->rand_srvid;
1056 t.reqid = ctdbd_next_reqid(conn);
1058 data.dptr = (uint8_t *)&t;
1059 data.dsize = sizeof(t);
1061 ret = ctdbd_control_local(conn, CTDB_CONTROL_TRAVERSE_START,
1062 conn->rand_srvid,
1063 0, data, NULL, NULL, &cstatus);
1065 if ((ret != 0) || (cstatus != 0)) {
1066 DEBUG(0,("ctdbd_control failed: %s, %d\n", strerror(ret),
1067 cstatus));
1069 if (ret == 0) {
1071 * We need a mapping here
1073 ret = EIO;
1075 return ret;
1078 while (true) {
1079 struct ctdb_req_header *hdr = NULL;
1080 struct ctdb_req_message_old *m;
1081 struct ctdb_rec_data_old *d;
1083 ret = ctdb_read_packet(conn->fd, conn->timeout, conn, &hdr);
1084 if (ret != 0) {
1085 DEBUG(0, ("ctdb_read_packet failed: %s\n",
1086 strerror(ret)));
1087 cluster_fatal("ctdbd died\n");
1090 if (hdr->operation != CTDB_REQ_MESSAGE) {
1091 DEBUG(0, ("Got operation %u, expected a message\n",
1092 (unsigned)hdr->operation));
1093 return EIO;
1096 m = (struct ctdb_req_message_old *)hdr;
1097 d = (struct ctdb_rec_data_old *)&m->data[0];
1098 if (m->datalen < sizeof(uint32_t) || m->datalen != d->length) {
1099 DEBUG(0, ("Got invalid traverse data of length %d\n",
1100 (int)m->datalen));
1101 return EIO;
1104 key.dsize = d->keylen;
1105 key.dptr = &d->data[0];
1106 data.dsize = d->datalen;
1107 data.dptr = &d->data[d->keylen];
1109 if (key.dsize == 0 && data.dsize == 0) {
1110 /* end of traverse */
1111 return 0;
1114 if (data.dsize < sizeof(struct ctdb_ltdb_header)) {
1115 DEBUG(0, ("Got invalid ltdb header length %d\n",
1116 (int)data.dsize));
1117 return EIO;
1119 data.dsize -= sizeof(struct ctdb_ltdb_header);
1120 data.dptr += sizeof(struct ctdb_ltdb_header);
1122 if (fn != NULL) {
1123 fn(key, data, private_data);
1126 return 0;
/*
  This is used to canonicalize a ctdb_sock_addr structure.
*/
static void smbd_ctdb_canonicalize_ip(const struct sockaddr_storage *in,
				      struct sockaddr_storage *out)
{
	memcpy(out, in, sizeof (*out));

#ifdef HAVE_IPV6
	if (in->ss_family == AF_INET6) {
		/* Fold a v4-mapped v6 address (::ffff:a.b.c.d) back to v4 */
		const char prefix[12] = { 0,0,0,0,0,0,0,0,0,0,0xff,0xff };
		const struct sockaddr_in6 *in6 =
			(const struct sockaddr_in6 *)in;
		struct sockaddr_in *out4 = (struct sockaddr_in *)out;

		if (memcmp(&in6->sin6_addr, prefix, 12) == 0) {
			memset(out, 0, sizeof(*out));
#ifdef HAVE_SOCK_SIN_LEN
			out4->sin_len = sizeof(*out);
#endif
			out4->sin_family = AF_INET;
			out4->sin_port = in6->sin6_port;
			memcpy(&out4->sin_addr, &in6->sin6_addr.s6_addr[12], 4);
		}
	}
#endif
}
1157 * Register us as a server for a particular tcp connection
1160 int ctdbd_register_ips(struct ctdbd_connection *conn,
1161 const struct sockaddr_storage *_server,
1162 const struct sockaddr_storage *_client,
1163 int (*cb)(struct tevent_context *ev,
1164 uint32_t src_vnn, uint32_t dst_vnn,
1165 uint64_t dst_srvid,
1166 const uint8_t *msg, size_t msglen,
1167 void *private_data),
1168 void *private_data)
1170 struct ctdb_connection p;
1171 TDB_DATA data = { .dptr = (uint8_t *)&p, .dsize = sizeof(p) };
1172 int ret;
1173 struct sockaddr_storage client;
1174 struct sockaddr_storage server;
1177 * Only one connection so far
1180 smbd_ctdb_canonicalize_ip(_client, &client);
1181 smbd_ctdb_canonicalize_ip(_server, &server);
1183 switch (client.ss_family) {
1184 case AF_INET:
1185 memcpy(&p.dst.ip, &server, sizeof(p.dst.ip));
1186 memcpy(&p.src.ip, &client, sizeof(p.src.ip));
1187 break;
1188 case AF_INET6:
1189 memcpy(&p.dst.ip6, &server, sizeof(p.dst.ip6));
1190 memcpy(&p.src.ip6, &client, sizeof(p.src.ip6));
1191 break;
1192 default:
1193 return EIO;
1197 * We want to be told about IP releases
1200 ret = register_with_ctdbd(conn, CTDB_SRVID_RELEASE_IP,
1201 cb, private_data);
1202 if (ret != 0) {
1203 return ret;
1207 * inform ctdb of our tcp connection, so if IP takeover happens ctdb
1208 * can send an extra ack to trigger a reset for our client, so it
1209 * immediately reconnects
1211 ret = ctdbd_control(conn, CTDB_CURRENT_NODE,
1212 CTDB_CONTROL_TCP_CLIENT, 0,
1213 CTDB_CTRL_FLAG_NOREPLY, data, NULL, NULL,
1214 NULL);
1215 if (ret != 0) {
1216 return ret;
1218 return 0;
1222 call a control on the local node
1224 int ctdbd_control_local(struct ctdbd_connection *conn, uint32_t opcode,
1225 uint64_t srvid, uint32_t flags, TDB_DATA data,
1226 TALLOC_CTX *mem_ctx, TDB_DATA *outdata,
1227 int32_t *cstatus)
1229 return ctdbd_control(conn, CTDB_CURRENT_NODE, opcode, srvid, flags, data,
1230 mem_ctx, outdata, cstatus);
1233 int ctdb_watch_us(struct ctdbd_connection *conn)
1235 struct ctdb_notify_data_old reg_data;
1236 size_t struct_len;
1237 int ret;
1238 int32_t cstatus;
1240 reg_data.srvid = CTDB_SRVID_SAMBA_NOTIFY;
1241 reg_data.len = 1;
1242 reg_data.notify_data[0] = 0;
1244 struct_len = offsetof(struct ctdb_notify_data_old,
1245 notify_data) + reg_data.len;
1247 ret = ctdbd_control_local(
1248 conn, CTDB_CONTROL_REGISTER_NOTIFY, conn->rand_srvid, 0,
1249 make_tdb_data((uint8_t *)&reg_data, struct_len),
1250 NULL, NULL, &cstatus);
1251 if (ret != 0) {
1252 DEBUG(1, ("ctdbd_control_local failed: %s\n",
1253 strerror(ret)));
1255 return ret;
1258 int ctdb_unwatch(struct ctdbd_connection *conn)
1260 uint64_t srvid = CTDB_SRVID_SAMBA_NOTIFY;
1261 int ret;
1262 int32_t cstatus;
1264 ret = ctdbd_control_local(
1265 conn, CTDB_CONTROL_DEREGISTER_NOTIFY, conn->rand_srvid, 0,
1266 make_tdb_data((uint8_t *)&srvid, sizeof(srvid)),
1267 NULL, NULL, &cstatus);
1268 if (ret != 0) {
1269 DEBUG(1, ("ctdbd_control_local failed: %s\n",
1270 strerror(ret)));
1272 return ret;
int ctdbd_probe(const char *sockname, int timeout)
{
	/*
	 * Do a very early check if ctdbd is around to avoid an abort and core
	 * later
	 */
	struct ctdbd_connection *conn = NULL;
	int ret;

	ret = ctdbd_init_connection(talloc_tos(), sockname, timeout,
				    &conn);

	/*
	 * We only care if we can connect.
	 */
	TALLOC_FREE(conn);

	return ret;
}
/*
 * Bookkeeping for one queued async packet send on a ctdbd connection;
 * linked into ctdbd_connection->send_list.
 */
struct ctdb_pkt_send_state {
	struct ctdb_pkt_send_state *prev, *next;
	struct tevent_context *ev;
	struct ctdbd_connection *conn;

	/* ctdb request id */
	uint32_t reqid;

	/* the associated tevent request */
	struct tevent_req *req;

	/* iovec array with data to send */
	struct iovec _iov;
	struct iovec *iov;
	int iovcnt;

	/* Initial packet length */
	size_t packet_len;
};
1315 static void ctdb_pkt_send_cleanup(struct tevent_req *req,
1316 enum tevent_req_state req_state);
1319 * Asynchronously send a ctdb packet given as iovec array
1321 * Note: the passed iov array is not const here. Similar
1322 * functions in samba take a const array and create a copy
1323 * before calling iov_advance() on the array.
1325 * This function will modify the iov array! But
1326 * this is a static function and our only caller
1327 * ctdb_parse_send/recv is preparared for this to
1328 * happen!
1330 static struct tevent_req *ctdb_pkt_send_send(TALLOC_CTX *mem_ctx,
1331 struct tevent_context *ev,
1332 struct ctdbd_connection *conn,
1333 uint32_t reqid,
1334 struct iovec *iov,
1335 int iovcnt,
1336 enum dbwrap_req_state *req_state)
1338 struct tevent_req *req = NULL;
1339 struct ctdb_pkt_send_state *state = NULL;
1340 ssize_t nwritten;
1341 bool ok;
1343 DBG_DEBUG("sending async ctdb reqid [%" PRIu32 "]\n", reqid);
1345 req = tevent_req_create(mem_ctx, &state, struct ctdb_pkt_send_state);
1346 if (req == NULL) {
1347 return NULL;
1350 *state = (struct ctdb_pkt_send_state) {
1351 .ev = ev,
1352 .conn = conn,
1353 .req = req,
1354 .reqid = reqid,
1355 .iov = iov,
1356 .iovcnt = iovcnt,
1357 .packet_len = iov_buflen(iov, iovcnt),
1360 tevent_req_set_cleanup_fn(req, ctdb_pkt_send_cleanup);
1362 *req_state = DBWRAP_REQ_QUEUED;
1364 if (ctdbd_conn_has_async_sends(conn)) {
1366 * Can't attempt direct write with messages already queued and
1367 * possibly in progress
1369 DLIST_ADD_END(conn->send_list, state);
1370 return req;
1374 * Attempt a direct write. If this returns short, schedule the
1375 * remaining data as an async write, otherwise we're already done.
1378 nwritten = writev(conn->fd, state->iov, state->iovcnt);
1379 if (nwritten == state->packet_len) {
1380 DBG_DEBUG("Finished sending reqid [%" PRIu32 "]\n", reqid);
1382 *req_state = DBWRAP_REQ_DISPATCHED;
1383 tevent_req_done(req);
1384 return tevent_req_post(req, ev);
1387 if (nwritten == -1) {
1388 if (errno != EINTR && errno != EAGAIN && errno != EWOULDBLOCK) {
1389 cluster_fatal("cluster write error\n");
1391 nwritten = 0;
1394 DBG_DEBUG("Posting async write of reqid [%" PRIu32"]"
1395 "after short write [%zd]\n", reqid, nwritten);
1397 ok = iov_advance(&state->iov, &state->iovcnt, nwritten);
1398 if (!ok) {
1399 *req_state = DBWRAP_REQ_ERROR;
1400 tevent_req_error(req, EIO);
1401 return tevent_req_post(req, ev);
1405 * As this is the first async write req we post, we must enable
1406 * fd-writable events.
1408 TEVENT_FD_WRITEABLE(conn->fde);
1409 DLIST_ADD_END(conn->send_list, state);
1410 return req;
1413 static int ctdb_pkt_send_state_destructor(struct ctdb_pkt_send_state *state)
1415 struct ctdbd_connection *conn = state->conn;
1417 if (conn == NULL) {
1418 return 0;
1421 if (state->req == NULL) {
1422 DBG_DEBUG("Removing cancelled reqid [%" PRIu32"]\n",
1423 state->reqid);
1424 state->conn = NULL;
1425 DLIST_REMOVE(conn->send_list, state);
1426 return 0;
1429 DBG_DEBUG("Reparenting cancelled reqid [%" PRIu32"]\n",
1430 state->reqid);
1432 talloc_reparent(state->req, conn, state);
1433 state->req = NULL;
1434 return -1;
1437 static void ctdb_pkt_send_cleanup(struct tevent_req *req,
1438 enum tevent_req_state req_state)
1440 struct ctdb_pkt_send_state *state = tevent_req_data(
1441 req, struct ctdb_pkt_send_state);
1442 struct ctdbd_connection *conn = state->conn;
1443 size_t missing_len = 0;
1445 if (conn == NULL) {
1446 return;
1449 missing_len = iov_buflen(state->iov, state->iovcnt);
1450 if (state->packet_len == missing_len) {
1452 * We haven't yet started sending this one, so we can just
1453 * remove it from the pending list
1455 missing_len = 0;
1457 if (missing_len != 0) {
1458 uint8_t *buf = NULL;
1460 if (req_state != TEVENT_REQ_RECEIVED) {
1462 * Wait til the req_state is TEVENT_REQ_RECEIVED, as
1463 * that will be the final state when the request state
1464 * is talloc_free'd from tallloc_req_received(). Which
1465 * ensures we only run the following code *ONCE*!
1467 return;
1470 DBG_DEBUG("Cancelling in-flight reqid [%" PRIu32"]\n",
1471 state->reqid);
1473 * A request in progress of being sent. Reparent the iov buffer
1474 * so we can continue sending the request. See also the comment
1475 * in ctdbd_parse_send() when copying the key buffer.
1478 buf = iov_concat(state, state->iov, state->iovcnt);
1479 if (buf == NULL) {
1480 cluster_fatal("iov_concat error\n");
1481 return;
1484 state->iovcnt = 1;
1485 state->_iov.iov_base = buf;
1486 state->_iov.iov_len = missing_len;
1487 state->iov = &state->_iov;
1489 talloc_set_destructor(state, ctdb_pkt_send_state_destructor);
1490 return;
1493 DBG_DEBUG("Removing pending reqid [%" PRIu32"]\n", state->reqid);
1495 state->conn = NULL;
1496 DLIST_REMOVE(conn->send_list, state);
1498 if (!ctdbd_conn_has_async_sends(conn)) {
1499 DBG_DEBUG("No more sends, disabling fd-writable events\n");
1500 TEVENT_FD_NOT_WRITEABLE(conn->fde);
1504 static int ctdb_pkt_send_handler(struct ctdbd_connection *conn)
1506 struct ctdb_pkt_send_state *state = NULL;
1507 ssize_t nwritten;
1508 ssize_t iovlen;
1509 bool ok;
1511 DBG_DEBUG("send handler\n");
1513 if (!ctdbd_conn_has_async_sends(conn)) {
1514 DBG_WARNING("Writable fd-event without pending send\n");
1515 TEVENT_FD_NOT_WRITEABLE(conn->fde);
1516 return 0;
1519 state = conn->send_list;
1520 iovlen = iov_buflen(state->iov, state->iovcnt);
1522 nwritten = writev(conn->fd, state->iov, state->iovcnt);
1523 if (nwritten == -1) {
1524 if (errno != EINTR && errno != EAGAIN && errno != EWOULDBLOCK) {
1525 DBG_ERR("writev failed: %s\n", strerror(errno));
1526 cluster_fatal("cluster write error\n");
1528 DBG_DEBUG("recoverable writev error, retry\n");
1529 return 0;
1532 if (nwritten < iovlen) {
1533 DBG_DEBUG("short write\n");
1535 ok = iov_advance(&state->iov, &state->iovcnt, nwritten);
1536 if (!ok) {
1537 DBG_ERR("iov_advance failed\n");
1538 if (state->req == NULL) {
1539 TALLOC_FREE(state);
1540 return 0;
1542 tevent_req_error(state->req, EIO);
1543 return 0;
1545 return 0;
1548 if (state->req == NULL) {
1549 DBG_DEBUG("Finished sending cancelled reqid [%" PRIu32 "]\n",
1550 state->reqid);
1551 TALLOC_FREE(state);
1552 return 0;
1555 DBG_DEBUG("Finished send request id [%" PRIu32 "]\n", state->reqid);
1557 tevent_req_done(state->req);
1558 return 0;
/*
 * Collect the result of ctdb_pkt_send_send(). Returns 0 on success or
 * the unix error carried by the request.
 */
static int ctdb_pkt_send_recv(struct tevent_req *req)
{
	int err;

	if (tevent_req_is_unix_error(req, &err)) {
		tevent_req_received(req);
		return err;
	}

	tevent_req_received(req);
	return 0;
}
/*
 * Per-request state for an asynchronous packet receive from ctdbd.
 * Instances are linked into conn->recv_list while waiting for a reply.
 */
struct ctdb_pkt_recv_state {
	/* list pointers for conn->recv_list */
	struct ctdb_pkt_recv_state *prev, *next;
	struct tevent_context *ev;
	struct ctdbd_connection *conn;

	/* ctdb request id */
	uint32_t reqid;

	/* the associated tevent_req */
	struct tevent_req *req;

	/* pointer to allocated ctdb packet buffer */
	struct ctdb_req_header *hdr;
};
1589 static void ctdb_pkt_recv_cleanup(struct tevent_req *req,
1590 enum tevent_req_state req_state);
1592 static struct tevent_req *ctdb_pkt_recv_send(TALLOC_CTX *mem_ctx,
1593 struct tevent_context *ev,
1594 struct ctdbd_connection *conn,
1595 uint32_t reqid)
1597 struct tevent_req *req = NULL;
1598 struct ctdb_pkt_recv_state *state = NULL;
1600 req = tevent_req_create(mem_ctx, &state, struct ctdb_pkt_recv_state);
1601 if (req == NULL) {
1602 return NULL;
1605 *state = (struct ctdb_pkt_recv_state) {
1606 .ev = ev,
1607 .conn = conn,
1608 .reqid = reqid,
1609 .req = req,
1612 tevent_req_set_cleanup_fn(req, ctdb_pkt_recv_cleanup);
1615 * fd-readable event is always set for the fde, no need to deal with
1616 * that here.
1619 DLIST_ADD_END(conn->recv_list, state);
1620 DBG_DEBUG("Posted receive reqid [%" PRIu32 "]\n", state->reqid);
1622 return req;
1625 static void ctdb_pkt_recv_cleanup(struct tevent_req *req,
1626 enum tevent_req_state req_state)
1628 struct ctdb_pkt_recv_state *state = tevent_req_data(
1629 req, struct ctdb_pkt_recv_state);
1630 struct ctdbd_connection *conn = state->conn;
1632 if (conn == NULL) {
1633 return;
1635 state->conn = NULL;
1636 DLIST_REMOVE(conn->recv_list, state);
1639 static int ctdb_pkt_recv_handler(struct ctdbd_connection *conn)
1641 struct ctdb_pkt_recv_state *state = NULL;
1642 ssize_t nread;
1643 ssize_t iovlen;
1644 bool ok;
1646 DBG_DEBUG("receive handler\n");
1648 if (conn->read_state.iovs == NULL) {
1649 conn->read_state.iov.iov_base = &conn->read_state.msglen;
1650 conn->read_state.iov.iov_len = sizeof(conn->read_state.msglen);
1651 conn->read_state.iovs = &conn->read_state.iov;
1652 conn->read_state.iovcnt = 1;
1655 iovlen = iov_buflen(conn->read_state.iovs, conn->read_state.iovcnt);
1657 DBG_DEBUG("iovlen [%zd]\n", iovlen);
1659 nread = readv(conn->fd, conn->read_state.iovs, conn->read_state.iovcnt);
1660 if (nread == 0) {
1661 cluster_fatal("cluster read error, peer closed connection\n");
1663 if (nread == -1) {
1664 if (errno != EINTR && errno != EAGAIN && errno != EWOULDBLOCK) {
1665 cluster_fatal("cluster read error\n");
1667 DBG_DEBUG("recoverable error from readv, retry\n");
1668 return 0;
1671 if (nread < iovlen) {
1672 DBG_DEBUG("iovlen [%zd] nread [%zd]\n", iovlen, nread);
1673 ok = iov_advance(&conn->read_state.iovs,
1674 &conn->read_state.iovcnt,
1675 nread);
1676 if (!ok) {
1677 return EIO;
1679 return 0;
1682 conn->read_state.iovs = NULL;
1683 conn->read_state.iovcnt = 0;
1685 if (conn->read_state.hdr == NULL) {
1687 * Going this way after reading the 4 initial byte message
1688 * length
1690 uint32_t msglen = conn->read_state.msglen;
1691 uint8_t *readbuf = NULL;
1692 size_t readlen;
1694 DBG_DEBUG("msglen: %" PRIu32 "\n", msglen);
1696 if (msglen < sizeof(struct ctdb_req_header)) {
1697 DBG_ERR("short message %" PRIu32 "\n", msglen);
1698 return EIO;
1701 conn->read_state.hdr = talloc_size(conn, msglen);
1702 if (conn->read_state.hdr == NULL) {
1703 return ENOMEM;
1705 conn->read_state.hdr->length = msglen;
1706 talloc_set_name_const(conn->read_state.hdr,
1707 "struct ctdb_req_header");
1709 readbuf = (uint8_t *)conn->read_state.hdr + sizeof(msglen);
1710 readlen = msglen - sizeof(msglen);
1712 conn->read_state.iov.iov_base = readbuf;
1713 conn->read_state.iov.iov_len = readlen;
1714 conn->read_state.iovs = &conn->read_state.iov;
1715 conn->read_state.iovcnt = 1;
1717 DBG_DEBUG("Scheduled packet read size %zd\n", readlen);
1718 return 0;
1722 * Searching a list here is expected to be cheap, as messages are
1723 * exepcted to be coming in more or less ordered and we should find the
1724 * waiting request near the beginning of the list.
1726 for (state = conn->recv_list; state != NULL; state = state->next) {
1727 if (state->reqid == conn->read_state.hdr->reqid) {
1728 break;
1732 if (state == NULL) {
1733 DBG_ERR("Discarding async ctdb reqid %u\n",
1734 conn->read_state.hdr->reqid);
1735 TALLOC_FREE(conn->read_state.hdr);
1736 ZERO_STRUCT(conn->read_state);
1737 return EINVAL;
1740 DBG_DEBUG("Got reply for reqid [%" PRIu32 "]\n", state->reqid);
1742 state->hdr = talloc_move(state, &conn->read_state.hdr);
1743 ZERO_STRUCT(conn->read_state);
1744 tevent_req_done(state->req);
1745 return 0;
1748 static int ctdb_pkt_recv_recv(struct tevent_req *req,
1749 TALLOC_CTX *mem_ctx,
1750 struct ctdb_req_header **_hdr)
1752 struct ctdb_pkt_recv_state *state = tevent_req_data(
1753 req, struct ctdb_pkt_recv_state);
1754 int error;
1756 if (tevent_req_is_unix_error(req, &error)) {
1757 DBG_ERR("ctdb_read_req failed %s\n", strerror(error));
1758 tevent_req_received(req);
1759 return error;
1762 *_hdr = talloc_move(mem_ctx, &state->hdr);
1764 tevent_req_received(req);
1765 return 0;
1768 static int ctdbd_connection_destructor(struct ctdbd_connection *c)
1770 TALLOC_FREE(c->fde);
1771 if (c->fd != -1) {
1772 close(c->fd);
1773 c->fd = -1;
1776 TALLOC_FREE(c->read_state.hdr);
1777 ZERO_STRUCT(c->read_state);
1779 while (c->send_list != NULL) {
1780 struct ctdb_pkt_send_state *send_state = c->send_list;
1781 DLIST_REMOVE(c->send_list, send_state);
1782 send_state->conn = NULL;
1783 tevent_req_defer_callback(send_state->req, send_state->ev);
1784 tevent_req_error(send_state->req, EIO);
1787 while (c->recv_list != NULL) {
1788 struct ctdb_pkt_recv_state *recv_state = c->recv_list;
1789 DLIST_REMOVE(c->recv_list, recv_state);
1790 recv_state->conn = NULL;
1791 tevent_req_defer_callback(recv_state->req, recv_state->ev);
1792 tevent_req_error(recv_state->req, EIO);
1795 return 0;
1798 struct ctdbd_parse_state {
1799 struct tevent_context *ev;
1800 struct ctdbd_connection *conn;
1801 uint32_t reqid;
1802 TDB_DATA key;
1803 uint8_t _keybuf[64];
1804 struct ctdb_req_call_old ctdb_req;
1805 struct iovec iov[2];
1806 void (*parser)(TDB_DATA key,
1807 TDB_DATA data,
1808 void *private_data);
1809 void *private_data;
1810 enum dbwrap_req_state *req_state;
1813 static void ctdbd_parse_pkt_send_done(struct tevent_req *subreq);
1814 static void ctdbd_parse_done(struct tevent_req *subreq);
1816 struct tevent_req *ctdbd_parse_send(TALLOC_CTX *mem_ctx,
1817 struct tevent_context *ev,
1818 struct ctdbd_connection *conn,
1819 uint32_t db_id,
1820 TDB_DATA key,
1821 bool local_copy,
1822 void (*parser)(TDB_DATA key,
1823 TDB_DATA data,
1824 void *private_data),
1825 void *private_data,
1826 enum dbwrap_req_state *req_state)
1828 struct tevent_req *req = NULL;
1829 struct ctdbd_parse_state *state = NULL;
1830 uint32_t flags;
1831 uint32_t packet_length;
1832 struct tevent_req *subreq = NULL;
1834 req = tevent_req_create(mem_ctx, &state, struct ctdbd_parse_state);
1835 if (req == NULL) {
1836 *req_state = DBWRAP_REQ_ERROR;
1837 return NULL;
1840 *state = (struct ctdbd_parse_state) {
1841 .ev = ev,
1842 .conn = conn,
1843 .reqid = ctdbd_next_reqid(conn),
1844 .parser = parser,
1845 .private_data = private_data,
1846 .req_state = req_state,
1849 flags = local_copy ? CTDB_WANT_READONLY : 0;
1850 packet_length = offsetof(struct ctdb_req_call_old, data) + key.dsize;
1853 * Copy the key into our state, as ctdb_pkt_send_cleanup() requires that
1854 * all passed iov elements have a lifetime longer that the tevent_req
1855 * returned by ctdb_pkt_send_send(). This is required continue sending a
1856 * the low level request into the ctdb socket, if a higher level
1857 * ('this') request is canceled (or talloc free'd) by the application
1858 * layer, without sending invalid packets to ctdb.
1860 if (key.dsize > sizeof(state->_keybuf)) {
1861 state->key.dptr = talloc_memdup(state, key.dptr, key.dsize);
1862 if (tevent_req_nomem(state->key.dptr, req)) {
1863 return tevent_req_post(req, ev);
1865 } else {
1866 memcpy(state->_keybuf, key.dptr, key.dsize);
1867 state->key.dptr = state->_keybuf;
1869 state->key.dsize = key.dsize;
1871 state->ctdb_req.hdr.length = packet_length;
1872 state->ctdb_req.hdr.ctdb_magic = CTDB_MAGIC;
1873 state->ctdb_req.hdr.ctdb_version = CTDB_PROTOCOL;
1874 state->ctdb_req.hdr.operation = CTDB_REQ_CALL;
1875 state->ctdb_req.hdr.reqid = state->reqid;
1876 state->ctdb_req.flags = flags;
1877 state->ctdb_req.callid = CTDB_FETCH_FUNC;
1878 state->ctdb_req.db_id = db_id;
1879 state->ctdb_req.keylen = state->key.dsize;
1881 state->iov[0].iov_base = &state->ctdb_req;
1882 state->iov[0].iov_len = offsetof(struct ctdb_req_call_old, data);
1883 state->iov[1].iov_base = state->key.dptr;
1884 state->iov[1].iov_len = state->key.dsize;
1887 * Note that ctdb_pkt_send_send()
1888 * will modify state->iov using
1889 * iov_advance() without making a copy.
1891 subreq = ctdb_pkt_send_send(state,
1893 conn,
1894 state->reqid,
1895 state->iov,
1896 ARRAY_SIZE(state->iov),
1897 req_state);
1898 if (tevent_req_nomem(subreq, req)) {
1899 *req_state = DBWRAP_REQ_ERROR;
1900 return tevent_req_post(req, ev);
1902 tevent_req_set_callback(subreq, ctdbd_parse_pkt_send_done, req);
1904 return req;
1907 static void ctdbd_parse_pkt_send_done(struct tevent_req *subreq)
1909 struct tevent_req *req = tevent_req_callback_data(
1910 subreq, struct tevent_req);
1911 struct ctdbd_parse_state *state = tevent_req_data(
1912 req, struct ctdbd_parse_state);
1913 int ret;
1915 ret = ctdb_pkt_send_recv(subreq);
1916 TALLOC_FREE(subreq);
1917 if (tevent_req_error(req, ret)) {
1918 DBG_DEBUG("ctdb_pkt_send_recv failed %s\n", strerror(ret));
1919 return;
1922 subreq = ctdb_pkt_recv_send(state,
1923 state->ev,
1924 state->conn,
1925 state->reqid);
1926 if (tevent_req_nomem(subreq, req)) {
1927 return;
1930 *state->req_state = DBWRAP_REQ_DISPATCHED;
1931 tevent_req_set_callback(subreq, ctdbd_parse_done, req);
1932 return;
1935 static void ctdbd_parse_done(struct tevent_req *subreq)
1937 struct tevent_req *req = tevent_req_callback_data(
1938 subreq, struct tevent_req);
1939 struct ctdbd_parse_state *state = tevent_req_data(
1940 req, struct ctdbd_parse_state);
1941 struct ctdb_req_header *hdr = NULL;
1942 struct ctdb_reply_call_old *reply = NULL;
1943 int ret;
1945 DBG_DEBUG("async parse request finished\n");
1947 ret = ctdb_pkt_recv_recv(subreq, state, &hdr);
1948 TALLOC_FREE(subreq);
1949 if (tevent_req_error(req, ret)) {
1950 DBG_ERR("ctdb_pkt_recv_recv returned %s\n", strerror(ret));
1951 return;
1954 if (hdr->operation != CTDB_REPLY_CALL) {
1955 DBG_ERR("received invalid reply\n");
1956 ctdb_packet_dump(hdr);
1957 tevent_req_error(req, EIO);
1958 return;
1961 reply = (struct ctdb_reply_call_old *)hdr;
1963 if (reply->datalen == 0) {
1965 * Treat an empty record as non-existing
1967 tevent_req_error(req, ENOENT);
1968 return;
1971 state->parser(state->key,
1972 make_tdb_data(&reply->data[0], reply->datalen),
1973 state->private_data);
1975 tevent_req_done(req);
1976 return;
/*
 * Collect the result of ctdbd_parse_send(). Returns 0 on success or the
 * unix error carried by the request (e.g. ENOENT for a missing record).
 */
int ctdbd_parse_recv(struct tevent_req *req)
{
	int error;

	if (tevent_req_is_unix_error(req, &error)) {
		DBG_DEBUG("async parse returned %s\n", strerror(error));
		tevent_req_received(req);
		return error;
	}

	tevent_req_received(req);
	return 0;
}