/* ctdb/common/ctdb_io.c (from the Samba source tree) */
/*
   ctdb database library
   Utility functions to read/write blobs of data from a file descriptor
   and handle the case where we might need multiple read/writes to get all the
   data.

   Copyright (C) Andrew Tridgell  2006

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/network.h"
#include "system/filesys.h"

#include <tdb.h>
#include <talloc.h>
#include <tevent.h>

#include "lib/util/dlinklist.h"
#include "lib/util/debug.h"
#include "lib/util/sys_rw.h"

#include "ctdb_private.h"
#include "ctdb_client.h"

#include "common/logging.h"
#include "common/common.h"
41 /* structures for packet queueing - see common/ctdb_io.c */
42 struct ctdb_buffer {
43 uint8_t *data;
44 uint32_t length;
45 uint32_t size;
46 uint32_t offset;
49 struct ctdb_queue_pkt {
50 struct ctdb_queue_pkt *next, *prev;
51 uint8_t *data;
52 uint32_t length;
53 uint32_t full_length;
54 uint8_t buf[];
57 struct ctdb_queue {
58 struct ctdb_context *ctdb;
59 struct tevent_immediate *im;
60 struct ctdb_buffer buffer; /* input buffer */
61 struct ctdb_queue_pkt *out_queue, *out_queue_tail;
62 uint32_t out_queue_length;
63 struct tevent_fd *fde;
64 int fd;
65 size_t alignment;
66 void *private_data;
67 ctdb_queue_cb_fn_t callback;
68 TALLOC_CTX *data_pool;
69 const char *name;
70 uint32_t buffer_size;
75 uint32_t ctdb_queue_length(struct ctdb_queue *queue)
77 return queue->out_queue_length;
80 static void queue_process(struct ctdb_queue *queue);
82 static void queue_process_event(struct tevent_context *ev, struct tevent_immediate *im,
83 void *private_data)
85 struct ctdb_queue *queue = talloc_get_type(private_data, struct ctdb_queue);
87 queue_process(queue);
91 * This function is used to process data in queue buffer.
93 * Queue callback function can end up freeing the queue, there should not be a
94 * loop processing packets from queue buffer. Instead set up a timed event for
95 * immediate run to process remaining packets from buffer.
97 static void queue_process(struct ctdb_queue *queue)
99 uint32_t pkt_size;
100 uint8_t *data = NULL;
102 if (queue->buffer.length < sizeof(pkt_size)) {
103 return;
106 /* Did we at least read the size into the buffer */
107 pkt_size = *(uint32_t *)(queue->buffer.data + queue->buffer.offset);
108 if (pkt_size == 0) {
109 DEBUG(DEBUG_CRIT, ("Invalid packet of length 0\n"));
110 goto failed;
113 /* the buffer doesn't contain the full packet, return to get the rest */
114 if (queue->buffer.length < pkt_size) {
115 return;
118 /* Extract complete packet */
119 data = talloc_memdup(queue->data_pool,
120 queue->buffer.data + queue->buffer.offset,
121 pkt_size);
123 if (data == NULL) {
124 D_ERR("read error alloc failed for %u\n", pkt_size);
125 return;
128 queue->buffer.offset += pkt_size;
129 queue->buffer.length -= pkt_size;
131 if (queue->buffer.offset < pkt_size ||
132 queue->buffer.offset > queue->buffer.size) {
133 D_ERR("buffer offset overflow\n");
134 TALLOC_FREE(queue->buffer.data);
135 memset(&queue->buffer, 0, sizeof(queue->buffer));
136 goto failed;
139 if (queue->buffer.length > 0) {
140 /* There is more data to be processed, schedule an event */
141 tevent_schedule_immediate(queue->im, queue->ctdb->ev,
142 queue_process_event, queue);
143 } else {
144 if (queue->buffer.size > queue->buffer_size) {
145 TALLOC_FREE(queue->buffer.data);
146 queue->buffer.size = 0;
148 queue->buffer.offset = 0;
151 /* It is the responsibility of the callback to free 'data' */
152 queue->callback(data, pkt_size, queue->private_data);
153 return;
155 failed:
156 queue->callback(NULL, 0, queue->private_data);
160 called when an incoming connection is readable
161 This function MUST be safe for reentry via the queue callback!
163 static void queue_io_read(struct ctdb_queue *queue)
165 int num_ready = 0;
166 uint32_t pkt_size = 0;
167 uint32_t start_offset;
168 ssize_t nread;
169 uint8_t *data;
171 /* check how much data is available on the socket for immediately
172 guaranteed nonblocking access.
173 as long as we are careful never to try to read more than this
174 we know all reads will be successful and will neither block
175 nor fail with a "data not available right now" error
177 if (ioctl(queue->fd, FIONREAD, &num_ready) != 0) {
178 return;
180 if (num_ready == 0) {
181 /* the descriptor has been closed */
182 goto failed;
185 if (queue->buffer.data == NULL) {
186 /* starting fresh, allocate buf to read data */
187 queue->buffer.data = talloc_size(queue, queue->buffer_size);
188 if (queue->buffer.data == NULL) {
189 DEBUG(DEBUG_ERR, ("read error alloc failed for %u\n", num_ready));
190 goto failed;
192 queue->buffer.size = queue->buffer_size;
193 goto data_read;
196 if (sizeof(pkt_size) > queue->buffer.length) {
197 /* data read is not sufficient to gather message size */
198 goto buffer_shift;
201 pkt_size = *(uint32_t *)(queue->buffer.data + queue->buffer.offset);
202 if (pkt_size > queue->buffer.size) {
203 data = talloc_realloc_size(queue,
204 queue->buffer.data,
205 pkt_size);
206 if (data == NULL) {
207 DBG_ERR("read error realloc failed for %u\n", pkt_size);
208 goto failed;
210 queue->buffer.data = data;
211 queue->buffer.size = pkt_size;
212 /* fall through here as we might need to move the data as well */
215 buffer_shift:
216 if (sizeof(pkt_size) > queue->buffer.size - queue->buffer.offset ||
217 pkt_size > queue->buffer.size - queue->buffer.offset) {
218 /* Either the offset has progressed too far to host at least
219 * the size information or the remaining space in the buffer
220 * is not sufficient for the full message.
221 * Therefore, move the data and try again.
223 memmove(queue->buffer.data,
224 queue->buffer.data + queue->buffer.offset,
225 queue->buffer.length);
226 queue->buffer.offset = 0;
229 data_read:
230 start_offset = queue->buffer.length + queue->buffer.offset;
231 if (start_offset < queue->buffer.length) {
232 DBG_ERR("Buffer overflow\n");
233 goto failed;
235 if (start_offset > queue->buffer.size) {
236 DBG_ERR("Buffer overflow\n");
237 goto failed;
240 num_ready = MIN(num_ready, queue->buffer.size - start_offset);
242 if (num_ready > 0) {
243 nread = sys_read(queue->fd,
244 queue->buffer.data +
245 queue->buffer.offset +
246 queue->buffer.length,
247 num_ready);
248 if (nread <= 0) {
249 DEBUG(DEBUG_ERR, ("read error nread=%d\n", (int)nread));
250 goto failed;
252 queue->buffer.length += nread;
255 queue_process(queue);
256 return;
258 failed:
259 queue->callback(NULL, 0, queue->private_data);
263 /* used when an event triggers a dead queue */
264 static void queue_dead(struct tevent_context *ev, struct tevent_immediate *im,
265 void *private_data)
267 struct ctdb_queue *queue = talloc_get_type(private_data, struct ctdb_queue);
268 queue->callback(NULL, 0, queue->private_data);
273 called when an incoming connection is writeable
275 static void queue_io_write(struct ctdb_queue *queue)
277 while (queue->out_queue) {
278 struct ctdb_queue_pkt *pkt = queue->out_queue;
279 ssize_t n;
280 if (queue->ctdb->flags & CTDB_FLAG_TORTURE) {
281 n = write(queue->fd, pkt->data, 1);
282 } else {
283 n = write(queue->fd, pkt->data, pkt->length);
286 if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
287 if (pkt->length != pkt->full_length) {
288 /* partial packet sent - we have to drop it */
289 DLIST_REMOVE(queue->out_queue, pkt);
290 queue->out_queue_length--;
291 talloc_free(pkt);
293 TALLOC_FREE(queue->fde);
294 queue->fd = -1;
295 tevent_schedule_immediate(queue->im, queue->ctdb->ev,
296 queue_dead, queue);
297 return;
299 if (n <= 0) return;
301 if (n != pkt->length) {
302 pkt->length -= n;
303 pkt->data += n;
304 return;
307 DLIST_REMOVE(queue->out_queue, pkt);
308 queue->out_queue_length--;
309 talloc_free(pkt);
312 TEVENT_FD_NOT_WRITEABLE(queue->fde);
316 called when an incoming connection is readable or writeable
318 static void queue_io_handler(struct tevent_context *ev, struct tevent_fd *fde,
319 uint16_t flags, void *private_data)
321 struct ctdb_queue *queue = talloc_get_type(private_data, struct ctdb_queue);
323 if (flags & TEVENT_FD_READ) {
324 queue_io_read(queue);
325 } else {
326 queue_io_write(queue);
332 queue a packet for sending
334 int ctdb_queue_send(struct ctdb_queue *queue, uint8_t *data, uint32_t length)
336 struct ctdb_req_header *hdr = (struct ctdb_req_header *)data;
337 struct ctdb_queue_pkt *pkt;
338 uint32_t length2, full_length;
340 /* If the queue does not have valid fd, no point queueing a packet */
341 if (queue->fd == -1) {
342 return 0;
345 if (queue->alignment) {
346 /* enforce the length and alignment rules from the tcp packet allocator */
347 length2 = (length+(queue->alignment-1)) & ~(queue->alignment-1);
348 *(uint32_t *)data = length2;
349 } else {
350 length2 = length;
353 if (length2 != length) {
354 memset(data+length, 0, length2-length);
357 full_length = length2;
359 /* if the queue is empty then try an immediate write, avoiding
360 queue overhead. This relies on non-blocking sockets */
361 if (queue->out_queue == NULL && queue->fd != -1 &&
362 !(queue->ctdb->flags & CTDB_FLAG_TORTURE)) {
363 ssize_t n = write(queue->fd, data, length2);
364 if (n == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
365 TALLOC_FREE(queue->fde);
366 queue->fd = -1;
367 tevent_schedule_immediate(queue->im, queue->ctdb->ev,
368 queue_dead, queue);
369 /* yes, we report success, as the dead node is
370 handled via a separate event */
371 return 0;
373 if (n > 0) {
374 data += n;
375 length2 -= n;
377 if (length2 == 0) return 0;
380 pkt = talloc_size(
381 queue, offsetof(struct ctdb_queue_pkt, buf) + length2);
382 CTDB_NO_MEMORY(queue->ctdb, pkt);
383 talloc_set_name_const(pkt, "struct ctdb_queue_pkt");
385 pkt->data = pkt->buf;
386 memcpy(pkt->data, data, length2);
388 pkt->length = length2;
389 pkt->full_length = full_length;
391 if (queue->out_queue == NULL && queue->fd != -1) {
392 TEVENT_FD_WRITEABLE(queue->fde);
395 DLIST_ADD_END(queue->out_queue, pkt);
397 queue->out_queue_length++;
399 if (queue->ctdb->tunable.verbose_memory_names != 0) {
400 switch (hdr->operation) {
401 case CTDB_REQ_CONTROL: {
402 struct ctdb_req_control_old *c = (struct ctdb_req_control_old *)hdr;
403 talloc_set_name(pkt, "ctdb_queue_pkt: %s control opcode=%u srvid=%llu datalen=%u",
404 queue->name, (unsigned)c->opcode, (unsigned long long)c->srvid, (unsigned)c->datalen);
405 break;
407 case CTDB_REQ_MESSAGE: {
408 struct ctdb_req_message_old *m = (struct ctdb_req_message_old *)hdr;
409 talloc_set_name(pkt, "ctdb_queue_pkt: %s message srvid=%llu datalen=%u",
410 queue->name, (unsigned long long)m->srvid, (unsigned)m->datalen);
411 break;
413 default:
414 talloc_set_name(pkt, "ctdb_queue_pkt: %s operation=%u length=%u src=%u dest=%u",
415 queue->name, (unsigned)hdr->operation, (unsigned)hdr->length,
416 (unsigned)hdr->srcnode, (unsigned)hdr->destnode);
417 break;
421 return 0;
426 setup the fd used by the queue
428 int ctdb_queue_set_fd(struct ctdb_queue *queue, int fd)
430 queue->fd = fd;
431 TALLOC_FREE(queue->fde);
433 if (fd != -1) {
434 queue->fde = tevent_add_fd(queue->ctdb->ev, queue, fd,
435 TEVENT_FD_READ,
436 queue_io_handler, queue);
437 if (queue->fde == NULL) {
438 return -1;
440 tevent_fd_set_auto_close(queue->fde);
442 if (queue->out_queue) {
443 TEVENT_FD_WRITEABLE(queue->fde);
447 return 0;
451 setup a packet queue on a socket
453 struct ctdb_queue *ctdb_queue_setup(struct ctdb_context *ctdb,
454 TALLOC_CTX *mem_ctx, int fd, int alignment,
455 ctdb_queue_cb_fn_t callback,
456 void *private_data, const char *fmt, ...)
458 struct ctdb_queue *queue;
459 va_list ap;
461 queue = talloc_zero(mem_ctx, struct ctdb_queue);
462 CTDB_NO_MEMORY_NULL(ctdb, queue);
463 va_start(ap, fmt);
464 queue->name = talloc_vasprintf(mem_ctx, fmt, ap);
465 va_end(ap);
466 CTDB_NO_MEMORY_NULL(ctdb, queue->name);
468 queue->im= tevent_create_immediate(queue);
469 CTDB_NO_MEMORY_NULL(ctdb, queue->im);
471 queue->ctdb = ctdb;
472 queue->fd = fd;
473 queue->alignment = alignment;
474 queue->private_data = private_data;
475 queue->callback = callback;
476 if (fd != -1) {
477 if (ctdb_queue_set_fd(queue, fd) != 0) {
478 talloc_free(queue);
479 return NULL;
483 queue->buffer_size = ctdb->tunable.queue_buffer_size;
484 /* In client code, ctdb->tunable is not initialized.
485 * This does not affect recovery daemon.
487 if (queue->buffer_size == 0) {
488 queue->buffer_size = 1024;
491 queue->data_pool = talloc_pool(queue, queue->buffer_size);
492 if (queue->data_pool == NULL) {
493 TALLOC_FREE(queue);
494 return NULL;
497 return queue;