ctdb-vacuum: Fix the incorrect counting of remote errors
ctdb/server/ctdb_vacuum.c
/*
   ctdb vacuuming events

   Copyright (C) Ronnie Sahlberg 2009
   Copyright (C) Michael Adam 2010-2013
   Copyright (C) Stefan Metzmacher 2010-2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "replace.h"
#include "system/network.h"
#include "system/filesys.h"
#include "system/time.h"

#include <talloc.h>
#include <tevent.h>

#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/util/dlinklist.h"
#include "lib/util/debug.h"
#include "lib/util/samba_util.h"
#include "lib/util/sys_rw.h"
#include "lib/util/util_process.h"

#include "ctdb_private.h"
#include "ctdb_client.h"

#include "common/rb_tree.h"
#include "common/common.h"
#include "common/logging.h"
/* Timeout for the ctdb control calls issued during vacuuming. */
#define TIMELIMIT() timeval_current_ofs(10, 0)
enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};
struct ctdb_vacuum_child_context {
	struct ctdb_vacuum_child_context *next, *prev;
	struct ctdb_vacuum_handle *vacuum_handle;
	/* fd child writes status to */
	int fd[2];
	pid_t child_pid;
	enum vacuum_child_status status;
	struct timeval start_time;
};
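
/*
 * Per-database vacuuming state: the currently running child
 * context (if any) and the number of fast-path runs since the
 * last full database traverse.
 */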
struct ctdb_vacuum_handle {
	struct ctdb_db_context *ctdb_db;
	struct ctdb_vacuum_child_context *child_ctx;
	uint32_t fast_path_count;
};

/* a list of records to possibly delete */
struct vacuum_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tdb_context *dest_db;
	trbt_tree_t *delete_list;
	struct ctdb_marshall_buffer **vacuum_fetch_list;
	struct timeval start;
	bool traverse_error;
	bool vacuum;
	struct {
		struct {
			uint32_t added_to_vacuum_fetch_list;
			uint32_t added_to_delete_list;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} delete_queue;
		struct {
			uint32_t scheduled;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} db_traverse;
		struct {
			uint32_t total;
			uint32_t remote_error;
			uint32_t local_error;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t left;
		} delete_list;
		struct {
			uint32_t vacuumed;
			uint32_t copied;
		} repack;
	} count;
};
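
/*
 * In each of the count groups above, "total" is expected to match
 * the sum of the outcome counters; ctdb_process_delete_queue() and
 * ctdb_process_delete_list() log an error if it does not.
 */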
/* this structure contains the information for one record to be deleted */
struct delete_record_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct ctdb_ltdb_header hdr;
	/* number of remote nodes that failed to delete this record */
	uint32_t remote_fail_count;
	TDB_DATA key;
	uint8_t keydata[1];
};
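
/*
 * Callback state for delete_marshall_traverse(): the marshall
 * buffer under construction and the vacuum run it belongs to.
 */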
struct delete_records_list {
	struct ctdb_marshall_buffer *records;
	struct vacuum_data *vdata;
};
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key);
/**
 * Store key and header in a tree, indexed by the key hash.
 */
static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
					       struct ctdb_db_context *ctdb_db,
					       trbt_tree_t *tree,
					       const struct ctdb_ltdb_header *hdr,
					       TDB_DATA key)
{
	struct delete_record_data *dd;
	uint32_t hash;
	size_t len;

	len = offsetof(struct delete_record_data, keydata) + key.dsize;

	dd = (struct delete_record_data *)talloc_size(tree, len);
	if (dd == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	talloc_set_name_const(dd, "struct delete_record_data");

	dd->ctdb = ctdb;
	dd->ctdb_db = ctdb_db;
	dd->key.dsize = key.dsize;
	dd->key.dptr = dd->keydata;
	memcpy(dd->keydata, key.dptr, key.dsize);

	dd->hdr = *hdr;
	dd->remote_fail_count = 0;

	hash = ctdb_hash(&key);

	trbt_insert32(tree, hash, dd);

	return 0;
}
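
/**
 * Add a record to the current vacuum run's delete_list, skipping
 * it when another record already occupies its slot in the hash
 * tree (hash collision).
 */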
static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
				     struct ctdb_ltdb_header *hdr)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t hash;
	int ret;

	hash = ctdb_hash(&key);

	if (trbt_lookup32(vdata->delete_list, hash)) {
		DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
		return 0;
	}

	ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
						  vdata->delete_list,
						  hdr, key);
	if (ret != 0) {
		return -1;
	}

	vdata->count.delete_list.total++;

	return 0;
}
/**
 * Add a record to the list of records to be sent
 * to their lmaster with VACUUM_FETCH.
 */
static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
					   TDB_DATA key)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	uint32_t lmaster;
	struct ctdb_marshall_buffer *vfl;

	lmaster = ctdb_lmaster(ctdb, &key);

	vfl = vdata->vacuum_fetch_list[lmaster];

	vfl = ctdb_marshall_add(ctdb, vfl, vfl->db_id, ctdb->pnn,
				key, NULL, tdb_null);
	if (vfl == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		vdata->traverse_error = true;
		return -1;
	}

	vdata->vacuum_fetch_list[lmaster] = vfl;

	return 0;
}
static void ctdb_vacuum_event(struct tevent_context *ev,
			      struct tevent_timer *te,
			      struct timeval t, void *private_data);
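
/*
 * tdb_parse_record() callback: copy out the ltdb record header.
 * Any record whose value is more than a bare header (i.e. that
 * still carries data) is rejected with -1.
 */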
static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
{
	struct ctdb_ltdb_header *header =
		(struct ctdb_ltdb_header *)private_data;

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		return -1;
	}

	*header = *(struct ctdb_ltdb_header *)data.dptr;

	return 0;
}
/*
 * traverse function for gathering the records that can be deleted
 */
static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct vacuum_data *vdata = talloc_get_type(private_data,
						    struct vacuum_data);
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t lmaster;
	struct ctdb_ltdb_header *hdr;
	int res = 0;

	vdata->count.db_traverse.total++;

	lmaster = ctdb_lmaster(ctdb, &key);
	if (lmaster >= ctdb->num_nodes) {
		vdata->count.db_traverse.error++;
		DEBUG(DEBUG_CRIT, (__location__
				   " lmaster[%u] >= ctdb->num_nodes[%u] for key"
				   " with hash[%u]!\n",
				   (unsigned)lmaster,
				   (unsigned)ctdb->num_nodes,
				   (unsigned)ctdb_hash(&key)));
		return -1;
	}

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		/* it is not a deleted record */
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	hdr = (struct ctdb_ltdb_header *)data.dptr;

	if (hdr->dmaster != ctdb->pnn) {
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	/*
	 * Add the record to this process's delete_queue for processing
	 * in the subsequent traverse in the fast vacuum run.
	 */
	res = insert_record_into_delete_queue(ctdb_db, hdr, key);
	if (res != 0) {
		vdata->count.db_traverse.error++;
	} else {
		vdata->count.db_traverse.scheduled++;
	}

	return 0;
}
/*
 * traverse the tree of records to delete and marshall them into
 * a blob
 */
static int delete_marshall_traverse(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_marshall_buffer *m;

	m = ctdb_marshall_add(recs, recs->records, recs->records->db_id,
			      recs->records->db_id,
			      dd->key, &dd->hdr, tdb_null);
	if (m == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
		return -1;
	}

	recs->records = m;
	return 0;
}
/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 *  - If the record has been migrated off the node
 *    or has been revived (filled with data) on the node,
 *    then skip the record.
 *
 *  - If the current node is the record's lmaster and it is
 *    a record that has never been migrated with data, then
 *    delete the record from the local tdb.
 *
 *  - If the current node is the record's lmaster and it has
 *    been migrated with data, then schedule it for the normal
 *    vacuuming procedure (i.e. add it to the delete_list).
 *
 *  - If the current node is NOT the record's lmaster then
 *    add it to the list of records that are to be sent to
 *    the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	vdata->count.delete_queue.total++;

	res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		vdata->count.delete_queue.error++;
		return 0;
	}

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skipped;
	}

	if (header.dmaster != ctdb->pnn) {
		/* The record has been migrated off the node. Skip. */
		goto skipped;
	}

	if (header.rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		goto skipped;
	}

	/*
	 * We are dmaster, and the record has no data, and it has
	 * not been migrated after it has been queued for deletion.
	 *
	 * At this stage, the record could still have been revived locally
	 * and last been written with empty data. This can only be
	 * fixed with the addition of an active or delete flag. (TODO)
	 */

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		res = add_record_to_vacuum_fetch_list(vdata, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records to send to lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_vacuum_fetch_list++;
		}
		goto done;
	}

	/* use header->flags or dd->hdr.flags ?? */
	if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
		res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records for deletion on lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_delete_list++;
		}
	} else {
		res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error deleting record with key "
			       "hash [0x%08x] from local database db[%s].\n",
			       hash, ctdb_db->db_name));
			vdata->count.delete_queue.error++;
			goto done;
		}

		DEBUG(DEBUG_DEBUG,
		      (__location__ " Deleted record with key hash "
		       "[0x%08x] from local database db[%s].\n",
		       hash, ctdb_db->db_name));

		vdata->count.delete_queue.deleted++;
	}

	goto done;

skipped:
	vdata->count.delete_queue.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	return 0;
}
/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
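/*
 * Note: a record that at least one remote node failed to delete
 * is not deleted locally and is counted exactly once as a remote
 * error, no matter how many nodes reported the failure.
 */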
static int delete_record_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	if (dd->remote_fail_count > 0) {
		vdata->count.delete_list.remote_error++;
		vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error deleting record with key hash "
		       "[0x%08x] from local database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		goto done;
	}

	DEBUG(DEBUG_DEBUG,
	      (__location__ " Deleted record with key hash [0x%08x] from "
	       "local database db[%s].\n", hash, ctdb_db->db_name));

	vdata->count.delete_list.deleted++;
	goto done;

skip:
	vdata->count.delete_list.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	talloc_free(dd);
	vdata->count.delete_list.left--;

	return 0;
}
/**
 * Traverse the delete_queue.
 * Records are either deleted directly or filled
 * into the delete list or the vacuum fetch lists
 * for further processing.
 */
static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
				      struct vacuum_data *vdata)
{
	uint32_t sum;
	int ret;

	ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
				   delete_queue_traverse, vdata);

	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing "
		      "the delete queue.\n"));
	}

	sum = vdata->count.delete_queue.deleted
	    + vdata->count.delete_queue.skipped
	    + vdata->count.delete_queue.error
	    + vdata->count.delete_queue.added_to_delete_list
	    + vdata->count.delete_queue.added_to_vacuum_fetch_list;

	if (vdata->count.delete_queue.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
		      "counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_queue.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_queue.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " fast vacuuming delete_queue traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "adl[%u] "
		       "avf[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_queue.total,
		       (unsigned)vdata->count.delete_queue.deleted,
		       (unsigned)vdata->count.delete_queue.skipped,
		       (unsigned)vdata->count.delete_queue.error,
		       (unsigned)vdata->count.delete_queue.added_to_delete_list,
		       (unsigned)vdata->count.delete_queue.added_to_vacuum_fetch_list));
	}

	return;
}
/**
 * read-only traverse of the database, looking for records that
 * might be able to be vacuumed.
 *
 * This is not done each time but only every tunable
 * VacuumFastPathCount times.
 */
static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
				    struct vacuum_data *vdata)
{
	int ret;

	ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
	if (ret == -1 || vdata->traverse_error) {
		DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
				  "'%s'\n", ctdb_db->db_name));
		return;
	}

	if (vdata->count.db_traverse.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " full vacuuming db traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "sched[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.db_traverse.total,
		       (unsigned)vdata->count.db_traverse.skipped,
		       (unsigned)vdata->count.db_traverse.error,
		       (unsigned)vdata->count.db_traverse.scheduled));
	}

	return;
}
/**
 * Process the vacuum fetch lists:
 * For records for which we are not the lmaster, tell the lmaster to
 * fetch the record.
 */
static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
					    struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	for (i = 0; i < ctdb->num_nodes; i++) {
		TDB_DATA data;
		struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];

		if (ctdb->nodes[i]->pnn == ctdb->pnn) {
			continue;
		}

		if (vfl->count == 0) {
			continue;
		}

		DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
				   vfl->count, ctdb->nodes[i]->pnn,
				   ctdb_db->db_name));

		data = ctdb_marshall_finish(vfl);
		if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
					     CTDB_SRVID_VACUUM_FETCH,
					     data) != 0)
		{
			DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
					  "fetch message to %u\n",
					  ctdb->nodes[i]->pnn));
		}
	}

	return;
}
/**
 * Process the delete list:
 *
 * This is the last step of vacuuming that consistently deletes
 * those records that have been migrated with data and can hence
 * not be deleted when leaving a node.
 *
 * In this step, the lmaster does the final deletion of those empty
 * records that it is also dmaster for. It has usually received
 * at least some of these records previously from the former dmasters
 * with the vacuum fetch message.
 *
 * 1) Send the records to all active nodes with the TRY_DELETE_RECORDS
 *    control. The remote nodes delete their local copy.
 * 2) The lmaster locally deletes its copies of all records that
 *    could successfully be deleted remotely in step #1.
 */
static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
				     struct vacuum_data *vdata)
{
	int ret, i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct delete_records_list *recs;
	TDB_DATA indata;
	struct ctdb_node_map_old *nodemap;
	uint32_t *active_nodes;
	int num_active_nodes;
	TALLOC_CTX *tmp_ctx;
	uint32_t sum;

	if (vdata->count.delete_list.total == 0) {
		return;
	}

	tmp_ctx = talloc_new(vdata);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return;
	}

	vdata->count.delete_list.left = vdata->count.delete_list.total;

	/*
	 * get the list of currently active nodes
	 */

	ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
				   CTDB_CURRENT_NODE,
				   tmp_ctx,
				   &nodemap);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
		goto done;
	}

	active_nodes = list_of_active_nodes(ctdb, nodemap,
					    nodemap, /* talloc context */
					    false /* include self */);
	/* yuck! ;-) */
	num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

	/*
	 * Now delete the records on all active nodes in a two-phase process:
	 * 1) tell all active remote nodes to delete all their copy
	 * 2) if all remote nodes deleted their record copy, delete it locally
	 */

	recs = talloc_zero(tmp_ctx, struct delete_records_list);
	if (recs == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}

	/*
	 * Step 1:
	 * Send all records to all active nodes for deletion.
	 */

	/*
	 * Create a marshall blob from the remaining list of records to delete.
	 */

	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for second marshalling.\n"));
		goto done;
	}

	indata = ctdb_marshall_finish(recs->records);

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data_old *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
				indata, recs, &outdata, &res,
				NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Failed to delete records on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not delete. We remove these from
		 * the list to delete locally.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data_old *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The remote node could not delete the
				 * record. Since other remote nodes can
				 * also fail, we just mark the record.
				 */
				dd->remote_fail_count++;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from TRY_DELETE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
			}

			rec = (struct ctdb_rec_data_old *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}

	/*
	 * Step 2:
	 * Delete the remaining records locally.
	 *
	 * These records have successfully been deleted on all
	 * active remote nodes.
	 */

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_record_traverse, vdata);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for deletion.\n"));
	}

success:

	if (vdata->count.delete_list.left != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Vacuum db[%s] error: "
		      "there are %u records left for deletion after "
		      "processing delete list\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.left));
	}

	sum = vdata->count.delete_list.deleted
	    + vdata->count.delete_list.skipped
	    + vdata->count.delete_list.remote_error
	    + vdata->count.delete_list.local_error
	    + vdata->count.delete_list.left;

	if (vdata->count.delete_list.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
		      "delete list counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_list.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " vacuum delete list statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skip[%u] "
		       "rem.err[%u] "
		       "loc.err[%u] "
		       "left[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_list.total,
		       (unsigned)vdata->count.delete_list.deleted,
		       (unsigned)vdata->count.delete_list.skipped,
		       (unsigned)vdata->count.delete_list.remote_error,
		       (unsigned)vdata->count.delete_list.local_error,
		       (unsigned)vdata->count.delete_list.left));
	}

done:
	talloc_free(tmp_ctx);

	return;
}
/**
 * initialize the vacuum_data
 */
static struct vacuum_data *ctdb_vacuum_init_vacuum_data(
					struct ctdb_db_context *ctdb_db,
					TALLOC_CTX *mem_ctx)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct vacuum_data *vdata;

	vdata = talloc_zero(mem_ctx, struct vacuum_data);
	if (vdata == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return NULL;
	}

	vdata->ctdb = ctdb_db->ctdb;
	vdata->ctdb_db = ctdb_db;
	vdata->delete_list = trbt_create(vdata, 0);
	if (vdata->delete_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto fail;
	}

	vdata->start = timeval_current();

	vdata->count.delete_queue.added_to_delete_list = 0;
	vdata->count.delete_queue.added_to_vacuum_fetch_list = 0;
	vdata->count.delete_queue.deleted = 0;
	vdata->count.delete_queue.skipped = 0;
	vdata->count.delete_queue.error = 0;
	vdata->count.delete_queue.total = 0;
	vdata->count.db_traverse.scheduled = 0;
	vdata->count.db_traverse.skipped = 0;
	vdata->count.db_traverse.error = 0;
	vdata->count.db_traverse.total = 0;
	vdata->count.delete_list.total = 0;
	vdata->count.delete_list.left = 0;
	vdata->count.delete_list.remote_error = 0;
	vdata->count.delete_list.local_error = 0;
	vdata->count.delete_list.skipped = 0;
	vdata->count.delete_list.deleted = 0;

	/* the list needs to be of length num_nodes */
	vdata->vacuum_fetch_list = talloc_zero_array(vdata,
						     struct ctdb_marshall_buffer *,
						     ctdb->num_nodes);
	if (vdata->vacuum_fetch_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto fail;
	}
	for (i = 0; i < ctdb->num_nodes; i++) {
		vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
			talloc_zero_size(vdata->vacuum_fetch_list,
					 offsetof(struct ctdb_marshall_buffer, data));
		if (vdata->vacuum_fetch_list[i] == NULL) {
			DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
			talloc_free(vdata);
			return NULL;
		}
		vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
	}

	return vdata;

fail:
	talloc_free(vdata);
	return NULL;
}
/**
 * Vacuum a DB:
 *  - Always do the fast vacuuming run, which traverses
 *    the in-memory delete queue: these records have been
 *    scheduled for deletion.
 *  - Only if explicitly requested, the database is traversed
 *    in order to use the traditional heuristics on empty records
 *    to trigger deletion.
 *    This is done only every VacuumFastPathCount'th vacuuming run.
 *
 * The traverse runs fill two lists:
 *
 * - The delete_list:
 *   This is the list of empty records the current
 *   node is lmaster and dmaster for. These records are later
 *   deleted first on other nodes and then locally.
 *
 *   The fast vacuuming run has a short cut for those records
 *   that have never been migrated with data: these records
 *   are immediately deleted locally, since they have left
 *   no trace on other nodes.
 *
 * - The vacuum_fetch lists
 *   (one for each other lmaster node):
 *   The records in this list are sent for deletion to
 *   their lmaster in a bulk VACUUM_FETCH message.
 *
 *   The lmaster then migrates all these records to itself
 *   so that they can be vacuumed there.
 *
 * This executes in the child context.
 */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
			  bool full_vacuum_run)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret, pnn;
	struct vacuum_data *vdata;
	TALLOC_CTX *tmp_ctx;

	DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
			   "%s db_id[0x%08x]\n",
			   full_vacuum_run ? "full" : "fast",
			   ctdb_db->db_name, ctdb_db->db_id));

	ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
		return ret;
	}

	pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
	if (pnn == -1) {
		DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
		return -1;
	}

	ctdb->pnn = pnn;

	tmp_ctx = talloc_new(ctdb_db);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR, ("Out of memory!\n"));
		return -1;
	}

	vdata = ctdb_vacuum_init_vacuum_data(ctdb_db, tmp_ctx);
	if (vdata == NULL) {
		talloc_free(tmp_ctx);
		return -1;
	}

	if (full_vacuum_run) {
		ctdb_vacuum_traverse_db(ctdb_db, vdata);
	}

	ctdb_process_delete_queue(ctdb_db, vdata);

	ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);

	ctdb_process_delete_list(ctdb_db, vdata);

	talloc_free(tmp_ctx);

	/* this ensures we run our event queue */
	ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

	return 0;
}
/**
 * repack and vacuum a db
 * called from the child context
 */
static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
				     bool full_vacuum_run)
{
	uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
	const char *name = ctdb_db->db_name;
	int freelist_size = 0;
	int ret;

	if (ctdb_vacuum_db(ctdb_db, full_vacuum_run) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
	}

	freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
	if (freelist_size == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
		return -1;
	}

	/*
	 * decide if a repack is necessary
	 */
	if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
	{
		return 0;
	}

	DEBUG(DEBUG_INFO, ("Repacking %s with %u freelist entries\n",
			   name, freelist_size));

	ret = tdb_repack(ctdb_db->ltdb->tdb);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
		return -1;
	}

	return 0;
}
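
/*
 * Currently this simply returns the VacuumInterval tunable;
 * keeping it as a separate helper presumably leaves room for
 * per-database intervals without touching the callers.
 */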
static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
{
	uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;

	return interval;
}
static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
	double l = timeval_elapsed(&child_ctx->start_time);
	struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	CTDB_UPDATE_DB_LATENCY(ctdb_db, "vacuum", vacuum.latency, l);
	DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

	if (child_ctx->child_pid != -1) {
		ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
	} else {
		/* Bump the number of successful fast-path runs. */
		child_ctx->vacuum_handle->fast_path_count++;
	}

	DLIST_REMOVE(ctdb->vacuumers, child_ctx);

	tevent_add_timer(ctdb->ev, child_ctx->vacuum_handle,
			 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			 ctdb_vacuum_event, child_ctx->vacuum_handle);

	return 0;
}
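
/*
 * The destructor above doubles as the scheduler: whenever a child
 * context is freed (on success, error or timeout), it kills the
 * child if still running and arms the timer for the database's
 * next vacuum event.
 */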
/**
 * this event is generated when a vacuum child process times out
 */
static void vacuum_child_timeout(struct tevent_context *ev,
				 struct tevent_timer *te,
				 struct timeval t, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);

	DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));

	child_ctx->status = VACUUM_TIMEOUT;

	talloc_free(child_ctx);
}
/**
 * this event is generated when a vacuum child process has completed
 */
static void vacuum_child_handler(struct tevent_context *ev,
				 struct tevent_fd *fde,
				 uint16_t flags, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
	char c = 0;
	int ret;

	DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
	child_ctx->child_pid = -1;

	ret = sys_read(child_ctx->fd[0], &c, 1);
	if (ret != 1 || c != 0) {
		child_ctx->status = VACUUM_ERROR;
		DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
	} else {
		child_ctx->status = VACUUM_OK;
	}

	talloc_free(child_ctx);
}
/**
 * this event is called every time we need to start a new vacuum process
 */
static void ctdb_vacuum_event(struct tevent_context *ev,
			      struct tevent_timer *te,
			      struct timeval t, void *private_data)
{
	struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
	struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_vacuum_child_context *child_ctx;
	struct tevent_fd *fde;
	int ret;

	/* we don't vacuum if we are in recovery mode, or db frozen */
	if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
	    ctdb_db_frozen(ctdb_db)) {
		DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
				   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ?
				   "in recovery" : "frozen"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	/* Do not allow multiple vacuuming child processes to be active at the
	 * same time.  If there is a vacuuming child process active, delay
	 * the new vacuuming event to stagger vacuuming events.
	 */
	if (ctdb->vacuumers != NULL) {
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(0, 500*1000),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
	if (child_ctx == NULL) {
		DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
		ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
	}

	ret = pipe(child_ctx->fd);
	if (ret != 0) {
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
		vacuum_handle->fast_path_count = 0;
	}

	child_ctx->child_pid = ctdb_fork(ctdb);
	if (child_ctx->child_pid == (pid_t)-1) {
		close(child_ctx->fd[0]);
		close(child_ctx->fd[1]);
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}


	if (child_ctx->child_pid == 0) {
		char cc = 0;
		bool full_vacuum_run = false;
		close(child_ctx->fd[0]);

		DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
		prctl_set_comment("ctdb_vacuum");
		if (switch_from_server_to_client(ctdb) != 0) {
			DEBUG(DEBUG_CRIT, (__location__ " ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
			_exit(1);
		}

		if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
		    (vacuum_handle->fast_path_count == 0))
		{
			full_vacuum_run = true;
		}
		cc = ctdb_vacuum_and_repack_db(ctdb_db, full_vacuum_run);

		sys_write(child_ctx->fd[1], &cc, 1);
		_exit(0);
	}

	set_close_on_exec(child_ctx->fd[0]);
	close(child_ctx->fd[1]);

	child_ctx->status = VACUUM_RUNNING;
	child_ctx->start_time = timeval_current();

	DLIST_ADD(ctdb->vacuumers, child_ctx);
	talloc_set_destructor(child_ctx, vacuum_child_destructor);

	/*
	 * Clear the fastpath vacuuming list in the parent.
	 */
	talloc_free(ctdb_db->delete_queue);
	ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
	if (ctdb_db->delete_queue == NULL) {
		/* fatal here? ... */
		ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
				 "in parent context. Shutting down\n");
	}

	tevent_add_timer(ctdb->ev, child_ctx,
			 timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
			 vacuum_child_timeout, child_ctx);

	DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

	fde = tevent_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
			    TEVENT_FD_READ, vacuum_child_handler, child_ctx);
	tevent_fd_set_auto_close(fde);

	vacuum_handle->child_ctx = child_ctx;
	child_ctx->vacuum_handle = vacuum_handle;
}
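
/*
 * Abort all running vacuuming child processes, e.g. when a
 * recovery starts. Freeing each context kills the child and
 * removes it from the vacuumers list via the destructor.
 */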
void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
{
	/* Simply free them all. */
	while (ctdb->vacuumers) {
		DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
			   ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
			   (int)ctdb->vacuumers->child_pid));
		/* vacuum_child_destructor kills it, removes from list */
		talloc_free(ctdb->vacuumers);
	}
}
/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
	if (! ctdb_db_volatile(ctdb_db)) {
		DEBUG(DEBUG_ERR,
		      ("Vacuuming is disabled for non-volatile database %s\n",
		       ctdb_db->db_name));
		return 0;
	}

	ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
	CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);

	ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
	ctdb_db->vacuum_handle->fast_path_count = 0;

	tevent_add_timer(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
			 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			 ctdb_vacuum_event, ctdb_db->vacuum_handle);

	return 0;
}
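
/**
 * Remove a record from the in-memory delete queue, typically
 * because it has been written to (revived) since it was scheduled
 * for deletion. An entry that merely collides on the key hash is
 * left untouched.
 */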
static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
					    const struct ctdb_ltdb_header *hdr,
					    const TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "db[%s] "
			    "db_id[0x%08x] "
			    "key_hash[0x%08x] "
			    "lmaster[%u] "
			    "migrated_with_data[%s]\n",
			    ctdb_db->db_name, ctdb_db->db_id,
			    hash,
			    ctdb_lmaster(ctdb_db->ctdb, &key),
			    hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd == NULL) {
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "record not in queue (hash[0x%08x]).\n",
				    hash));
		return;
	}

	if ((kd->key.dsize != key.dsize) ||
	    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
	{
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "hash collision for key with hash[0x%08x] "
				    "in db[%s] - skipping\n",
				    hash, ctdb_db->db_name));
		return;
	}

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "removing key with hash[0x%08x]\n",
			    hash));

	talloc_free(kd);

	return;
}
/**
 * Insert a record into the ctdb_db context's delete queue,
 * handling hash collisions.
 */
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;
	int ret;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__ " schedule for deletion: db[%s] "
			    "db_id[0x%08x] "
			    "key_hash[0x%08x] "
			    "lmaster[%u] "
			    "migrated_with_data[%s]\n",
			    ctdb_db->db_name, ctdb_db->db_id,
			    hash,
			    ctdb_lmaster(ctdb_db->ctdb, &key),
			    hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd != NULL) {
		if ((kd->key.dsize != key.dsize) ||
		    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
		{
			DEBUG(DEBUG_INFO,
			      (__location__ " schedule for deletion: "
			       "hash collision for key hash [0x%08x]. "
			       "Skipping the record.\n", hash));
			return 0;
		} else {
			DEBUG(DEBUG_DEBUG,
			      (__location__ " schedule for deletion: "
			       "updating entry for key with hash [0x%08x].\n",
			       hash));
		}
	}

	ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
						  ctdb_db->delete_queue,
						  hdr, key);
	if (ret != 0) {
		DEBUG(DEBUG_INFO,
		      (__location__ " schedule for deletion: error "
		       "inserting key with hash [0x%08x] into delete queue\n",
		       hash));
		return -1;
	}

	return 0;
}
/**
 * Schedule a record for deletion.
 * Called from the parent context.
 */
int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
					   TDB_DATA indata)
{
	struct ctdb_control_schedule_for_deletion *dd;
	struct ctdb_db_context *ctdb_db;
	int ret;
	TDB_DATA key;

	dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;

	ctdb_db = find_ctdb_db(ctdb, dd->db_id);
	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
				  dd->db_id));
		return -1;
	}

	key.dsize = dd->keylen;
	key.dptr = dd->key;

	ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);

	return ret;
}
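
/**
 * Schedule a record for deletion from any process: insert it into
 * the delete queue directly when running in the main daemon,
 * otherwise send the daemon a SCHEDULE_FOR_DELETION control.
 */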
int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 TDB_DATA key)
{
	int ret;
	struct ctdb_control_schedule_for_deletion *dd;
	TDB_DATA indata;
	int32_t status;

	if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
		/* main daemon - directly queue */
		ret = insert_record_into_delete_queue(ctdb_db, hdr, key);

		return ret;
	}

	/* if we don't have a connection to the daemon we can not send
	 * a control, e.g. in a child process spawned by the
	 * update_record control.
	 */
	if (!ctdb_db->ctdb->can_send_controls) {
		return -1;
	}

	/* child process: send the main daemon a control */
	indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
	indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
	if (indata.dptr == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
		return -1;
	}
	dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
	dd->db_id = ctdb_db->db_id;
	dd->hdr = *hdr;
	dd->keylen = key.dsize;
	memcpy(dd->key, key.dptr, key.dsize);

	ret = ctdb_control(ctdb_db->ctdb,
			   CTDB_CURRENT_NODE,
			   ctdb_db->db_id,
			   CTDB_CONTROL_SCHEDULE_FOR_DELETION,
			   CTDB_CTRL_FLAG_NOREPLY, /* flags */
			   indata,
			   NULL, /* mem_ctx */
			   NULL, /* outdata */
			   &status,
			   NULL, /* timeout : NULL == wait forever */
			   NULL); /* error message */

	talloc_free(indata.dptr);

	if (ret != 0 || status != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error sending "
				  "SCHEDULE_FOR_DELETION "
				  "control.\n"));
		if (status != 0) {
			ret = -1;
		}
	}

	return ret;
}
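
/**
 * Remove a record from the delete queue, but only when called in
 * the main daemon: child processes have no access to the daemon's
 * in-memory queue, so the call is a no-op there.
 */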
void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 const TDB_DATA key)
{
	if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
		/*
		 * Only remove the record from the delete queue if called
		 * in the main daemon.
		 */
		return;
	}

	remove_record_from_delete_queue(ctdb_db, hdr, key);

	return;
}