ctdb-vacuum: catch and log error of traverse in ctdb_process_delete_queue()
ctdb/server/ctdb_vacuum.c
1 /*
2 ctdb vacuuming events
4 Copyright (C) Ronnie Sahlberg 2009
5 Copyright (C) Michael Adam 2010-2013
6 Copyright (C) Stefan Metzmacher 2010-2011
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "includes.h"
23 #include "tdb.h"
24 #include "system/network.h"
25 #include "system/filesys.h"
26 #include "system/dir.h"
27 #include "../include/ctdb_private.h"
28 #include "db_wrap.h"
29 #include "lib/util/dlinklist.h"
31 #include "../common/rb_tree.h"
33 #define TIMELIMIT() timeval_current_ofs(10, 0)
35 enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};
37 struct ctdb_vacuum_child_context {
38 struct ctdb_vacuum_child_context *next, *prev;
39 struct ctdb_vacuum_handle *vacuum_handle;
40 /* fd child writes status to */
41 int fd[2];
42 pid_t child_pid;
43 enum vacuum_child_status status;
44 struct timeval start_time;
47 struct ctdb_vacuum_handle {
48 struct ctdb_db_context *ctdb_db;
49 struct ctdb_vacuum_child_context *child_ctx;
50 uint32_t fast_path_count;
54 /* a list of records to possibly delete */
55 struct vacuum_data {
56 uint32_t repack_limit;
57 struct ctdb_context *ctdb;
58 struct ctdb_db_context *ctdb_db;
59 struct tdb_context *dest_db;
60 trbt_tree_t *delete_list;
61 uint32_t delete_count;
62 struct ctdb_marshall_buffer **vacuum_fetch_list;
63 struct timeval start;
64 bool traverse_error;
65 bool vacuum;
66 uint32_t total;
67 uint32_t vacuumed;
68 uint32_t copied;
69 uint32_t fast_added_to_vacuum_fetch_list;
70 uint32_t fast_added_to_delete_list;
71 uint32_t fast_deleted;
72 uint32_t fast_skipped;
73 uint32_t fast_error;
74 uint32_t fast_total;
75 uint32_t full_scheduled;
76 uint32_t full_skipped;
77 uint32_t full_error;
78 uint32_t full_total;
79 uint32_t delete_left;
80 uint32_t delete_remote_error;
81 uint32_t delete_local_error;
82 uint32_t delete_deleted;
83 uint32_t delete_skipped;
86 /* this structure contains the information for one record to be deleted */
87 struct delete_record_data {
88 struct ctdb_context *ctdb;
89 struct ctdb_db_context *ctdb_db;
90 struct ctdb_ltdb_header hdr;
91 TDB_DATA key;
92 uint8_t keydata[1];
95 struct delete_records_list {
96 struct ctdb_marshall_buffer *records;
97 struct vacuum_data *vdata;
100 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
101 const struct ctdb_ltdb_header *hdr,
102 TDB_DATA key);
105 * Store key and header in a tree, indexed by the key hash.
107 static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
108 struct ctdb_db_context *ctdb_db,
109 trbt_tree_t *tree,
110 const struct ctdb_ltdb_header *hdr,
111 TDB_DATA key)
113 struct delete_record_data *dd;
114 uint32_t hash;
115 size_t len;
117 len = offsetof(struct delete_record_data, keydata) + key.dsize;
119 dd = (struct delete_record_data *)talloc_size(tree, len);
120 if (dd == NULL) {
121 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
122 return -1;
124 talloc_set_name_const(dd, "struct delete_record_data");
126 dd->ctdb = ctdb;
127 dd->ctdb_db = ctdb_db;
128 dd->key.dsize = key.dsize;
129 dd->key.dptr = dd->keydata;
130 memcpy(dd->keydata, key.dptr, key.dsize);
132 dd->hdr = *hdr;
134 hash = ctdb_hash(&key);
136 trbt_insert32(tree, hash, dd);
138 return 0;
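/*
 * Add a record to the delete_list of this vacuum run, i.e. the list of
 * empty records that will go through the full three-phase deletion.
 * The list is indexed by the 32-bit ctdb key hash only, so on a hash
 * collision the new record is simply skipped (see below).
 */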
141 static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
142 struct ctdb_ltdb_header *hdr)
144 struct ctdb_context *ctdb = vdata->ctdb;
145 struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
146 uint32_t hash;
147 int ret;
149 hash = ctdb_hash(&key);
151 if (trbt_lookup32(vdata->delete_list, hash)) {
152 DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
153 return 0;
156 ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
157 vdata->delete_list,
158 hdr, key);
159 if (ret != 0) {
160 return -1;
163 vdata->delete_count++;
165 return 0;
169 * Add a record to the list of records to be sent
170 * to their lmaster with VACUUM_FETCH.
172 static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
173 TDB_DATA key)
175 struct ctdb_context *ctdb = vdata->ctdb;
176 struct ctdb_rec_data *rec;
177 uint32_t lmaster;
178 size_t old_size;
179 struct ctdb_marshall_buffer *vfl;
181 lmaster = ctdb_lmaster(ctdb, &key);
183 vfl = vdata->vacuum_fetch_list[lmaster];
185 rec = ctdb_marshall_record(vfl, ctdb->pnn, key, NULL, tdb_null);
186 if (rec == NULL) {
187 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
188 vdata->traverse_error = true;
189 return -1;
192 old_size = talloc_get_size(vfl);
193 vfl = talloc_realloc_size(NULL, vfl, old_size + rec->length);
194 if (vfl == NULL) {
195 DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
196 vdata->traverse_error = true;
197 return -1;
199 vdata->vacuum_fetch_list[lmaster] = vfl;
201 vfl->count++;
202 memcpy(old_size+(uint8_t *)vfl, rec, rec->length);
203 talloc_free(rec);
205 vdata->total++;
207 return 0;
211 static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
212 struct timeval t, void *private_data);
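/*
 * tdb_parse_record() callback: succeed only if the stored data is exactly
 * a ctdb_ltdb_header (i.e. an empty record) and hand that header back to
 * the caller.
 */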
214 static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
216 struct ctdb_ltdb_header *header =
217 (struct ctdb_ltdb_header *)private_data;
219 if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
220 return -1;
223 *header = *(struct ctdb_ltdb_header *)data.dptr;
225 return 0;
229 * traverse function for gathering the records that can be deleted
231 static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
232 void *private_data)
234 struct vacuum_data *vdata = talloc_get_type(private_data,
235 struct vacuum_data);
236 struct ctdb_context *ctdb = vdata->ctdb;
237 struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
238 uint32_t lmaster;
239 struct ctdb_ltdb_header *hdr;
240 int res = 0;
242 vdata->full_total++;
244 lmaster = ctdb_lmaster(ctdb, &key);
245 if (lmaster >= ctdb->num_nodes) {
246 vdata->full_error++;
247 DEBUG(DEBUG_CRIT, (__location__
248 " lmaster[%u] >= ctdb->num_nodes[%u] for key"
249 " with hash[%u]!\n",
250 (unsigned)lmaster,
251 (unsigned)ctdb->num_nodes,
252 (unsigned)ctdb_hash(&key)));
253 return -1;
256 if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
257 /* it is not a deleted record */
258 vdata->full_skipped++;
259 return 0;
262 hdr = (struct ctdb_ltdb_header *)data.dptr;
264 if (hdr->dmaster != ctdb->pnn) {
265 vdata->full_skipped++;
266 return 0;
270 * Add the record to this process's delete_queue for processing
271 * in the subsequent traverse in the fast vacuum run.
273 res = insert_record_into_delete_queue(ctdb_db, hdr, key);
274 if (res != 0) {
275 vdata->full_error++;
276 } else {
277 vdata->full_scheduled++;
280 return 0;
284 * traverse the tree of records to delete and marshall them into
285 * a blob
287 static int delete_marshall_traverse(void *param, void *data)
289 struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
290 struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
291 struct ctdb_rec_data *rec;
292 size_t old_size;
294 rec = ctdb_marshall_record(dd, recs->records->db_id, dd->key, &dd->hdr, tdb_null);
295 if (rec == NULL) {
296 DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
297 return 0;
300 old_size = talloc_get_size(recs->records);
301 recs->records = talloc_realloc_size(NULL, recs->records, old_size + rec->length);
302 if (recs->records == NULL) {
303 DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
304 return 0;
306 recs->records->count++;
307 memcpy(old_size+(uint8_t *)(recs->records), rec, rec->length);
308 return 0;
312 * Variant of delete_marshall_traverse() that bumps the
313 * RSN of each traversed record in the database.
315 * This is needed to ensure that when rolling out our
316 * empty record copy before remote deletion, we as the
317 * record's dmaster keep a higher RSN than the non-dmaster
318 * nodes. This is needed to prevent old copies from
319 * resurrection in recoveries.
321 static int delete_marshall_traverse_first(void *param, void *data)
323 struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
324 struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
325 struct ctdb_db_context *ctdb_db = dd->ctdb_db;
326 struct ctdb_context *ctdb = ctdb_db->ctdb;
327 struct ctdb_ltdb_header header;
328 uint32_t lmaster;
329 uint32_t hash = ctdb_hash(&(dd->key));
330 int res;
332 res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
333 if (res != 0) {
334 DEBUG(DEBUG_ERR,
335 (__location__ " Error getting chainlock on record with "
336 "key hash [0x%08x] on database db[%s].\n",
337 hash, ctdb_db->db_name));
338 recs->vdata->delete_skipped++;
339 talloc_free(dd);
340 return 0;
344 * Verify that the record is still empty, its RSN has not
345 * changed and that we are still its lmaster and dmaster.
348 res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
349 vacuum_record_parser, &header);
350 if (res != 0) {
351 goto skip;
354 if (header.flags & CTDB_REC_RO_FLAGS) {
355 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
356 "on database db[%s] has read-only flags. "
357 "skipping.\n",
358 hash, ctdb_db->db_name));
359 goto skip;
362 if (header.dmaster != ctdb->pnn) {
363 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
364 "on database db[%s] has been migrated away. "
365 "skipping.\n",
366 hash, ctdb_db->db_name));
367 goto skip;
370 if (header.rsn != dd->hdr.rsn) {
371 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
372 "on database db[%s] seems to have been "
373 "migrated away and back again (with empty "
374 "data). skipping.\n",
375 hash, ctdb_db->db_name));
376 goto skip;
379 lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
381 if (lmaster != ctdb->pnn) {
382 DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
383 "delete list (key hash [0x%08x], db[%s]). "
384 "Strange! skipping.\n",
385 hash, ctdb_db->db_name));
386 goto skip;
390 * Increment the record's RSN to ensure the dmaster (i.e. the current
391 * node) has the highest RSN of the record in the cluster.
392 * This is to prevent old record copies from resurrecting in recoveries
393 * if something should fail during the deletion process.
394 * Note that ctdb_ltdb_store_server() increments the RSN if called
395 * on the record's dmaster.
398 res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
399 if (res != 0) {
400 DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
401 "key hash [0x%08x] on database db[%s].\n",
402 hash, ctdb_db->db_name));
403 goto skip;
406 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
408 goto done;
410 skip:
411 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
413 recs->vdata->delete_skipped++;
414 talloc_free(dd);
415 dd = NULL;
417 done:
418 if (dd == NULL) {
419 return 0;
422 return delete_marshall_traverse(param, data);
426 * traverse function for the traversal of the delete_queue,
427 * the fast-path vacuuming list.
429 * - If the record has been migrated off the node
430 * or has been revived (filled with data) on the node,
431 * then skip the record.
433 * - If the current node is the record's lmaster and it is
434 * a record that has never been migrated with data, then
435 * delete the record from the local tdb.
437 * - If the current node is the record's lmaster and it has
438 * been migrated with data, then schedule it for the normal
439 * vacuuming procedure (i.e. add it to the delete_list).
441 * - If the current node is NOT the record's lmaster then
442 * add it to the list of records that are to be sent to
443 * the lmaster with the VACUUM_FETCH message.
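 *
 * In short, for each queued record (checks done under the chainlock):
 *   - dmaster != this node, or RSN changed      -> skip
 *   - lmaster != this node                      -> vacuum_fetch list
 *   - lmaster == this node, migrated with data  -> delete_list
 *   - lmaster == this node, never migrated      -> delete from local tdb
 */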
445 static int delete_queue_traverse(void *param, void *data)
447 struct delete_record_data *dd =
448 talloc_get_type(data, struct delete_record_data);
449 struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
450 struct ctdb_db_context *ctdb_db = dd->ctdb_db;
451 struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
452 int res;
453 struct ctdb_ltdb_header header;
454 uint32_t lmaster;
455 uint32_t hash = ctdb_hash(&(dd->key));
457 vdata->fast_total++;
459 res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
460 if (res != 0) {
461 DEBUG(DEBUG_ERR,
462 (__location__ " Error getting chainlock on record with "
463 "key hash [0x%08x] on database db[%s].\n",
464 hash, ctdb_db->db_name));
465 vdata->fast_error++;
466 return 0;
469 res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
470 vacuum_record_parser, &header);
471 if (res != 0) {
472 goto skipped;
475 if (header.dmaster != ctdb->pnn) {
476 /* The record has been migrated off the node. Skip. */
477 goto skipped;
480 if (header.rsn != dd->hdr.rsn) {
482 * The record has been migrated off the node and back again.
483 * But not requeued for deletion. Skip it.
485 goto skipped;
489 * We are dmaster, and the record has no data, and it has
490 * not been migrated after it has been queued for deletion.
492 * At this stage, the record could still have been revived locally
493 * and last been written with empty data. This can only be
494 * fixed with the addition of an active or delete flag. (TODO)
497 lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
499 if (lmaster != ctdb->pnn) {
500 res = add_record_to_vacuum_fetch_list(vdata, dd->key);
502 if (res != 0) {
503 DEBUG(DEBUG_ERR,
504 (__location__ " Error adding record to list "
505 "of records to send to lmaster.\n"));
506 vdata->fast_error++;
507 } else {
508 vdata->fast_added_to_vacuum_fetch_list++;
510 goto done;
513 /* use header->flags or dd->hdr.flags ?? */
514 if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
515 res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);
517 if (res != 0) {
518 DEBUG(DEBUG_ERR,
519 (__location__ " Error adding record to list "
520 "of records for deletion on lmaster.\n"));
521 vdata->fast_error++;
522 } else {
523 vdata->fast_added_to_delete_list++;
525 } else {
526 res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);
528 if (res != 0) {
529 DEBUG(DEBUG_ERR,
530 (__location__ " Error deleting record with key "
531 "hash [0x%08x] from local data base db[%s].\n",
532 hash, ctdb_db->db_name));
533 vdata->fast_error++;
534 goto done;
537 DEBUG(DEBUG_DEBUG,
538 (__location__ " Deleted record with key hash "
539 "[0x%08x] from local data base db[%s].\n",
540 hash, ctdb_db->db_name));
541 vdata->fast_deleted++;
544 goto done;
546 skipped:
547 vdata->fast_skipped++;
549 done:
550 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
552 return 0;
556 * Delete the records that we are lmaster and dmaster for and
557 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
558 * control.
560 static int delete_record_traverse(void *param, void *data)
562 struct delete_record_data *dd =
563 talloc_get_type(data, struct delete_record_data);
564 struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
565 struct ctdb_db_context *ctdb_db = dd->ctdb_db;
566 struct ctdb_context *ctdb = ctdb_db->ctdb;
567 int res;
568 struct ctdb_ltdb_header header;
569 uint32_t lmaster;
570 uint32_t hash = ctdb_hash(&(dd->key));
572 res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
573 if (res != 0) {
574 DEBUG(DEBUG_ERR,
575 (__location__ " Error getting chainlock on record with "
576 "key hash [0x%08x] on database db[%s].\n",
577 hash, ctdb_db->db_name));
578 vdata->delete_local_error++;
579 vdata->delete_left--;
580 talloc_free(dd);
581 return 0;
585 * Verify that the record is still empty, its RSN has not
586 * changed and that we are still its lmaster and dmaster.
589 res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
590 vacuum_record_parser, &header);
591 if (res != 0) {
592 goto skip;
595 if (header.flags & CTDB_REC_RO_FLAGS) {
596 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
597 "on database db[%s] has read-only flags. "
598 "skipping.\n",
599 hash, ctdb_db->db_name));
600 goto skip;
603 if (header.dmaster != ctdb->pnn) {
604 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
605 "on database db[%s] has been migrated away. "
606 "skipping.\n",
607 hash, ctdb_db->db_name));
608 goto skip;
611 if (header.rsn != dd->hdr.rsn + 1) {
613 * The record has been migrated off the node and back again.
614 * But not requeued for deletion. Skip it.
615 * (Note that the first marshall traverse has bumped the RSN
616 * on disk.)
618 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
619 "on database db[%s] seems to have been "
620 "migrated away and back again (with empty "
621 "data). skipping.\n",
622 hash, ctdb_db->db_name));
623 goto skip;
626 lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
628 if (lmaster != ctdb->pnn) {
629 DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
630 "delete list (key hash [0x%08x], db[%s]). "
631 "Strange! skipping.\n",
632 hash, ctdb_db->db_name));
633 goto skip;
636 res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);
638 if (res != 0) {
639 DEBUG(DEBUG_ERR,
640 (__location__ " Error deleting record with key hash "
641 "[0x%08x] from local data base db[%s].\n",
642 hash, ctdb_db->db_name));
643 vdata->delete_local_error++;
644 goto done;
647 DEBUG(DEBUG_DEBUG,
648 (__location__ " Deleted record with key hash [0x%08x] from "
649 "local data base db[%s].\n", hash, ctdb_db->db_name));
651 vdata->delete_deleted++;
652 goto done;
654 skip:
655 vdata->delete_skipped++;
657 done:
658 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
660 talloc_free(dd);
661 vdata->delete_left--;
663 return 0;
667 * Traverse the delete_queue.
668 * Records are either deleted directly or filled
669 * into the delete list or the vacuum fetch lists
670 * for further processing.
672 static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
673 struct vacuum_data *vdata)
675 uint32_t sum;
676 int ret;
678 ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
679 delete_queue_traverse, vdata);
681 if (ret != 0) {
682 DEBUG(DEBUG_ERR, (__location__ " Error traversing "
683 "the delete queue.\n"));
686 sum = vdata->fast_deleted
687 + vdata->fast_skipped
688 + vdata->fast_error
689 + vdata->fast_added_to_delete_list
690 + vdata->fast_added_to_vacuum_fetch_list;
692 if (vdata->fast_total != sum) {
693 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
694 "counts for db[%s]: total[%u] != sum[%u]\n",
695 ctdb_db->db_name, (unsigned)vdata->fast_total,
696 (unsigned)sum));
699 if (vdata->fast_total > 0) {
700 DEBUG(DEBUG_INFO,
701 (__location__
702 " fast vacuuming delete_queue traverse statistics: "
703 "db[%s] "
704 "total[%u] "
705 "del[%u] "
706 "skp[%u] "
707 "err[%u] "
708 "adl[%u] "
709 "avf[%u]\n",
710 ctdb_db->db_name,
711 (unsigned)vdata->fast_total,
712 (unsigned)vdata->fast_deleted,
713 (unsigned)vdata->fast_skipped,
714 (unsigned)vdata->fast_error,
715 (unsigned)vdata->fast_added_to_delete_list,
716 (unsigned)vdata->fast_added_to_vacuum_fetch_list));
719 return;
723 * read-only traverse of the database, looking for records that
724 * might be able to be vacuumed.
726 * This is not done each time but only every tunable
727 * VacuumFastPathCount times.
729 static int ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
730 struct vacuum_data *vdata)
732 int ret;
734 ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
735 if (ret == -1 || vdata->traverse_error) {
736 DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
737 "'%s'\n", ctdb_db->db_name));
738 return -1;
741 if (vdata->full_total > 0) {
742 DEBUG(DEBUG_INFO,
743 (__location__
744 " full vacuuming db traverse statistics: "
745 "db[%s] "
746 "total[%u] "
747 "skp[%u] "
748 "err[%u] "
749 "sched[%u]\n",
750 ctdb_db->db_name,
751 (unsigned)vdata->full_total,
752 (unsigned)vdata->full_skipped,
753 (unsigned)vdata->full_error,
754 (unsigned)vdata->full_scheduled));
757 return 0;
761 * Process the vacuum fetch lists:
762 * For records for which we are not the lmaster, tell the lmaster to
763 * fetch the record.
765 static int ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
766 struct vacuum_data *vdata)
768 int i;
769 struct ctdb_context *ctdb = ctdb_db->ctdb;
771 for (i = 0; i < ctdb->num_nodes; i++) {
772 TDB_DATA data;
773 struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];
775 if (ctdb->nodes[i]->pnn == ctdb->pnn) {
776 continue;
779 if (vfl->count == 0) {
780 continue;
783 DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
784 vfl->count, ctdb->nodes[i]->pnn,
785 ctdb_db->db_name));
787 data.dsize = talloc_get_size(vfl);
788 data.dptr = (void *)vfl;
789 if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
790 CTDB_SRVID_VACUUM_FETCH,
791 data) != 0)
793 DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
794 "fetch message to %u\n",
795 ctdb->nodes[i]->pnn));
796 return -1;
800 return 0;
804 * Process the delete list:
806 * This is the last step of vacuuming that consistently deletes
807 * those records that have been migrated with data and can hence
808 * not be deleted when leaving a node.
810 * In this step, the lmaster does the final deletion of those empty
811 * records that it is also dmaster for. It has usually received
812 * at least some of these records previously from the former dmasters
813 * with the vacuum fetch message.
815 * This last step is implemented as a 3-phase process to protect from
816 * races leading to data corruption:
818 * 1) Send the lmaster's copy to all other active nodes with the
819 * RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
820 * 2) Send the records that could successfully be stored remotely
821 * in step #1 to all active nodes with the TRY_DELETE_RECORDS
822 * control. The remote nodes delete their local copy.
823 * 3) The lmaster locally deletes its copies of all records that
824 * could successfully be deleted remotely in step #2.
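 *
 * Sketch of the per-node control sequence used below (error handling
 * omitted):
 *   CTDB_CONTROL_RECEIVE_RECORDS    - push the lmaster's empty copies
 *   CTDB_CONTROL_TRY_DELETE_RECORDS - remote nodes delete their copies
 *   delete_record_traverse()        - finally delete the local copies
 */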
826 static int ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
827 struct vacuum_data *vdata)
829 int ret, i;
830 struct ctdb_context *ctdb = ctdb_db->ctdb;
831 struct delete_records_list *recs;
832 TDB_DATA indata;
833 struct ctdb_node_map *nodemap;
834 uint32_t *active_nodes;
835 int num_active_nodes;
836 TALLOC_CTX *tmp_ctx;
837 uint32_t sum;
839 if (vdata->delete_count == 0) {
840 return 0;
843 tmp_ctx = talloc_new(vdata);
844 if (tmp_ctx == NULL) {
845 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
846 return 0;
849 vdata->delete_left = vdata->delete_count;
852 * get the list of currently active nodes
855 ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
856 CTDB_CURRENT_NODE,
857 tmp_ctx,
858 &nodemap);
859 if (ret != 0) {
860 DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
861 ret = -1;
862 goto done;
865 active_nodes = list_of_active_nodes(ctdb, nodemap,
866 nodemap, /* talloc context */
867 false /* include self */);
868 /* yuck! ;-) */
869 num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);
872 * Now delete the records on all active nodes in a three-phase process:
873 * 1) send all active remote nodes the current empty copy with this
874 * node as DMASTER
875 * 2) if all nodes could store the new copy,
876 * tell all the active remote nodes to delete all their copy
877 * 3) if all remote nodes deleted their record copy, delete it locally
881 * Step 1:
882 * Send currently empty record copy to all active nodes for storing.
885 recs = talloc_zero(tmp_ctx, struct delete_records_list);
886 if (recs == NULL) {
887 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
888 ret = -1;
889 goto done;
891 recs->records = (struct ctdb_marshall_buffer *)
892 talloc_zero_size(recs,
893 offsetof(struct ctdb_marshall_buffer, data));
894 if (recs->records == NULL) {
895 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
896 ret = -1;
897 goto done;
899 recs->records->db_id = ctdb_db->db_id;
900 recs->vdata = vdata;
903 * traverse the tree of all records we want to delete and
904 * create a blob we can send to the other nodes.
906 * We call delete_marshall_traverse_first() to bump the
907 * records' RSNs in the database, to ensure we (as dmaster)
908 * keep the highest RSN of the records in the cluster.
910 trbt_traversearray32(vdata->delete_list, 1,
911 delete_marshall_traverse_first, recs);
913 indata.dsize = talloc_get_size(recs->records);
914 indata.dptr = (void *)recs->records;
916 for (i = 0; i < num_active_nodes; i++) {
917 struct ctdb_marshall_buffer *records;
918 struct ctdb_rec_data *rec;
919 int32_t res;
920 TDB_DATA outdata;
922 ret = ctdb_control(ctdb, active_nodes[i], 0,
923 CTDB_CONTROL_RECEIVE_RECORDS, 0,
924 indata, recs, &outdata, &res,
925 NULL, NULL);
926 if (ret != 0 || res != 0) {
927 DEBUG(DEBUG_ERR, ("Error storing record copies on "
928 "node %u: ret[%d] res[%d]\n",
929 active_nodes[i], ret, res));
930 ret = -1;
931 goto done;
935 * outdata contains the list of records coming back
936 * from the node: These are the records that the
937 * remote node could not store. We remove these from
938 * the list to process further.
940 records = (struct ctdb_marshall_buffer *)outdata.dptr;
941 rec = (struct ctdb_rec_data *)&records->data[0];
942 while (records->count-- > 1) {
943 TDB_DATA reckey, recdata;
944 struct ctdb_ltdb_header *rechdr;
945 struct delete_record_data *dd;
947 reckey.dptr = &rec->data[0];
948 reckey.dsize = rec->keylen;
949 recdata.dptr = &rec->data[reckey.dsize];
950 recdata.dsize = rec->datalen;
952 if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
953 DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
954 ret = -1;
955 goto done;
957 rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
958 recdata.dptr += sizeof(*rechdr);
959 recdata.dsize -= sizeof(*rechdr);
961 dd = (struct delete_record_data *)trbt_lookup32(
962 vdata->delete_list,
963 ctdb_hash(&reckey));
964 if (dd != NULL) {
966 * The other node could not store the record
967 * copy and it is the first node that failed.
968 * So we should remove it from the tree and
969 * update statistics.
971 talloc_free(dd);
972 vdata->delete_remote_error++;
973 vdata->delete_left--;
976 rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
980 if (vdata->delete_left == 0) {
981 goto success;
985 * Step 2:
986 * Send the remaining records to all active nodes for deletion.
988 * The lmaster's (i.e. our) copies of these records have been stored
989 * successfully on the other nodes.
993 * Create a marshall blob from the remaining list of records to delete.
996 talloc_free(recs->records);
998 recs->records = (struct ctdb_marshall_buffer *)
999 talloc_zero_size(recs,
1000 offsetof(struct ctdb_marshall_buffer, data));
1001 if (recs->records == NULL) {
1002 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1003 ret = -1;
1004 goto done;
1006 recs->records->db_id = ctdb_db->db_id;
1008 trbt_traversearray32(vdata->delete_list, 1,
1009 delete_marshall_traverse, recs);
1011 indata.dsize = talloc_get_size(recs->records);
1012 indata.dptr = (void *)recs->records;
1014 for (i = 0; i < num_active_nodes; i++) {
1015 struct ctdb_marshall_buffer *records;
1016 struct ctdb_rec_data *rec;
1017 int32_t res;
1018 TDB_DATA outdata;
1020 ret = ctdb_control(ctdb, active_nodes[i], 0,
1021 CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
1022 indata, recs, &outdata, &res,
1023 NULL, NULL);
1024 if (ret != 0 || res != 0) {
1025 DEBUG(DEBUG_ERR, ("Failed to delete records on "
1026 "node %u: ret[%d] res[%d]\n",
1027 active_nodes[i], ret, res));
1028 ret = -1;
1029 goto done;
1033 * outdata contains the list of records coming back
1034 * from the node: These are the records that the
1035 * remote node could not delete. We remove these from
1036 * the list to delete locally.
1038 records = (struct ctdb_marshall_buffer *)outdata.dptr;
1039 rec = (struct ctdb_rec_data *)&records->data[0];
1040 while (records->count-- > 1) {
1041 TDB_DATA reckey, recdata;
1042 struct ctdb_ltdb_header *rechdr;
1043 struct delete_record_data *dd;
1045 reckey.dptr = &rec->data[0];
1046 reckey.dsize = rec->keylen;
1047 recdata.dptr = &rec->data[reckey.dsize];
1048 recdata.dsize = rec->datalen;
1050 if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
1051 DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
1052 ret = -1;
1053 goto done;
1055 rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
1056 recdata.dptr += sizeof(*rechdr);
1057 recdata.dsize -= sizeof(*rechdr);
1059 dd = (struct delete_record_data *)trbt_lookup32(
1060 vdata->delete_list,
1061 ctdb_hash(&reckey));
1062 if (dd != NULL) {
1064 * The other node could not delete the
1065 * record and it is the first node that
1066 * failed. So we should remove it from
1067 * the tree and update statistics.
1069 talloc_free(dd);
1070 vdata->delete_remote_error++;
1071 vdata->delete_left--;
1074 rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
1078 if (vdata->delete_left == 0) {
1079 goto success;
1083 * Step 3:
1084 * Delete the remaining records locally.
1086 * These records have successfully been deleted on all
1087 * active remote nodes.
1090 trbt_traversearray32(vdata->delete_list, 1,
1091 delete_record_traverse, vdata);
1093 success:
1095 if (vdata->delete_left != 0) {
1096 DEBUG(DEBUG_ERR, (__location__ " Vacuum db[%s] error: "
1097 "there are %u records left for deletion after "
1098 "processing delete list\n",
1099 ctdb_db->db_name,
1100 (unsigned)vdata->delete_left));
1103 sum = vdata->delete_deleted
1104 + vdata->delete_skipped
1105 + vdata->delete_remote_error
1106 + vdata->delete_local_error
1107 + vdata->delete_left;
1109 if (vdata->delete_count != sum) {
1110 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
1111 "delete list counts for db[%s]: total[%u] != sum[%u]\n",
1112 ctdb_db->db_name, (unsigned)vdata->delete_count,
1113 (unsigned)sum));
1116 if (vdata->delete_count > 0) {
1117 DEBUG(DEBUG_INFO,
1118 (__location__
1119 " vacuum delete list statistics: "
1120 "db[%s] "
1121 "total[%u] "
1122 "del[%u] "
1123 "skip[%u] "
1124 "rem.err[%u] "
1125 "loc.err[%u] "
1126 "left[%u]\n",
1127 ctdb_db->db_name,
1128 (unsigned)vdata->delete_count,
1129 (unsigned)vdata->delete_deleted,
1130 (unsigned)vdata->delete_skipped,
1131 (unsigned)vdata->delete_remote_error,
1132 (unsigned)vdata->delete_local_error,
1133 (unsigned)vdata->delete_left));
1136 ret = 0;
1138 done:
1139 talloc_free(tmp_ctx);
1141 return ret;
1145 * initialize the vacuum_data
1147 static int ctdb_vacuum_init_vacuum_data(struct ctdb_db_context *ctdb_db,
1148 struct vacuum_data *vdata)
1150 int i;
1151 struct ctdb_context *ctdb = ctdb_db->ctdb;
1153 vdata->fast_added_to_delete_list = 0;
1154 vdata->fast_added_to_vacuum_fetch_list = 0;
1155 vdata->fast_deleted = 0;
1156 vdata->fast_skipped = 0;
1157 vdata->fast_error = 0;
1158 vdata->fast_total = 0;
1159 vdata->full_scheduled = 0;
1160 vdata->full_skipped = 0;
1161 vdata->full_error = 0;
1162 vdata->full_total = 0;
1163 vdata->delete_count = 0;
1164 vdata->delete_left = 0;
1165 vdata->delete_remote_error = 0;
1166 vdata->delete_local_error = 0;
1167 vdata->delete_skipped = 0;
1168 vdata->delete_deleted = 0;
1170 /* the list needs to be of length num_nodes */
1171 vdata->vacuum_fetch_list = talloc_zero_array(vdata,
1172 struct ctdb_marshall_buffer *,
1173 ctdb->num_nodes);
1174 if (vdata->vacuum_fetch_list == NULL) {
1175 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1176 return -1;
1178 for (i = 0; i < ctdb->num_nodes; i++) {
1179 vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
1180 talloc_zero_size(vdata->vacuum_fetch_list,
1181 offsetof(struct ctdb_marshall_buffer, data));
1182 if (vdata->vacuum_fetch_list[i] == NULL) {
1183 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1184 return -1;
1186 vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
1189 return 0;
1193 * Vacuum a DB:
1194 * - Always do the fast vacuuming run, which traverses
1195 * the in-memory delete queue: these records have been
1196 * scheduled for deletion.
1197 * - Only if explicitly requested, the database is traversed
1198 * in order to use the traditional heuristics on empty records
1199 * to trigger deletion.
1200 * This is done only every VacuumFastPathCount'th vacuuming run.
1202 * The traverse runs fill two lists:
1204 * - The delete_list:
1205 * This is the list of empty records the current
1206 * node is lmaster and dmaster for. These records are later
1207 * deleted first on other nodes and then locally.
1209 * The fast vacuuming run has a short cut for those records
1210 * that have never been migrated with data: these records
1211 * are immediately deleted locally, since they have left
1212 * no trace on other nodes.
1214 * - The vacuum_fetch lists
1215 * (one for each other lmaster node):
1216 * The records in this list are sent for deletion to
1217 * their lmaster in a bulk VACUUM_FETCH message.
1219 * The lmaster then migrates all these records to itself
1220 * so that they can be vacuumed there.
1222 * This executes in the child context.
1224 static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
1225 struct vacuum_data *vdata,
1226 bool full_vacuum_run)
1228 struct ctdb_context *ctdb = ctdb_db->ctdb;
1229 int ret, pnn;
1231 DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
1232 "%s db_id[0x%08x]\n",
1233 full_vacuum_run ? "full" : "fast",
1234 ctdb_db->db_name, ctdb_db->db_id));
1236 ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
1237 if (ret != 0) {
1238 DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
1239 return ret;
1242 pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
1243 if (pnn == -1) {
1244 DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
1245 return -1;
1248 ctdb->pnn = pnn;
1250 ret = ctdb_vacuum_init_vacuum_data(ctdb_db, vdata);
1251 if (ret != 0) {
1252 return ret;
1255 if (full_vacuum_run) {
1256 ret = ctdb_vacuum_traverse_db(ctdb_db, vdata);
1257 if (ret != 0) {
1258 return ret;
1262 ctdb_process_delete_queue(ctdb_db, vdata);
1264 ret = ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);
1265 if (ret != 0) {
1266 return ret;
1269 ret = ctdb_process_delete_list(ctdb_db, vdata);
1270 if (ret != 0) {
1271 return ret;
1274 /* this ensures we run our event queue */
1275 ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
1277 return 0;
1282 * traverse function for repacking
1284 static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
1285 void *private_data)
1287 struct vacuum_data *vdata = (struct vacuum_data *)private_data;
1289 if (vdata->vacuum) {
1290 uint32_t hash = ctdb_hash(&key);
1291 struct delete_record_data *kd;
1293 * check if we can ignore this record because it's in the delete_list
1295 kd = (struct delete_record_data *)trbt_lookup32(vdata->delete_list, hash);
1297 * there might be hash collisions so we have to compare the keys here to be sure
1299 if (kd && kd->key.dsize == key.dsize && memcmp(kd->key.dptr, key.dptr, key.dsize) == 0) {
1300 struct ctdb_ltdb_header *hdr = (struct ctdb_ltdb_header *)data.dptr;
1302 * we have to check if the record hasn't changed in the meantime in order to
1303 * safely remove it from the database
1305 if (data.dsize == sizeof(struct ctdb_ltdb_header) &&
1306 hdr->dmaster == kd->ctdb->pnn &&
1307 ctdb_lmaster(kd->ctdb, &(kd->key)) == kd->ctdb->pnn &&
1308 kd->hdr.rsn == hdr->rsn) {
1309 vdata->vacuumed++;
1310 return 0;
1314 if (tdb_store(vdata->dest_db, key, data, TDB_INSERT) != 0) {
1315 vdata->traverse_error = true;
1316 return -1;
1318 vdata->copied++;
1319 return 0;
1323 * repack a tdb
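 *
 * The repack runs inside a transaction:
 *   1) copy all live records into a temporary in-memory tdb, skipping
 *      the records that this vacuum run has marked for deletion,
 *   2) wipe the original database,
 *   3) copy everything back from the temporary tdb and commit.
 */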
1325 static int ctdb_repack_tdb(struct tdb_context *tdb, TALLOC_CTX *mem_ctx, struct vacuum_data *vdata)
1327 struct tdb_context *tmp_db;
1329 if (tdb_transaction_start(tdb) != 0) {
1330 DEBUG(DEBUG_ERR,(__location__ " Failed to start transaction\n"));
1331 return -1;
1334 tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb),
1335 TDB_INTERNAL|TDB_DISALLOW_NESTING,
1336 O_RDWR|O_CREAT, 0);
1337 if (tmp_db == NULL) {
1338 DEBUG(DEBUG_ERR,(__location__ " Failed to create tmp_db\n"));
1339 tdb_transaction_cancel(tdb);
1340 return -1;
1343 vdata->traverse_error = false;
1344 vdata->dest_db = tmp_db;
1345 vdata->vacuum = true;
1346 vdata->vacuumed = 0;
1347 vdata->copied = 0;
1350 * repack and vacuum on-the-fly by not writing the records that are
1351 * no longer needed
1353 if (tdb_traverse_read(tdb, repack_traverse, vdata) == -1) {
1354 DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying out\n"));
1355 tdb_transaction_cancel(tdb);
1356 tdb_close(tmp_db);
1357 return -1;
1360 DEBUG(DEBUG_INFO,(__location__ " %u records vacuumed\n", vdata->vacuumed));
1362 if (vdata->traverse_error) {
1363 DEBUG(DEBUG_ERR,(__location__ " Error during traversal\n"));
1364 tdb_transaction_cancel(tdb);
1365 tdb_close(tmp_db);
1366 return -1;
1369 if (tdb_wipe_all(tdb) != 0) {
1370 DEBUG(DEBUG_ERR,(__location__ " Failed to wipe database\n"));
1371 tdb_transaction_cancel(tdb);
1372 tdb_close(tmp_db);
1373 return -1;
1376 vdata->traverse_error = false;
1377 vdata->dest_db = tdb;
1378 vdata->vacuum = false;
1379 vdata->copied = 0;
1381 if (tdb_traverse_read(tmp_db, repack_traverse, vdata) == -1) {
1382 DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying back\n"));
1383 tdb_transaction_cancel(tdb);
1384 tdb_close(tmp_db);
1385 return -1;
1388 if (vdata->traverse_error) {
1389 DEBUG(DEBUG_ERR,(__location__ " Error during second traversal\n"));
1390 tdb_transaction_cancel(tdb);
1391 tdb_close(tmp_db);
1392 return -1;
1395 tdb_close(tmp_db);
1398 if (tdb_transaction_commit(tdb) != 0) {
1399 DEBUG(DEBUG_ERR,(__location__ " Failed to commit\n"));
1400 return -1;
1402 DEBUG(DEBUG_INFO,(__location__ " %u records copied\n", vdata->copied));
1404 return 0;
1408 * repack and vacuum a db
1409 * called from the child context
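 *
 * The vacuuming pass (ctdb_vacuum_db()) always runs; a repack is only
 * done when the repack_limit tunable is non-zero and the tdb freelist
 * has at least that many entries.
 */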
1411 static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
1412 TALLOC_CTX *mem_ctx,
1413 bool full_vacuum_run)
1415 uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
1416 const char *name = ctdb_db->db_name;
1417 int freelist_size = 0;
1418 struct vacuum_data *vdata;
1420 vdata = talloc_zero(mem_ctx, struct vacuum_data);
1421 if (vdata == NULL) {
1422 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1423 return -1;
1426 vdata->ctdb = ctdb_db->ctdb;
1427 vdata->repack_limit = repack_limit;
1428 vdata->delete_list = trbt_create(vdata, 0);
1429 vdata->ctdb_db = ctdb_db;
1430 if (vdata->delete_list == NULL) {
1431 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1432 talloc_free(vdata);
1433 return -1;
1436 vdata->start = timeval_current();
1439 * gather all records that can be deleted in vdata
1441 if (ctdb_vacuum_db(ctdb_db, vdata, full_vacuum_run) != 0) {
1442 DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
1445 if (repack_limit != 0) {
1446 freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
1447 if (freelist_size == -1) {
1448 DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
1449 talloc_free(vdata);
1450 return -1;
1455 * decide if a repack is necessary
1457 if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
1459 talloc_free(vdata);
1460 return 0;
1463 DEBUG(DEBUG_INFO,("Repacking %s with %u freelist entries and %u records to delete\n",
1464 name, freelist_size, vdata->delete_left));
1467 * repack and implicitly get rid of the records we can delete
1469 if (ctdb_repack_tdb(ctdb_db->ltdb->tdb, mem_ctx, vdata) != 0) {
1470 DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
1471 talloc_free(vdata);
1472 return -1;
1474 talloc_free(vdata);
1476 return 0;
1479 static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
1481 uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;
1483 return interval;
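/*
 * Destructor for a vacuum child context: kill the child if it is still
 * running, otherwise count a successful fast-path run, unlink the context
 * from the list of vacuumers and schedule the next vacuum event for this
 * database.
 */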
1486 static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
1488 double l = timeval_elapsed(&child_ctx->start_time);
1489 struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
1490 struct ctdb_context *ctdb = ctdb_db->ctdb;
1492 DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));
1494 if (child_ctx->child_pid != -1) {
1495 ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
1496 } else {
1497 /* Bump the number of successful fast-path runs. */
1498 child_ctx->vacuum_handle->fast_path_count++;
1501 DLIST_REMOVE(ctdb->vacuumers, child_ctx);
1503 event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
1504 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1505 ctdb_vacuum_event, child_ctx->vacuum_handle);
1507 return 0;
1511 * this event is generated when a vacuum child process times out
1513 static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
1514 struct timeval t, void *private_data)
1516 struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1518 DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));
1520 child_ctx->status = VACUUM_TIMEOUT;
1522 talloc_free(child_ctx);
1527 * this event is generated when a vacuum child process has completed
1529 static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
1530 uint16_t flags, void *private_data)
1532 struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1533 char c = 0;
1534 int ret;
1536 DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
1537 child_ctx->child_pid = -1;
1539 ret = read(child_ctx->fd[0], &c, 1);
1540 if (ret != 1 || c != 0) {
1541 child_ctx->status = VACUUM_ERROR;
1542 DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
1543 } else {
1544 child_ctx->status = VACUUM_OK;
1547 talloc_free(child_ctx);
1551 * this event is called every time we need to start a new vacuum process
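 *
 * Unless the node is in recovery or the database is frozen, this forks a
 * child that runs ctdb_vacuum_and_repack_db() and writes a single status
 * byte back over a pipe. The parent clears the in-memory delete_queue,
 * arms a timeout for the child and waits for the status byte.
 */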
1553 static void
1554 ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
1555 struct timeval t, void *private_data)
1557 struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
1558 struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
1559 struct ctdb_context *ctdb = ctdb_db->ctdb;
1560 struct ctdb_vacuum_child_context *child_ctx;
1561 struct tevent_fd *fde;
1562 int ret;
1564 /* we don't vacuum if we are in recovery mode, or the db is frozen */
1565 if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
1566 ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
1567 DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
1568 ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
1569 : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
1570 ? "freeze pending"
1571 : "frozen"));
1572 event_add_timed(ctdb->ev, vacuum_handle,
1573 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1574 ctdb_vacuum_event, vacuum_handle);
1575 return;
1578 child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
1579 if (child_ctx == NULL) {
1580 DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
1581 ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
1585 ret = pipe(child_ctx->fd);
1586 if (ret != 0) {
1587 talloc_free(child_ctx);
1588 DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
1589 event_add_timed(ctdb->ev, vacuum_handle,
1590 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1591 ctdb_vacuum_event, vacuum_handle);
1592 return;
1595 if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
1596 vacuum_handle->fast_path_count = 0;
1599 child_ctx->child_pid = ctdb_fork(ctdb);
1600 if (child_ctx->child_pid == (pid_t)-1) {
1601 close(child_ctx->fd[0]);
1602 close(child_ctx->fd[1]);
1603 talloc_free(child_ctx);
1604 DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
1605 event_add_timed(ctdb->ev, vacuum_handle,
1606 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1607 ctdb_vacuum_event, vacuum_handle);
1608 return;
1612 if (child_ctx->child_pid == 0) {
1613 char cc = 0;
1614 bool full_vacuum_run = false;
1615 close(child_ctx->fd[0]);
1617 DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
1618 ctdb_set_process_name("ctdb_vacuum");
1619 if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
1620 DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
1621 _exit(1);
1625 * repack the db
1627 if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
1628 (vacuum_handle->fast_path_count == 0))
1630 full_vacuum_run = true;
1632 cc = ctdb_vacuum_and_repack_db(ctdb_db, child_ctx,
1633 full_vacuum_run);
1635 write(child_ctx->fd[1], &cc, 1);
1636 _exit(0);
1639 set_close_on_exec(child_ctx->fd[0]);
1640 close(child_ctx->fd[1]);
1642 child_ctx->status = VACUUM_RUNNING;
1643 child_ctx->start_time = timeval_current();
1645 DLIST_ADD(ctdb->vacuumers, child_ctx);
1646 talloc_set_destructor(child_ctx, vacuum_child_destructor);
1649 * Clear the fastpath vacuuming list in the parent.
1651 talloc_free(ctdb_db->delete_queue);
1652 ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
1653 if (ctdb_db->delete_queue == NULL) {
1654 /* fatal here? ... */
1655 ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
1656 "in parent context. Shutting down\n");
1659 event_add_timed(ctdb->ev, child_ctx,
1660 timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
1661 vacuum_child_timeout, child_ctx);
1663 DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));
1665 fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
1666 EVENT_FD_READ, vacuum_child_handler, child_ctx);
1667 tevent_fd_set_auto_close(fde);
1669 vacuum_handle->child_ctx = child_ctx;
1670 child_ctx->vacuum_handle = vacuum_handle;
1673 void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
1675 /* Simply free them all. */
1676 while (ctdb->vacuumers) {
1677 DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
1678 ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
1679 (int)ctdb->vacuumers->child_pid));
1680 /* vacuum_child_destructor kills it, removes from list */
1681 talloc_free(ctdb->vacuumers);
1685 /* this function initializes the vacuuming context for a database
1686 * starts the vacuuming events
1688 int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
1690 if (ctdb_db->persistent != 0) {
1691 DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
1692 return 0;
1695 ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
1696 CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);
1698 ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
1699 ctdb_db->vacuum_handle->fast_path_count = 0;
1701 event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
1702 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1703 ctdb_vacuum_event, ctdb_db->vacuum_handle);
1705 return 0;
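/*
 * Remove a record from the database's delete queue, e.g. because it has
 * been stored with real data again. On a hash collision (same hash,
 * different key) the queued entry is left untouched.
 */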
1708 static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
1709 const struct ctdb_ltdb_header *hdr,
1710 const TDB_DATA key)
1712 struct delete_record_data *kd;
1713 uint32_t hash;
1715 hash = (uint32_t)ctdb_hash(&key);
1717 DEBUG(DEBUG_DEBUG, (__location__
1718 " remove_record_from_delete_queue: "
1719 "db[%s] "
1720 "db_id[0x%08x] "
1721 "key_hash[0x%08x] "
1722 "lmaster[%u] "
1723 "migrated_with_data[%s]\n",
1724 ctdb_db->db_name, ctdb_db->db_id,
1725 hash,
1726 ctdb_lmaster(ctdb_db->ctdb, &key),
1727 hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1729 kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1730 if (kd == NULL) {
1731 DEBUG(DEBUG_DEBUG, (__location__
1732 " remove_record_from_delete_queue: "
1733 "record not in queue (hash[0x%08x]).\n",
1734 hash));
1735 return;
1738 if ((kd->key.dsize != key.dsize) ||
1739 (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1741 DEBUG(DEBUG_DEBUG, (__location__
1742 " remove_record_from_delete_queue: "
1743 "hash collision for key with hash[0x%08x] "
1744 "in db[%s] - skipping\n",
1745 hash, ctdb_db->db_name));
1746 return;
1749 DEBUG(DEBUG_DEBUG, (__location__
1750 " remove_record_from_delete_queue: "
1751 "removing key with hash[0x%08x]\n",
1752 hash));
1754 talloc_free(kd);
1756 return;
1760 * Insert a record into the ctdb_db context's delete queue,
1761 * handling hash collisions.
1763 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
1764 const struct ctdb_ltdb_header *hdr,
1765 TDB_DATA key)
1767 struct delete_record_data *kd;
1768 uint32_t hash;
1769 int ret;
1771 hash = (uint32_t)ctdb_hash(&key);
1773 DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
1774 "db_id[0x%08x] "
1775 "key_hash[0x%08x] "
1776 "lmaster[%u] "
1777 "migrated_with_data[%s]\n",
1778 ctdb_db->db_name, ctdb_db->db_id,
1779 hash,
1780 ctdb_lmaster(ctdb_db->ctdb, &key),
1781 hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1783 kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1784 if (kd != NULL) {
1785 if ((kd->key.dsize != key.dsize) ||
1786 (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1788 DEBUG(DEBUG_INFO,
1789 (__location__ " schedule for deletion: "
1790 "hash collision for key hash [0x%08x]. "
1791 "Skipping the record.\n", hash));
1792 return 0;
1793 } else {
1794 DEBUG(DEBUG_DEBUG,
1795 (__location__ " schedule for deletion: "
1796 "updating entry for key with hash [0x%08x].\n",
1797 hash));
1801 ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
1802 ctdb_db->delete_queue,
1803 hdr, key);
1804 if (ret != 0) {
1805 DEBUG(DEBUG_INFO,
1806 (__location__ " schedule for deletion: error "
1807 "inserting key with hash [0x%08x] into delete queue\n",
1808 hash));
1809 return -1;
1812 return 0;
1816 * Schedule a record for deletion.
1817 * Called from the parent context.
1819 int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
1820 TDB_DATA indata)
1822 struct ctdb_control_schedule_for_deletion *dd;
1823 struct ctdb_db_context *ctdb_db;
1824 int ret;
1825 TDB_DATA key;
1827 dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;
1829 ctdb_db = find_ctdb_db(ctdb, dd->db_id);
1830 if (ctdb_db == NULL) {
1831 DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
1832 dd->db_id));
1833 return -1;
1836 key.dsize = dd->keylen;
1837 key.dptr = dd->key;
1839 ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);
1841 return ret;
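/*
 * Schedule a record for deletion from an arbitrary context:
 * in the main daemon the record is queued directly, otherwise a
 * SCHEDULE_FOR_DELETION control (flagged no-reply) is sent to the daemon.
 */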
1844 int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
1845 const struct ctdb_ltdb_header *hdr,
1846 TDB_DATA key)
1848 int ret;
1849 struct ctdb_control_schedule_for_deletion *dd;
1850 TDB_DATA indata;
1851 int32_t status;
1853 if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
1854 /* main daemon - directly queue */
1855 ret = insert_record_into_delete_queue(ctdb_db, hdr, key);
1857 return ret;
1860 /* if we don't have a connection to the daemon we cannot send
1861 a control, for example when called from a child process handling
1862 the update_record control.
1864 if (!ctdb_db->ctdb->can_send_controls) {
1865 return -1;
1869 /* child process: send the main daemon a control */
1870 indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
1871 indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
1872 if (indata.dptr == NULL) {
1873 DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
1874 return -1;
1876 dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
1877 dd->db_id = ctdb_db->db_id;
1878 dd->hdr = *hdr;
1879 dd->keylen = key.dsize;
1880 memcpy(dd->key, key.dptr, key.dsize);
1882 ret = ctdb_control(ctdb_db->ctdb,
1883 CTDB_CURRENT_NODE,
1884 ctdb_db->db_id,
1885 CTDB_CONTROL_SCHEDULE_FOR_DELETION,
1886 CTDB_CTRL_FLAG_NOREPLY, /* flags */
1887 indata,
1888 NULL, /* mem_ctx */
1889 NULL, /* outdata */
1890 &status,
1891 NULL, /* timeout : NULL == wait forever */
1892 NULL); /* error message */
1894 talloc_free(indata.dptr);
1896 if (ret != 0 || status != 0) {
1897 DEBUG(DEBUG_ERR, (__location__ " Error sending "
1898 "SCHEDULE_FOR_DELETION "
1899 "control.\n"));
1900 if (status != 0) {
1901 ret = -1;
1905 return ret;
1908 void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
1909 const struct ctdb_ltdb_header *hdr,
1910 const TDB_DATA key)
1912 if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
1914 * Only remove the record from the delete queue if called
1915 * in the main daemon.
1917 return;
1920 remove_record_from_delete_queue(ctdb_db, hdr, key);
1922 return;