ctdb-vacuum: remove unused counter vdata->total
[Samba/wip.git] / ctdb / server / ctdb_vacuum.c
blob ec0e8d0bea725c9c21da9984755a52764f4a0f6c
1 /*
2 ctdb vacuuming events
4 Copyright (C) Ronnie Sahlberg 2009
5 Copyright (C) Michael Adam 2010-2013
6 Copyright (C) Stefan Metzmacher 2010-2011
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "includes.h"
23 #include "tdb.h"
24 #include "system/network.h"
25 #include "system/filesys.h"
26 #include "system/dir.h"
27 #include "../include/ctdb_private.h"
28 #include "db_wrap.h"
29 #include "lib/util/dlinklist.h"
31 #include "../common/rb_tree.h"
33 #define TIMELIMIT() timeval_current_ofs(10, 0)
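/* Timeout used for the ctdb control calls issued from the vacuuming code below (10 seconds from now). */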
35 enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};
37 struct ctdb_vacuum_child_context {
38 struct ctdb_vacuum_child_context *next, *prev;
39 struct ctdb_vacuum_handle *vacuum_handle;
40 /* fd child writes status to */
41 int fd[2];
42 pid_t child_pid;
43 enum vacuum_child_status status;
44 struct timeval start_time;
47 struct ctdb_vacuum_handle {
48 struct ctdb_db_context *ctdb_db;
49 struct ctdb_vacuum_child_context *child_ctx;
50 uint32_t fast_path_count;
54 /* a list of records to possibly delete */
55 struct vacuum_data {
56 uint32_t repack_limit;
57 struct ctdb_context *ctdb;
58 struct ctdb_db_context *ctdb_db;
59 struct tdb_context *dest_db;
60 trbt_tree_t *delete_list;
61 uint32_t delete_count;
62 struct ctdb_marshall_buffer **vacuum_fetch_list;
63 struct timeval start;
64 bool traverse_error;
65 bool vacuum;
66 uint32_t vacuumed;
67 uint32_t copied;
68 uint32_t fast_added_to_vacuum_fetch_list;
69 uint32_t fast_added_to_delete_list;
70 uint32_t fast_deleted;
71 uint32_t fast_skipped;
72 uint32_t fast_error;
73 uint32_t fast_total;
74 uint32_t full_scheduled;
75 uint32_t full_skipped;
76 uint32_t full_error;
77 uint32_t full_total;
78 uint32_t delete_left;
79 uint32_t delete_remote_error;
80 uint32_t delete_local_error;
81 uint32_t delete_deleted;
82 uint32_t delete_skipped;
85 /* this structure contains the information for one record to be deleted */
86 struct delete_record_data {
87 struct ctdb_context *ctdb;
88 struct ctdb_db_context *ctdb_db;
89 struct ctdb_ltdb_header hdr;
90 TDB_DATA key;
91 uint8_t keydata[1];
94 struct delete_records_list {
95 struct ctdb_marshall_buffer *records;
96 struct vacuum_data *vdata;
99 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
100 const struct ctdb_ltdb_header *hdr,
101 TDB_DATA key);
104 * Store key and header in a tree, indexed by the key hash.
106 static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
107 struct ctdb_db_context *ctdb_db,
108 trbt_tree_t *tree,
109 const struct ctdb_ltdb_header *hdr,
110 TDB_DATA key)
112 struct delete_record_data *dd;
113 uint32_t hash;
114 size_t len;
116 len = offsetof(struct delete_record_data, keydata) + key.dsize;
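/* dd and its key live in a single talloc chunk: the key bytes are copied into the trailing keydata[] array below. */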
118 dd = (struct delete_record_data *)talloc_size(tree, len);
119 if (dd == NULL) {
120 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
121 return -1;
123 talloc_set_name_const(dd, "struct delete_record_data");
125 dd->ctdb = ctdb;
126 dd->ctdb_db = ctdb_db;
127 dd->key.dsize = key.dsize;
128 dd->key.dptr = dd->keydata;
129 memcpy(dd->keydata, key.dptr, key.dsize);
131 dd->hdr = *hdr;
133 hash = ctdb_hash(&key);
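/* Hash collisions are resolved by the callers: add_record_to_delete_list() skips colliding records and insert_record_into_delete_queue() compares the keys before inserting. */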
135 trbt_insert32(tree, hash, dd);
137 return 0;
140 static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
141 struct ctdb_ltdb_header *hdr)
143 struct ctdb_context *ctdb = vdata->ctdb;
144 struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
145 uint32_t hash;
146 int ret;
148 hash = ctdb_hash(&key);
150 if (trbt_lookup32(vdata->delete_list, hash)) {
151 DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
152 return 0;
155 ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
156 vdata->delete_list,
157 hdr, key);
158 if (ret != 0) {
159 return -1;
162 vdata->delete_count++;
164 return 0;
168 * Add a record to the list of records to be sent
169 * to their lmaster with VACUUM_FETCH.
171 static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
172 TDB_DATA key)
174 struct ctdb_context *ctdb = vdata->ctdb;
175 struct ctdb_rec_data *rec;
176 uint32_t lmaster;
177 size_t old_size;
178 struct ctdb_marshall_buffer *vfl;
180 lmaster = ctdb_lmaster(ctdb, &key);
182 vfl = vdata->vacuum_fetch_list[lmaster];
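/* There is one marshall buffer per node, indexed by the record's lmaster; the record is appended below by growing that buffer in place. */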
184 rec = ctdb_marshall_record(vfl, ctdb->pnn, key, NULL, tdb_null);
185 if (rec == NULL) {
186 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
187 vdata->traverse_error = true;
188 return -1;
191 old_size = talloc_get_size(vfl);
192 vfl = talloc_realloc_size(NULL, vfl, old_size + rec->length);
193 if (vfl == NULL) {
194 DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
195 vdata->traverse_error = true;
196 return -1;
198 vdata->vacuum_fetch_list[lmaster] = vfl;
200 vfl->count++;
201 memcpy(old_size+(uint8_t *)vfl, rec, rec->length);
202 talloc_free(rec);
204 return 0;
208 static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
209 struct timeval t, void *private_data);
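/* tdb_parse_record() callback: succeeds only if the stored data is exactly a ctdb_ltdb_header (i.e. an empty record) and copies that header out via private_data. */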
211 static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
213 struct ctdb_ltdb_header *header =
214 (struct ctdb_ltdb_header *)private_data;
216 if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
217 return -1;
220 *header = *(struct ctdb_ltdb_header *)data.dptr;
222 return 0;
226 * traverse function for gathering the records that can be deleted
228 static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
229 void *private_data)
231 struct vacuum_data *vdata = talloc_get_type(private_data,
232 struct vacuum_data);
233 struct ctdb_context *ctdb = vdata->ctdb;
234 struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
235 uint32_t lmaster;
236 struct ctdb_ltdb_header *hdr;
237 int res = 0;
239 vdata->full_total++;
241 lmaster = ctdb_lmaster(ctdb, &key);
242 if (lmaster >= ctdb->num_nodes) {
243 vdata->full_error++;
244 DEBUG(DEBUG_CRIT, (__location__
245 " lmaster[%u] >= ctdb->num_nodes[%u] for key"
246 " with hash[%u]!\n",
247 (unsigned)lmaster,
248 (unsigned)ctdb->num_nodes,
249 (unsigned)ctdb_hash(&key)));
250 return -1;
253 if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
254 /* it is not a deleted record */
255 vdata->full_skipped++;
256 return 0;
259 hdr = (struct ctdb_ltdb_header *)data.dptr;
261 if (hdr->dmaster != ctdb->pnn) {
262 vdata->full_skipped++;
263 return 0;
267 * Add the record to this process's delete_queue for processing
268 * in the subsequent traverse in the fast vacuum run.
270 res = insert_record_into_delete_queue(ctdb_db, hdr, key);
271 if (res != 0) {
272 vdata->full_error++;
273 } else {
274 vdata->full_scheduled++;
277 return 0;
281 * traverse the tree of records to delete and marshall them into
282 * a blob
284 static int delete_marshall_traverse(void *param, void *data)
286 struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
287 struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
288 struct ctdb_rec_data *rec;
289 size_t old_size;
291 rec = ctdb_marshall_record(dd, recs->records->db_id, dd->key, &dd->hdr, tdb_null);
292 if (rec == NULL) {
293 DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
294 return 0;
297 old_size = talloc_get_size(recs->records);
298 recs->records = talloc_realloc_size(NULL, recs->records, old_size + rec->length);
299 if (recs->records == NULL) {
300 DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
301 return 0;
303 recs->records->count++;
304 memcpy(old_size+(uint8_t *)(recs->records), rec, rec->length);
305 return 0;
309 * Variant of delete_marshall_traverse() that bumps the
310 * RSN of each traversed record in the database.
312 * This is needed to ensure that when rolling out our
313 * empty record copy before remote deletion, we as the
314 * record's dmaster keep a higher RSN than the non-dmaster
315 * nodes. This is needed to prevent old copies from
316 * being resurrected in recoveries.
318 static int delete_marshall_traverse_first(void *param, void *data)
320 struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
321 struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
322 struct ctdb_db_context *ctdb_db = dd->ctdb_db;
323 struct ctdb_context *ctdb = ctdb_db->ctdb;
324 struct ctdb_ltdb_header header;
325 uint32_t lmaster;
326 uint32_t hash = ctdb_hash(&(dd->key));
327 int res;
329 res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
330 if (res != 0) {
331 DEBUG(DEBUG_ERR,
332 (__location__ " Error getting chainlock on record with "
333 "key hash [0x%08x] on database db[%s].\n",
334 hash, ctdb_db->db_name));
335 recs->vdata->delete_skipped++;
336 talloc_free(dd);
337 return 0;
341 * Verify that the record is still empty, its RSN has not
342 * changed and that we are still its lmaster and dmaster.
345 res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
346 vacuum_record_parser, &header);
347 if (res != 0) {
348 goto skip;
351 if (header.flags & CTDB_REC_RO_FLAGS) {
352 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
353 "on database db[%s] has read-only flags. "
354 "skipping.\n",
355 hash, ctdb_db->db_name));
356 goto skip;
359 if (header.dmaster != ctdb->pnn) {
360 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
361 "on database db[%s] has been migrated away. "
362 "skipping.\n",
363 hash, ctdb_db->db_name));
364 goto skip;
367 if (header.rsn != dd->hdr.rsn) {
368 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
369 "on database db[%s] seems to have been "
370 "migrated away and back again (with empty "
371 "data). skipping.\n",
372 hash, ctdb_db->db_name));
373 goto skip;
376 lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
378 if (lmaster != ctdb->pnn) {
379 DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
380 "delete list (key hash [0x%08x], db[%s]). "
381 "Strange! skipping.\n",
382 hash, ctdb_db->db_name));
383 goto skip;
387 * Increment the record's RSN to ensure the dmaster (i.e. the current
388 * node) has the highest RSN of the record in the cluster.
389 * This is to prevent old record copies from resurrecting in recoveries
390 * if something should fail during the deletion process.
391 * Note that ctdb_ltdb_store_server() increments the RSN if called
392 * on the record's dmaster.
395 res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
396 if (res != 0) {
397 DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
398 "key hash [0x%08x] on database db[%s].\n",
399 hash, ctdb_db->db_name));
400 goto skip;
403 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
405 goto done;
407 skip:
408 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
410 recs->vdata->delete_skipped++;
411 talloc_free(dd);
412 dd = NULL;
414 done:
415 if (dd == NULL) {
416 return 0;
419 return delete_marshall_traverse(param, data);
423 * traverse function for the traversal of the delete_queue,
424 * the fast-path vacuuming list.
426 * - If the record has been migrated off the node
427 * or has been revived (filled with data) on the node,
428 * then skip the record.
430 * - If the current node is the record's lmaster and it is
431 * a record that has never been migrated with data, then
432 * delete the record from the local tdb.
434 * - If the current node is the record's lmaster and it has
435 * been migrated with data, then schedule it for the normal
436 * vacuuming procedure (i.e. add it to the delete_list).
438 * - If the current node is NOT the record's lmaster then
439 * add it to the list of records that are to be sent to
440 * the lmaster with the VACUUM_FETCH message.
442 static int delete_queue_traverse(void *param, void *data)
444 struct delete_record_data *dd =
445 talloc_get_type(data, struct delete_record_data);
446 struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
447 struct ctdb_db_context *ctdb_db = dd->ctdb_db;
448 struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
449 int res;
450 struct ctdb_ltdb_header header;
451 uint32_t lmaster;
452 uint32_t hash = ctdb_hash(&(dd->key));
454 vdata->fast_total++;
456 res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
457 if (res != 0) {
458 DEBUG(DEBUG_ERR,
459 (__location__ " Error getting chainlock on record with "
460 "key hash [0x%08x] on database db[%s].\n",
461 hash, ctdb_db->db_name));
462 vdata->fast_error++;
463 return 0;
466 res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
467 vacuum_record_parser, &header);
468 if (res != 0) {
469 goto skipped;
472 if (header.dmaster != ctdb->pnn) {
473 /* The record has been migrated off the node. Skip. */
474 goto skipped;
477 if (header.rsn != dd->hdr.rsn) {
479 * The record has been migrated off the node and back again.
480 * But not requeued for deletion. Skip it.
482 goto skipped;
486 * We are dmaster, and the record has no data, and it has
487 * not been migrated after it has been queued for deletion.
489 * At this stage, the record could still have been revived locally
490 * and last been written with empty data. This can only be
491 * fixed with the addition of an active or delete flag. (TODO)
494 lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
496 if (lmaster != ctdb->pnn) {
497 res = add_record_to_vacuum_fetch_list(vdata, dd->key);
499 if (res != 0) {
500 DEBUG(DEBUG_ERR,
501 (__location__ " Error adding record to list "
502 "of records to send to lmaster.\n"));
503 vdata->fast_error++;
504 } else {
505 vdata->fast_added_to_vacuum_fetch_list++;
507 goto done;
510 /* use header->flags or dd->hdr.flags ?? */
511 if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
512 res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);
514 if (res != 0) {
515 DEBUG(DEBUG_ERR,
516 (__location__ " Error adding record to list "
517 "of records for deletion on lmaster.\n"));
518 vdata->fast_error++;
519 } else {
520 vdata->fast_added_to_delete_list++;
522 } else {
523 res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);
525 if (res != 0) {
526 DEBUG(DEBUG_ERR,
527 (__location__ " Error deleting record with key "
528 "hash [0x%08x] from local data base db[%s].\n",
529 hash, ctdb_db->db_name));
530 vdata->fast_error++;
531 goto done;
534 DEBUG(DEBUG_DEBUG,
535 (__location__ " Deleted record with key hash "
536 "[0x%08x] from local data base db[%s].\n",
537 hash, ctdb_db->db_name));
538 vdata->fast_deleted++;
541 goto done;
543 skipped:
544 vdata->fast_skipped++;
546 done:
547 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
549 return 0;
553 * Delete the records that we are lmaster and dmaster for and
554 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
555 * control.
557 static int delete_record_traverse(void *param, void *data)
559 struct delete_record_data *dd =
560 talloc_get_type(data, struct delete_record_data);
561 struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
562 struct ctdb_db_context *ctdb_db = dd->ctdb_db;
563 struct ctdb_context *ctdb = ctdb_db->ctdb;
564 int res;
565 struct ctdb_ltdb_header header;
566 uint32_t lmaster;
567 uint32_t hash = ctdb_hash(&(dd->key));
569 res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
570 if (res != 0) {
571 DEBUG(DEBUG_ERR,
572 (__location__ " Error getting chainlock on record with "
573 "key hash [0x%08x] on database db[%s].\n",
574 hash, ctdb_db->db_name));
575 vdata->delete_local_error++;
576 vdata->delete_left--;
577 talloc_free(dd);
578 return 0;
582 * Verify that the record is still empty, its RSN has not
583 * changed and that we are still its lmaster and dmaster.
586 res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
587 vacuum_record_parser, &header);
588 if (res != 0) {
589 goto skip;
592 if (header.flags & CTDB_REC_RO_FLAGS) {
593 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
594 "on database db[%s] has read-only flags. "
595 "skipping.\n",
596 hash, ctdb_db->db_name));
597 goto skip;
600 if (header.dmaster != ctdb->pnn) {
601 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
602 "on database db[%s] has been migrated away. "
603 "skipping.\n",
604 hash, ctdb_db->db_name));
605 goto skip;
608 if (header.rsn != dd->hdr.rsn + 1) {
610 * The record has been migrated off the node and back again.
611 * But not requeued for deletion. Skip it.
612 * (Note that the first marshall traverse has bumped the RSN
613 * on disk.)
615 DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
616 "on database db[%s] seems to have been "
617 "migrated away and back again (with empty "
618 "data). skipping.\n",
619 hash, ctdb_db->db_name));
620 goto skip;
623 lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);
625 if (lmaster != ctdb->pnn) {
626 DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
627 "delete list (key hash [0x%08x], db[%s]). "
628 "Strange! skipping.\n",
629 hash, ctdb_db->db_name));
630 goto skip;
633 res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);
635 if (res != 0) {
636 DEBUG(DEBUG_ERR,
637 (__location__ " Error deleting record with key hash "
638 "[0x%08x] from local data base db[%s].\n",
639 hash, ctdb_db->db_name));
640 vdata->delete_local_error++;
641 goto done;
644 DEBUG(DEBUG_DEBUG,
645 (__location__ " Deleted record with key hash [0x%08x] from "
646 "local data base db[%s].\n", hash, ctdb_db->db_name));
648 vdata->delete_deleted++;
649 goto done;
651 skip:
652 vdata->delete_skipped++;
654 done:
655 tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);
657 talloc_free(dd);
658 vdata->delete_left--;
660 return 0;
664 * Traverse the delete_queue.
665 * Records are either deleted directly or filled
666 * into the delete list or the vacuum fetch lists
667 * for further processing.
669 static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
670 struct vacuum_data *vdata)
672 uint32_t sum;
673 int ret;
675 ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
676 delete_queue_traverse, vdata);
678 if (ret != 0) {
679 DEBUG(DEBUG_ERR, (__location__ " Error traversing "
680 "the delete queue.\n"));
683 sum = vdata->fast_deleted
684 + vdata->fast_skipped
685 + vdata->fast_error
686 + vdata->fast_added_to_delete_list
687 + vdata->fast_added_to_vacuum_fetch_list;
689 if (vdata->fast_total != sum) {
690 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
691 "counts for db[%s]: total[%u] != sum[%u]\n",
692 ctdb_db->db_name, (unsigned)vdata->fast_total,
693 (unsigned)sum));
696 if (vdata->fast_total > 0) {
697 DEBUG(DEBUG_INFO,
698 (__location__
699 " fast vacuuming delete_queue traverse statistics: "
700 "db[%s] "
701 "total[%u] "
702 "del[%u] "
703 "skp[%u] "
704 "err[%u] "
705 "adl[%u] "
706 "avf[%u]\n",
707 ctdb_db->db_name,
708 (unsigned)vdata->fast_total,
709 (unsigned)vdata->fast_deleted,
710 (unsigned)vdata->fast_skipped,
711 (unsigned)vdata->fast_error,
712 (unsigned)vdata->fast_added_to_delete_list,
713 (unsigned)vdata->fast_added_to_vacuum_fetch_list));
716 return;
720 * read-only traverse of the database, looking for records that
721 * might be able to be vacuumed.
723 * This is not done on every run, but only every
724 * VacuumFastPathCount-th run (a tunable).
726 static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
727 struct vacuum_data *vdata)
729 int ret;
731 ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
732 if (ret == -1 || vdata->traverse_error) {
733 DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
734 "'%s'\n", ctdb_db->db_name));
737 if (vdata->full_total > 0) {
738 DEBUG(DEBUG_INFO,
739 (__location__
740 " full vacuuming db traverse statistics: "
741 "db[%s] "
742 "total[%u] "
743 "skp[%u] "
744 "err[%u] "
745 "sched[%u]\n",
746 ctdb_db->db_name,
747 (unsigned)vdata->full_total,
748 (unsigned)vdata->full_skipped,
749 (unsigned)vdata->full_error,
750 (unsigned)vdata->full_scheduled));
753 return;
757 * Process the vacuum fetch lists:
758 * For records for which we are not the lmaster, tell the lmaster to
759 * fetch the record.
761 static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
762 struct vacuum_data *vdata)
764 int i;
765 struct ctdb_context *ctdb = ctdb_db->ctdb;
767 for (i = 0; i < ctdb->num_nodes; i++) {
768 TDB_DATA data;
769 struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];
771 if (ctdb->nodes[i]->pnn == ctdb->pnn) {
772 continue;
775 if (vfl->count == 0) {
776 continue;
779 DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
780 vfl->count, ctdb->nodes[i]->pnn,
781 ctdb_db->db_name));
783 data.dsize = talloc_get_size(vfl);
784 data.dptr = (void *)vfl;
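/* The marshall buffer itself is the message payload; the receiving lmaster migrates these records to itself so that they can be vacuumed there. */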
785 if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
786 CTDB_SRVID_VACUUM_FETCH,
787 data) != 0)
789 DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
790 "fetch message to %u\n",
791 ctdb->nodes[i]->pnn));
795 return;
799 * Process the delete list:
801 * This is the last step of vacuuming that consistently deletes
802 * those records that have been migrated with data and can hence
803 * not be deleted when leaving a node.
805 * In this step, the lmaster does the final deletion of those empty
806 * records that it is also dmaster for. It has usually received
807 * at least some of these records previously from the former dmasters
808 * with the vacuum fetch message.
810 * This last step is implemented as a 3-phase process to protect from
811 * races leading to data corruption:
813 * 1) Send the lmaster's copy to all other active nodes with the
814 * RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
815 * 2) Send the records that could successfully be stored remotely
816 * in step #1 to all active nodes with the TRY_DELETE_RECORDS
817 * control. The remote nodes delete their local copy.
818 * 3) The lmaster locally deletes its copies of all records that
819 * could successfully be deleted remotely in step #2.
821 static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
822 struct vacuum_data *vdata)
824 int ret, i;
825 struct ctdb_context *ctdb = ctdb_db->ctdb;
826 struct delete_records_list *recs;
827 TDB_DATA indata;
828 struct ctdb_node_map *nodemap;
829 uint32_t *active_nodes;
830 int num_active_nodes;
831 TALLOC_CTX *tmp_ctx;
832 uint32_t sum;
834 if (vdata->delete_count == 0) {
835 return;
838 tmp_ctx = talloc_new(vdata);
839 if (tmp_ctx == NULL) {
840 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
841 return;
844 vdata->delete_left = vdata->delete_count;
847 * get the list of currently active nodes
850 ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
851 CTDB_CURRENT_NODE,
852 tmp_ctx,
853 &nodemap);
854 if (ret != 0) {
855 DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
856 goto done;
859 active_nodes = list_of_active_nodes(ctdb, nodemap,
860 nodemap, /* talloc context */
861 false /* include self */);
862 /* yuck! ;-) */
863 num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);
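/* list_of_active_nodes() returns a talloc()ed array, so the element count can be recovered from the talloc size - hence the comment above. */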
866 * Now delete the records on all active nodes in a three-phase process:
867 * 1) send all active remote nodes the current empty copy with this
868 * node as DMASTER
869 * 2) if all nodes could store the new copy,
870 * tell all the active remote nodes to delete their copies
871 * 3) if all remote nodes deleted their record copy, delete it locally
875 * Step 1:
876 * Send currently empty record copy to all active nodes for storing.
879 recs = talloc_zero(tmp_ctx, struct delete_records_list);
880 if (recs == NULL) {
881 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
882 goto done;
884 recs->records = (struct ctdb_marshall_buffer *)
885 talloc_zero_size(recs,
886 offsetof(struct ctdb_marshall_buffer, data));
887 if (recs->records == NULL) {
888 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
889 goto done;
891 recs->records->db_id = ctdb_db->db_id;
892 recs->vdata = vdata;
895 * traverse the tree of all records we want to delete and
896 * create a blob we can send to the other nodes.
898 * We call delete_marshall_traverse_first() to bump the
899 * records' RSNs in the database, to ensure we (as dmaster)
900 * keep the highest RSN of the records in the cluster.
902 ret = trbt_traversearray32(vdata->delete_list, 1,
903 delete_marshall_traverse_first, recs);
904 if (ret != 0) {
905 DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
906 "delete list for first marshalling.\n"));
909 indata.dsize = talloc_get_size(recs->records);
910 indata.dptr = (void *)recs->records;
912 for (i = 0; i < num_active_nodes; i++) {
913 struct ctdb_marshall_buffer *records;
914 struct ctdb_rec_data *rec;
915 int32_t res;
916 TDB_DATA outdata;
918 ret = ctdb_control(ctdb, active_nodes[i], 0,
919 CTDB_CONTROL_RECEIVE_RECORDS, 0,
920 indata, recs, &outdata, &res,
921 NULL, NULL);
922 if (ret != 0 || res != 0) {
923 DEBUG(DEBUG_ERR, ("Error storing record copies on "
924 "node %u: ret[%d] res[%d]\n",
925 active_nodes[i], ret, res));
926 goto done;
930 * outdata contains the list of records coming back
931 * from the node: These are the records that the
932 * remote node could not store. We remove these from
933 * the list to process further.
935 records = (struct ctdb_marshall_buffer *)outdata.dptr;
936 rec = (struct ctdb_rec_data *)&records->data[0];
937 while (records->count-- > 1) {
938 TDB_DATA reckey, recdata;
939 struct ctdb_ltdb_header *rechdr;
940 struct delete_record_data *dd;
942 reckey.dptr = &rec->data[0];
943 reckey.dsize = rec->keylen;
944 recdata.dptr = &rec->data[reckey.dsize];
945 recdata.dsize = rec->datalen;
947 if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
948 DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
949 goto done;
951 rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
952 recdata.dptr += sizeof(*rechdr);
953 recdata.dsize -= sizeof(*rechdr);
955 dd = (struct delete_record_data *)trbt_lookup32(
956 vdata->delete_list,
957 ctdb_hash(&reckey));
958 if (dd != NULL) {
960 * The other node could not store the record
961 * copy and it is the first node that failed.
962 * So we should remove it from the tree and
963 * update statistics.
965 talloc_free(dd);
966 vdata->delete_remote_error++;
967 vdata->delete_left--;
970 rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
974 if (vdata->delete_left == 0) {
975 goto success;
979 * Step 2:
980 * Send the remaining records to all active nodes for deletion.
982 * The lmaster's (i.e. our) copies of these records have been stored
983 * successfully on the other nodes.
987 * Create a marshall blob from the remaining list of records to delete.
990 talloc_free(recs->records);
992 recs->records = (struct ctdb_marshall_buffer *)
993 talloc_zero_size(recs,
994 offsetof(struct ctdb_marshall_buffer, data));
995 if (recs->records == NULL) {
996 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
997 goto done;
999 recs->records->db_id = ctdb_db->db_id;
1001 ret = trbt_traversearray32(vdata->delete_list, 1,
1002 delete_marshall_traverse, recs);
1003 if (ret != 0) {
1004 DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
1005 "delete list for second marshalling.\n"));
1008 indata.dsize = talloc_get_size(recs->records);
1009 indata.dptr = (void *)recs->records;
1011 for (i = 0; i < num_active_nodes; i++) {
1012 struct ctdb_marshall_buffer *records;
1013 struct ctdb_rec_data *rec;
1014 int32_t res;
1015 TDB_DATA outdata;
1017 ret = ctdb_control(ctdb, active_nodes[i], 0,
1018 CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
1019 indata, recs, &outdata, &res,
1020 NULL, NULL);
1021 if (ret != 0 || res != 0) {
1022 DEBUG(DEBUG_ERR, ("Failed to delete records on "
1023 "node %u: ret[%d] res[%d]\n",
1024 active_nodes[i], ret, res));
1025 goto done;
1029 * outdata contains the list of records coming back
1030 * from the node: These are the records that the
1031 * remote node could not delete. We remove these from
1032 * the list to delete locally.
1034 records = (struct ctdb_marshall_buffer *)outdata.dptr;
1035 rec = (struct ctdb_rec_data *)&records->data[0];
1036 while (records->count-- > 1) {
1037 TDB_DATA reckey, recdata;
1038 struct ctdb_ltdb_header *rechdr;
1039 struct delete_record_data *dd;
1041 reckey.dptr = &rec->data[0];
1042 reckey.dsize = rec->keylen;
1043 recdata.dptr = &rec->data[reckey.dsize];
1044 recdata.dsize = rec->datalen;
1046 if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
1047 DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
1048 goto done;
1050 rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
1051 recdata.dptr += sizeof(*rechdr);
1052 recdata.dsize -= sizeof(*rechdr);
1054 dd = (struct delete_record_data *)trbt_lookup32(
1055 vdata->delete_list,
1056 ctdb_hash(&reckey));
1057 if (dd != NULL) {
1059 * The other node could not delete the
1060 * record and it is the first node that
1061 * failed. So we should remove it from
1062 * the tree and update statistics.
1064 talloc_free(dd);
1065 vdata->delete_remote_error++;
1066 vdata->delete_left--;
1069 rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
1073 if (vdata->delete_left == 0) {
1074 goto success;
1078 * Step 3:
1079 * Delete the remaining records locally.
1081 * These records have successfully been deleted on all
1082 * active remote nodes.
1085 ret = trbt_traversearray32(vdata->delete_list, 1,
1086 delete_record_traverse, vdata);
1087 if (ret != 0) {
1088 DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
1089 "delete list for deletion.\n"));
1092 success:
1094 if (vdata->delete_left != 0) {
1095 DEBUG(DEBUG_ERR, (__location__ " Vacuum db[%s] error: "
1096 "there are %u records left for deletion after "
1097 "processing delete list\n",
1098 ctdb_db->db_name,
1099 (unsigned)vdata->delete_left));
1102 sum = vdata->delete_deleted
1103 + vdata->delete_skipped
1104 + vdata->delete_remote_error
1105 + vdata->delete_local_error
1106 + vdata->delete_left;
1108 if (vdata->delete_count != sum) {
1109 DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
1110 "delete list counts for db[%s]: total[%u] != sum[%u]\n",
1111 ctdb_db->db_name, (unsigned)vdata->delete_count,
1112 (unsigned)sum));
1115 if (vdata->delete_count > 0) {
1116 DEBUG(DEBUG_INFO,
1117 (__location__
1118 " vacuum delete list statistics: "
1119 "db[%s] "
1120 "total[%u] "
1121 "del[%u] "
1122 "skip[%u] "
1123 "rem.err[%u] "
1124 "loc.err[%u] "
1125 "left[%u]\n",
1126 ctdb_db->db_name,
1127 (unsigned)vdata->delete_count,
1128 (unsigned)vdata->delete_deleted,
1129 (unsigned)vdata->delete_skipped,
1130 (unsigned)vdata->delete_remote_error,
1131 (unsigned)vdata->delete_local_error,
1132 (unsigned)vdata->delete_left));
1135 done:
1136 talloc_free(tmp_ctx);
1138 return;
1142 * initialize the vacuum_data
1144 static int ctdb_vacuum_init_vacuum_data(struct ctdb_db_context *ctdb_db,
1145 struct vacuum_data *vdata)
1147 int i;
1148 struct ctdb_context *ctdb = ctdb_db->ctdb;
1150 vdata->fast_added_to_delete_list = 0;
1151 vdata->fast_added_to_vacuum_fetch_list = 0;
1152 vdata->fast_deleted = 0;
1153 vdata->fast_skipped = 0;
1154 vdata->fast_error = 0;
1155 vdata->fast_total = 0;
1156 vdata->full_scheduled = 0;
1157 vdata->full_skipped = 0;
1158 vdata->full_error = 0;
1159 vdata->full_total = 0;
1160 vdata->delete_count = 0;
1161 vdata->delete_left = 0;
1162 vdata->delete_remote_error = 0;
1163 vdata->delete_local_error = 0;
1164 vdata->delete_skipped = 0;
1165 vdata->delete_deleted = 0;
1167 /* the list needs to be of length num_nodes */
1168 vdata->vacuum_fetch_list = talloc_zero_array(vdata,
1169 struct ctdb_marshall_buffer *,
1170 ctdb->num_nodes);
1171 if (vdata->vacuum_fetch_list == NULL) {
1172 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1173 return -1;
1175 for (i = 0; i < ctdb->num_nodes; i++) {
1176 vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
1177 talloc_zero_size(vdata->vacuum_fetch_list,
1178 offsetof(struct ctdb_marshall_buffer, data));
1179 if (vdata->vacuum_fetch_list[i] == NULL) {
1180 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1181 return -1;
1183 vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
1186 return 0;
1190 * Vacuum a DB:
1191 * - Always do the fast vacuuming run, which traverses
1192 * the in-memory delete queue: these records have been
1193 * scheduled for deletion.
1194 * - Only if explicitly requested, the database is traversed
1195 * in order to use the traditional heuristics on empty records
1196 * to trigger deletion.
1197 * This is done only every VacuumFastPathCount'th vacuuming run.
1199 * The traverse runs fill two lists:
1201 * - The delete_list:
1202 * This is the list of empty records the current
1203 * node is lmaster and dmaster for. These records are later
1204 * deleted first on other nodes and then locally.
1206 * The fast vacuuming run has a short cut for those records
1207 * that have never been migrated with data: these records
1208 * are immediately deleted locally, since they have left
1209 * no trace on other nodes.
1211 * - The vacuum_fetch lists
1212 * (one for each other lmaster node):
1213 * The records in this list are sent for deletion to
1214 * their lmaster in a bulk VACUUM_FETCH message.
1216 * The lmaster then migrates all these records to itself
1217 * so that they can be vacuumed there.
1219 * This executes in the child context.
1221 static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
1222 struct vacuum_data *vdata,
1223 bool full_vacuum_run)
1225 struct ctdb_context *ctdb = ctdb_db->ctdb;
1226 int ret, pnn;
1228 DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
1229 "%s db_id[0x%08x]\n",
1230 full_vacuum_run ? "full" : "fast",
1231 ctdb_db->db_name, ctdb_db->db_id));
1233 ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
1234 if (ret != 0) {
1235 DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
1236 return ret;
1239 pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
1240 if (pnn == -1) {
1241 DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
1242 return -1;
1245 ctdb->pnn = pnn;
1247 ret = ctdb_vacuum_init_vacuum_data(ctdb_db, vdata);
1248 if (ret != 0) {
1249 return ret;
1252 if (full_vacuum_run) {
1253 ctdb_vacuum_traverse_db(ctdb_db, vdata);
1256 ctdb_process_delete_queue(ctdb_db, vdata);
1258 ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);
1260 ctdb_process_delete_list(ctdb_db, vdata);
1262 /* this ensures we run our event queue */
1263 ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
1265 return 0;
1270 * traverse function for repacking
1272 static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
1273 void *private_data)
1275 struct vacuum_data *vdata = (struct vacuum_data *)private_data;
1277 if (vdata->vacuum) {
1278 uint32_t hash = ctdb_hash(&key);
1279 struct delete_record_data *kd;
1281 * check if we can ignore this record because it's in the delete_list
1283 kd = (struct delete_record_data *)trbt_lookup32(vdata->delete_list, hash);
1285 * there might be hash collisions so we have to compare the keys here to be sure
1287 if (kd && kd->key.dsize == key.dsize && memcmp(kd->key.dptr, key.dptr, key.dsize) == 0) {
1288 struct ctdb_ltdb_header *hdr = (struct ctdb_ltdb_header *)data.dptr;
1290 * we have to check if the record hasn't changed in the meantime in order to
1291 * safely remove it from the database
1293 if (data.dsize == sizeof(struct ctdb_ltdb_header) &&
1294 hdr->dmaster == kd->ctdb->pnn &&
1295 ctdb_lmaster(kd->ctdb, &(kd->key)) == kd->ctdb->pnn &&
1296 kd->hdr.rsn == hdr->rsn) {
1297 vdata->vacuumed++;
1298 return 0;
1302 if (tdb_store(vdata->dest_db, key, data, TDB_INSERT) != 0) {
1303 vdata->traverse_error = true;
1304 return -1;
1306 vdata->copied++;
1307 return 0;
1311 * repack a tdb
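 * (copy all live records into a temporary in-memory tdb, wipe the
 * original and copy them back, all within a transaction on the
 * original tdb)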
1313 static int ctdb_repack_tdb(struct tdb_context *tdb, TALLOC_CTX *mem_ctx, struct vacuum_data *vdata)
1315 struct tdb_context *tmp_db;
1317 if (tdb_transaction_start(tdb) != 0) {
1318 DEBUG(DEBUG_ERR,(__location__ " Failed to start transaction\n"));
1319 return -1;
1322 tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb),
1323 TDB_INTERNAL|TDB_DISALLOW_NESTING,
1324 O_RDWR|O_CREAT, 0);
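/* TDB_INTERNAL creates a purely in-memory tdb, which serves as the staging copy for the repack. */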
1325 if (tmp_db == NULL) {
1326 DEBUG(DEBUG_ERR,(__location__ " Failed to create tmp_db\n"));
1327 tdb_transaction_cancel(tdb);
1328 return -1;
1331 vdata->traverse_error = false;
1332 vdata->dest_db = tmp_db;
1333 vdata->vacuum = true;
1334 vdata->vacuumed = 0;
1335 vdata->copied = 0;
1338 * repack and vacuum on-the-fly by not writing the records that are
1339 * no longer needed
1341 if (tdb_traverse_read(tdb, repack_traverse, vdata) == -1) {
1342 DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying out\n"));
1343 tdb_transaction_cancel(tdb);
1344 tdb_close(tmp_db);
1345 return -1;
1348 DEBUG(DEBUG_INFO,(__location__ " %u records vacuumed\n", vdata->vacuumed));
1350 if (vdata->traverse_error) {
1351 DEBUG(DEBUG_ERR,(__location__ " Error during traversal\n"));
1352 tdb_transaction_cancel(tdb);
1353 tdb_close(tmp_db);
1354 return -1;
1357 if (tdb_wipe_all(tdb) != 0) {
1358 DEBUG(DEBUG_ERR,(__location__ " Failed to wipe database\n"));
1359 tdb_transaction_cancel(tdb);
1360 tdb_close(tmp_db);
1361 return -1;
1364 vdata->traverse_error = false;
1365 vdata->dest_db = tdb;
1366 vdata->vacuum = false;
1367 vdata->copied = 0;
1369 if (tdb_traverse_read(tmp_db, repack_traverse, vdata) == -1) {
1370 DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying back\n"));
1371 tdb_transaction_cancel(tdb);
1372 tdb_close(tmp_db);
1373 return -1;
1376 if (vdata->traverse_error) {
1377 DEBUG(DEBUG_ERR,(__location__ " Error during second traversal\n"));
1378 tdb_transaction_cancel(tdb);
1379 tdb_close(tmp_db);
1380 return -1;
1383 tdb_close(tmp_db);
1386 if (tdb_transaction_commit(tdb) != 0) {
1387 DEBUG(DEBUG_ERR,(__location__ " Failed to commit\n"));
1388 return -1;
1390 DEBUG(DEBUG_INFO,(__location__ " %u records copied\n", vdata->copied));
1392 return 0;
1396 * repack and vacuum a db
1397 * called from the child context
1399 static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
1400 TALLOC_CTX *mem_ctx,
1401 bool full_vacuum_run)
1403 uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
1404 const char *name = ctdb_db->db_name;
1405 int freelist_size = 0;
1406 struct vacuum_data *vdata;
1408 vdata = talloc_zero(mem_ctx, struct vacuum_data);
1409 if (vdata == NULL) {
1410 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1411 return -1;
1414 vdata->ctdb = ctdb_db->ctdb;
1415 vdata->repack_limit = repack_limit;
1416 vdata->delete_list = trbt_create(vdata, 0);
1417 vdata->ctdb_db = ctdb_db;
1418 if (vdata->delete_list == NULL) {
1419 DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
1420 talloc_free(vdata);
1421 return -1;
1424 vdata->start = timeval_current();
1427 * gather all records that can be deleted in vdata
1429 if (ctdb_vacuum_db(ctdb_db, vdata, full_vacuum_run) != 0) {
1430 DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
1433 if (repack_limit != 0) {
1434 freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
1435 if (freelist_size == -1) {
1436 DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
1437 talloc_free(vdata);
1438 return -1;
1443 * decide if a repack is necessary
1445 if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
1447 talloc_free(vdata);
1448 return 0;
1451 DEBUG(DEBUG_INFO,("Repacking %s with %u freelist entries and %u records to delete\n",
1452 name, freelist_size, vdata->delete_left));
1455 * repack and implicitly get rid of the records we can delete
1457 if (ctdb_repack_tdb(ctdb_db->ltdb->tdb, mem_ctx, vdata) != 0) {
1458 DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
1459 talloc_free(vdata);
1460 return -1;
1462 talloc_free(vdata);
1464 return 0;
1467 static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
1469 uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;
1471 return interval;
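/* Destructor for the vacuum child context: kills a still-running child, bumps the fast-path counter on a clean finish, unlinks the vacuumer from ctdb->vacuumers and schedules the next vacuum event. */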
1474 static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
1476 double l = timeval_elapsed(&child_ctx->start_time);
1477 struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
1478 struct ctdb_context *ctdb = ctdb_db->ctdb;
1480 DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));
1482 if (child_ctx->child_pid != -1) {
1483 ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
1484 } else {
1485 /* Bump the number of successful fast-path runs. */
1486 child_ctx->vacuum_handle->fast_path_count++;
1489 DLIST_REMOVE(ctdb->vacuumers, child_ctx);
1491 event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
1492 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1493 ctdb_vacuum_event, child_ctx->vacuum_handle);
1495 return 0;
1499 * this event is generated when a vacuum child process times out
1501 static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
1502 struct timeval t, void *private_data)
1504 struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1506 DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));
1508 child_ctx->status = VACUUM_TIMEOUT;
1510 talloc_free(child_ctx);
1515 * this event is generated when a vacuum child process has completed
1517 static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
1518 uint16_t flags, void *private_data)
1520 struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
1521 char c = 0;
1522 int ret;
1524 DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
1525 child_ctx->child_pid = -1;
1527 ret = read(child_ctx->fd[0], &c, 1);
1528 if (ret != 1 || c != 0) {
1529 child_ctx->status = VACUUM_ERROR;
1530 DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
1531 } else {
1532 child_ctx->status = VACUUM_OK;
1535 talloc_free(child_ctx);
1539 * this event is called every time we need to start a new vacuum process
1541 static void
1542 ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
1543 struct timeval t, void *private_data)
1545 struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
1546 struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
1547 struct ctdb_context *ctdb = ctdb_db->ctdb;
1548 struct ctdb_vacuum_child_context *child_ctx;
1549 struct tevent_fd *fde;
1550 int ret;
1552 /* we don't vacuum if we are in recovery mode, or if the db is frozen */
1553 if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
1554 ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
1555 DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
1556 ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
1557 : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
1558 ? "freeze pending"
1559 : "frozen"));
1560 event_add_timed(ctdb->ev, vacuum_handle,
1561 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1562 ctdb_vacuum_event, vacuum_handle);
1563 return;
1566 child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
1567 if (child_ctx == NULL) {
1568 DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
1569 ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
1573 ret = pipe(child_ctx->fd);
1574 if (ret != 0) {
1575 talloc_free(child_ctx);
1576 DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
1577 event_add_timed(ctdb->ev, vacuum_handle,
1578 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1579 ctdb_vacuum_event, vacuum_handle);
1580 return;
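/* A fast_path_count of 0 makes the child below do a full vacuum run (when VacuumFastPathCount > 0); once the counter exceeds the tunable it is reset here, so a full run is triggered periodically. */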
1583 if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
1584 vacuum_handle->fast_path_count = 0;
1587 child_ctx->child_pid = ctdb_fork(ctdb);
1588 if (child_ctx->child_pid == (pid_t)-1) {
1589 close(child_ctx->fd[0]);
1590 close(child_ctx->fd[1]);
1591 talloc_free(child_ctx);
1592 DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
1593 event_add_timed(ctdb->ev, vacuum_handle,
1594 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1595 ctdb_vacuum_event, vacuum_handle);
1596 return;
1600 if (child_ctx->child_pid == 0) {
1601 char cc = 0;
1602 bool full_vacuum_run = false;
1603 close(child_ctx->fd[0]);
1605 DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
1606 ctdb_set_process_name("ctdb_vacuum");
1607 if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
1608 DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
1609 _exit(1);
1613 * repack the db
1615 if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
1616 (vacuum_handle->fast_path_count == 0))
1618 full_vacuum_run = true;
1620 cc = ctdb_vacuum_and_repack_db(ctdb_db, child_ctx,
1621 full_vacuum_run);
1623 write(child_ctx->fd[1], &cc, 1);
1624 _exit(0);
1627 set_close_on_exec(child_ctx->fd[0]);
1628 close(child_ctx->fd[1]);
1630 child_ctx->status = VACUUM_RUNNING;
1631 child_ctx->start_time = timeval_current();
1633 DLIST_ADD(ctdb->vacuumers, child_ctx);
1634 talloc_set_destructor(child_ctx, vacuum_child_destructor);
1637 * Clear the fastpath vacuuming list in the parent.
1639 talloc_free(ctdb_db->delete_queue);
1640 ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
1641 if (ctdb_db->delete_queue == NULL) {
1642 /* fatal here? ... */
1643 ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
1644 "in parent context. Shutting down\n");
1647 event_add_timed(ctdb->ev, child_ctx,
1648 timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
1649 vacuum_child_timeout, child_ctx);
1651 DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));
1653 fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
1654 EVENT_FD_READ, vacuum_child_handler, child_ctx);
1655 tevent_fd_set_auto_close(fde);
1657 vacuum_handle->child_ctx = child_ctx;
1658 child_ctx->vacuum_handle = vacuum_handle;
1661 void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
1663 /* Simply free them all. */
1664 while (ctdb->vacuumers) {
1665 DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
1666 ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
1667 (int)ctdb->vacuumers->child_pid));
1668 /* vacuum_child_destructor kills it, removes from list */
1669 talloc_free(ctdb->vacuumers);
1673 /* this function initializes the vacuuming context for a database
1674 * and starts the vacuuming events
1676 int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
1678 if (ctdb_db->persistent != 0) {
1679 DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
1680 return 0;
1683 ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
1684 CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);
1686 ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
1687 ctdb_db->vacuum_handle->fast_path_count = 0;
1689 event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
1690 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
1691 ctdb_vacuum_event, ctdb_db->vacuum_handle);
1693 return 0;
1696 static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
1697 const struct ctdb_ltdb_header *hdr,
1698 const TDB_DATA key)
1700 struct delete_record_data *kd;
1701 uint32_t hash;
1703 hash = (uint32_t)ctdb_hash(&key);
1705 DEBUG(DEBUG_DEBUG, (__location__
1706 " remove_record_from_delete_queue: "
1707 "db[%s] "
1708 "db_id[0x%08x] "
1709 "key_hash[0x%08x] "
1710 "lmaster[%u] "
1711 "migrated_with_data[%s]\n",
1712 ctdb_db->db_name, ctdb_db->db_id,
1713 hash,
1714 ctdb_lmaster(ctdb_db->ctdb, &key),
1715 hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1717 kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1718 if (kd == NULL) {
1719 DEBUG(DEBUG_DEBUG, (__location__
1720 " remove_record_from_delete_queue: "
1721 "record not in queue (hash[0x%08x])\n.",
1722 hash));
1723 return;
1726 if ((kd->key.dsize != key.dsize) ||
1727 (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1729 DEBUG(DEBUG_DEBUG, (__location__
1730 " remove_record_from_delete_queue: "
1731 "hash collision for key with hash[0x%08x] "
1732 "in db[%s] - skipping\n",
1733 hash, ctdb_db->db_name));
1734 return;
1737 DEBUG(DEBUG_DEBUG, (__location__
1738 " remove_record_from_delete_queue: "
1739 "removing key with hash[0x%08x]\n",
1740 hash));
1742 talloc_free(kd);
1744 return;
1748 * Insert a record into the ctdb_db context's delete queue,
1749 * handling hash collisions.
1751 static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
1752 const struct ctdb_ltdb_header *hdr,
1753 TDB_DATA key)
1755 struct delete_record_data *kd;
1756 uint32_t hash;
1757 int ret;
1759 hash = (uint32_t)ctdb_hash(&key);
1761 DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
1762 "db_id[0x%08x] "
1763 "key_hash[0x%08x] "
1764 "lmaster[%u] "
1765 "migrated_with_data[%s]\n",
1766 ctdb_db->db_name, ctdb_db->db_id,
1767 hash,
1768 ctdb_lmaster(ctdb_db->ctdb, &key),
1769 hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));
1771 kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
1772 if (kd != NULL) {
1773 if ((kd->key.dsize != key.dsize) ||
1774 (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
1776 DEBUG(DEBUG_INFO,
1777 (__location__ " schedule for deletion: "
1778 "hash collision for key hash [0x%08x]. "
1779 "Skipping the record.\n", hash));
1780 return 0;
1781 } else {
1782 DEBUG(DEBUG_DEBUG,
1783 (__location__ " schedule for deletion: "
1784 "updating entry for key with hash [0x%08x].\n",
1785 hash));
1789 ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
1790 ctdb_db->delete_queue,
1791 hdr, key);
1792 if (ret != 0) {
1793 DEBUG(DEBUG_INFO,
1794 (__location__ " schedule for deletion: error "
1795 "inserting key with hash [0x%08x] into delete queue\n",
1796 hash));
1797 return -1;
1800 return 0;
1804 * Schedule a record for deletion.
1805 * Called from the parent context.
1807 int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
1808 TDB_DATA indata)
1810 struct ctdb_control_schedule_for_deletion *dd;
1811 struct ctdb_db_context *ctdb_db;
1812 int ret;
1813 TDB_DATA key;
1815 dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;
1817 ctdb_db = find_ctdb_db(ctdb, dd->db_id);
1818 if (ctdb_db == NULL) {
1819 DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
1820 dd->db_id));
1821 return -1;
1824 key.dsize = dd->keylen;
1825 key.dptr = dd->key;
1827 ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);
1829 return ret;
1832 int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
1833 const struct ctdb_ltdb_header *hdr,
1834 TDB_DATA key)
1836 int ret;
1837 struct ctdb_control_schedule_for_deletion *dd;
1838 TDB_DATA indata;
1839 int32_t status;
1841 if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
1842 /* main daemon - directly queue */
1843 ret = insert_record_into_delete_queue(ctdb_db, hdr, key);
1845 return ret;
1848 /* if we don't have a connection to the daemon we cannot send
1849 a control, e.g. when called from an update_record control child
1850 process.
1852 if (!ctdb_db->ctdb->can_send_controls) {
1853 return -1;
1857 /* child process: send the main daemon a control */
1858 indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
1859 indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
1860 if (indata.dptr == NULL) {
1861 DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
1862 return -1;
1864 dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
1865 dd->db_id = ctdb_db->db_id;
1866 dd->hdr = *hdr;
1867 dd->keylen = key.dsize;
1868 memcpy(dd->key, key.dptr, key.dsize);
1870 ret = ctdb_control(ctdb_db->ctdb,
1871 CTDB_CURRENT_NODE,
1872 ctdb_db->db_id,
1873 CTDB_CONTROL_SCHEDULE_FOR_DELETION,
1874 CTDB_CTRL_FLAG_NOREPLY, /* flags */
1875 indata,
1876 NULL, /* mem_ctx */
1877 NULL, /* outdata */
1878 &status,
1879 NULL, /* timeout : NULL == wait forever */
1880 NULL); /* error message */
1882 talloc_free(indata.dptr);
1884 if (ret != 0 || status != 0) {
1885 DEBUG(DEBUG_ERR, (__location__ " Error sending "
1886 "SCHEDULE_FOR_DELETION "
1887 "control.\n"));
1888 if (status != 0) {
1889 ret = -1;
1893 return ret;
1896 void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
1897 const struct ctdb_ltdb_header *hdr,
1898 const TDB_DATA key)
1900 if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
1902 * Only remove the record from the delete queue if called
1903 * in the main daemon.
1905 return;
1908 remove_record_from_delete_queue(ctdb_db, hdr, key);
1910 return;