/*
   ctdb vacuuming events

   Copyright (C) Ronnie Sahlberg  2009
   Copyright (C) Michael Adam 2010-2013
   Copyright (C) Stefan Metzmacher 2010-2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/network.h"
#include "system/filesys.h"
#include "system/time.h"

#include <talloc.h>
#include <tevent.h>

#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/util/dlinklist.h"
#include "lib/util/debug.h"
#include "lib/util/samba_util.h"
#include "lib/util/sys_rw.h"
#include "lib/util/util_process.h"

#include "ctdb_private.h"
#include "ctdb_client.h"

#include "common/rb_tree.h"
#include "common/common.h"
#include "common/logging.h"
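
/* Timeout used for the ctdb control calls issued by the vacuuming code. */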
#define TIMELIMIT() timeval_current_ofs(10, 0)
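
/* Status of a vacuum child process, as tracked by the main daemon. */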
enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};

struct ctdb_vacuum_child_context {
	struct ctdb_vacuum_child_context *next, *prev;
	struct ctdb_vacuum_handle *vacuum_handle;
	/* fd child writes status to */
	int fd[2];
	pid_t child_pid;
	enum vacuum_child_status status;
	struct timeval start_time;
};

struct ctdb_vacuum_handle {
	struct ctdb_db_context *ctdb_db;
	struct ctdb_vacuum_child_context *child_ctx;
	uint32_t fast_path_count;
};

/* a list of records to possibly delete */
struct vacuum_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tdb_context *dest_db;
	trbt_tree_t *delete_list;
	struct ctdb_marshall_buffer **vacuum_fetch_list;
	struct timeval start;
	bool traverse_error;
	bool vacuum;
	struct {
		struct {
			uint32_t added_to_vacuum_fetch_list;
			uint32_t added_to_delete_list;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} delete_queue;
		struct {
			uint32_t scheduled;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} db_traverse;
		struct {
			uint32_t total;
			uint32_t remote_error;
			uint32_t local_error;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t left;
		} delete_list;
		struct {
			uint32_t vacuumed;
			uint32_t copied;
		} repack;
	} count;
};

/* this structure contains the information for one record to be deleted */
struct delete_record_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct ctdb_ltdb_header hdr;
	TDB_DATA key;
	uint8_t keydata[1];
};

struct delete_records_list {
	struct ctdb_marshall_buffer *records;
	struct vacuum_data *vdata;
};

static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key);

/**
 * Store key and header in a tree, indexed by the key hash.
 */
static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
					       struct ctdb_db_context *ctdb_db,
					       trbt_tree_t *tree,
					       const struct ctdb_ltdb_header *hdr,
					       TDB_DATA key)
{
	struct delete_record_data *dd;
	uint32_t hash;
	size_t len;

	len = offsetof(struct delete_record_data, keydata) + key.dsize;

	dd = (struct delete_record_data *)talloc_size(tree, len);
	if (dd == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	talloc_set_name_const(dd, "struct delete_record_data");

	dd->ctdb      = ctdb;
	dd->ctdb_db   = ctdb_db;
	dd->key.dsize = key.dsize;
	dd->key.dptr  = dd->keydata;
	memcpy(dd->keydata, key.dptr, key.dsize);

	dd->hdr = *hdr;

	hash = ctdb_hash(&key);

	trbt_insert32(tree, hash, dd);

	return 0;
}
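
/**
 * Add a record to the vacuum run's delete list, keyed by the record's
 * hash. On a hash collision the new record is skipped; it stays in the
 * database and can be picked up by a later vacuum run.
 */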
static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
				     struct ctdb_ltdb_header *hdr)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t hash;
	int ret;

	hash = ctdb_hash(&key);

	if (trbt_lookup32(vdata->delete_list, hash)) {
		DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
		return 0;
	}

	ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
						  vdata->delete_list,
						  hdr, key);
	if (ret != 0) {
		return -1;
	}

	vdata->count.delete_list.total++;

	return 0;
}

/**
 * Add a record to the list of records to be sent
 * to their lmaster with VACUUM_FETCH.
 */
static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
					   TDB_DATA key)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	uint32_t lmaster;
	struct ctdb_marshall_buffer *vfl;

	lmaster = ctdb_lmaster(ctdb, &key);

	vfl = vdata->vacuum_fetch_list[lmaster];

	vfl = ctdb_marshall_add(ctdb, vfl, vfl->db_id, ctdb->pnn,
				key, NULL, tdb_null);
	if (vfl == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		vdata->traverse_error = true;
		return -1;
	}

	vdata->vacuum_fetch_list[lmaster] = vfl;

	return 0;
}

static void ctdb_vacuum_event(struct tevent_context *ev,
			      struct tevent_timer *te,
			      struct timeval t, void *private_data);
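
/*
 * Parser callback for tdb_parse_record(): succeeds only if the stored
 * data is exactly a ctdb_ltdb_header (i.e. an empty record) and copies
 * that header out to the caller.
 */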
static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
{
	struct ctdb_ltdb_header *header =
		(struct ctdb_ltdb_header *)private_data;

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		return -1;
	}

	*header = *(struct ctdb_ltdb_header *)data.dptr;

	return 0;
}

/*
 * traverse function for gathering the records that can be deleted
 */
static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct vacuum_data *vdata = talloc_get_type(private_data,
						    struct vacuum_data);
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t lmaster;
	struct ctdb_ltdb_header *hdr;
	int res = 0;

	vdata->count.db_traverse.total++;

	lmaster = ctdb_lmaster(ctdb, &key);
	if (lmaster >= ctdb->num_nodes) {
		vdata->count.db_traverse.error++;
		DEBUG(DEBUG_CRIT, (__location__
				   " lmaster[%u] >= ctdb->num_nodes[%u] for key"
				   " with hash[%u]!\n",
				   (unsigned)lmaster,
				   (unsigned)ctdb->num_nodes,
				   (unsigned)ctdb_hash(&key)));
		return -1;
	}

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		/* it is not a deleted record */
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	hdr = (struct ctdb_ltdb_header *)data.dptr;

	if (hdr->dmaster != ctdb->pnn) {
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	/*
	 * Add the record to this process's delete_queue for processing
	 * in the subsequent traverse in the fast vacuum run.
	 */
	res = insert_record_into_delete_queue(ctdb_db, hdr, key);
	if (res != 0) {
		vdata->count.db_traverse.error++;
	} else {
		vdata->count.db_traverse.scheduled++;
	}

	return 0;
}

/*
 * traverse the tree of records to delete and marshall them into
 * a blob
 */
static int delete_marshall_traverse(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_marshall_buffer *m;

	m = ctdb_marshall_add(recs, recs->records, recs->records->db_id,
			      recs->records->db_id,
			      dd->key, &dd->hdr, tdb_null);
	if (m == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
		return -1;
	}

	recs->records = m;
	return 0;
}

/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from being
 * resurrected in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));
	int res;

	res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		recs->vdata->count.delete_list.skipped++;
		recs->vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	/*
	 * Increment the record's RSN to ensure the dmaster (i.e. the current
	 * node) has the highest RSN of the record in the cluster.
	 * This is to prevent old record copies from resurrecting in recoveries
	 * if something should fail during the deletion process.
	 * Note that ctdb_ltdb_store_server() increments the RSN if called
	 * on the record's dmaster.
	 */

	res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
	if (res != 0) {
		DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
				  "key hash [0x%08x] on database db[%s].\n",
				  hash, ctdb_db->db_name));
		goto skip;
	}

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	goto done;

skip:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	recs->vdata->count.delete_list.skipped++;
	recs->vdata->count.delete_list.left--;
	talloc_free(dd);
	dd = NULL;

done:
	if (dd == NULL) {
		return 0;
	}

	return delete_marshall_traverse(param, data);
}

/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 * - If the record has been migrated off the node
 *   or has been revived (filled with data) on the node,
 *   then skip the record.
 *
 * - If the current node is the record's lmaster and it is
 *   a record that has never been migrated with data, then
 *   delete the record from the local tdb.
 *
 * - If the current node is the record's lmaster and it has
 *   been migrated with data, then schedule it for the normal
 *   vacuuming procedure (i.e. add it to the delete_list).
 *
 * - If the current node is NOT the record's lmaster then
 *   add it to the list of records that are to be sent to
 *   the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	vdata->count.delete_queue.total++;

	res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		vdata->count.delete_queue.error++;
		return 0;
	}

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skipped;
	}

	if (header.dmaster != ctdb->pnn) {
		/* The record has been migrated off the node. Skip. */
		goto skipped;
	}

	if (header.rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		goto skipped;
	}

	/*
	 * We are dmaster, and the record has no data, and it has
	 * not been migrated after it has been queued for deletion.
	 *
	 * At this stage, the record could still have been revived locally
	 * and last been written with empty data. This can only be
	 * fixed with the addition of an active or delete flag. (TODO)
	 */

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		res = add_record_to_vacuum_fetch_list(vdata, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records to send to lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_vacuum_fetch_list++;
		}
		goto done;
	}

	/* use header->flags or dd->hdr.flags ?? */
	if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
		res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records for deletion on lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_delete_list++;
		}
	} else {
		res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error deleting record with key "
			       "hash [0x%08x] from local database db[%s].\n",
			       hash, ctdb_db->db_name));
			vdata->count.delete_queue.error++;
			goto done;
		}

		DEBUG(DEBUG_DEBUG,
		      (__location__ " Deleted record with key hash "
		       "[0x%08x] from local database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_queue.deleted++;
	}

	goto done;

skipped:
	vdata->count.delete_queue.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	return 0;
}

/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn + 1) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 * (Note that the first marshall traverse has bumped the RSN
		 * on disk.)
		 */
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error deleting record with key hash "
		       "[0x%08x] from local database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		goto done;
	}

	DEBUG(DEBUG_DEBUG,
	      (__location__ " Deleted record with key hash [0x%08x] from "
	       "local database db[%s].\n", hash, ctdb_db->db_name));

	vdata->count.delete_list.deleted++;
	goto done;

skip:
	vdata->count.delete_list.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	talloc_free(dd);
	vdata->count.delete_list.left--;

	return 0;
}

/**
 * Traverse the delete_queue.
 * Records are either deleted directly or filled
 * into the delete list or the vacuum fetch lists
 * for further processing.
 */
static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
				      struct vacuum_data *vdata)
{
	uint32_t sum;
	int ret;

	ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
				   delete_queue_traverse, vdata);

	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing "
		      "the delete queue.\n"));
	}

	sum = vdata->count.delete_queue.deleted
	    + vdata->count.delete_queue.skipped
	    + vdata->count.delete_queue.error
	    + vdata->count.delete_queue.added_to_delete_list
	    + vdata->count.delete_queue.added_to_vacuum_fetch_list;

	if (vdata->count.delete_queue.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
		      "counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_queue.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_queue.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " fast vacuuming delete_queue traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "adl[%u] "
		       "avf[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_queue.total,
		       (unsigned)vdata->count.delete_queue.deleted,
		       (unsigned)vdata->count.delete_queue.skipped,
		       (unsigned)vdata->count.delete_queue.error,
		       (unsigned)vdata->count.delete_queue.added_to_delete_list,
		       (unsigned)vdata->count.delete_queue.added_to_vacuum_fetch_list));
	}

	return;
}

/**
 * read-only traverse of the database, looking for records that
 * might be able to be vacuumed.
 *
 * This is not done each time but only every tunable
 * VacuumFastPathCount times.
 */
static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
				    struct vacuum_data *vdata)
{
	int ret;

	ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
	if (ret == -1 || vdata->traverse_error) {
		DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
				  "'%s'\n", ctdb_db->db_name));
		return;
	}

	if (vdata->count.db_traverse.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " full vacuuming db traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "sched[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.db_traverse.total,
		       (unsigned)vdata->count.db_traverse.skipped,
		       (unsigned)vdata->count.db_traverse.error,
		       (unsigned)vdata->count.db_traverse.scheduled));
	}

	return;
}

/**
 * Process the vacuum fetch lists:
 * For records for which we are not the lmaster, tell the lmaster to
 * fetch the record.
 */
static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
					    struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	for (i = 0; i < ctdb->num_nodes; i++) {
		TDB_DATA data;
		struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];

		if (ctdb->nodes[i]->pnn == ctdb->pnn) {
			continue;
		}

		if (vfl->count == 0) {
			continue;
		}

		DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
				   vfl->count, ctdb->nodes[i]->pnn,
				   ctdb_db->db_name));

		data = ctdb_marshall_finish(vfl);
		if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
					     CTDB_SRVID_VACUUM_FETCH,
					     data) != 0)
		{
			DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
					  "fetch message to %u\n",
					  ctdb->nodes[i]->pnn));
		}
	}

	return;
}

/**
 * Process the delete list:
 *
 * This is the last step of vacuuming that consistently deletes
 * those records that have been migrated with data and can hence
 * not be deleted when leaving a node.
 *
 * In this step, the lmaster does the final deletion of those empty
 * records that it is also dmaster for. It has usually received
 * at least some of these records previously from the former dmasters
 * with the vacuum fetch message.
 *
 * This last step is implemented as a 3-phase process to protect from
 * races leading to data corruption:
 *
 * 1) Send the lmaster's copy to all other active nodes with the
 *    RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
 * 2) Send the records that could successfully be stored remotely
 *    in step #1 to all active nodes with the TRY_DELETE_RECORDS
 *    control. The remote nodes delete their local copy.
 * 3) The lmaster locally deletes its copies of all records that
 *    could successfully be deleted remotely in step #2.
 */
static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
				     struct vacuum_data *vdata)
{
	int ret, i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct delete_records_list *recs;
	TDB_DATA indata;
	struct ctdb_node_map_old *nodemap;
	uint32_t *active_nodes;
	int num_active_nodes;
	TALLOC_CTX *tmp_ctx;
	uint32_t sum;

	if (vdata->count.delete_list.total == 0) {
		return;
	}

	tmp_ctx = talloc_new(vdata);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return;
	}

	vdata->count.delete_list.left = vdata->count.delete_list.total;

	/*
	 * get the list of currently active nodes
	 */

	ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
				   CTDB_CURRENT_NODE,
				   tmp_ctx,
				   &nodemap);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
		goto done;
	}

	active_nodes = list_of_active_nodes(ctdb, nodemap,
					    nodemap, /* talloc context */
					    false /* include self */);
	/* yuck! ;-) */
	num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

	/*
	 * Now delete the records on all active nodes in a three-phase process:
	 * 1) send all active remote nodes the current empty copy with this
	 *    node as DMASTER
	 * 2) if all nodes could store the new copy,
	 *    tell all the active remote nodes to delete all their copies
	 * 3) if all remote nodes deleted their record copy, delete it locally
	 */

	/*
	 * Step 1:
	 * Send currently empty record copy to all active nodes for storing.
	 */

	recs = talloc_zero(tmp_ctx, struct delete_records_list);
	if (recs == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;
	recs->vdata = vdata;

	/*
	 * traverse the tree of all records we want to delete and
	 * create a blob we can send to the other nodes.
	 *
	 * We call delete_marshall_traverse_first() to bump the
	 * records' RSNs in the database, to ensure we (as dmaster)
	 * keep the highest RSN of the records in the cluster.
	 */
	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse_first, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for first marshalling.\n"));
		goto done;
	}

	indata = ctdb_marshall_finish(recs->records);
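
	/*
	 * Push the blob to each active node. Any records a node could
	 * not store are returned in outdata and are dropped from the
	 * delete list below, so they are not deleted anywhere.
	 */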
	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data_old *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				CTDB_CONTROL_RECEIVE_RECORDS, 0,
				indata, recs, &outdata, &res,
				NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Error storing record copies on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not store. We remove these from
		 * the list to process further.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data_old *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not store the record
				 * copy and it is the first node that failed.
				 * So we should remove it from the tree and
				 * update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from RECEIVE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			rec = (struct ctdb_rec_data_old *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}

	/*
	 * Step 2:
	 * Send the remaining records to all active nodes for deletion.
	 *
	 * The lmaster's (i.e. our) copies of these records have been stored
	 * successfully on the other nodes.
	 */

	/*
	 * Create a marshall blob from the remaining list of records to delete.
	 */

	talloc_free(recs->records);

	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for second marshalling.\n"));
		goto done;
	}

	indata = ctdb_marshall_finish(recs->records);
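
	/*
	 * Ask each active node to delete its copies. Any records a node
	 * could not delete are returned in outdata and are dropped from
	 * the delete list, so the lmaster keeps its copy.
	 */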
	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data_old *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
				indata, recs, &outdata, &res,
				NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Failed to delete records on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not delete. We remove these from
		 * the list to delete locally.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data_old *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not delete the
				 * record and it is the first node that
				 * failed. So we should remove it from
				 * the tree and update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from TRY_DELETE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			rec = (struct ctdb_rec_data_old *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}

	/*
	 * Step 3:
	 * Delete the remaining records locally.
	 *
	 * These records have successfully been deleted on all
	 * active remote nodes.
	 */

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_record_traverse, vdata);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for deletion.\n"));
	}

success:

	if (vdata->count.delete_list.left != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Vacuum db[%s] error: "
		      "there are %u records left for deletion after "
		      "processing delete list\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.left));
	}

	sum = vdata->count.delete_list.deleted
	    + vdata->count.delete_list.skipped
	    + vdata->count.delete_list.remote_error
	    + vdata->count.delete_list.local_error
	    + vdata->count.delete_list.left;

	if (vdata->count.delete_list.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
		      "delete list counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_list.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " vacuum delete list statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skip[%u] "
		       "rem.err[%u] "
		       "loc.err[%u] "
		       "left[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_list.total,
		       (unsigned)vdata->count.delete_list.deleted,
		       (unsigned)vdata->count.delete_list.skipped,
		       (unsigned)vdata->count.delete_list.remote_error,
		       (unsigned)vdata->count.delete_list.local_error,
		       (unsigned)vdata->count.delete_list.left));
	}

done:
	talloc_free(tmp_ctx);

	return;
}

/**
 * initialize the vacuum_data
 */
static struct vacuum_data *ctdb_vacuum_init_vacuum_data(
					struct ctdb_db_context *ctdb_db,
					TALLOC_CTX *mem_ctx)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct vacuum_data *vdata;

	vdata = talloc_zero(mem_ctx, struct vacuum_data);
	if (vdata == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return NULL;
	}

	vdata->ctdb = ctdb_db->ctdb;
	vdata->ctdb_db = ctdb_db;
	vdata->delete_list = trbt_create(vdata, 0);
	if (vdata->delete_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto fail;
	}

	vdata->start = timeval_current();

	vdata->count.delete_queue.added_to_delete_list = 0;
	vdata->count.delete_queue.added_to_vacuum_fetch_list = 0;
	vdata->count.delete_queue.deleted = 0;
	vdata->count.delete_queue.skipped = 0;
	vdata->count.delete_queue.error = 0;
	vdata->count.delete_queue.total = 0;
	vdata->count.db_traverse.scheduled = 0;
	vdata->count.db_traverse.skipped = 0;
	vdata->count.db_traverse.error = 0;
	vdata->count.db_traverse.total = 0;
	vdata->count.delete_list.total = 0;
	vdata->count.delete_list.left = 0;
	vdata->count.delete_list.remote_error = 0;
	vdata->count.delete_list.local_error = 0;
	vdata->count.delete_list.skipped = 0;
	vdata->count.delete_list.deleted = 0;

	/* the list needs to be of length num_nodes */
	vdata->vacuum_fetch_list = talloc_zero_array(vdata,
						     struct ctdb_marshall_buffer *,
						     ctdb->num_nodes);
	if (vdata->vacuum_fetch_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto fail;
	}

	for (i = 0; i < ctdb->num_nodes; i++) {
		vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
			talloc_zero_size(vdata->vacuum_fetch_list,
					 offsetof(struct ctdb_marshall_buffer, data));
		if (vdata->vacuum_fetch_list[i] == NULL) {
			DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
			talloc_free(vdata);
			return NULL;
		}
		vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
	}

	return vdata;

fail:
	talloc_free(vdata);
	return NULL;
}

/**
 * Vacuum a DB:
 *  - Always do the fast vacuuming run, which traverses
 *    the in-memory delete queue: these records have been
 *    scheduled for deletion.
 *  - Only if explicitly requested, the database is traversed
 *    in order to use the traditional heuristics on empty records
 *    to trigger deletion.
 *    This is done only every VacuumFastPathCount'th vacuuming run.
 *
 * The traverse runs fill two lists:
 *
 * - The delete_list:
 *   This is the list of empty records the current
 *   node is lmaster and dmaster for. These records are later
 *   deleted first on other nodes and then locally.
 *
 *   The fast vacuuming run has a short cut for those records
 *   that have never been migrated with data: these records
 *   are immediately deleted locally, since they have left
 *   no trace on other nodes.
 *
 * - The vacuum_fetch lists
 *   (one for each other lmaster node):
 *   The records in this list are sent for deletion to
 *   their lmaster in a bulk VACUUM_FETCH message.
 *
 *   The lmaster then migrates all these records to itself
 *   so that they can be vacuumed there.
 *
 * This executes in the child context.
 */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
			  bool full_vacuum_run)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret, pnn;
	struct vacuum_data *vdata;
	TALLOC_CTX *tmp_ctx;

	DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
			   "%s db_id[0x%08x]\n",
			   full_vacuum_run ? "full" : "fast",
			   ctdb_db->db_name, ctdb_db->db_id));

	ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
		return ret;
	}

	pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
	if (pnn == -1) {
		DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
		return -1;
	}

	ctdb->pnn = pnn;

	tmp_ctx = talloc_new(ctdb_db);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR, ("Out of memory!\n"));
		return -1;
	}

	vdata = ctdb_vacuum_init_vacuum_data(ctdb_db, tmp_ctx);
	if (vdata == NULL) {
		talloc_free(tmp_ctx);
		return -1;
	}

	if (full_vacuum_run) {
		ctdb_vacuum_traverse_db(ctdb_db, vdata);
	}

	ctdb_process_delete_queue(ctdb_db, vdata);

	ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);

	ctdb_process_delete_list(ctdb_db, vdata);

	talloc_free(tmp_ctx);

	/* this ensures we run our event queue */
	ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

	return 0;
}

/**
 * repack and vacuum a db
 * called from the child context
 */
static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
				     bool full_vacuum_run)
{
	uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
	const char *name = ctdb_db->db_name;
	int freelist_size = 0;
	int ret;

	if (ctdb_vacuum_db(ctdb_db, full_vacuum_run) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
	}

	freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
	if (freelist_size == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
		return -1;
	}

	/*
	 * decide if a repack is necessary
	 */
	if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
	{
		return 0;
	}

	DEBUG(DEBUG_INFO, ("Repacking %s with %u freelist entries\n",
			   name, freelist_size));

	ret = tdb_repack(ctdb_db->ltdb->tdb);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
		return -1;
	}

	return 0;
}
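
/* Return the vacuuming interval (in seconds) from the vacuum_interval
 * tunable. */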
static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
{
	uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;

	return interval;
}
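
/*
 * Destructor for a vacuum child context: record the vacuuming latency,
 * kill the child if it is still running, and schedule the next vacuum
 * event for this database.
 */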
static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
	double l = timeval_elapsed(&child_ctx->start_time);
	struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	CTDB_UPDATE_DB_LATENCY(ctdb_db, "vacuum", vacuum.latency, l);
	DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

	if (child_ctx->child_pid != -1) {
		ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
	} else {
		/* Bump the number of successful fast-path runs. */
		child_ctx->vacuum_handle->fast_path_count++;
	}

	DLIST_REMOVE(ctdb->vacuumers, child_ctx);

	tevent_add_timer(ctdb->ev, child_ctx->vacuum_handle,
			 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			 ctdb_vacuum_event, child_ctx->vacuum_handle);

	return 0;
}

/*
 * this event is generated when a vacuum child process times out
 */
static void vacuum_child_timeout(struct tevent_context *ev,
				 struct tevent_timer *te,
				 struct timeval t, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);

	DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));

	child_ctx->status = VACUUM_TIMEOUT;

	talloc_free(child_ctx);
}

/*
 * this event is generated when a vacuum child process has completed
 */
static void vacuum_child_handler(struct tevent_context *ev,
				 struct tevent_fd *fde,
				 uint16_t flags, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
	char c = 0;
	int ret;

	DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
	child_ctx->child_pid = -1;

	ret = sys_read(child_ctx->fd[0], &c, 1);
	if (ret != 1 || c != 0) {
		child_ctx->status = VACUUM_ERROR;
		DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
	} else {
		child_ctx->status = VACUUM_OK;
	}

	talloc_free(child_ctx);
}

/*
 * this event is called every time we need to start a new vacuum process
 */
static void ctdb_vacuum_event(struct tevent_context *ev,
			      struct tevent_timer *te,
			      struct timeval t, void *private_data)
{
	struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
	struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_vacuum_child_context *child_ctx;
	struct tevent_fd *fde;
	int ret;

	/* we don't vacuum if we are in recovery mode, or db frozen */
	if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
	    ctdb_db_frozen(ctdb_db)) {
		DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
				   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ?
				   "in recovery" : "frozen"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	/* Do not allow multiple vacuuming child processes to be active at the
	 * same time. If there is a vacuuming child process active, delay the
	 * new vacuuming event to stagger vacuuming events.
	 */
	if (ctdb->vacuumers != NULL) {
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(0, 500*1000),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
	if (child_ctx == NULL) {
		DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
		ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
	}

	ret = pipe(child_ctx->fd);
	if (ret != 0) {
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}
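
	/*
	 * Reset the fast-path counter once it has passed
	 * VacuumFastPathCount; a counter of zero makes the child
	 * below perform a full vacuum run.
	 */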
	if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
		vacuum_handle->fast_path_count = 0;
	}

	child_ctx->child_pid = ctdb_fork(ctdb);
	if (child_ctx->child_pid == (pid_t)-1) {
		close(child_ctx->fd[0]);
		close(child_ctx->fd[1]);
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
		tevent_add_timer(ctdb->ev, vacuum_handle,
				 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				 ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (child_ctx->child_pid == 0) {
		char cc = 0;
		bool full_vacuum_run = false;
		close(child_ctx->fd[0]);

		DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
		prctl_set_comment("ctdb_vacuum");
		if (switch_from_server_to_client(ctdb) != 0) {
			DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
			_exit(1);
		}

		if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
		    (vacuum_handle->fast_path_count == 0))
		{
			full_vacuum_run = true;
		}
		cc = ctdb_vacuum_and_repack_db(ctdb_db, full_vacuum_run);

		sys_write(child_ctx->fd[1], &cc, 1);
		_exit(0);
	}

	set_close_on_exec(child_ctx->fd[0]);
	close(child_ctx->fd[1]);

	child_ctx->status = VACUUM_RUNNING;
	child_ctx->start_time = timeval_current();

	DLIST_ADD(ctdb->vacuumers, child_ctx);
	talloc_set_destructor(child_ctx, vacuum_child_destructor);

	/*
	 * Clear the fastpath vacuuming list in the parent.
	 */
	talloc_free(ctdb_db->delete_queue);
	ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
	if (ctdb_db->delete_queue == NULL) {
		/* fatal here? ... */
		ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
				 "in parent context. Shutting down\n");
	}

	tevent_add_timer(ctdb->ev, child_ctx,
			 timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
			 vacuum_child_timeout, child_ctx);

	DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

	fde = tevent_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
			    TEVENT_FD_READ, vacuum_child_handler, child_ctx);
	tevent_fd_set_auto_close(fde);

	vacuum_handle->child_ctx = child_ctx;
	child_ctx->vacuum_handle = vacuum_handle;
}

void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
{
	/* Simply free them all. */
	while (ctdb->vacuumers) {
		DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
			   ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
			   (int)ctdb->vacuumers->child_pid));
		/* vacuum_child_destructor kills it, removes from list */
		talloc_free(ctdb->vacuumers);
	}
}

/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
	if (! ctdb_db_volatile(ctdb_db)) {
		DEBUG(DEBUG_ERR,
		      ("Vacuuming is disabled for non-volatile database %s\n",
		       ctdb_db->db_name));
		return 0;
	}

	ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
	CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);

	ctdb_db->vacuum_handle->ctdb_db         = ctdb_db;
	ctdb_db->vacuum_handle->fast_path_count = 0;

	tevent_add_timer(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
			 timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			 ctdb_vacuum_event, ctdb_db->vacuum_handle);

	return 0;
}
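
/**
 * Remove a record from the delete queue, e.g. when it has been revived
 * with data. The entry is only removed if the queued key matches
 * exactly; a mere hash collision leaves the queue untouched.
 */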
static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
					    const struct ctdb_ltdb_header *hdr,
					    const TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "db[%s] "
			    "db_id[0x%08x] "
			    "key_hash[0x%08x] "
			    "lmaster[%u] "
			    "migrated_with_data[%s]\n",
			     ctdb_db->db_name, ctdb_db->db_id,
			     hash,
			     ctdb_lmaster(ctdb_db->ctdb, &key),
			     hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd == NULL) {
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "record not in queue (hash[0x%08x]).\n",
				    hash));
		return;
	}

	if ((kd->key.dsize != key.dsize) ||
	    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
	{
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "hash collision for key with hash[0x%08x] "
				    "in db[%s] - skipping\n",
				    hash, ctdb_db->db_name));
		return;
	}

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "removing key with hash[0x%08x]\n",
			    hash));

	talloc_free(kd);

	return;
}

/**
 * Insert a record into the ctdb_db context's delete queue,
 * handling hash collisions.
 */
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;
	int ret;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__ " schedule for deletion: db[%s] "
			    "db_id[0x%08x] "
			    "key_hash[0x%08x] "
			    "lmaster[%u] "
			    "migrated_with_data[%s]\n",
			     ctdb_db->db_name, ctdb_db->db_id,
			     hash,
			     ctdb_lmaster(ctdb_db->ctdb, &key),
			     hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd != NULL) {
		if ((kd->key.dsize != key.dsize) ||
		    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
		{
			DEBUG(DEBUG_INFO,
			      (__location__ " schedule for deletion: "
			       "hash collision for key hash [0x%08x]. "
			       "Skipping the record.\n", hash));
			return 0;
		} else {
			DEBUG(DEBUG_DEBUG,
			      (__location__ " schedule for deletion: "
			       "updating entry for key with hash [0x%08x].\n",
			       hash));
		}
	}

	ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
						  ctdb_db->delete_queue,
						  hdr, key);
	if (ret != 0) {
		DEBUG(DEBUG_INFO,
		      (__location__ " schedule for deletion: error "
		       "inserting key with hash [0x%08x] into delete queue\n",
		       hash));
		return -1;
	}

	return 0;
}

/**
 * Schedule a record for deletion.
 * Called from the parent context.
 */
int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
					   TDB_DATA indata)
{
	struct ctdb_control_schedule_for_deletion *dd;
	struct ctdb_db_context *ctdb_db;
	int ret;
	TDB_DATA key;

	dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;

	ctdb_db = find_ctdb_db(ctdb, dd->db_id);
	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
				  dd->db_id));
		return -1;
	}

	key.dsize = dd->keylen;
	key.dptr = dd->key;

	ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);

	return ret;
}

int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 TDB_DATA key)
{
	int ret;
	struct ctdb_control_schedule_for_deletion *dd;
	TDB_DATA indata;
	int32_t status;

	if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
		/* main daemon - directly queue */
		ret = insert_record_into_delete_queue(ctdb_db, hdr, key);

		return ret;
	}

	/* If we don't have a connection to the daemon we cannot send
	 * a control, e.g. when called from an update_record control
	 * child process.
	 */
	if (!ctdb_db->ctdb->can_send_controls) {
		return -1;
	}

	/* child process: send the main daemon a control */
	indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
	indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
	if (indata.dptr == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
		return -1;
	}
	dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
	dd->db_id = ctdb_db->db_id;
	dd->hdr = *hdr;
	dd->keylen = key.dsize;
	memcpy(dd->key, key.dptr, key.dsize);

	ret = ctdb_control(ctdb_db->ctdb,
			   CTDB_CURRENT_NODE,
			   ctdb_db->db_id,
			   CTDB_CONTROL_SCHEDULE_FOR_DELETION,
			   CTDB_CTRL_FLAG_NOREPLY, /* flags */
			   indata,
			   NULL, /* mem_ctx */
			   NULL, /* outdata */
			   &status,
			   NULL, /* timeout : NULL == wait forever */
			   NULL); /* error message */

	talloc_free(indata.dptr);

	if (ret != 0 || status != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error sending "
				  "SCHEDULE_FOR_DELETION "
				  "control.\n"));
		if (status != 0) {
			ret = -1;
		}
	}

	return ret;
}

void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 const TDB_DATA key)
{
	if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
		/*
		 * Only remove the record from the delete queue if called
		 * in the main daemon.
		 */
		return;
	}

	remove_record_from_delete_queue(ctdb_db, hdr, key);

	return;
}