/*
   ctdb vacuuming events

   Copyright (C) Ronnie Sahlberg  2009
   Copyright (C) Michael Adam 2010-2013
   Copyright (C) Stefan Metzmacher 2010-2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "tdb.h"
#include "system/network.h"
#include "system/filesys.h"
#include "system/dir.h"
#include "../include/ctdb_private.h"
#include "lib/tdb_wrap/tdb_wrap.h"
#include "lib/util/dlinklist.h"
#include "../common/rb_tree.h"
#define TIMELIMIT() timeval_current_ofs(10, 0)
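/*
 * A note on TIMELIMIT(): this is the timeout handed to the ctdb
 * control calls issued from this file (getvnnmap, getpnn, getnodemap),
 * i.e. each of those controls is given 10 seconds to complete.
 */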
enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};
struct ctdb_vacuum_child_context {
	struct ctdb_vacuum_child_context *next, *prev;
	struct ctdb_vacuum_handle *vacuum_handle;
	/* fd child writes status to */
	int fd[2];
	pid_t child_pid;
	enum vacuum_child_status status;
	struct timeval start_time;
};
struct ctdb_vacuum_handle {
	struct ctdb_db_context *ctdb_db;
	struct ctdb_vacuum_child_context *child_ctx;
	uint32_t fast_path_count;
};
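/*
 * fast_path_count counts the fast-path (delete-queue only) runs that
 * have completed since the last full database traverse.  Once it
 * exceeds the VacuumFastPathCount tunable, ctdb_vacuum_event() resets
 * it to 0, which makes the next child do a full vacuum run.
 */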
/* a list of records to possibly delete */
struct vacuum_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tdb_context *dest_db;
	trbt_tree_t *delete_list;
	struct ctdb_marshall_buffer **vacuum_fetch_list;
	struct timeval start;
	bool traverse_error;
	bool vacuum;
	struct {
		struct {
			uint32_t added_to_vacuum_fetch_list;
			uint32_t added_to_delete_list;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} delete_queue;
		struct {
			uint32_t scheduled;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} db_traverse;
		struct {
			uint32_t total;
			uint32_t remote_error;
			uint32_t local_error;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t left;
		} delete_list;
		struct {
			uint32_t vacuumed;
			uint32_t copied;
		} repack;
	} count;
};
/* this structure contains the information for one record to be deleted */
struct delete_record_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct ctdb_ltdb_header hdr;
	TDB_DATA key;
	uint8_t keydata[1];
};
struct delete_records_list {
	struct ctdb_marshall_buffer *records;
	struct vacuum_data *vdata;
};
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key);
/**
 * Store key and header in a tree, indexed by the key hash.
 */
static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
					       struct ctdb_db_context *ctdb_db,
					       trbt_tree_t *tree,
					       const struct ctdb_ltdb_header *hdr,
					       TDB_DATA key)
{
	struct delete_record_data *dd;
	uint32_t hash;
	size_t len;

	len = offsetof(struct delete_record_data, keydata) + key.dsize;

	dd = (struct delete_record_data *)talloc_size(tree, len);
	if (dd == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	talloc_set_name_const(dd, "struct delete_record_data");

	dd->ctdb = ctdb;
	dd->ctdb_db = ctdb_db;
	dd->key.dsize = key.dsize;
	dd->key.dptr = dd->keydata;
	memcpy(dd->keydata, key.dptr, key.dsize);

	dd->hdr = *hdr;

	hash = ctdb_hash(&key);

	trbt_insert32(tree, hash, dd);

	return 0;
}
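/*
 * Note that the tree is indexed by the 32-bit ctdb_hash() of the key
 * rather than by the key itself, so two distinct keys can collide.
 * Callers deal with this by probing with trbt_lookup32() first and
 * skipping colliding records; see add_record_to_delete_list() below
 * and insert_record_into_delete_queue() at the end of this file.
 */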
static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
				     struct ctdb_ltdb_header *hdr)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t hash;
	int ret;

	hash = ctdb_hash(&key);

	if (trbt_lookup32(vdata->delete_list, hash)) {
		DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
		return 0;
	}

	ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
						  vdata->delete_list,
						  hdr, key);
	if (ret != 0) {
		return -1;
	}

	vdata->count.delete_list.total++;

	return 0;
}
/**
 * Add a record to the list of records to be sent
 * to their lmaster with VACUUM_FETCH.
 */
static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
					   TDB_DATA key)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	uint32_t lmaster;
	struct ctdb_marshall_buffer *vfl;

	lmaster = ctdb_lmaster(ctdb, &key);

	vfl = vdata->vacuum_fetch_list[lmaster];

	vfl = ctdb_marshall_add(ctdb, vfl, vfl->db_id, ctdb->pnn,
				key, NULL, tdb_null);
	if (vfl == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		vdata->traverse_error = true;
		return -1;
	}

	vdata->vacuum_fetch_list[lmaster] = vfl;

	return 0;
}
static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
			      struct timeval t, void *private_data);
static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
{
	struct ctdb_ltdb_header *header =
		(struct ctdb_ltdb_header *)private_data;

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		return -1;
	}

	*header = *(struct ctdb_ltdb_header *)data.dptr;

	return 0;
}
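/*
 * A record whose tdb data is exactly one ctdb_ltdb_header, i.e. one
 * with zero bytes of user data, is what vacuuming treats as "empty"
 * (deleted).  The parser above rejects everything else, so a non-zero
 * parse result in the callers below simply means "not a deletion
 * candidate".
 */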
/**
 * traverse function for gathering the records that can be deleted
 */
static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct vacuum_data *vdata = talloc_get_type(private_data,
						    struct vacuum_data);
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t lmaster;
	struct ctdb_ltdb_header *hdr;
	int res = 0;

	vdata->count.db_traverse.total++;

	lmaster = ctdb_lmaster(ctdb, &key);
	if (lmaster >= ctdb->num_nodes) {
		vdata->count.db_traverse.error++;
		DEBUG(DEBUG_CRIT, (__location__
				   " lmaster[%u] >= ctdb->num_nodes[%u] for key"
				   " with hash[%u]!\n",
				   (unsigned)lmaster,
				   (unsigned)ctdb->num_nodes,
				   (unsigned)ctdb_hash(&key)));
		return -1;
	}

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		/* it is not a deleted record */
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	hdr = (struct ctdb_ltdb_header *)data.dptr;

	if (hdr->dmaster != ctdb->pnn) {
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	/*
	 * Add the record to this process's delete_queue for processing
	 * in the subsequent traverse in the fast vacuum run.
	 */
	res = insert_record_into_delete_queue(ctdb_db, hdr, key);
	if (res != 0) {
		vdata->count.db_traverse.error++;
	} else {
		vdata->count.db_traverse.scheduled++;
	}

	return 0;
}
/**
 * traverse the tree of records to delete and marshall them into
 * a blob
 */
static int delete_marshall_traverse(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_marshall_buffer *m;

	m = ctdb_marshall_add(recs, recs->records, recs->records->db_id,
			      recs->records->db_id,
			      dd->key, &dd->hdr, tdb_null);
	if (m == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
		return -1;
	}

	recs->records = m;
	return 0;
}
/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from
 * resurrection in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));
	int res;

	res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		recs->vdata->count.delete_list.skipped++;
		recs->vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	/*
	 * Increment the record's RSN to ensure the dmaster (i.e. the current
	 * node) has the highest RSN of the record in the cluster.
	 * This is to prevent old record copies from resurrecting in recoveries
	 * if something should fail during the deletion process.
	 * Note that ctdb_ltdb_store_server() increments the RSN if called
	 * on the record's dmaster.
	 */

	res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
	if (res != 0) {
		DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
				  "key hash [0x%08x] on database db[%s].\n",
				  hash, ctdb_db->db_name));
		goto skip;
	}

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	goto done;

skip:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	recs->vdata->count.delete_list.skipped++;
	recs->vdata->count.delete_list.left--;
	talloc_free(dd);
	dd = NULL;

done:
	if (dd == NULL) {
		return 0;
	}

	return delete_marshall_traverse(param, data);
}
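/*
 * After the ctdb_ltdb_store() in delete_marshall_traverse_first(), the
 * on-disk RSN of every record that survives the first marshall
 * traverse is dd->hdr.rsn + 1, while the marshalled blob still carries
 * the old RSN from dd->hdr.  This is why delete_record_traverse()
 * below insists on header.rsn == dd->hdr.rsn + 1 before deleting the
 * local copy.
 */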
/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 * - If the record has been migrated off the node
 *   or has been revived (filled with data) on the node,
 *   then skip the record.
 *
 * - If the current node is the record's lmaster and it is
 *   a record that has never been migrated with data, then
 *   delete the record from the local tdb.
 *
 * - If the current node is the record's lmaster and it has
 *   been migrated with data, then schedule it for the normal
 *   vacuuming procedure (i.e. add it to the delete_list).
 *
 * - If the current node is NOT the record's lmaster then
 *   add it to the list of records that are to be sent to
 *   the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	vdata->count.delete_queue.total++;

	res = tdb_chainlock_nonblock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		vdata->count.delete_queue.error++;
		return 0;
	}

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skipped;
	}

	if (header.dmaster != ctdb->pnn) {
		/* The record has been migrated off the node. Skip. */
		goto skipped;
	}

	if (header.rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		goto skipped;
	}

	/*
	 * We are dmaster, and the record has no data, and it has
	 * not been migrated after it has been queued for deletion.
	 *
	 * At this stage, the record could still have been revived locally
	 * and last been written with empty data. This can only be
	 * fixed with the addition of an active or delete flag. (TODO)
	 */

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		res = add_record_to_vacuum_fetch_list(vdata, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records to send to lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_vacuum_fetch_list++;
		}
		goto done;
	}

	/* use header->flags or dd->hdr.flags ?? */
	if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
		res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records for deletion on lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_delete_list++;
		}
	} else {
		res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error deleting record with key "
			       "hash [0x%08x] from local data base db[%s].\n",
			       hash, ctdb_db->db_name));
			vdata->count.delete_queue.error++;
			goto done;
		}

		DEBUG(DEBUG_DEBUG,
		      (__location__ " Deleted record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_queue.deleted++;
	}

	goto done;

skipped:
	vdata->count.delete_queue.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	return 0;
}
/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn + 1) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 * (Note that the first marshall traverse has bumped the RSN
		 *  on disk.)
		 */
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error deleting record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		goto done;
	}

	DEBUG(DEBUG_DEBUG,
	      (__location__ " Deleted record with key hash [0x%08x] from "
	       "local data base db[%s].\n", hash, ctdb_db->db_name));

	vdata->count.delete_list.deleted++;
	goto done;

skip:
	vdata->count.delete_list.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	talloc_free(dd);
	vdata->count.delete_list.left--;

	return 0;
}
/**
 * Traverse the delete_queue.
 * Records are either deleted directly or filled
 * into the delete list or the vacuum fetch lists
 * for further processing.
 */
static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
				      struct vacuum_data *vdata)
{
	uint32_t sum;
	int ret;

	ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
				   delete_queue_traverse, vdata);

	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing "
		      "the delete queue.\n"));
	}

	sum = vdata->count.delete_queue.deleted
	    + vdata->count.delete_queue.skipped
	    + vdata->count.delete_queue.error
	    + vdata->count.delete_queue.added_to_delete_list
	    + vdata->count.delete_queue.added_to_vacuum_fetch_list;

	if (vdata->count.delete_queue.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
		      "counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_queue.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_queue.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " fast vacuuming delete_queue traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "adl[%u] "
		       "avf[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_queue.total,
		       (unsigned)vdata->count.delete_queue.deleted,
		       (unsigned)vdata->count.delete_queue.skipped,
		       (unsigned)vdata->count.delete_queue.error,
		       (unsigned)vdata->count.delete_queue.added_to_delete_list,
		       (unsigned)vdata->count.delete_queue.added_to_vacuum_fetch_list));
	}

	return;
}
/**
 * read-only traverse of the database, looking for records that
 * might be able to be vacuumed.
 *
 * This is not done on each run but only every
 * VacuumFastPathCount'th run (a tunable).
 */
static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
				    struct vacuum_data *vdata)
{
	int ret;

	ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
	if (ret == -1 || vdata->traverse_error) {
		DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
				  "'%s'\n", ctdb_db->db_name));
		return;
	}

	if (vdata->count.db_traverse.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " full vacuuming db traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "sched[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.db_traverse.total,
		       (unsigned)vdata->count.db_traverse.skipped,
		       (unsigned)vdata->count.db_traverse.error,
		       (unsigned)vdata->count.db_traverse.scheduled));
	}

	return;
}
/**
 * Process the vacuum fetch lists:
 * For records for which we are not the lmaster, tell the lmaster to
 * fetch the record.
 */
static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
					    struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	for (i = 0; i < ctdb->num_nodes; i++) {
		TDB_DATA data;
		struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];

		if (ctdb->nodes[i]->pnn == ctdb->pnn) {
			continue;
		}

		if (vfl->count == 0) {
			continue;
		}

		DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
				   vfl->count, ctdb->nodes[i]->pnn,
				   ctdb_db->db_name));

		data = ctdb_marshall_finish(vfl);
		if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
					     CTDB_SRVID_VACUUM_FETCH,
					     data) != 0)
		{
			DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
					  "fetch message to %u\n",
					  ctdb->nodes[i]->pnn));
		}
	}

	return;
}
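/*
 * The VACUUM_FETCH message asks each lmaster to migrate the listed
 * records to itself.  Once the lmaster is also dmaster for them, the
 * records can be picked up by that node's own delete list processing;
 * see the comment above ctdb_vacuum_db() below.
 */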
/**
 * Process the delete list:
 *
 * This is the last step of vacuuming that consistently deletes
 * those records that have been migrated with data and can hence
 * not be deleted when leaving a node.
 *
 * In this step, the lmaster does the final deletion of those empty
 * records that it is also dmaster for. It has usually received
 * at least some of these records previously from the former dmasters
 * with the vacuum fetch message.
 *
 * This last step is implemented as a 3-phase process to protect from
 * races leading to data corruption:
 *
 *  1) Send the lmaster's copy to all other active nodes with the
 *     RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
 *  2) Send the records that could successfully be stored remotely
 *     in step #1 to all active nodes with the TRY_DELETE_RECORDS
 *     control. The remote nodes delete their local copy.
 *  3) The lmaster locally deletes its copies of all records that
 *     could successfully be deleted remotely in step #2.
 */
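/*
 * A worked example with made-up numbers: assume this node is lmaster
 * and dmaster of an empty record whose RSN is 7.  The first marshall
 * traverse bumps the local RSN to 8 but marshalls the old header, so
 * step #1 stores a copy with RSN 7 (and this node as dmaster) on all
 * other active nodes.  Step #2 makes the remote nodes delete that
 * copy, and step #3 deletes the local one.  If anything fails along
 * the way, the local empty copy still holds the cluster-wide highest
 * RSN (8), so a subsequent recovery picks it instead of resurrecting
 * a stale data copy.
 */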
static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
				     struct vacuum_data *vdata)
{
	int ret, i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct delete_records_list *recs;
	TDB_DATA indata;
	struct ctdb_node_map *nodemap;
	uint32_t *active_nodes;
	int num_active_nodes;
	TALLOC_CTX *tmp_ctx;
	uint32_t sum;

	if (vdata->count.delete_list.total == 0) {
		return;
	}

	tmp_ctx = talloc_new(vdata);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return;
	}

	vdata->count.delete_list.left = vdata->count.delete_list.total;

	/*
	 * get the list of currently active nodes
	 */

	ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
				   CTDB_CURRENT_NODE,
				   tmp_ctx,
				   &nodemap);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
		goto done;
	}

	active_nodes = list_of_active_nodes(ctdb, nodemap,
					    nodemap, /* talloc context */
					    false /* include self */);
	/* yuck! ;-) */
	num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

	/*
	 * Now delete the records on all active nodes in a three-phase process:
	 * 1) send all active remote nodes the current empty copy with this
	 *    node as DMASTER
	 * 2) if all nodes could store the new copy,
	 *    tell all the active remote nodes to delete all their copy
	 * 3) if all remote nodes deleted their record copy, delete it locally
	 */

	/*
	 * Step 1:
	 * Send currently empty record copy to all active nodes for storing.
	 */

	recs = talloc_zero(tmp_ctx, struct delete_records_list);
	if (recs == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;
	recs->vdata = vdata;

	/*
	 * traverse the tree of all records we want to delete and
	 * create a blob we can send to the other nodes.
	 *
	 * We call delete_marshall_traverse_first() to bump the
	 * records' RSNs in the database, to ensure we (as dmaster)
	 * keep the highest RSN of the records in the cluster.
	 */
	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse_first, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for first marshalling.\n"));
		goto done;
	}

	indata = ctdb_marshall_finish(recs->records);
	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				CTDB_CONTROL_RECEIVE_RECORDS, 0,
				indata, recs, &outdata, &res,
				NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Error storing record copies on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not store. We remove these from
		 * the list to process further.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not store the record
				 * copy and it is the first node that failed.
				 * So we should remove it from the tree and
				 * update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from RECEIVE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}
	/*
	 * Step 2:
	 * Send the remaining records to all active nodes for deletion.
	 *
	 * The lmaster's (i.e. our) copies of these records have been stored
	 * successfully on the other nodes.
	 */

	/*
	 * Create a marshall blob from the remaining list of records to delete.
	 */

	talloc_free(recs->records);

	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for second marshalling.\n"));
		goto done;
	}

	indata = ctdb_marshall_finish(recs->records);

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
				indata, recs, &outdata, &res,
				NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Failed to delete records on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not delete. We remove these from
		 * the list to delete locally.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not delete the
				 * record and it is the first node that
				 * failed. So we should remove it from
				 * the tree and update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from TRY_DELETE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}
	/*
	 * Step 3:
	 * Delete the remaining records locally.
	 *
	 * These records have successfully been deleted on all
	 * active remote nodes.
	 */

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_record_traverse, vdata);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for deletion.\n"));
	}

success:

	if (vdata->count.delete_list.left != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Vacuum db[%s] error: "
		      "there are %u records left for deletion after "
		      "processing delete list\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.left));
	}

	sum = vdata->count.delete_list.deleted
	    + vdata->count.delete_list.skipped
	    + vdata->count.delete_list.remote_error
	    + vdata->count.delete_list.local_error
	    + vdata->count.delete_list.left;

	if (vdata->count.delete_list.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
		      "delete list counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_list.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " vacuum delete list statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skip[%u] "
		       "rem.err[%u] "
		       "loc.err[%u] "
		       "left[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_list.total,
		       (unsigned)vdata->count.delete_list.deleted,
		       (unsigned)vdata->count.delete_list.skipped,
		       (unsigned)vdata->count.delete_list.remote_error,
		       (unsigned)vdata->count.delete_list.local_error,
		       (unsigned)vdata->count.delete_list.left));
	}

done:
	talloc_free(tmp_ctx);

	return;
}
/**
 * initialize the vacuum_data
 */
static struct vacuum_data *ctdb_vacuum_init_vacuum_data(
					struct ctdb_db_context *ctdb_db,
					TALLOC_CTX *mem_ctx)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct vacuum_data *vdata;

	vdata = talloc_zero(mem_ctx, struct vacuum_data);
	if (vdata == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return NULL;
	}

	vdata->ctdb = ctdb_db->ctdb;
	vdata->ctdb_db = ctdb_db;
	vdata->delete_list = trbt_create(vdata, 0);
	if (vdata->delete_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto fail;
	}

	vdata->start = timeval_current();

	vdata->count.delete_queue.added_to_delete_list = 0;
	vdata->count.delete_queue.added_to_vacuum_fetch_list = 0;
	vdata->count.delete_queue.deleted = 0;
	vdata->count.delete_queue.skipped = 0;
	vdata->count.delete_queue.error = 0;
	vdata->count.delete_queue.total = 0;
	vdata->count.db_traverse.scheduled = 0;
	vdata->count.db_traverse.skipped = 0;
	vdata->count.db_traverse.error = 0;
	vdata->count.db_traverse.total = 0;
	vdata->count.delete_list.total = 0;
	vdata->count.delete_list.left = 0;
	vdata->count.delete_list.remote_error = 0;
	vdata->count.delete_list.local_error = 0;
	vdata->count.delete_list.skipped = 0;
	vdata->count.delete_list.deleted = 0;

	/* the list needs to be of length num_nodes */
	vdata->vacuum_fetch_list = talloc_zero_array(vdata,
						     struct ctdb_marshall_buffer *,
						     ctdb->num_nodes);
	if (vdata->vacuum_fetch_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto fail;
	}
	for (i = 0; i < ctdb->num_nodes; i++) {
		vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
			talloc_zero_size(vdata->vacuum_fetch_list,
					 offsetof(struct ctdb_marshall_buffer, data));
		if (vdata->vacuum_fetch_list[i] == NULL) {
			DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
			talloc_free(vdata);
			return NULL;
		}
		vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
	}

	return vdata;

fail:
	talloc_free(vdata);
	return NULL;
}
/**
 * Vacuum a DB:
 *  - Always do the fast vacuuming run, which traverses
 *    the in-memory delete queue: these records have been
 *    scheduled for deletion.
 *  - Only if explicitly requested, the database is traversed
 *    in order to use the traditional heuristics on empty records
 *    to trigger deletion.
 *    This is done only every VacuumFastPathCount'th vacuuming run.
 *
 * The traverse runs fill two lists:
 *
 * - The delete_list:
 *   This is the list of empty records the current
 *   node is lmaster and dmaster for. These records are later
 *   deleted first on other nodes and then locally.
 *
 *   The fast vacuuming run has a short cut for those records
 *   that have never been migrated with data: these records
 *   are immediately deleted locally, since they have left
 *   no trace on other nodes.
 *
 * - The vacuum_fetch lists
 *   (one for each other lmaster node):
 *   The records in this list are sent for deletion to
 *   their lmaster in a bulk VACUUM_FETCH message.
 *
 *   The lmaster then migrates all these records to itself
 *   so that they can be vacuumed there.
 *
 * This executes in the child context.
 */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
			  bool full_vacuum_run)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret, pnn;
	struct vacuum_data *vdata;
	TALLOC_CTX *tmp_ctx;

	DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
			   "%s db_id[0x%08x]\n",
			   full_vacuum_run ? "full" : "fast",
			   ctdb_db->db_name, ctdb_db->db_id));

	ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
		return ret;
	}

	pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
	if (pnn == -1) {
		DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
		return -1;
	}

	ctdb->pnn = pnn;

	tmp_ctx = talloc_new(ctdb_db);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR, ("Out of memory!\n"));
		return -1;
	}

	vdata = ctdb_vacuum_init_vacuum_data(ctdb_db, tmp_ctx);
	if (vdata == NULL) {
		talloc_free(tmp_ctx);
		return -1;
	}

	if (full_vacuum_run) {
		ctdb_vacuum_traverse_db(ctdb_db, vdata);
	}

	ctdb_process_delete_queue(ctdb_db, vdata);

	ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);

	ctdb_process_delete_list(ctdb_db, vdata);

	talloc_free(tmp_ctx);

	/* this ensures we run our event queue */
	ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

	return 0;
}
/**
 * repack and vacuum a db
 * called from the child context
 */
static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
				     bool full_vacuum_run)
{
	uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
	const char *name = ctdb_db->db_name;
	int freelist_size = 0;
	int ret;

	if (ctdb_vacuum_db(ctdb_db, full_vacuum_run) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
	}

	freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
	if (freelist_size == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
		return -1;
	}

	/*
	 * decide if a repack is necessary
	 */
	if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
	{
		return 0;
	}

	DEBUG(DEBUG_INFO, ("Repacking %s with %u freelist entries\n",
			   name, freelist_size));

	ret = tdb_repack(ctdb_db->ltdb->tdb);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
		return -1;
	}

	return 0;
}
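/*
 * A sketch of the repack decision above, assuming a default
 * RepackLimit of 10000: the database is only repacked once vacuuming
 * has left at least 10000 entries on the tdb freelist, and setting
 * the tunable to 0 disables repacking entirely.
 */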
static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
{
	uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;

	return interval;
}
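/*
 * The interval is re-read from the VacuumInterval tunable (assumed
 * default: 10 seconds) every time an event is scheduled, so a runtime
 * change such as "ctdb setvar VacuumInterval 60" takes effect on the
 * next scheduling round without a restart.
 */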
static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
	double l = timeval_elapsed(&child_ctx->start_time);
	struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	CTDB_UPDATE_DB_LATENCY(ctdb_db, "vacuum", vacuum.latency, l);
	DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

	if (child_ctx->child_pid != -1) {
		ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
	} else {
		/* Bump the number of successful fast-path runs. */
		child_ctx->vacuum_handle->fast_path_count++;
	}

	DLIST_REMOVE(ctdb->vacuumers, child_ctx);

	event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, child_ctx->vacuum_handle);

	return 0;
}
/**
 * this event is generated when a vacuum child process times out
 */
static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
				 struct timeval t, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);

	DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));

	child_ctx->status = VACUUM_TIMEOUT;

	talloc_free(child_ctx);
}
/**
 * this event is generated when a vacuum child process has completed
 */
static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
				 uint16_t flags, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
	char c = 0;
	int ret;

	DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
	child_ctx->child_pid = -1;

	ret = sys_read(child_ctx->fd[0], &c, 1);
	if (ret != 1 || c != 0) {
		child_ctx->status = VACUUM_ERROR;
		DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
	} else {
		child_ctx->status = VACUUM_OK;
	}

	talloc_free(child_ctx);
}
/**
 * this event is called every time we need to start a new vacuum process
 */
static void
ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
		  struct timeval t, void *private_data)
{
	struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
	struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_vacuum_child_context *child_ctx;
	struct tevent_fd *fde;
	int ret;

	/* we don't vacuum if we are in recovery mode, or db frozen */
	if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
	    ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
		DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
				   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
				   : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
				   ? "freeze pending"
				   : "frozen"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	/* Do not allow multiple vacuuming child processes to be active at the
	 * same time. If there is a vacuuming child process active, delay the
	 * new vacuuming event to stagger vacuuming events.
	 */
	if (ctdb->vacuumers != NULL) {
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(0, 500*1000),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
	if (child_ctx == NULL) {
		DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
		ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
	}

	ret = pipe(child_ctx->fd);
	if (ret != 0) {
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
		vacuum_handle->fast_path_count = 0;
	}

	child_ctx->child_pid = ctdb_fork(ctdb);
	if (child_ctx->child_pid == (pid_t)-1) {
		close(child_ctx->fd[0]);
		close(child_ctx->fd[1]);
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}


	if (child_ctx->child_pid == 0) {
		char cc = 0;
		bool full_vacuum_run = false;
		close(child_ctx->fd[0]);

		DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
		ctdb_set_process_name("ctdb_vacuum");
		if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
			DEBUG(DEBUG_CRIT, (__location__ " ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
			_exit(1);
		}

		if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
		    (vacuum_handle->fast_path_count == 0))
		{
			full_vacuum_run = true;
		}
		cc = ctdb_vacuum_and_repack_db(ctdb_db, full_vacuum_run);

		sys_write(child_ctx->fd[1], &cc, 1);
		_exit(0);
	}

	set_close_on_exec(child_ctx->fd[0]);
	close(child_ctx->fd[1]);

	child_ctx->status = VACUUM_RUNNING;
	child_ctx->start_time = timeval_current();

	DLIST_ADD(ctdb->vacuumers, child_ctx);
	talloc_set_destructor(child_ctx, vacuum_child_destructor);

	/*
	 * Clear the fastpath vacuuming list in the parent.
	 */
	talloc_free(ctdb_db->delete_queue);
	ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
	if (ctdb_db->delete_queue == NULL) {
		/* fatal here? ... */
		ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
				 "in parent context. Shutting down\n");
	}

	event_add_timed(ctdb->ev, child_ctx,
			timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
			vacuum_child_timeout, child_ctx);

	DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

	fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
			   EVENT_FD_READ, vacuum_child_handler, child_ctx);
	tevent_fd_set_auto_close(fde);

	vacuum_handle->child_ctx = child_ctx;
	child_ctx->vacuum_handle = vacuum_handle;
}
void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
{
	/* Simply free them all. */
	while (ctdb->vacuumers) {
		DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
			   ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
			   (int)ctdb->vacuumers->child_pid));
		/* vacuum_child_destructor kills it, removes from list */
		talloc_free(ctdb->vacuumers);
	}
}
/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
	if (ctdb_db->persistent != 0) {
		DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
		return 0;
	}

	ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
	CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);

	ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
	ctdb_db->vacuum_handle->fast_path_count = 0;

	event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, ctdb_db->vacuum_handle);

	return 0;
}
static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
					    const struct ctdb_ltdb_header *hdr,
					    const TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "db[%s] "
			    "db_id[0x%08x] "
			    "key_hash[0x%08x] "
			    "lmaster[%u] "
			    "migrated_with_data[%s]\n",
			    ctdb_db->db_name, ctdb_db->db_id,
			    hash,
			    ctdb_lmaster(ctdb_db->ctdb, &key),
			    hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd == NULL) {
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "record not in queue (hash[0x%08x]).\n",
				    hash));
		return;
	}

	if ((kd->key.dsize != key.dsize) ||
	    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
	{
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "hash collision for key with hash[0x%08x] "
				    "in db[%s] - skipping\n",
				    hash, ctdb_db->db_name));
		return;
	}

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "removing key with hash[0x%08x]\n",
			    hash));

	talloc_free(kd);

	return;
}
/**
 * Insert a record into the ctdb_db context's delete queue,
 * handling hash collisions.
 */
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;
	int ret;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
			   "db_id[0x%08x] "
			   "key_hash[0x%08x] "
			   "lmaster[%u] "
			   "migrated_with_data[%s]\n",
			   ctdb_db->db_name, ctdb_db->db_id,
			   hash,
			   ctdb_lmaster(ctdb_db->ctdb, &key),
			   hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd != NULL) {
		if ((kd->key.dsize != key.dsize) ||
		    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
		{
			DEBUG(DEBUG_INFO,
			      (__location__ " schedule for deletion: "
			       "hash collision for key hash [0x%08x]. "
			       "Skipping the record.\n", hash));
			return 0;
		} else {
			DEBUG(DEBUG_DEBUG,
			      (__location__ " schedule for deletion: "
			       "updating entry for key with hash [0x%08x].\n",
			       hash));
		}
	}

	ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
						  ctdb_db->delete_queue,
						  hdr, key);
	if (ret != 0) {
		DEBUG(DEBUG_INFO,
		      (__location__ " schedule for deletion: error "
		       "inserting key with hash [0x%08x] into delete queue\n",
		       hash));
		return -1;
	}

	return 0;
}
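/*
 * Note the asymmetry above: a different key that hashes onto an
 * occupied slot is skipped (reporting success), while re-inserting
 * the same key falls through and refreshes the queued entry with the
 * newer header.
 */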
/**
 * Schedule a record for deletion.
 * Called from the parent context.
 */
int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
					   TDB_DATA indata)
{
	struct ctdb_control_schedule_for_deletion *dd;
	struct ctdb_db_context *ctdb_db;
	int ret;
	TDB_DATA key;

	dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;

	ctdb_db = find_ctdb_db(ctdb, dd->db_id);
	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
				  dd->db_id));
		return -1;
	}

	key.dsize = dd->keylen;
	key.dptr = dd->key;

	ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);

	return ret;
}
int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 TDB_DATA key)
{
	int ret;
	struct ctdb_control_schedule_for_deletion *dd;
	TDB_DATA indata;
	int32_t status;

	if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
		/* main daemon - directly queue */
		ret = insert_record_into_delete_queue(ctdb_db, hdr, key);

		return ret;
	}

	/* if we don't have a connection to the daemon we cannot send
	 * a control, e.g. when running in an update_record control
	 * child process.
	 */
	if (!ctdb_db->ctdb->can_send_controls) {
		return -1;
	}

	/* child process: send the main daemon a control */

	indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
	indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
	if (indata.dptr == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
		return -1;
	}
	dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
	dd->db_id = ctdb_db->db_id;
	dd->hdr = *hdr;
	dd->keylen = key.dsize;
	memcpy(dd->key, key.dptr, key.dsize);

	ret = ctdb_control(ctdb_db->ctdb,
			   CTDB_CURRENT_NODE,
			   ctdb_db->db_id,
			   CTDB_CONTROL_SCHEDULE_FOR_DELETION,
			   CTDB_CTRL_FLAG_NOREPLY, /* flags */
			   indata,
			   NULL, /* mem_ctx */
			   NULL, /* outdata */
			   &status,
			   NULL, /* timeout : NULL == wait forever */
			   NULL); /* error message */

	talloc_free(indata.dptr);

	if (ret != 0 || status != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error sending "
				  "SCHEDULE_FOR_DELETION "
				  "control.\n"));
		if (status != 0) {
			ret = -1;
		}
	}

	return ret;
}
void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 const TDB_DATA key)
{
	if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
		/*
		 * Only remove the record from the delete queue if called
		 * in the main daemon.
		 */
		return;
	}

	remove_record_from_delete_queue(ctdb_db, hdr, key);

	return;
}