ctdb-vacuum: use tdb_parse_record instead of tdb_fetch in delete_record_traverse()
ctdb/server/ctdb_vacuum.c
/*
   ctdb vacuuming events

   Copyright (C) Ronnie Sahlberg 2009
   Copyright (C) Michael Adam 2010-2013
   Copyright (C) Stefan Metzmacher 2010-2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "tdb.h"
#include "system/network.h"
#include "system/filesys.h"
#include "system/dir.h"
#include "../include/ctdb_private.h"
#include "db_wrap.h"
#include "lib/util/dlinklist.h"
#include "../common/rb_tree.h"

#define TIMELIMIT() timeval_current_ofs(10, 0)

enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};

struct ctdb_vacuum_child_context {
	struct ctdb_vacuum_child_context *next, *prev;
	struct ctdb_vacuum_handle *vacuum_handle;
	/* fd child writes status to */
	int fd[2];
	pid_t child_pid;
	enum vacuum_child_status status;
	struct timeval start_time;
};

struct ctdb_vacuum_handle {
	struct ctdb_db_context *ctdb_db;
	struct ctdb_vacuum_child_context *child_ctx;
	uint32_t fast_path_count;
};

/* a list of records to possibly delete */
struct vacuum_data {
	uint32_t vacuum_limit;
	uint32_t repack_limit;
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tdb_context *dest_db;
	trbt_tree_t *delete_list;
	uint32_t delete_count;
	struct ctdb_marshall_buffer **vacuum_fetch_list;
	struct timeval start;
	bool traverse_error;
	bool vacuum;
	uint32_t total;
	uint32_t vacuumed;
	uint32_t copied;
	uint32_t fast_added_to_vacuum_fetch_list;
	uint32_t fast_added_to_delete_list;
	uint32_t fast_deleted;
	uint32_t fast_skipped;
	uint32_t fast_error;
	uint32_t fast_total;
	uint32_t full_scheduled;
	uint32_t full_skipped;
	uint32_t full_error;
	uint32_t full_total;
	uint32_t delete_left;
	uint32_t delete_remote_error;
	uint32_t delete_local_error;
	uint32_t delete_deleted;
	uint32_t delete_skipped;
};

/* this structure contains the information for one record to be deleted */
struct delete_record_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct ctdb_ltdb_header hdr;
	TDB_DATA key;
	uint8_t keydata[1];
};

struct delete_records_list {
	struct ctdb_marshall_buffer *records;
	struct vacuum_data *vdata;
};

static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key);

/**
 * Store key and header in a tree, indexed by the key hash.
 */
static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
					       struct ctdb_db_context *ctdb_db,
					       trbt_tree_t *tree,
					       const struct ctdb_ltdb_header *hdr,
					       TDB_DATA key)
{
	struct delete_record_data *dd;
	uint32_t hash;
	size_t len;

	len = offsetof(struct delete_record_data, keydata) + key.dsize;

	dd = (struct delete_record_data *)talloc_size(tree, len);
	if (dd == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	talloc_set_name_const(dd, "struct delete_record_data");

	dd->ctdb = ctdb;
	dd->ctdb_db = ctdb_db;
	dd->key.dsize = key.dsize;
	dd->key.dptr = dd->keydata;
	memcpy(dd->keydata, key.dptr, key.dsize);

	dd->hdr = *hdr;

	hash = ctdb_hash(&key);

	trbt_insert32(tree, hash, dd);

	return 0;
}

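/**
 * Add a record to this run's delete_list, indexed by its key hash.
 * On a hash collision with an already queued record, the new record
 * is skipped for this run.
 */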
static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
				     struct ctdb_ltdb_header *hdr)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t hash;
	int ret;

	hash = ctdb_hash(&key);

	if (trbt_lookup32(vdata->delete_list, hash)) {
		DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
		return 0;
	}

	ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
						  vdata->delete_list,
						  hdr, key);
	if (ret != 0) {
		return -1;
	}

	vdata->delete_count++;

	return 0;
}

/**
 * Add a record to the list of records to be sent
 * to their lmaster with VACUUM_FETCH.
 */
static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
					   TDB_DATA key)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_rec_data *rec;
	uint32_t lmaster;
	size_t old_size;
	struct ctdb_marshall_buffer *vfl;

	lmaster = ctdb_lmaster(ctdb, &key);

	vfl = vdata->vacuum_fetch_list[lmaster];

	rec = ctdb_marshall_record(vfl, ctdb->pnn, key, NULL, tdb_null);
	if (rec == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		vdata->traverse_error = true;
		return -1;
	}

	old_size = talloc_get_size(vfl);
	vfl = talloc_realloc_size(NULL, vfl, old_size + rec->length);
	if (vfl == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
		vdata->traverse_error = true;
		return -1;
	}
	vdata->vacuum_fetch_list[lmaster] = vfl;

	vfl->count++;
	memcpy(old_size+(uint8_t *)vfl, rec, rec->length);
	talloc_free(rec);

	vdata->total++;

	return 0;
}

static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
			      struct timeval t, void *private_data);

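/**
 * tdb_parse_record() callback: accept only records that consist of
 * exactly a ctdb_ltdb_header (i.e. empty records) and copy that
 * header out to the caller.
 */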
static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
{
	struct ctdb_ltdb_header *header =
		(struct ctdb_ltdb_header *)private_data;

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		return -1;
	}

	*header = *(struct ctdb_ltdb_header *)data.dptr;

	return 0;
}

/**
 * traverse function for gathering the records that can be deleted
 */
static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct vacuum_data *vdata = talloc_get_type(private_data,
						    struct vacuum_data);
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t lmaster;
	struct ctdb_ltdb_header *hdr;
	int res = 0;

	vdata->full_total++;

	lmaster = ctdb_lmaster(ctdb, &key);
	if (lmaster >= ctdb->num_nodes) {
		vdata->full_error++;
		DEBUG(DEBUG_CRIT, (__location__
				   " lmaster[%u] >= ctdb->num_nodes[%u] for key"
				   " with hash[%u]!\n",
				   (unsigned)lmaster,
				   (unsigned)ctdb->num_nodes,
				   (unsigned)ctdb_hash(&key)));
		return -1;
	}

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		/* it is not a deleted record */
		vdata->full_skipped++;
		return 0;
	}

	hdr = (struct ctdb_ltdb_header *)data.dptr;

	if (hdr->dmaster != ctdb->pnn) {
		vdata->full_skipped++;
		return 0;
	}

	/*
	 * Add the record to this process's delete_queue for processing
	 * in the subsequent traverse in the fast vacuum run.
	 */
	res = insert_record_into_delete_queue(ctdb_db, hdr, key);
	if (res != 0) {
		vdata->full_error++;
	} else {
		vdata->full_scheduled++;
	}

	return 0;
}

/**
 * traverse the tree of records to delete and marshall them into
 * a blob
 */
static int delete_marshall_traverse(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_rec_data *rec;
	size_t old_size;

	rec = ctdb_marshall_record(dd, recs->records->db_id, dd->key, &dd->hdr, tdb_null);
	if (rec == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
		return 0;
	}

	old_size = talloc_get_size(recs->records);
	recs->records = talloc_realloc_size(NULL, recs->records, old_size + rec->length);
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
		return 0;
	}
	recs->records->count++;
	memcpy(old_size+(uint8_t *)(recs->records), rec, rec->length);
	return 0;
}

/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from
 * resurrection in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_ltdb_header *header;
	TDB_DATA tdb_data, ctdb_data;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));
	int res;

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		recs->vdata->delete_skipped++;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	tdb_data = tdb_fetch(ctdb_db->ltdb->tdb, dd->key);
	if (tdb_data.dsize < sizeof(struct ctdb_ltdb_header)) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] does not exist or is not"
				   " a ctdb-record. skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (tdb_data.dsize > sizeof(struct ctdb_ltdb_header)) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been recycled. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	header = (struct ctdb_ltdb_header *)tdb_data.dptr;

	if (header->flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header->dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header->rsn != dd->hdr.rsn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	/*
	 * Increment the record's RSN to ensure the dmaster (i.e. the current
	 * node) has the highest RSN of the record in the cluster.
	 * This is to prevent old record copies from resurrecting in recoveries
	 * if something should fail during the deletion process.
	 * Note that ctdb_ltdb_store_server() increments the RSN if called
	 * on the record's dmaster.
	 */

	ctdb_data.dptr = tdb_data.dptr + sizeof(struct ctdb_ltdb_header);
	ctdb_data.dsize = tdb_data.dsize - sizeof(struct ctdb_ltdb_header);

	res = ctdb_ltdb_store(ctdb_db, dd->key, header, ctdb_data);
	if (res != 0) {
		DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
				  "key hash [0x%08x] on database db[%s].\n",
				  hash, ctdb_db->db_name));
		goto skip;
	}

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	goto done;

skip:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	recs->vdata->delete_skipped++;
	talloc_free(dd);
	dd = NULL;

done:
	if (tdb_data.dptr != NULL) {
		free(tdb_data.dptr);
	}

	if (dd == NULL) {
		return 0;
	}

	return delete_marshall_traverse(param, data);
}

/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 * - If the record has been migrated off the node
 *   or has been revived (filled with data) on the node,
 *   then skip the record.
 *
 * - If the current node is the record's lmaster and it is
 *   a record that has never been migrated with data, then
 *   delete the record from the local tdb.
 *
 * - If the current node is the record's lmaster and it has
 *   been migrated with data, then schedule it for the normal
 *   vacuuming procedure (i.e. add it to the delete_list).
 *
 * - If the current node is NOT the record's lmaster then
 *   add it to the list of records that are to be sent to
 *   the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	vdata->fast_total++;

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->fast_error++;
		return 0;
	}

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skipped;
	}

	if (header.dmaster != ctdb->pnn) {
		/* The record has been migrated off the node. Skip. */
		goto skipped;
	}

	if (header.rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		goto skipped;
	}

	/*
	 * We are dmaster, and the record has no data, and it has
	 * not been migrated after it has been queued for deletion.
	 *
	 * At this stage, the record could still have been revived locally
	 * and last been written with empty data. This can only be
	 * fixed with the addition of an active or delete flag. (TODO)
	 */

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		res = add_record_to_vacuum_fetch_list(vdata, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records to send to lmaster.\n"));
			vdata->fast_error++;
		} else {
			vdata->fast_added_to_vacuum_fetch_list++;
		}
		goto done;
	}

	/* use header->flags or dd->hdr.flags ?? */
	if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
		res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records for deletion on lmaster.\n"));
			vdata->fast_error++;
		} else {
			vdata->fast_added_to_delete_list++;
		}
	} else {
		res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error deleting record with key "
			       "hash [0x%08x] from local data base db[%s].\n",
			       hash, ctdb_db->db_name));
			vdata->fast_error++;
			goto done;
		}

		DEBUG(DEBUG_DEBUG,
		      (__location__ " Deleted record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->fast_deleted++;
	}

	goto done;

skipped:
	vdata->fast_skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	return 0;
}

/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->delete_local_error++;
		vdata->delete_left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn + 1) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 * (Note that the first marshall traverse has bumped the RSN
		 *  on disk.)
		 */
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error deleting record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->delete_local_error++;
		goto done;
	}

	DEBUG(DEBUG_DEBUG,
	      (__location__ " Deleted record with key hash [0x%08x] from "
	       "local data base db[%s].\n", hash, ctdb_db->db_name));

	vdata->delete_deleted++;
	goto done;

skip:
	vdata->delete_skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	talloc_free(dd);
	vdata->delete_left--;

	return 0;
}

/**
 * Traverse the delete_queue.
 * Records are either deleted directly or filled
 * into the delete list or the vacuum fetch lists
 * for further processing.
 */
static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
				      struct vacuum_data *vdata)
{
	uint32_t sum;

	trbt_traversearray32(ctdb_db->delete_queue, 1, delete_queue_traverse, vdata);

	sum = vdata->fast_deleted
	    + vdata->fast_skipped
	    + vdata->fast_error
	    + vdata->fast_added_to_delete_list
	    + vdata->fast_added_to_vacuum_fetch_list;

	if (vdata->fast_total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
				  "counts for db[%s]: total[%u] != sum[%u]\n",
				  ctdb_db->db_name, (unsigned)vdata->fast_total,
				  (unsigned)sum));
	}

	if (vdata->fast_total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " fast vacuuming delete_queue traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "adl[%u] "
		       "avf[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->fast_total,
		       (unsigned)vdata->fast_deleted,
		       (unsigned)vdata->fast_skipped,
		       (unsigned)vdata->fast_error,
		       (unsigned)vdata->fast_added_to_delete_list,
		       (unsigned)vdata->fast_added_to_vacuum_fetch_list));
	}

	return;
}

/**
 * read-only traverse of the database, looking for records that
 * might be able to be vacuumed.
 *
 * This is not done each time but only every tunable
 * VacuumFastPathCount times.
 */
static int ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
				   struct vacuum_data *vdata)
{
	int ret;

	ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
	if (ret == -1 || vdata->traverse_error) {
		DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
				  "'%s'\n", ctdb_db->db_name));
		return -1;
	}

	if (vdata->full_total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " full vacuuming db traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "sched[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->full_total,
		       (unsigned)vdata->full_skipped,
		       (unsigned)vdata->full_error,
		       (unsigned)vdata->full_scheduled));
	}

	return 0;
}

/**
 * Process the vacuum fetch lists:
 * For records for which we are not the lmaster, tell the lmaster to
 * fetch the record.
 */
static int ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
					   struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	for (i = 0; i < ctdb->num_nodes; i++) {
		TDB_DATA data;
		struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];

		if (ctdb->nodes[i]->pnn == ctdb->pnn) {
			continue;
		}

		if (vfl->count == 0) {
			continue;
		}

		DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
				   vfl->count, ctdb->nodes[i]->pnn,
				   ctdb_db->db_name));

		data.dsize = talloc_get_size(vfl);
		data.dptr = (void *)vfl;
		if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
					     CTDB_SRVID_VACUUM_FETCH,
					     data) != 0)
		{
			DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
					  "fetch message to %u\n",
					  ctdb->nodes[i]->pnn));
			return -1;
		}
	}

	return 0;
}

/**
 * Process the delete list:
 *
 * This is the last step of vacuuming that consistently deletes
 * those records that have been migrated with data and can hence
 * not be deleted when leaving a node.
 *
 * In this step, the lmaster does the final deletion of those empty
 * records that it is also dmaster for. It has usually received
 * at least some of these records previously from the former dmasters
 * with the vacuum fetch message.
 *
 * This last step is implemented as a 3-phase process to protect from
 * races leading to data corruption:
 *
 * 1) Send the lmaster's copy to all other active nodes with the
 *    RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
 * 2) Send the records that could successfully be stored remotely
 *    in step #1 to all active nodes with the TRY_DELETE_RECORDS
 *    control. The remote nodes delete their local copy.
 * 3) The lmaster locally deletes its copies of all records that
 *    could successfully be deleted remotely in step #2.
 */
static int ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
				    struct vacuum_data *vdata)
{
	int ret, i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct delete_records_list *recs;
	TDB_DATA indata;
	struct ctdb_node_map *nodemap;
	uint32_t *active_nodes;
	int num_active_nodes;
	TALLOC_CTX *tmp_ctx;

	if (vdata->delete_count == 0) {
		return 0;
	}

	tmp_ctx = talloc_new(vdata);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return 0;
	}

	vdata->delete_left = vdata->delete_count;

	/*
	 * get the list of currently active nodes
	 */

	ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
				   CTDB_CURRENT_NODE,
				   tmp_ctx,
				   &nodemap);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
		ret = -1;
		goto done;
	}

	active_nodes = list_of_active_nodes(ctdb, nodemap,
					    nodemap, /* talloc context */
					    false /* include self */);
	/* yuck! ;-) */
	num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

	/*
	 * Now delete the records on all active nodes in a three-phase process:
	 * 1) send all active remote nodes the current empty copy with this
	 *    node as DMASTER
	 * 2) if all nodes could store the new copy,
	 *    tell all the active remote nodes to delete all their copy
	 * 3) if all remote nodes deleted their record copy, delete it locally
	 */

	/*
	 * Step 1:
	 * Send currently empty record copy to all active nodes for storing.
	 */

	recs = talloc_zero(tmp_ctx, struct delete_records_list);
	if (recs == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		ret = -1;
		goto done;
	}
	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		ret = -1;
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;
	recs->vdata = vdata;

	/*
	 * traverse the tree of all records we want to delete and
	 * create a blob we can send to the other nodes.
	 *
	 * We call delete_marshall_traverse_first() to bump the
	 * records' RSNs in the database, to ensure we (as dmaster)
	 * keep the highest RSN of the records in the cluster.
	 */
	trbt_traversearray32(vdata->delete_list, 1,
			     delete_marshall_traverse_first, recs);

	indata.dsize = talloc_get_size(recs->records);
	indata.dptr = (void *)recs->records;

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				   CTDB_CONTROL_RECEIVE_RECORDS, 0,
				   indata, recs, &outdata, &res,
				   NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Error storing record copies on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			ret = -1;
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not store. We remove these from
		 * the list to process further.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				ret = -1;
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not store the record
				 * copy and it is the first node that failed.
				 * So we should remove it from the tree and
				 * update statistics.
				 */
				talloc_free(dd);
				vdata->delete_remote_error++;
				vdata->delete_left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->delete_left == 0) {
		goto success;
	}

	/*
	 * Step 2:
	 * Send the remaining records to all active nodes for deletion.
	 *
	 * The lmaster's (i.e. our) copies of these records have been stored
	 * successfully on the other nodes.
	 */

	/*
	 * Create a marshall blob from the remaining list of records to delete.
	 */

	talloc_free(recs->records);

	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		ret = -1;
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;

	trbt_traversearray32(vdata->delete_list, 1,
			     delete_marshall_traverse, recs);

	indata.dsize = talloc_get_size(recs->records);
	indata.dptr = (void *)recs->records;

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				   CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
				   indata, recs, &outdata, &res,
				   NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Failed to delete records on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			ret = -1;
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not delete. We remove these from
		 * the list to delete locally.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				ret = -1;
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not delete the
				 * record and it is the first node that
				 * failed. So we should remove it from
				 * the tree and update statistics.
				 */
				talloc_free(dd);
				vdata->delete_remote_error++;
				vdata->delete_left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->delete_left == 0) {
		goto success;
	}

	/*
	 * Step 3:
	 * Delete the remaining records locally.
	 *
	 * These records have successfully been deleted on all
	 * active remote nodes.
	 */

	trbt_traversearray32(vdata->delete_list, 1,
			     delete_record_traverse, vdata);

success:

	if (vdata->delete_count > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " vacuum delete list statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skip[%u] "
		       "rem.err[%u] "
		       "loc.err[%u] "
		       "left[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->delete_count,
		       (unsigned)vdata->delete_deleted,
		       (unsigned)vdata->delete_skipped,
		       (unsigned)vdata->delete_remote_error,
		       (unsigned)vdata->delete_local_error,
		       (unsigned)vdata->delete_left));
	}

	ret = 0;

done:
	talloc_free(tmp_ctx);

	return ret;
}

/**
 * initialize the vacuum_data
 */
static int ctdb_vacuum_init_vacuum_data(struct ctdb_db_context *ctdb_db,
					struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	vdata->fast_added_to_delete_list = 0;
	vdata->fast_added_to_vacuum_fetch_list = 0;
	vdata->fast_deleted = 0;
	vdata->fast_skipped = 0;
	vdata->fast_error = 0;
	vdata->fast_total = 0;
	vdata->full_scheduled = 0;
	vdata->full_skipped = 0;
	vdata->full_error = 0;
	vdata->full_total = 0;
	vdata->delete_count = 0;
	vdata->delete_left = 0;
	vdata->delete_remote_error = 0;
	vdata->delete_local_error = 0;
	vdata->delete_skipped = 0;
	vdata->delete_deleted = 0;

	/* the list needs to be of length num_nodes */
	vdata->vacuum_fetch_list = talloc_zero_array(vdata,
					struct ctdb_marshall_buffer *,
					ctdb->num_nodes);
	if (vdata->vacuum_fetch_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	for (i = 0; i < ctdb->num_nodes; i++) {
		vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
			talloc_zero_size(vdata->vacuum_fetch_list,
					 offsetof(struct ctdb_marshall_buffer, data));
		if (vdata->vacuum_fetch_list[i] == NULL) {
			DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
			return -1;
		}
		vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
	}

	return 0;
}

/**
 * Vacuum a DB:
 *  - Always do the fast vacuuming run, which traverses
 *    the in-memory delete queue: these records have been
 *    scheduled for deletion.
 *  - Only if explicitly requested, the database is traversed
 *    in order to use the traditional heuristics on empty records
 *    to trigger deletion.
 *    This is done only every VacuumFastPathCount'th vacuuming run.
 *
 * The traverse runs fill two lists:
 *
 * - The delete_list:
 *   This is the list of empty records the current
 *   node is lmaster and dmaster for. These records are later
 *   deleted first on other nodes and then locally.
 *
 *   The fast vacuuming run has a short cut for those records
 *   that have never been migrated with data: these records
 *   are immediately deleted locally, since they have left
 *   no trace on other nodes.
 *
 * - The vacuum_fetch lists
 *   (one for each other lmaster node):
 *   The records in this list are sent for deletion to
 *   their lmaster in a bulk VACUUM_FETCH message.
 *
 *   The lmaster then migrates all these records to itself
 *   so that they can be vacuumed there.
 *
 * This executes in the child context.
 */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
			  struct vacuum_data *vdata,
			  bool full_vacuum_run)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret, pnn;

	DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
			   "%s db_id[0x%08x]\n",
			   full_vacuum_run ? "full" : "fast",
			   ctdb_db->db_name, ctdb_db->db_id));

	ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
		return ret;
	}

	pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
	if (pnn == -1) {
		DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
		return -1;
	}

	ctdb->pnn = pnn;

	ret = ctdb_vacuum_init_vacuum_data(ctdb_db, vdata);
	if (ret != 0) {
		return ret;
	}

	if (full_vacuum_run) {
		ret = ctdb_vacuum_traverse_db(ctdb_db, vdata);
		if (ret != 0) {
			return ret;
		}
	}

	ctdb_process_delete_queue(ctdb_db, vdata);

	ret = ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);
	if (ret != 0) {
		return ret;
	}

	ret = ctdb_process_delete_list(ctdb_db, vdata);
	if (ret != 0) {
		return ret;
	}

	/* this ensures we run our event queue */
	ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

	return 0;
}

/*
 * traverse function for repacking
 */
static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct vacuum_data *vdata = (struct vacuum_data *)private_data;

	if (vdata->vacuum) {
		uint32_t hash = ctdb_hash(&key);
		struct delete_record_data *kd;
		/*
		 * check if we can ignore this record because it's in the delete_list
		 */
		kd = (struct delete_record_data *)trbt_lookup32(vdata->delete_list, hash);
		/*
		 * there might be hash collisions so we have to compare the keys here to be sure
		 */
		if (kd && kd->key.dsize == key.dsize && memcmp(kd->key.dptr, key.dptr, key.dsize) == 0) {
			struct ctdb_ltdb_header *hdr = (struct ctdb_ltdb_header *)data.dptr;
			/*
			 * we have to check if the record hasn't changed in the meantime in order to
			 * safely remove it from the database
			 */
			if (data.dsize == sizeof(struct ctdb_ltdb_header) &&
			    hdr->dmaster == kd->ctdb->pnn &&
			    ctdb_lmaster(kd->ctdb, &(kd->key)) == kd->ctdb->pnn &&
			    kd->hdr.rsn == hdr->rsn) {
				vdata->vacuumed++;
				return 0;
			}
		}
	}
	if (tdb_store(vdata->dest_db, key, data, TDB_INSERT) != 0) {
		vdata->traverse_error = true;
		return -1;
	}
	vdata->copied++;
	return 0;
}

/*
 * repack a tdb
 */
static int ctdb_repack_tdb(struct tdb_context *tdb, TALLOC_CTX *mem_ctx, struct vacuum_data *vdata)
{
	struct tdb_context *tmp_db;

	if (tdb_transaction_start(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to start transaction\n"));
		return -1;
	}

	tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb),
			  TDB_INTERNAL|TDB_DISALLOW_NESTING,
			  O_RDWR|O_CREAT, 0);
	if (tmp_db == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to create tmp_db\n"));
		tdb_transaction_cancel(tdb);
		return -1;
	}

	vdata->traverse_error = false;
	vdata->dest_db = tmp_db;
	vdata->vacuum = true;
	vdata->vacuumed = 0;
	vdata->copied = 0;

	/*
	 * repack and vacuum on-the-fly by not writing the records that are
	 * no longer needed
	 */
	if (tdb_traverse_read(tdb, repack_traverse, vdata) == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying out\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	DEBUG(DEBUG_INFO,(__location__ " %u records vacuumed\n", vdata->vacuumed));

	if (vdata->traverse_error) {
		DEBUG(DEBUG_ERR,(__location__ " Error during traversal\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	if (tdb_wipe_all(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to wipe database\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	vdata->traverse_error = false;
	vdata->dest_db = tdb;
	vdata->vacuum = false;
	vdata->copied = 0;

	if (tdb_traverse_read(tmp_db, repack_traverse, vdata) == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying back\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	if (vdata->traverse_error) {
		DEBUG(DEBUG_ERR,(__location__ " Error during second traversal\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	tdb_close(tmp_db);

	if (tdb_transaction_commit(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to commit\n"));
		return -1;
	}
	DEBUG(DEBUG_INFO,(__location__ " %u records copied\n", vdata->copied));

	return 0;
}

/*
 * repack and vacuum a db
 * called from the child context
 */
static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
				     TALLOC_CTX *mem_ctx,
				     bool full_vacuum_run)
{
	uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
	uint32_t vacuum_limit = ctdb_db->ctdb->tunable.vacuum_limit;
	const char *name = ctdb_db->db_name;
	int freelist_size = 0;
	struct vacuum_data *vdata;

	vdata = talloc_zero(mem_ctx, struct vacuum_data);
	if (vdata == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}

	vdata->ctdb = ctdb_db->ctdb;
	vdata->vacuum_limit = vacuum_limit;
	vdata->repack_limit = repack_limit;
	vdata->delete_list = trbt_create(vdata, 0);
	vdata->ctdb_db = ctdb_db;
	if (vdata->delete_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		talloc_free(vdata);
		return -1;
	}

	vdata->start = timeval_current();

	/*
	 * gather all records that can be deleted in vdata
	 */
	if (ctdb_vacuum_db(ctdb_db, vdata, full_vacuum_run) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
	}

	if (repack_limit != 0) {
		freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
		if (freelist_size == -1) {
			DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
			talloc_free(vdata);
			return -1;
		}
	}

	/*
	 * decide if a repack is necessary
	 */
	if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit) &&
	    vdata->delete_left < vacuum_limit)
	{
		talloc_free(vdata);
		return 0;
	}

	DEBUG(DEBUG_INFO,("Repacking %s with %u freelist entries and %u records to delete\n",
			  name, freelist_size, vdata->delete_left));

	/*
	 * repack and implicitly get rid of the records we can delete
	 */
	if (ctdb_repack_tdb(ctdb_db->ltdb->tdb, mem_ctx, vdata) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
		talloc_free(vdata);
		return -1;
	}
	talloc_free(vdata);

	return 0;
}

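/*
 * Return the vacuuming interval (in seconds) for this database,
 * taken from the vacuum_interval tunable.
 */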
static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
{
	uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;

	return interval;
}

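/*
 * Destructor for a vacuum child context: kill the child process if it
 * is still running, otherwise count the run as a successful fast-path
 * run, remove the context from the list of active vacuumers and
 * schedule the next vacuum event for this database.
 */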
static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
	double l = timeval_elapsed(&child_ctx->start_time);
	struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

	if (child_ctx->child_pid != -1) {
		ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
	} else {
		/* Bump the number of successful fast-path runs. */
		child_ctx->vacuum_handle->fast_path_count++;
	}

	DLIST_REMOVE(ctdb->vacuumers, child_ctx);

	event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, child_ctx->vacuum_handle);

	return 0;
}

/*
 * this event is generated when a vacuum child process times out
 */
static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
				 struct timeval t, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);

	DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));

	child_ctx->status = VACUUM_TIMEOUT;

	talloc_free(child_ctx);
}

/*
 * this event is generated when a vacuum child process has completed
 */
static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
				 uint16_t flags, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
	char c = 0;
	int ret;

	DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
	child_ctx->child_pid = -1;

	ret = read(child_ctx->fd[0], &c, 1);
	if (ret != 1 || c != 0) {
		child_ctx->status = VACUUM_ERROR;
		DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
	} else {
		child_ctx->status = VACUUM_OK;
	}

	talloc_free(child_ctx);
}

/*
 * this event is called every time we need to start a new vacuum process
 */
static void
ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
		  struct timeval t, void *private_data)
{
	struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
	struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_vacuum_child_context *child_ctx;
	struct tevent_fd *fde;
	int ret;

	/* we don't vacuum if we are in recovery mode, or db frozen */
	if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
	    ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
		DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
				   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
				   : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
				   ? "freeze pending"
				   : "frozen"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
	if (child_ctx == NULL) {
		DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
		ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
	}

	ret = pipe(child_ctx->fd);
	if (ret != 0) {
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
		vacuum_handle->fast_path_count = 0;
	}

	child_ctx->child_pid = ctdb_fork(ctdb);
	if (child_ctx->child_pid == (pid_t)-1) {
		close(child_ctx->fd[0]);
		close(child_ctx->fd[1]);
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (child_ctx->child_pid == 0) {
		char cc = 0;
		bool full_vacuum_run = false;
		close(child_ctx->fd[0]);

		DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
		ctdb_set_process_name("ctdb_vacuum");
		if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
			DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
			_exit(1);
		}

		/*
		 * repack the db
		 */
		if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
		    (vacuum_handle->fast_path_count == 0))
		{
			full_vacuum_run = true;
		}
		cc = ctdb_vacuum_and_repack_db(ctdb_db, child_ctx,
					       full_vacuum_run);

		write(child_ctx->fd[1], &cc, 1);
		_exit(0);
	}

	set_close_on_exec(child_ctx->fd[0]);
	close(child_ctx->fd[1]);

	child_ctx->status = VACUUM_RUNNING;
	child_ctx->start_time = timeval_current();

	DLIST_ADD(ctdb->vacuumers, child_ctx);
	talloc_set_destructor(child_ctx, vacuum_child_destructor);

	/*
	 * Clear the fastpath vacuuming list in the parent.
	 */
	talloc_free(ctdb_db->delete_queue);
	ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
	if (ctdb_db->delete_queue == NULL) {
		/* fatal here? ... */
		ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
				 "in parent context. Shutting down\n");
	}

	event_add_timed(ctdb->ev, child_ctx,
			timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
			vacuum_child_timeout, child_ctx);

	DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

	fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
			   EVENT_FD_READ, vacuum_child_handler, child_ctx);
	tevent_fd_set_auto_close(fde);

	vacuum_handle->child_ctx = child_ctx;
	child_ctx->vacuum_handle = vacuum_handle;
}

void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
{
	/* Simply free them all. */
	while (ctdb->vacuumers) {
		DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
				   ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
				   (int)ctdb->vacuumers->child_pid));
		/* vacuum_child_destructor kills it, removes from list */
		talloc_free(ctdb->vacuumers);
	}
}

/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
	if (ctdb_db->persistent != 0) {
		DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
		return 0;
	}

	ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
	CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);

	ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
	ctdb_db->vacuum_handle->fast_path_count = 0;

	event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, ctdb_db->vacuum_handle);

	return 0;
}

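/*
 * Remove a record matching the given key from the database's delete
 * queue, if it is queued there (ignoring hash collisions with other
 * keys).
 */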
static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
					    const struct ctdb_ltdb_header *hdr,
					    const TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "db[%s] "
			    "db_id[0x%08x] "
			    "key_hash[0x%08x] "
			    "lmaster[%u] "
			    "migrated_with_data[%s]\n",
			    ctdb_db->db_name, ctdb_db->db_id,
			    hash,
			    ctdb_lmaster(ctdb_db->ctdb, &key),
			    hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd == NULL) {
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "record not in queue (hash[0x%08x]).\n",
				    hash));
		return;
	}

	if ((kd->key.dsize != key.dsize) ||
	    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
	{
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "hash collision for key with hash[0x%08x] "
				    "in db[%s] - skipping\n",
				    hash, ctdb_db->db_name));
		return;
	}

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "removing key with hash[0x%08x]\n",
			    hash));

	talloc_free(kd);

	return;
}

/**
 * Insert a record into the ctdb_db context's delete queue,
 * handling hash collisions.
 */
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;
	int ret;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
			   "db_id[0x%08x] "
			   "key_hash[0x%08x] "
			   "lmaster[%u] "
			   "migrated_with_data[%s]\n",
			   ctdb_db->db_name, ctdb_db->db_id,
			   hash,
			   ctdb_lmaster(ctdb_db->ctdb, &key),
			   hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd != NULL) {
		if ((kd->key.dsize != key.dsize) ||
		    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
		{
			DEBUG(DEBUG_INFO,
			      (__location__ " schedule for deletion: "
			       "hash collision for key hash [0x%08x]. "
			       "Skipping the record.\n", hash));
			return 0;
		} else {
			DEBUG(DEBUG_DEBUG,
			      (__location__ " schedule for deletion: "
			       "updating entry for key with hash [0x%08x].\n",
			       hash));
		}
	}

	ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
						  ctdb_db->delete_queue,
						  hdr, key);
	if (ret != 0) {
		DEBUG(DEBUG_INFO,
		      (__location__ " schedule for deletion: error "
		       "inserting key with hash [0x%08x] into delete queue\n",
		       hash));
		return -1;
	}

	return 0;
}

/**
 * Schedule a record for deletion.
 * Called from the parent context.
 */
int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
					   TDB_DATA indata)
{
	struct ctdb_control_schedule_for_deletion *dd;
	struct ctdb_db_context *ctdb_db;
	int ret;
	TDB_DATA key;

	dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;

	ctdb_db = find_ctdb_db(ctdb, dd->db_id);
	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
				  dd->db_id));
		return -1;
	}

	key.dsize = dd->keylen;
	key.dptr = dd->key;

	ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);

	return ret;
}

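/*
 * Schedule a record for deletion from any ctdbd process context:
 * in the main daemon the record is queued directly, while a child
 * process sends a SCHEDULE_FOR_DELETION control to the daemon.
 */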
int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 TDB_DATA key)
{
	int ret;
	struct ctdb_control_schedule_for_deletion *dd;
	TDB_DATA indata;
	int32_t status;

	if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
		/* main daemon - directly queue */
		ret = insert_record_into_delete_queue(ctdb_db, hdr, key);

		return ret;
	}

	/* if we don't have a connection to the daemon we can not send
	   a control. For example sometimes from the update_record
	   control child process.
	*/
	if (!ctdb_db->ctdb->can_send_controls) {
		return -1;
	}

	/* child process: send the main daemon a control */
	indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
	indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
	if (indata.dptr == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
		return -1;
	}
	dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
	dd->db_id = ctdb_db->db_id;
	dd->hdr = *hdr;
	dd->keylen = key.dsize;
	memcpy(dd->key, key.dptr, key.dsize);

	ret = ctdb_control(ctdb_db->ctdb,
			   CTDB_CURRENT_NODE,
			   ctdb_db->db_id,
			   CTDB_CONTROL_SCHEDULE_FOR_DELETION,
			   CTDB_CTRL_FLAG_NOREPLY, /* flags */
			   indata,
			   NULL, /* mem_ctx */
			   NULL, /* outdata */
			   &status,
			   NULL, /* timeout : NULL == wait forever */
			   NULL); /* error message */

	talloc_free(indata.dptr);

	if (ret != 0 || status != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error sending "
				  "SCHEDULE_FOR_DELETION "
				  "control.\n"));
		if (status != 0) {
			ret = -1;
		}
	}

	return ret;
}

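/*
 * Remove a record from the delete queue; this is a no-op when not
 * called from the main ctdbd daemon.
 */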
void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 const TDB_DATA key)
{
	if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
		/*
		 * Only remove the record from the delete queue if called
		 * in the main daemon.
		 */
		return;
	}

	remove_record_from_delete_queue(ctdb_db, hdr, key);

	return;
}