ctdb-vacuum: simplify delete_record_traverse() - free treats NULL
/*
   ctdb vacuuming events

   Copyright (C) Ronnie Sahlberg 2009
   Copyright (C) Michael Adam 2010-2013
   Copyright (C) Stefan Metzmacher 2010-2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "tdb.h"
#include "system/network.h"
#include "system/filesys.h"
#include "system/dir.h"
#include "../include/ctdb_private.h"
#include "db_wrap.h"
#include "lib/util/dlinklist.h"
#include "../include/ctdb_private.h"
#include "../common/rb_tree.h"
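
/* Timeout used for the ctdb controls issued while vacuuming: 10 seconds. */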
#define TIMELIMIT() timeval_current_ofs(10, 0)

enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};

struct ctdb_vacuum_child_context {
	struct ctdb_vacuum_child_context *next, *prev;
	struct ctdb_vacuum_handle *vacuum_handle;
	/* fd child writes status to */
	int fd[2];
	pid_t child_pid;
	enum vacuum_child_status status;
	struct timeval start_time;
};

struct ctdb_vacuum_handle {
	struct ctdb_db_context *ctdb_db;
	struct ctdb_vacuum_child_context *child_ctx;
	uint32_t fast_path_count;
};


/* a list of records to possibly delete */
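/*
 * The fast_* counters track the fast-path traverse of the in-memory
 * delete_queue, the full_* counters the full database traverse, and
 * the delete_* counters the processing of the delete_list in
 * ctdb_process_delete_list().
 */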
struct vacuum_data {
	uint32_t vacuum_limit;
	uint32_t repack_limit;
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tdb_context *dest_db;
	trbt_tree_t *delete_list;
	uint32_t delete_count;
	struct ctdb_marshall_buffer **vacuum_fetch_list;
	struct timeval start;
	bool traverse_error;
	bool vacuum;
	uint32_t total;
	uint32_t vacuumed;
	uint32_t copied;
	uint32_t fast_added_to_vacuum_fetch_list;
	uint32_t fast_added_to_delete_list;
	uint32_t fast_deleted;
	uint32_t fast_skipped;
	uint32_t fast_error;
	uint32_t fast_total;
	uint32_t full_added_to_vacuum_fetch_list;
	uint32_t full_added_to_delete_list;
	uint32_t full_skipped;
	uint32_t full_error;
	uint32_t full_total;
	uint32_t delete_left;
	uint32_t delete_remote_error;
	uint32_t delete_local_error;
	uint32_t delete_deleted;
	uint32_t delete_skipped;
};

/* this structure contains the information for one record to be deleted */
struct delete_record_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct ctdb_ltdb_header hdr;
	TDB_DATA key;
	uint8_t keydata[1];
};

struct delete_records_list {
	struct ctdb_marshall_buffer *records;
	struct vacuum_data *vdata;
};
/**
 * Store key and header in a tree, indexed by the key hash.
 */
static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
					       struct ctdb_db_context *ctdb_db,
					       trbt_tree_t *tree,
					       const struct ctdb_ltdb_header *hdr,
					       TDB_DATA key)
{
	struct delete_record_data *dd;
	uint32_t hash;
	size_t len;

	len = offsetof(struct delete_record_data, keydata) + key.dsize;
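
	/*
	 * A single allocation covers both the struct and the key bytes:
	 * keydata[] acts as an inline variable-length buffer at the end
	 * of struct delete_record_data.
	 */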
	dd = (struct delete_record_data *)talloc_size(tree, len);
	if (dd == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	talloc_set_name_const(dd, "struct delete_record_data");

	dd->ctdb      = ctdb;
	dd->ctdb_db   = ctdb_db;
	dd->key.dsize = key.dsize;
	dd->key.dptr  = dd->keydata;
	memcpy(dd->keydata, key.dptr, key.dsize);

	dd->hdr = *hdr;

	hash = ctdb_hash(&key);

	trbt_insert32(tree, hash, dd);

	return 0;
}
static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
				     struct ctdb_ltdb_header *hdr)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t hash;
	int ret;

	hash = ctdb_hash(&key);

	if (trbt_lookup32(vdata->delete_list, hash)) {
		DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
		return 0;
	}

	ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
						  vdata->delete_list,
						  hdr, key);
	if (ret != 0) {
		return -1;
	}

	vdata->delete_count++;

	return 0;
}
/**
 * Add a record to the list of records to be sent
 * to their lmaster with VACUUM_FETCH.
 */
static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
					   TDB_DATA key)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_rec_data *rec;
	uint32_t lmaster;
	size_t old_size;
	struct ctdb_marshall_buffer *vfl;

	lmaster = ctdb_lmaster(ctdb, &key);

	vfl = vdata->vacuum_fetch_list[lmaster];

	rec = ctdb_marshall_record(vfl, ctdb->pnn, key, NULL, tdb_null);
	if (rec == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		vdata->traverse_error = true;
		return -1;
	}
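
	/*
	 * Append the marshalled record to the per-lmaster blob: grow the
	 * talloc buffer and copy the record to its old end.
	 */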
	old_size = talloc_get_size(vfl);
	vfl = talloc_realloc_size(NULL, vfl, old_size + rec->length);
	if (vfl == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
		vdata->traverse_error = true;
		return -1;
	}
	vdata->vacuum_fetch_list[lmaster] = vfl;

	vfl->count++;
	memcpy(old_size+(uint8_t *)vfl, rec, rec->length);
	talloc_free(rec);

	vdata->total++;

	return 0;
}


static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
			      struct timeval t, void *private_data);
/*
 * traverse function for gathering the records that can be deleted
 */
static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private)
{
	struct vacuum_data *vdata = talloc_get_type(private, struct vacuum_data);
	struct ctdb_context *ctdb = vdata->ctdb;
	uint32_t lmaster;
	struct ctdb_ltdb_header *hdr;
	int res = 0;

	vdata->full_total++;

	lmaster = ctdb_lmaster(ctdb, &key);
	if (lmaster >= ctdb->num_nodes) {
		vdata->full_error++;
		DEBUG(DEBUG_CRIT, (__location__
				   " lmaster[%u] >= ctdb->num_nodes[%u] for key"
				   " with hash[%u]!\n",
				   (unsigned)lmaster,
				   (unsigned)ctdb->num_nodes,
				   (unsigned)ctdb_hash(&key)));
		return -1;
	}

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		/* it is not a deleted record */
		vdata->full_skipped++;
		return 0;
	}

	hdr = (struct ctdb_ltdb_header *)data.dptr;

	if (hdr->dmaster != ctdb->pnn) {
		vdata->full_skipped++;
		return 0;
	}

	if (lmaster == ctdb->pnn) {
		/*
		 * We are both lmaster and dmaster, and the record is empty.
		 * So we should be able to delete it.
		 */
		res = add_record_to_delete_list(vdata, key, hdr);
		if (res != 0) {
			vdata->full_error++;
		} else {
			vdata->full_added_to_delete_list++;
		}
	} else {
		/*
		 * We are not lmaster.
		 * Add the record to the blob ready to send to the nodes.
		 */
		res = add_record_to_vacuum_fetch_list(vdata, key);
		if (res != 0) {
			vdata->full_error++;
		} else {
			vdata->full_added_to_vacuum_fetch_list++;
		}
	}

	return res;
}
/*
 * traverse the tree of records to delete and marshall them into
 * a blob
 */
static int delete_marshall_traverse(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_rec_data *rec;
	size_t old_size;

	rec = ctdb_marshall_record(dd, recs->records->db_id, dd->key, &dd->hdr, tdb_null);
	if (rec == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
		return 0;
	}

	old_size = talloc_get_size(recs->records);
	recs->records = talloc_realloc_size(NULL, recs->records, old_size + rec->length);
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
		return 0;
	}
	recs->records->count++;
	memcpy(old_size+(uint8_t *)(recs->records), rec, rec->length);
	return 0;
}
/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from
 * resurrection in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_ltdb_header *header;
	TDB_DATA tdb_data, ctdb_data;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));
	int res;

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		recs->vdata->delete_skipped++;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	tdb_data = tdb_fetch(ctdb_db->ltdb->tdb, dd->key);
	if (tdb_data.dsize < sizeof(struct ctdb_ltdb_header)) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] does not exist or is not"
				   " a ctdb-record. skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (tdb_data.dsize > sizeof(struct ctdb_ltdb_header)) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been recycled. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	header = (struct ctdb_ltdb_header *)tdb_data.dptr;

	if (header->flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header->dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header->rsn != dd->hdr.rsn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	/*
	 * Increment the record's RSN to ensure the dmaster (i.e. the current
	 * node) has the highest RSN of the record in the cluster.
	 * This is to prevent old record copies from resurrecting in recoveries
	 * if something should fail during the deletion process.
	 * Note that ctdb_ltdb_store_server() increments the RSN if called
	 * on the record's dmaster.
	 */

	ctdb_data.dptr  = tdb_data.dptr + sizeof(struct ctdb_ltdb_header);
	ctdb_data.dsize = tdb_data.dsize - sizeof(struct ctdb_ltdb_header);

	res = ctdb_ltdb_store(ctdb_db, dd->key, header, ctdb_data);
	if (res != 0) {
		DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
				  "key hash [0x%08x] on database db[%s].\n",
				  hash, ctdb_db->db_name));
		goto skip;
	}

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	goto done;

skip:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	recs->vdata->delete_skipped++;
	talloc_free(dd);
	dd = NULL;

done:
	if (tdb_data.dptr != NULL) {
		free(tdb_data.dptr);
	}

	if (dd == NULL) {
		return 0;
	}

	return delete_marshall_traverse(param, data);
}
/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 *  - If the record has been migrated off the node
 *    or has been revived (filled with data) on the node,
 *    then skip the record.
 *
 *  - If the current node is the record's lmaster and it is
 *    a record that has never been migrated with data, then
 *    delete the record from the local tdb.
 *
 *  - If the current node is the record's lmaster and it has
 *    been migrated with data, then schedule it for the normal
 *    vacuuming procedure (i.e. add it to the delete_list).
 *
 *  - If the current node is NOT the record's lmaster then
 *    add it to the list of records that are to be sent to
 *    the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
	int res;
	struct ctdb_ltdb_header *header;
	TDB_DATA tdb_data;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	vdata->fast_total++;

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->fast_error++;
		return 0;
	}

	tdb_data = tdb_fetch(ctdb_db->ltdb->tdb, dd->key);
	if (tdb_data.dsize < sizeof(struct ctdb_ltdb_header)) {
		/* Does not exist or not a ctdb record. Skip. */
		goto skipped;
	}

	if (tdb_data.dsize > sizeof(struct ctdb_ltdb_header)) {
		/* The record has been recycled (filled with data). Skip. */
		goto skipped;
	}

	header = (struct ctdb_ltdb_header *)tdb_data.dptr;

	if (header->dmaster != ctdb->pnn) {
		/* The record has been migrated off the node. Skip. */
		goto skipped;
	}

	if (header->rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		goto skipped;
	}

	/*
	 * We are dmaster, and the record has no data, and it has
	 * not been migrated after it has been queued for deletion.
	 *
	 * At this stage, the record could still have been revived locally
	 * and last been written with empty data. This can only be
	 * fixed with the addition of an active or delete flag. (TODO)
	 */

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		res = add_record_to_vacuum_fetch_list(vdata, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records to send to lmaster.\n"));
			vdata->fast_error++;
		} else {
			vdata->fast_added_to_vacuum_fetch_list++;
		}
		goto done;
	}

	/* use header->flags or dd->hdr.flags ?? */
	if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
		res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records for deletion on lmaster.\n"));
			vdata->fast_error++;
		} else {
			vdata->fast_added_to_delete_list++;
		}
	} else {
		res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error deleting record with key "
			       "hash [0x%08x] from local data base db[%s].\n",
			       hash, ctdb_db->db_name));
			vdata->fast_error++;
			goto done;
		}

		DEBUG(DEBUG_DEBUG,
		      (__location__ " Deleted record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->fast_deleted++;
	}

	goto done;

skipped:
	vdata->fast_skipped++;

done:
	free(tdb_data.dptr);

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	return 0;
}
/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int res;
	struct ctdb_ltdb_header *header;
	TDB_DATA tdb_data;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->delete_local_error++;
		vdata->delete_left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */

	tdb_data = tdb_fetch(ctdb_db->ltdb->tdb, dd->key);
	if (tdb_data.dsize < sizeof(struct ctdb_ltdb_header)) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] does not exist or is not"
				   " a ctdb-record. skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (tdb_data.dsize > sizeof(struct ctdb_ltdb_header)) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been recycled. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	header = (struct ctdb_ltdb_header *)tdb_data.dptr;

	if (header->flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header->dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header->rsn != dd->hdr.rsn + 1) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 * (Note that the first marshall traverse has bumped the RSN
		 *  on disk.)
		 */
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error deleting record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->delete_local_error++;
		goto done;
	}

	DEBUG(DEBUG_DEBUG,
	      (__location__ " Deleted record with key hash [0x%08x] from "
	       "local data base db[%s].\n", hash, ctdb_db->db_name));

	vdata->delete_deleted++;
	goto done;

skip:
	vdata->delete_skipped++;

done:
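	/* Note: free() ignores a NULL pointer, so tdb_data needs no check. */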
	free(tdb_data.dptr);

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	talloc_free(dd);
	vdata->delete_left--;

	return 0;
}
/**
 * Fast vacuuming run:
 * Traverse the delete_queue.
 * This fills the same lists as the database traverse.
 */
static void ctdb_vacuum_db_fast(struct ctdb_db_context *ctdb_db,
				struct vacuum_data *vdata)
{
	trbt_traversearray32(ctdb_db->delete_queue, 1, delete_queue_traverse, vdata);

	if (vdata->fast_total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " fast vacuuming delete_queue traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "adl[%u] "
		       "avf[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->fast_total,
		       (unsigned)vdata->fast_deleted,
		       (unsigned)vdata->fast_skipped,
		       (unsigned)vdata->fast_error,
		       (unsigned)vdata->fast_added_to_delete_list,
		       (unsigned)vdata->fast_added_to_vacuum_fetch_list));
	}

	return;
}
/**
 * Full vacuum run:
 * read-only traverse of the database, looking for records that
 * might be able to be vacuumed.
 *
 * This is not done each time but only every tunable
 * VacuumFastPathCount times.
 */
static int ctdb_vacuum_db_full(struct ctdb_db_context *ctdb_db,
			       struct vacuum_data *vdata,
			       bool full_vacuum_run)
{
	int ret;

	if (!full_vacuum_run) {
		return 0;
	}

	ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
	if (ret == -1 || vdata->traverse_error) {
		DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
				  "'%s'\n", ctdb_db->db_name));
		return -1;
	}

	if (vdata->full_total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " full vacuuming db traverse statistics: "
		       "db[%s] "
		       "total[%u] "
		       "skp[%u] "
		       "err[%u] "
		       "adl[%u] "
		       "avf[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->full_total,
		       (unsigned)vdata->full_skipped,
		       (unsigned)vdata->full_error,
		       (unsigned)vdata->full_added_to_delete_list,
		       (unsigned)vdata->full_added_to_vacuum_fetch_list));
	}

	return 0;
}
/**
 * Process the vacuum fetch lists:
 * For records for which we are not the lmaster, tell the lmaster to
 * fetch the record.
 */
static int ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
					   struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	for (i = 0; i < ctdb->num_nodes; i++) {
		TDB_DATA data;
		struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];

		if (ctdb->nodes[i]->pnn == ctdb->pnn) {
			continue;
		}

		if (vfl->count == 0) {
			continue;
		}

		DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
				   vfl->count, ctdb->nodes[i]->pnn,
				   ctdb_db->db_name));

		data.dsize = talloc_get_size(vfl);
		data.dptr  = (void *)vfl;
		if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
					     CTDB_SRVID_VACUUM_FETCH,
					     data) != 0)
		{
			DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
					  "fetch message to %u\n",
					  ctdb->nodes[i]->pnn));
			return -1;
		}
	}

	return 0;
}
/**
 * Process the delete list:
 *
 * This is the last step of vacuuming that consistently deletes
 * those records that have been migrated with data and can hence
 * not be deleted when leaving a node.
 *
 * In this step, the lmaster does the final deletion of those empty
 * records that it is also dmaster for. It has usually received
 * at least some of these records previously from the former dmasters
 * with the vacuum fetch message.
 *
 * This last step is implemented as a 3-phase process to protect from
 * races leading to data corruption:
 *
 *  1) Send the lmaster's copy to all other active nodes with the
 *     RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
 *  2) Send the records that could successfully be stored remotely
 *     in step #1 to all active nodes with the TRY_DELETE_RECORDS
 *     control. The remote nodes delete their local copy.
 *  3) The lmaster locally deletes its copies of all records that
 *     could successfully be deleted remotely in step #2.
 */
static int ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
				    struct vacuum_data *vdata)
{
	int ret, i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct delete_records_list *recs;
	TDB_DATA indata;
	struct ctdb_node_map *nodemap;
	uint32_t *active_nodes;
	int num_active_nodes;
	TALLOC_CTX *tmp_ctx;

	if (vdata->delete_count == 0) {
		return 0;
	}

	tmp_ctx = talloc_new(vdata);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return 0;
	}

	vdata->delete_left = vdata->delete_count;
	/*
	 * get the list of currently active nodes
	 */

	ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
				   CTDB_CURRENT_NODE,
				   tmp_ctx,
				   &nodemap);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
		ret = -1;
		goto done;
	}

	active_nodes = list_of_active_nodes(ctdb, nodemap,
					    nodemap, /* talloc context */
					    false /* include self */);
	/* yuck! ;-) */
	num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

	/*
	 * Now delete the records on all active nodes in a three-phase process:
	 * 1) send all active remote nodes the current empty copy with this
	 *    node as DMASTER
	 * 2) if all nodes could store the new copy,
	 *    tell all the active remote nodes to delete all their copies
	 * 3) if all remote nodes deleted their record copy, delete it locally
	 */

	/*
	 * Step 1:
	 * Send currently empty record copy to all active nodes for storing.
	 */

	recs = talloc_zero(tmp_ctx, struct delete_records_list);
	if (recs == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		ret = -1;
		goto done;
	}
	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		ret = -1;
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;
	recs->vdata = vdata;

	/*
	 * traverse the tree of all records we want to delete and
	 * create a blob we can send to the other nodes.
	 *
	 * We call delete_marshall_traverse_first() to bump the
	 * records' RSNs in the database, to ensure we (as dmaster)
	 * keep the highest RSN of the records in the cluster.
	 */
	trbt_traversearray32(vdata->delete_list, 1,
			     delete_marshall_traverse_first, recs);

	indata.dsize = talloc_get_size(recs->records);
	indata.dptr  = (void *)recs->records;
	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				   CTDB_CONTROL_RECEIVE_RECORDS, 0,
				   indata, recs, &outdata, &res,
				   NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Error storing record copies on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			ret = -1;
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not store. We remove these from
		 * the list to process further.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				ret = -1;
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not store the record
				 * copy and it is the first node that failed.
				 * So we should remove it from the tree and
				 * update statistics.
				 */
				talloc_free(dd);
				vdata->delete_remote_error++;
				vdata->delete_left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->delete_left == 0) {
		goto success;
	}
	/*
	 * Step 2:
	 * Send the remaining records to all active nodes for deletion.
	 *
	 * The lmaster's (i.e. our) copies of these records have been stored
	 * successfully on the other nodes.
	 */

	/*
	 * Create a marshall blob from the remaining list of records to delete.
	 */

	talloc_free(recs->records);

	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		ret = -1;
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;

	trbt_traversearray32(vdata->delete_list, 1,
			     delete_marshall_traverse, recs);

	indata.dsize = talloc_get_size(recs->records);
	indata.dptr  = (void *)recs->records;
	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				   CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
				   indata, recs, &outdata, &res,
				   NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Failed to delete records on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			ret = -1;
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not delete. We remove these from
		 * the list to delete locally.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				ret = -1;
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not delete the
				 * record and it is the first node that
				 * failed. So we should remove it from
				 * the tree and update statistics.
				 */
				talloc_free(dd);
				vdata->delete_remote_error++;
				vdata->delete_left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->delete_left == 0) {
		goto success;
	}
	/*
	 * Step 3:
	 * Delete the remaining records locally.
	 *
	 * These records have successfully been deleted on all
	 * active remote nodes.
	 */

	trbt_traversearray32(vdata->delete_list, 1,
			     delete_record_traverse, vdata);

success:

	if (vdata->delete_count > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " vacuum delete list statistics: "
		       "db[%s] "
		       "total[%u] "
		       "del[%u] "
		       "skip[%u] "
		       "rem.err[%u] "
		       "loc.err[%u] "
		       "left[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->delete_count,
		       (unsigned)vdata->delete_deleted,
		       (unsigned)vdata->delete_skipped,
		       (unsigned)vdata->delete_remote_error,
		       (unsigned)vdata->delete_local_error,
		       (unsigned)vdata->delete_left));
	}

	ret = 0;

done:
	talloc_free(tmp_ctx);

	return ret;
}
/**
 * initialize the vacuum_data
 */
static int ctdb_vacuum_init_vacuum_data(struct ctdb_db_context *ctdb_db,
					struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	vdata->fast_added_to_delete_list = 0;
	vdata->fast_added_to_vacuum_fetch_list = 0;
	vdata->fast_deleted = 0;
	vdata->fast_skipped = 0;
	vdata->fast_error = 0;
	vdata->fast_total = 0;
	vdata->full_added_to_delete_list = 0;
	vdata->full_added_to_vacuum_fetch_list = 0;
	vdata->full_skipped = 0;
	vdata->full_error = 0;
	vdata->full_total = 0;
	vdata->delete_count = 0;
	vdata->delete_left = 0;
	vdata->delete_remote_error = 0;
	vdata->delete_local_error = 0;
	vdata->delete_skipped = 0;
	vdata->delete_deleted = 0;

	/* the list needs to be of length num_nodes */
	vdata->vacuum_fetch_list = talloc_zero_array(vdata,
						     struct ctdb_marshall_buffer *,
						     ctdb->num_nodes);
	if (vdata->vacuum_fetch_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	for (i = 0; i < ctdb->num_nodes; i++) {
		vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
			talloc_zero_size(vdata->vacuum_fetch_list,
					 offsetof(struct ctdb_marshall_buffer, data));
		if (vdata->vacuum_fetch_list[i] == NULL) {
			DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
			return -1;
		}
		vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
	}

	return 0;
}
/**
 * Vacuum a DB:
 *  - Always do the fast vacuuming run, which traverses
 *    the in-memory delete queue: these records have been
 *    scheduled for deletion.
 *  - Only if explicitly requested, the database is traversed
 *    in order to use the traditional heuristics on empty records
 *    to trigger deletion.
 *    This is done only every VacuumFastPathCount'th vacuuming run.
 *
 * The traverse runs fill two lists:
 *
 * - The delete_list:
 *   This is the list of empty records the current
 *   node is lmaster and dmaster for. These records are later
 *   deleted first on other nodes and then locally.
 *
 *   The fast vacuuming run has a short cut for those records
 *   that have never been migrated with data: these records
 *   are immediately deleted locally, since they have left
 *   no trace on other nodes.
 *
 * - The vacuum_fetch lists
 *   (one for each other lmaster node):
 *   The records in this list are sent for deletion to
 *   their lmaster in a bulk VACUUM_FETCH message.
 *
 *   The lmaster then migrates all these records to itself
 *   so that they can be vacuumed there.
 *
 * This executes in the child context.
 */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
			  struct vacuum_data *vdata,
			  bool full_vacuum_run)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret, pnn;

	DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
			   "%s db_id[0x%08x]\n",
			   full_vacuum_run ? "full" : "fast",
			   ctdb_db->db_name, ctdb_db->db_id));

	ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
		return ret;
	}

	pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
	if (pnn == -1) {
		DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
		return -1;
	}

	ctdb->pnn = pnn;

	ret = ctdb_vacuum_init_vacuum_data(ctdb_db, vdata);
	if (ret != 0) {
		return ret;
	}

	ctdb_vacuum_db_fast(ctdb_db, vdata);

	ret = ctdb_vacuum_db_full(ctdb_db, vdata, full_vacuum_run);
	if (ret != 0) {
		return ret;
	}

	ret = ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);
	if (ret != 0) {
		return ret;
	}

	ret = ctdb_process_delete_list(ctdb_db, vdata);
	if (ret != 0) {
		return ret;
	}

	/* this ensures we run our event queue */
	ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

	return 0;
}
/*
 * traverse function for repacking
 */
static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private)
{
	struct vacuum_data *vdata = (struct vacuum_data *)private;

	if (vdata->vacuum) {
		uint32_t hash = ctdb_hash(&key);
		struct delete_record_data *kd;
		/*
		 * check if we can ignore this record because it's in the delete_list
		 */
		kd = (struct delete_record_data *)trbt_lookup32(vdata->delete_list, hash);
		/*
		 * there might be hash collisions so we have to compare the keys here to be sure
		 */
		if (kd && kd->key.dsize == key.dsize && memcmp(kd->key.dptr, key.dptr, key.dsize) == 0) {
			struct ctdb_ltdb_header *hdr = (struct ctdb_ltdb_header *)data.dptr;
			/*
			 * we have to check that the record hasn't changed in the meantime in order to
			 * safely remove it from the database
			 */
			if (data.dsize == sizeof(struct ctdb_ltdb_header) &&
			    hdr->dmaster == kd->ctdb->pnn &&
			    ctdb_lmaster(kd->ctdb, &(kd->key)) == kd->ctdb->pnn &&
			    kd->hdr.rsn == hdr->rsn) {
				vdata->vacuumed++;
				return 0;
			}
		}
	}
	if (tdb_store(vdata->dest_db, key, data, TDB_INSERT) != 0) {
		vdata->traverse_error = true;
		return -1;
	}
	vdata->copied++;
	return 0;
}

/*
 * repack a tdb
 */
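/*
 * The repack copies all live records into a temporary in-memory tdb,
 * wipes the original database and copies the records back, all within
 * a single transaction. Records on the delete_list are skipped on the
 * way out, so they are vacuumed as a side effect of the copy.
 */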
static int ctdb_repack_tdb(struct tdb_context *tdb, TALLOC_CTX *mem_ctx, struct vacuum_data *vdata)
{
	struct tdb_context *tmp_db;

	if (tdb_transaction_start(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to start transaction\n"));
		return -1;
	}

	tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb),
			  TDB_INTERNAL|TDB_DISALLOW_NESTING,
			  O_RDWR|O_CREAT, 0);
	if (tmp_db == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to create tmp_db\n"));
		tdb_transaction_cancel(tdb);
		return -1;
	}

	vdata->traverse_error = false;
	vdata->dest_db = tmp_db;
	vdata->vacuum = true;
	vdata->vacuumed = 0;
	vdata->copied = 0;

	/*
	 * repack and vacuum on-the-fly by not writing the records that are
	 * no longer needed
	 */
	if (tdb_traverse_read(tdb, repack_traverse, vdata) == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying out\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	DEBUG(DEBUG_INFO,(__location__ " %u records vacuumed\n", vdata->vacuumed));

	if (vdata->traverse_error) {
		DEBUG(DEBUG_ERR,(__location__ " Error during traversal\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	if (tdb_wipe_all(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to wipe database\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	vdata->traverse_error = false;
	vdata->dest_db = tdb;
	vdata->vacuum = false;
	vdata->copied = 0;

	if (tdb_traverse_read(tmp_db, repack_traverse, vdata) == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying back\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	if (vdata->traverse_error) {
		DEBUG(DEBUG_ERR,(__location__ " Error during second traversal\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	tdb_close(tmp_db);


	if (tdb_transaction_commit(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to commit\n"));
		return -1;
	}
	DEBUG(DEBUG_INFO,(__location__ " %u records copied\n", vdata->copied));

	return 0;
}
/*
 * repack and vacuum a db
 * called from the child context
 */
static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
				     TALLOC_CTX *mem_ctx,
				     bool full_vacuum_run)
{
	uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
	uint32_t vacuum_limit = ctdb_db->ctdb->tunable.vacuum_limit;
	const char *name = ctdb_db->db_name;
	int freelist_size = 0;
	struct vacuum_data *vdata;

	vdata = talloc_zero(mem_ctx, struct vacuum_data);
	if (vdata == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}

	vdata->ctdb = ctdb_db->ctdb;
	vdata->vacuum_limit = vacuum_limit;
	vdata->repack_limit = repack_limit;
	vdata->delete_list = trbt_create(vdata, 0);
	vdata->ctdb_db = ctdb_db;
	if (vdata->delete_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		talloc_free(vdata);
		return -1;
	}

	vdata->start = timeval_current();

	/*
	 * gather all records that can be deleted in vdata
	 */
	if (ctdb_vacuum_db(ctdb_db, vdata, full_vacuum_run) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
	}

	if (repack_limit != 0) {
		freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
		if (freelist_size == -1) {
			DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
			talloc_free(vdata);
			return -1;
		}
	}

	/*
	 * decide if a repack is necessary
	 */
	if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit) &&
	    vdata->delete_left < vacuum_limit)
	{
		talloc_free(vdata);
		return 0;
	}

	DEBUG(DEBUG_INFO,("Repacking %s with %u freelist entries and %u records to delete\n",
			  name, freelist_size, vdata->delete_left));

	/*
	 * repack and implicitly get rid of the records we can delete
	 */
	if (ctdb_repack_tdb(ctdb_db->ltdb->tdb, mem_ctx, vdata) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
		talloc_free(vdata);
		return -1;
	}
	talloc_free(vdata);

	return 0;
}
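
/*
 * Interval in seconds between two vacuuming runs, taken from the
 * vacuum_interval tunable.
 */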
static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
{
	uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;

	return interval;
}
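
/*
 * Destructor for the vacuum child context: kill the child if it is
 * still running, otherwise count the run as a successful fast-path
 * run; then remove the vacuumer from the list and schedule the next
 * vacuum event for this database.
 */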
static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
	double l = timeval_elapsed(&child_ctx->start_time);
	struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

	if (child_ctx->child_pid != -1) {
		ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
	} else {
		/* Bump the number of successful fast-path runs. */
		child_ctx->vacuum_handle->fast_path_count++;
	}

	DLIST_REMOVE(ctdb->vacuumers, child_ctx);

	event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, child_ctx->vacuum_handle);

	return 0;
}
/*
 * this event is generated when a vacuum child process times out
 */
static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
				 struct timeval t, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);

	DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));

	child_ctx->status = VACUUM_TIMEOUT;

	talloc_free(child_ctx);
}


/*
 * this event is generated when a vacuum child process has completed
 */
static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
				 uint16_t flags, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
	char c = 0;
	int ret;

	DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
	child_ctx->child_pid = -1;

	ret = read(child_ctx->fd[0], &c, 1);
	if (ret != 1 || c != 0) {
		child_ctx->status = VACUUM_ERROR;
		DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
	} else {
		child_ctx->status = VACUUM_OK;
	}

	talloc_free(child_ctx);
}
/*
 * this event is called every time we need to start a new vacuum process
 */
static void
ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
		  struct timeval t, void *private_data)
{
	struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
	struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_vacuum_child_context *child_ctx;
	struct tevent_fd *fde;
	int ret;

	/* we don't vacuum if we are in recovery mode, or db frozen */
	if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
	    ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
		DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
				   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
				   : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
				   ? "freeze pending"
				   : "frozen"));
		event_add_timed(ctdb->ev, vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, vacuum_handle);
		return;
	}

	child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
	if (child_ctx == NULL) {
		DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
		ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
	}


	ret = pipe(child_ctx->fd);
	if (ret != 0) {
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
		vacuum_handle->fast_path_count = 0;
	}

	child_ctx->child_pid = ctdb_fork(ctdb);
	if (child_ctx->child_pid == (pid_t)-1) {
		close(child_ctx->fd[0]);
		close(child_ctx->fd[1]);
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, vacuum_handle);
		return;
	}
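
	/*
	 * In the child: perform the vacuum (and possibly repack) run and
	 * report the result back to the parent as a single status byte
	 * over the pipe, which is picked up by vacuum_child_handler().
	 */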
	if (child_ctx->child_pid == 0) {
		char cc = 0;
		bool full_vacuum_run = false;
		close(child_ctx->fd[0]);

		DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
		ctdb_set_process_name("ctdb_vacuum");
		if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
			DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
			_exit(1);
		}

		/*
		 * repack the db
		 */
		if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
		    (vacuum_handle->fast_path_count == 0))
		{
			full_vacuum_run = true;
		}
		cc = ctdb_vacuum_and_repack_db(ctdb_db, child_ctx,
					       full_vacuum_run);

		write(child_ctx->fd[1], &cc, 1);
		_exit(0);
	}

	set_close_on_exec(child_ctx->fd[0]);
	close(child_ctx->fd[1]);

	child_ctx->status = VACUUM_RUNNING;
	child_ctx->start_time = timeval_current();

	DLIST_ADD(ctdb->vacuumers, child_ctx);
	talloc_set_destructor(child_ctx, vacuum_child_destructor);

	/*
	 * Clear the fastpath vacuuming list in the parent.
	 */
	talloc_free(ctdb_db->delete_queue);
	ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
	if (ctdb_db->delete_queue == NULL) {
		/* fatal here? ... */
		ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
				 "in parent context. Shutting down\n");
	}

	event_add_timed(ctdb->ev, child_ctx,
		timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
		vacuum_child_timeout, child_ctx);

	DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

	fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
			   EVENT_FD_READ, vacuum_child_handler, child_ctx);
	tevent_fd_set_auto_close(fde);

	vacuum_handle->child_ctx = child_ctx;
	child_ctx->vacuum_handle = vacuum_handle;
}
void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
{
	/* Simply free them all. */
	while (ctdb->vacuumers) {
		DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
			   ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
			   (int)ctdb->vacuumers->child_pid));
		/* vacuum_child_destructor kills it, removes from list */
		talloc_free(ctdb->vacuumers);
	}
}
/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
	if (ctdb_db->persistent != 0) {
		DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
		return 0;
	}

	ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
	CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);

	ctdb_db->vacuum_handle->ctdb_db         = ctdb_db;
	ctdb_db->vacuum_handle->fast_path_count = 0;

	event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, ctdb_db->vacuum_handle);

	return 0;
}
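
/**
 * Remove a record from the ctdb_db context's delete queue, comparing
 * the full key to guard against hash collisions.
 */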
static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
					    const struct ctdb_ltdb_header *hdr,
					    const TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "db[%s] "
			    "db_id[0x%08x] "
			    "key_hash[0x%08x] "
			    "lmaster[%u] "
			    "migrated_with_data[%s]\n",
			    ctdb_db->db_name, ctdb_db->db_id,
			    hash,
			    ctdb_lmaster(ctdb_db->ctdb, &key),
			    hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd == NULL) {
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "record not in queue (hash[0x%08x])\n.",
				    hash));
		return;
	}

	if ((kd->key.dsize != key.dsize) ||
	    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
	{
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "hash collision for key with hash[0x%08x] "
				    "in db[%s] - skipping\n",
				    hash, ctdb_db->db_name));
		return;
	}

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "removing key with hash[0x%08x]\n",
			    hash));

	talloc_free(kd);

	return;
}
/**
 * Insert a record into the ctdb_db context's delete queue,
 * handling hash collisions.
 */
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;
	int ret;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
			   "db_id[0x%08x] "
			   "key_hash[0x%08x] "
			   "lmaster[%u] "
			   "migrated_with_data[%s]\n",
			   ctdb_db->db_name, ctdb_db->db_id,
			   hash,
			   ctdb_lmaster(ctdb_db->ctdb, &key),
			   hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd != NULL) {
		if ((kd->key.dsize != key.dsize) ||
		    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
		{
			DEBUG(DEBUG_INFO,
			      (__location__ " schedule for deletion: "
			       "hash collision for key hash [0x%08x]. "
			       "Skipping the record.\n", hash));
			return 0;
		} else {
			DEBUG(DEBUG_DEBUG,
			      (__location__ " schedule for deletion: "
			       "updating entry for key with hash [0x%08x].\n",
			       hash));
		}
	}

	ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
						  ctdb_db->delete_queue,
						  hdr, key);
	if (ret != 0) {
		DEBUG(DEBUG_INFO,
		      (__location__ " schedule for deletion: error "
		       "inserting key with hash [0x%08x] into delete queue\n",
		       hash));
		return -1;
	}

	return 0;
}
/**
 * Schedule a record for deletion.
 * Called from the parent context.
 */
int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
					   TDB_DATA indata)
{
	struct ctdb_control_schedule_for_deletion *dd;
	struct ctdb_db_context *ctdb_db;
	int ret;
	TDB_DATA key;

	dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;

	ctdb_db = find_ctdb_db(ctdb, dd->db_id);
	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
				  dd->db_id));
		return -1;
	}

	key.dsize = dd->keylen;
	key.dptr = dd->key;

	ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);

	return ret;
}
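
/**
 * Schedule a record for deletion from the local process: in the main
 * daemon the record is queued directly, while a child process sends a
 * SCHEDULE_FOR_DELETION control to the main daemon.
 */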
int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 TDB_DATA key)
{
	int ret;
	struct ctdb_control_schedule_for_deletion *dd;
	TDB_DATA indata;
	int32_t status;

	if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
		/* main daemon - directly queue */
		ret = insert_record_into_delete_queue(ctdb_db, hdr, key);

		return ret;
	}

	/*
	 * If we don't have a connection to the daemon, we cannot send a
	 * control, e.g. when called from an update_record child process.
	 */
	if (!ctdb_db->ctdb->can_send_controls) {
		return -1;
	}


	/* child process: send the main daemon a control */
	indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
	indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
	if (indata.dptr == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
		return -1;
	}
	dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
	dd->db_id = ctdb_db->db_id;
	dd->hdr = *hdr;
	dd->keylen = key.dsize;
	memcpy(dd->key, key.dptr, key.dsize);

	ret = ctdb_control(ctdb_db->ctdb,
			   CTDB_CURRENT_NODE,
			   ctdb_db->db_id,
			   CTDB_CONTROL_SCHEDULE_FOR_DELETION,
			   CTDB_CTRL_FLAG_NOREPLY, /* flags */
			   indata,
			   NULL, /* mem_ctx */
			   NULL, /* outdata */
			   &status,
			   NULL, /* timeout : NULL == wait forever */
			   NULL); /* error message */

	talloc_free(indata.dptr);

	if (ret != 0 || status != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error sending "
				  "SCHEDULE_FOR_DELETION "
				  "control.\n"));
		if (status != 0) {
			ret = -1;
		}
	}

	return ret;
}
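
/**
 * Remove a record from the fast-path delete queue. This is a no-op
 * outside the main daemon, since only the main daemon owns the
 * in-memory delete queue.
 */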
void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 const TDB_DATA key)
{
	if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
		/*
		 * Only remove the record from the delete queue if called
		 * in the main daemon.
		 */
		return;
	}

	remove_record_from_delete_queue(ctdb_db, hdr, key);

	return;
}