/*
   Copyright (C) Ronnie Sahlberg 2009
   Copyright (C) Michael Adam 2010-2013
   Copyright (C) Stefan Metzmacher 2010-2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "tdb.h"
#include "system/network.h"
#include "system/filesys.h"
#include "system/dir.h"
#include "../include/ctdb_private.h"
#include "lib/util/dlinklist.h"
#include "../common/rb_tree.h"

#define TIMELIMIT() timeval_current_ofs(10, 0)
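
/*
 * TIMELIMIT() is the timeout used for the local ctdb control calls in this
 * file (getnodemap, getvnnmap, getpnn): ten seconds from "now".
 */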
enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT };

struct ctdb_vacuum_child_context {
        struct ctdb_vacuum_child_context *next, *prev;
        struct ctdb_vacuum_handle *vacuum_handle;
        /* fd child writes status to */
        int fd[2];
        pid_t child_pid;
        enum vacuum_child_status status;
        struct timeval start_time;
};

struct ctdb_vacuum_handle {
        struct ctdb_db_context *ctdb_db;
        struct ctdb_vacuum_child_context *child_ctx;
        uint32_t fast_path_count;
};

/* a list of records to possibly delete */
struct vacuum_data {
        uint32_t vacuum_limit;
        uint32_t repack_limit;
        struct ctdb_context *ctdb;
        struct ctdb_db_context *ctdb_db;
        struct tdb_context *dest_db;
        trbt_tree_t *delete_list;
        uint32_t delete_count;
        struct ctdb_marshall_buffer **vacuum_fetch_list;
        struct timeval start;
        bool traverse_error;
        bool vacuum;
        uint32_t vacuumed;
        uint32_t copied;
        uint32_t fast_added_to_vacuum_fetch_list;
        uint32_t fast_added_to_delete_list;
        uint32_t fast_deleted;
        uint32_t fast_skipped;
        uint32_t fast_error;
        uint32_t fast_total;
        uint32_t full_added_to_vacuum_fetch_list;
        uint32_t full_added_to_delete_list;
        uint32_t full_skipped;
        uint32_t full_error;
        uint32_t full_total;
        uint32_t delete_left;
        uint32_t delete_remote_error;
        uint32_t delete_local_error;
        uint32_t delete_deleted;
        uint32_t delete_skipped;
};
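
/*
 * Counter naming convention in struct vacuum_data: the fast_* counters are
 * filled by the fast-path traverse of the in-memory delete_queue, the
 * full_* counters by the full database traverse, and the delete_* counters
 * by the final processing of the delete_list.
 */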

/* this structure contains the information for one record to be deleted */
struct delete_record_data {
        struct ctdb_context *ctdb;
        struct ctdb_db_context *ctdb_db;
        struct ctdb_ltdb_header hdr;
        TDB_DATA key;
        uint8_t keydata[1];
};

struct delete_records_list {
        struct ctdb_marshall_buffer *records;
        struct vacuum_data *vdata;
};

/**
 * Store key and header in a tree, indexed by the key hash.
 */
static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
                                               struct ctdb_db_context *ctdb_db,
                                               trbt_tree_t *tree,
                                               const struct ctdb_ltdb_header *hdr,
                                               TDB_DATA key)
{
        struct delete_record_data *dd;
        uint32_t hash;
        size_t len;

        len = offsetof(struct delete_record_data, keydata) + key.dsize;

        dd = (struct delete_record_data *)talloc_size(tree, len);
        if (dd == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                return -1;
        }
        talloc_set_name_const(dd, "struct delete_record_data");

        dd->ctdb      = ctdb;
        dd->ctdb_db   = ctdb_db;
        dd->key.dsize = key.dsize;
        dd->key.dptr  = dd->keydata;
        memcpy(dd->keydata, key.dptr, key.dsize);

        dd->hdr = *hdr;

        hash = ctdb_hash(&key);

        trbt_insert32(tree, hash, dd);

        return 0;
}
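
/*
 * Note: the trees used here are indexed by ctdb_hash(key) only, so hash
 * collisions are possible. Callers below either skip the colliding record
 * (add_record_to_delete_list) or compare the full key before acting on a
 * looked-up entry (e.g. repack_traverse, remove_record_from_delete_queue).
 */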

static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
                                     struct ctdb_ltdb_header *hdr)
{
        struct ctdb_context *ctdb = vdata->ctdb;
        struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
        uint32_t hash;
        int ret;

        hash = ctdb_hash(&key);

        if (trbt_lookup32(vdata->delete_list, hash)) {
                DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
                return 0;
        }

        ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
                                                  vdata->delete_list,
                                                  hdr, key);
        if (ret != 0) {
                return -1;
        }

        vdata->delete_count++;

        return 0;
}

/**
 * Add a record to the list of records to be sent
 * to their lmaster with VACUUM_FETCH.
 */
static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
                                           TDB_DATA key)
{
        struct ctdb_context *ctdb = vdata->ctdb;
        struct ctdb_rec_data *rec;
        uint32_t lmaster;
        size_t old_size;
        struct ctdb_marshall_buffer *vfl;

        lmaster = ctdb_lmaster(ctdb, &key);

        vfl = vdata->vacuum_fetch_list[lmaster];

        rec = ctdb_marshall_record(vfl, ctdb->pnn, key, NULL, tdb_null);
        if (rec == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                vdata->traverse_error = true;
                return -1;
        }

        old_size = talloc_get_size(vfl);
        vfl = talloc_realloc_size(NULL, vfl, old_size + rec->length);
        if (vfl == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
                vdata->traverse_error = true;
                return -1;
        }
        vdata->vacuum_fetch_list[lmaster] = vfl;

        vfl->count++;
        memcpy(old_size+(uint8_t *)vfl, rec, rec->length);
        talloc_free(rec);

        return 0;
}

static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
                              struct timeval t, void *private_data);

/*
 * traverse function for gathering the records that can be deleted
 */
static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private)
{
        struct vacuum_data *vdata = talloc_get_type(private, struct vacuum_data);
        struct ctdb_context *ctdb = vdata->ctdb;
        uint32_t lmaster;
        struct ctdb_ltdb_header *hdr;
        int res = 0;

        vdata->full_total++;

        lmaster = ctdb_lmaster(ctdb, &key);
        if (lmaster >= ctdb->num_nodes) {
                vdata->full_error++;
                DEBUG(DEBUG_CRIT, (__location__
                                   " lmaster[%u] >= ctdb->num_nodes[%u] for key"
                                   " with hash[%u]!\n",
                                   (unsigned)lmaster,
                                   (unsigned)ctdb->num_nodes,
                                   (unsigned)ctdb_hash(&key)));
                return -1;
        }

        if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
                /* it is not a deleted record */
                vdata->full_skipped++;
                return 0;
        }

        hdr = (struct ctdb_ltdb_header *)data.dptr;

        if (hdr->dmaster != ctdb->pnn) {
                vdata->full_skipped++;
                return 0;
        }

        if (lmaster == ctdb->pnn) {
                /*
                 * We are both lmaster and dmaster, and the record is empty.
                 * So we should be able to delete it.
                 */
                res = add_record_to_delete_list(vdata, key, hdr);
                if (res != 0) {
                        vdata->full_error++;
                } else {
                        vdata->full_added_to_delete_list++;
                }
        } else {
                /*
                 * We are not lmaster.
                 * Add the record to the blob ready to send to the nodes.
                 */
                res = add_record_to_vacuum_fetch_list(vdata, key);
                if (res != 0) {
                        vdata->full_error++;
                } else {
                        vdata->full_added_to_vacuum_fetch_list++;
                }
        }

        return res;
}

/**
 * traverse the tree of records to delete and marshall them into
 * a blob
 */
static int delete_marshall_traverse(void *param, void *data)
{
        struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
        struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
        struct ctdb_rec_data *rec;
        size_t old_size;

        rec = ctdb_marshall_record(dd, recs->records->db_id, dd->key, &dd->hdr, tdb_null);
        if (rec == NULL) {
                DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
                return 0;
        }

        old_size = talloc_get_size(recs->records);
        recs->records = talloc_realloc_size(NULL, recs->records, old_size + rec->length);
        if (recs->records == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
                return 0;
        }
        recs->records->count++;
        memcpy(old_size+(uint8_t *)(recs->records), rec, rec->length);
        return 0;
}

/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from
 * resurrection in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
        struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
        struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
        struct ctdb_db_context *ctdb_db = dd->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        struct ctdb_ltdb_header *header;
        TDB_DATA tdb_data, ctdb_data;
        uint32_t lmaster;
        uint32_t hash = ctdb_hash(&(dd->key));
        int res;

        res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
        if (res != 0) {
                DEBUG(DEBUG_ERR,
                      (__location__ " Error getting chainlock on record with "
                       "key hash [0x%08x] on database db[%s].\n",
                       hash, ctdb_db->db_name));
                recs->vdata->delete_skipped++;
                talloc_free(dd);
                return 0;
        }

        /*
         * Verify that the record is still empty, its RSN has not
         * changed and that we are still its lmaster and dmaster.
         */

        tdb_data = tdb_fetch(ctdb_db->ltdb->tdb, dd->key);
        if (tdb_data.dsize < sizeof(struct ctdb_ltdb_header)) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] does not exist or is not"
                      " a ctdb-record. skipping.\n",
                      hash, ctdb_db->db_name));
                goto skip;
        }

        if (tdb_data.dsize > sizeof(struct ctdb_ltdb_header)) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] has been recycled. "
                      "skipping.\n",
                      hash, ctdb_db->db_name));
                goto skip;
        }

        header = (struct ctdb_ltdb_header *)tdb_data.dptr;

        if (header->flags & CTDB_REC_RO_FLAGS) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] has read-only flags. "
                      "skipping.\n",
                      hash, ctdb_db->db_name));
                goto skip;
        }

        if (header->dmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] has been migrated away. "
                      "skipping.\n",
                      hash, ctdb_db->db_name));
                goto skip;
        }

        if (header->rsn != dd->hdr.rsn) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] seems to have been "
                      "migrated away and back again (with empty "
                      "data). skipping.\n",
                      hash, ctdb_db->db_name));
                goto skip;
        }

        lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

        if (lmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
                      "delete list (key hash [0x%08x], db[%s]). "
                      "Strange! skipping.\n",
                      hash, ctdb_db->db_name));
                goto skip;
        }

        /*
         * Increment the record's RSN to ensure the dmaster (i.e. the current
         * node) has the highest RSN of the record in the cluster.
         * This is to prevent old record copies from resurrecting in recoveries
         * if something should fail during the deletion process.
         * Note that ctdb_ltdb_store_server() increments the RSN if called
         * on the record's dmaster.
         */

        ctdb_data.dptr = tdb_data.dptr + sizeof(struct ctdb_ltdb_header);
        ctdb_data.dsize = tdb_data.dsize - sizeof(struct ctdb_ltdb_header);

        res = ctdb_ltdb_store(ctdb_db, dd->key, header, ctdb_data);
        if (res != 0) {
                DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
                      "key hash [0x%08x] on database db[%s].\n",
                      hash, ctdb_db->db_name));
                goto skip;
        }

        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        goto done;

skip:
        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        recs->vdata->delete_skipped++;
        talloc_free(dd);
        dd = NULL;

done:
        if (tdb_data.dptr != NULL) {
                free(tdb_data.dptr);
        }

        if (dd == NULL) {
                return 0;
        }

        return delete_marshall_traverse(param, data);
}

/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 *  - If the record has been migrated off the node
 *    or has been revived (filled with data) on the node,
 *    then skip the record.
 *
 *  - If the current node is the record's lmaster and it is
 *    a record that has never been migrated with data, then
 *    delete the record from the local tdb.
 *
 *  - If the current node is the record's lmaster and it has
 *    been migrated with data, then schedule it for the normal
 *    vacuuming procedure (i.e. add it to the delete_list).
 *
 *  - If the current node is NOT the record's lmaster then
 *    add it to the list of records that are to be sent to
 *    the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
        struct delete_record_data *dd =
                talloc_get_type(data, struct delete_record_data);
        struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
        struct ctdb_db_context *ctdb_db = dd->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
        int res;
        struct ctdb_ltdb_header *header;
        TDB_DATA tdb_data;
        uint32_t lmaster;
        uint32_t hash = ctdb_hash(&(dd->key));

        vdata->fast_total++;

        res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
        if (res != 0) {
                DEBUG(DEBUG_ERR,
                      (__location__ " Error getting chainlock on record with "
                       "key hash [0x%08x] on database db[%s].\n",
                       hash, ctdb_db->db_name));
                vdata->fast_error++;
                return 0;
        }

        tdb_data = tdb_fetch(ctdb_db->ltdb->tdb, dd->key);
        if (tdb_data.dsize < sizeof(struct ctdb_ltdb_header)) {
                /* Does not exist or not a ctdb record. Skip. */
                goto skipped;
        }

        if (tdb_data.dsize > sizeof(struct ctdb_ltdb_header)) {
                /* The record has been recycled (filled with data). Skip. */
                goto skipped;
        }

        header = (struct ctdb_ltdb_header *)tdb_data.dptr;

        if (header->dmaster != ctdb->pnn) {
                /* The record has been migrated off the node. Skip. */
                goto skipped;
        }

        if (header->rsn != dd->hdr.rsn) {
                /*
                 * The record has been migrated off the node and back again.
                 * But not requeued for deletion. Skip it.
                 */
                goto skipped;
        }

        /*
         * We are dmaster, and the record has no data, and it has
         * not been migrated after it has been queued for deletion.
         *
         * At this stage, the record could still have been revived locally
         * and last been written with empty data. This can only be
         * fixed with the addition of an active or delete flag. (TODO)
         */

        lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

        if (lmaster != ctdb->pnn) {
                res = add_record_to_vacuum_fetch_list(vdata, dd->key);

                if (res != 0) {
                        DEBUG(DEBUG_ERR,
                              (__location__ " Error adding record to list "
                               "of records to send to lmaster.\n"));
                        vdata->fast_error++;
                } else {
                        vdata->fast_added_to_vacuum_fetch_list++;
                }
                goto done;
        }

        /* use header->flags or dd->hdr.flags ?? */
        if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
                res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

                if (res != 0) {
                        DEBUG(DEBUG_ERR,
                              (__location__ " Error adding record to list "
                               "of records for deletion on lmaster.\n"));
                        vdata->fast_error++;
                } else {
                        vdata->fast_added_to_delete_list++;
                }
        } else {
                res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

                if (res != 0) {
                        DEBUG(DEBUG_ERR,
                              (__location__ " Error deleting record with key "
                               "hash [0x%08x] from local data base db[%s].\n",
                               hash, ctdb_db->db_name));
                        vdata->fast_error++;
                        goto done;
                }

                DEBUG(DEBUG_DEBUG,
                      (__location__ " Deleted record with key hash "
                       "[0x%08x] from local data base db[%s].\n",
                       hash, ctdb_db->db_name));
                vdata->fast_deleted++;
        }

        goto done;

skipped:
        vdata->fast_skipped++;

done:
        if (tdb_data.dptr != NULL) {
                free(tdb_data.dptr);
        }

        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        return 0;
}

/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
        struct delete_record_data *dd =
                talloc_get_type(data, struct delete_record_data);
        struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
        struct ctdb_db_context *ctdb_db = dd->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        int res;
        struct ctdb_ltdb_header *header;
        TDB_DATA tdb_data;
        uint32_t lmaster;
        bool deleted = false;
        uint32_t hash = ctdb_hash(&(dd->key));

        res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
        if (res != 0) {
                DEBUG(DEBUG_ERR,
                      (__location__ " Error getting chainlock on record with "
                       "key hash [0x%08x] on database db[%s].\n",
                       hash, ctdb_db->db_name));
                vdata->delete_local_error++;
                return 0;
        }

        /*
         * Verify that the record is still empty, its RSN has not
         * changed and that we are still its lmaster and dmaster.
         */

        tdb_data = tdb_fetch(ctdb_db->ltdb->tdb, dd->key);
        if (tdb_data.dsize < sizeof(struct ctdb_ltdb_header)) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] does not exist or is not"
                      " a ctdb-record. skipping.\n",
                      hash, ctdb_db->db_name));
                vdata->delete_skipped++;
                goto done;
        }

        if (tdb_data.dsize > sizeof(struct ctdb_ltdb_header)) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] has been recycled. "
                      "skipping.\n",
                      hash, ctdb_db->db_name));
                vdata->delete_skipped++;
                goto done;
        }

        header = (struct ctdb_ltdb_header *)tdb_data.dptr;

        if (header->flags & CTDB_REC_RO_FLAGS) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] has read-only flags. "
                      "skipping.\n",
                      hash, ctdb_db->db_name));
                vdata->delete_skipped++;
                goto done;
        }

        if (header->dmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] has been migrated away. "
                      "skipping.\n",
                      hash, ctdb_db->db_name));
                vdata->delete_skipped++;
                goto done;
        }

        if (header->rsn != dd->hdr.rsn + 1) {
                /*
                 * The record has been migrated off the node and back again.
                 * But not requeued for deletion. Skip it.
                 * (Note that the first marshall traverse has bumped the RSN
                 *  of our local copy.)
                 */
                DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
                      "on database db[%s] seems to have been "
                      "migrated away and back again (with empty "
                      "data). skipping.\n",
                      hash, ctdb_db->db_name));
                vdata->delete_skipped++;
                goto done;
        }

        lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

        if (lmaster != ctdb->pnn) {
                DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
                      "delete list (key hash [0x%08x], db[%s]). "
                      "Strange! skipping.\n",
                      hash, ctdb_db->db_name));
                vdata->delete_skipped++;
                goto done;
        }

        res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

        if (res != 0) {
                DEBUG(DEBUG_ERR,
                      (__location__ " Error deleting record with key hash "
                       "[0x%08x] from local data base db[%s].\n",
                       hash, ctdb_db->db_name));
                vdata->delete_local_error++;
                goto done;
        }

        deleted = true;

        DEBUG(DEBUG_DEBUG,
              (__location__ " Deleted record with key hash [0x%08x] from "
               "local data base db[%s].\n", hash, ctdb_db->db_name));

done:
        if (tdb_data.dptr != NULL) {
                free(tdb_data.dptr);
        }

        tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

        if (deleted) {
                /*
                 * successfully deleted the record locally.
                 * remove it from the list and update statistics.
                 */
                talloc_free(dd);
                vdata->delete_deleted++;
                vdata->delete_left--;
        }

        return 0;
}

/**
 * Fast vacuuming run:
 * Traverse the delete_queue.
 * This fills the same lists as the database traverse.
 */
static void ctdb_vacuum_db_fast(struct ctdb_db_context *ctdb_db,
                                struct vacuum_data *vdata)
{
        trbt_traversearray32(ctdb_db->delete_queue, 1, delete_queue_traverse, vdata);

        if (vdata->fast_total > 0) {
                DEBUG(DEBUG_INFO,
                      (__location__
                       " fast vacuuming delete_queue traverse statistics: "
                       "db[%s] "
                       "total[%u] "
                       "del[%u] "
                       "skp[%u] "
                       "err[%u] "
                       "adl[%u] "
                       "avf[%u]\n",
                       ctdb_db->db_name,
                       (unsigned)vdata->fast_total,
                       (unsigned)vdata->fast_deleted,
                       (unsigned)vdata->fast_skipped,
                       (unsigned)vdata->fast_error,
                       (unsigned)vdata->fast_added_to_delete_list,
                       (unsigned)vdata->fast_added_to_vacuum_fetch_list));
        }

        return;
}

/**
 * read-only traverse of the database, looking for records that
 * might be able to be vacuumed.
 *
 * This is not done each time but only every tunable
 * VacuumFastPathCount times.
 */
static int ctdb_vacuum_db_full(struct ctdb_db_context *ctdb_db,
                               struct vacuum_data *vdata,
                               bool full_vacuum_run)
{
        int ret;

        if (!full_vacuum_run) {
                return 0;
        }

        ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
        if (ret == -1 || vdata->traverse_error) {
                DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
                      "'%s'\n", ctdb_db->db_name));
                return -1;
        }

        if (vdata->full_total > 0) {
                DEBUG(DEBUG_INFO,
                      (__location__
                       " full vacuuming db traverse statistics: "
                       "db[%s] "
                       "total[%u] "
                       "skp[%u] "
                       "err[%u] "
                       "adl[%u] "
                       "avf[%u]\n",
                       ctdb_db->db_name,
                       (unsigned)vdata->full_total,
                       (unsigned)vdata->full_skipped,
                       (unsigned)vdata->full_error,
                       (unsigned)vdata->full_added_to_delete_list,
                       (unsigned)vdata->full_added_to_vacuum_fetch_list));
        }

        return 0;
}

/**
 * Process the vacuum fetch lists:
 * For records for which we are not the lmaster, tell the lmaster to
 * fetch the record.
 */
static int ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
                                           struct vacuum_data *vdata)
{
        int i;
        struct ctdb_context *ctdb = ctdb_db->ctdb;

        for (i = 0; i < ctdb->num_nodes; i++) {
                TDB_DATA data;
                struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];

                if (ctdb->nodes[i]->pnn == ctdb->pnn) {
                        continue;
                }

                if (vfl->count == 0) {
                        continue;
                }

                DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
                                   vfl->count, ctdb->nodes[i]->pnn,
                                   ctdb_db->db_name));

                data.dsize = talloc_get_size(vfl);
                data.dptr  = (void *)vfl;
                if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
                                             CTDB_SRVID_VACUUM_FETCH,
                                             data) != 0)
                {
                        DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
                                          "fetch message to %u\n",
                                          ctdb->nodes[i]->pnn));
                        return -1;
                }
        }

        return 0;
}

/**
 * Process the delete list:
 *
 * This is the last step of vacuuming that consistently deletes
 * those records that have been migrated with data and can hence
 * not be deleted when leaving a node.
 *
 * In this step, the lmaster does the final deletion of those empty
 * records that it is also dmaster for. It has usually received
 * at least some of these records previously from the former dmasters
 * with the vacuum fetch message.
 *
 * This last step is implemented as a 3-phase process to protect from
 * races leading to data corruption:
 *
 *  1) Send the lmaster's copy to all other active nodes with the
 *     RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
 *  2) Send the records that could successfully be stored remotely
 *     in step #1 to all active nodes with the TRY_DELETE_RECORDS
 *     control. The remote nodes delete their local copy.
 *  3) The lmaster locally deletes its copies of all records that
 *     could successfully be deleted remotely in step #2.
 */
static int ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
                                    struct vacuum_data *vdata)
{
        int ret, i;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        struct delete_records_list *recs;
        TDB_DATA indata;
        struct ctdb_node_map *nodemap;
        uint32_t *active_nodes;
        int num_active_nodes;
        TALLOC_CTX *tmp_ctx;

        if (vdata->delete_count == 0) {
                return 0;
        }

        tmp_ctx = talloc_new(vdata);
        if (tmp_ctx == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                return -1;
        }

        vdata->delete_left = vdata->delete_count;

        /*
         * get the list of currently active nodes
         */

        ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
                                   CTDB_CURRENT_NODE,
                                   tmp_ctx,
                                   &nodemap);
        if (ret != 0) {
                DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
                ret = -1;
                goto done;
        }

        active_nodes = list_of_active_nodes(ctdb, nodemap,
                                            nodemap, /* talloc context */
                                            false /* include self */);
        num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

        /*
         * Now delete the records on all active nodes in a three-phase process:
         * 1) send all active remote nodes the current empty copy with this
         *    node as dmaster
         * 2) if all nodes could store the new copy,
         *    tell all the active remote nodes to delete all their copy
         * 3) if all remote nodes deleted their record copy, delete it locally
         */

        /*
         * Step 1:
         * Send currently empty record copy to all active nodes for storing.
         */

        recs = talloc_zero(tmp_ctx, struct delete_records_list);
        if (recs == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                ret = -1;
                goto done;
        }
        recs->records = (struct ctdb_marshall_buffer *)
                talloc_zero_size(recs,
                                 offsetof(struct ctdb_marshall_buffer, data));
        if (recs->records == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                ret = -1;
                goto done;
        }
        recs->records->db_id = ctdb_db->db_id;
        recs->vdata = vdata;

        /*
         * traverse the tree of all records we want to delete and
         * create a blob we can send to the other nodes.
         *
         * We call delete_marshall_traverse_first() to bump the
         * records' RSNs in the database, to ensure we (as dmaster)
         * keep the highest RSN of the records in the cluster.
         */
        trbt_traversearray32(vdata->delete_list, 1,
                             delete_marshall_traverse_first, recs);

        indata.dsize = talloc_get_size(recs->records);
        indata.dptr  = (void *)recs->records;

        for (i = 0; i < num_active_nodes; i++) {
                struct ctdb_marshall_buffer *records;
                struct ctdb_rec_data *rec;
                int32_t res;
                TDB_DATA outdata;

                ret = ctdb_control(ctdb, active_nodes[i], 0,
                                   CTDB_CONTROL_RECEIVE_RECORDS, 0,
                                   indata, recs, &outdata, &res,
                                   NULL, NULL);
                if (ret != 0 || res != 0) {
                        DEBUG(DEBUG_ERR, ("Error storing record copies on "
                                          "node %u: ret[%d] res[%d]\n",
                                          active_nodes[i], ret, res));
                        ret = -1;
                        goto done;
                }

                /*
                 * outdata contains the list of records coming back
                 * from the node: These are the records that the
                 * remote node could not store. We remove these from
                 * the list to process further.
                 */
                records = (struct ctdb_marshall_buffer *)outdata.dptr;
                rec = (struct ctdb_rec_data *)&records->data[0];
                while (records->count-- > 1) {
                        TDB_DATA reckey, recdata;
                        struct ctdb_ltdb_header *rechdr;
                        struct delete_record_data *dd;

                        reckey.dptr = &rec->data[0];
                        reckey.dsize = rec->keylen;
                        recdata.dptr = &rec->data[reckey.dsize];
                        recdata.dsize = rec->datalen;

                        if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
                                DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
                                ret = -1;
                                goto done;
                        }
                        rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
                        recdata.dptr += sizeof(*rechdr);
                        recdata.dsize -= sizeof(*rechdr);

                        dd = (struct delete_record_data *)trbt_lookup32(
                                        vdata->delete_list,
                                        ctdb_hash(&reckey));
                        if (dd != NULL) {
                                /*
                                 * The other node could not store the record
                                 * copy and it is the first node that failed.
                                 * So we should remove it from the tree and
                                 * update statistics.
                                 */
                                talloc_free(dd);
                                vdata->delete_remote_error++;
                                vdata->delete_left--;
                        }

                        rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
                }
        }

        if (vdata->delete_left == 0) {
                goto success;
        }

        /*
         * Step 2:
         * Send the remaining records to all active nodes for deletion.
         *
         * The lmaster's (i.e. our) copies of these records have been stored
         * successfully on the other nodes.
         */

        /*
         * Create a marshall blob from the remaining list of records to delete.
         */

        talloc_free(recs->records);

        recs->records = (struct ctdb_marshall_buffer *)
                talloc_zero_size(recs,
                                 offsetof(struct ctdb_marshall_buffer, data));
        if (recs->records == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                ret = -1;
                goto done;
        }
        recs->records->db_id = ctdb_db->db_id;

        trbt_traversearray32(vdata->delete_list, 1,
                             delete_marshall_traverse, recs);

        indata.dsize = talloc_get_size(recs->records);
        indata.dptr  = (void *)recs->records;

        for (i = 0; i < num_active_nodes; i++) {
                struct ctdb_marshall_buffer *records;
                struct ctdb_rec_data *rec;
                int32_t res;
                TDB_DATA outdata;

                ret = ctdb_control(ctdb, active_nodes[i], 0,
                                   CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
                                   indata, recs, &outdata, &res,
                                   NULL, NULL);
                if (ret != 0 || res != 0) {
                        DEBUG(DEBUG_ERR, ("Failed to delete records on "
                                          "node %u: ret[%d] res[%d]\n",
                                          active_nodes[i], ret, res));
                        ret = -1;
                        goto done;
                }

                /*
                 * outdata contains the list of records coming back
                 * from the node: These are the records that the
                 * remote node could not delete. We remove these from
                 * the list to delete locally.
                 */
                records = (struct ctdb_marshall_buffer *)outdata.dptr;
                rec = (struct ctdb_rec_data *)&records->data[0];
                while (records->count-- > 1) {
                        TDB_DATA reckey, recdata;
                        struct ctdb_ltdb_header *rechdr;
                        struct delete_record_data *dd;

                        reckey.dptr = &rec->data[0];
                        reckey.dsize = rec->keylen;
                        recdata.dptr = &rec->data[reckey.dsize];
                        recdata.dsize = rec->datalen;

                        if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
                                DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
                                ret = -1;
                                goto done;
                        }
                        rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
                        recdata.dptr += sizeof(*rechdr);
                        recdata.dsize -= sizeof(*rechdr);

                        dd = (struct delete_record_data *)trbt_lookup32(
                                        vdata->delete_list,
                                        ctdb_hash(&reckey));
                        if (dd != NULL) {
                                /*
                                 * The other node could not delete the
                                 * record and it is the first node that
                                 * failed. So we should remove it from
                                 * the tree and update statistics.
                                 */
                                talloc_free(dd);
                                vdata->delete_remote_error++;
                                vdata->delete_left--;
                        }

                        rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
                }
        }

        if (vdata->delete_left == 0) {
                goto success;
        }

        /*
         * Step 3:
         * Delete the remaining records locally.
         *
         * These records have successfully been deleted on all
         * active remote nodes.
         */

        trbt_traversearray32(vdata->delete_list, 1,
                             delete_record_traverse, vdata);

success:

        if (vdata->delete_count > 0) {
                DEBUG(DEBUG_INFO,
                      (__location__
                       " vacuum delete list statistics: "
                       "db[%s] "
                       "total[%u] "
                       "rem.err[%u] "
                       "loc.err[%u] "
                       "skip[%u] "
                       "del[%u] "
                       "left[%u]\n",
                       ctdb_db->db_name,
                       (unsigned)vdata->delete_count,
                       (unsigned)vdata->delete_remote_error,
                       (unsigned)vdata->delete_local_error,
                       (unsigned)vdata->delete_skipped,
                       (unsigned)vdata->delete_deleted,
                       (unsigned)vdata->delete_left));
        }

        ret = 0;

done:
        talloc_free(tmp_ctx);

        return ret;
}

/**
 * initialize the vacuum_data
 */
static int ctdb_vacuum_init_vacuum_data(struct ctdb_db_context *ctdb_db,
                                        struct vacuum_data *vdata)
{
        int i;
        struct ctdb_context *ctdb = ctdb_db->ctdb;

        vdata->fast_added_to_delete_list = 0;
        vdata->fast_added_to_vacuum_fetch_list = 0;
        vdata->fast_deleted = 0;
        vdata->fast_skipped = 0;
        vdata->fast_error = 0;
        vdata->fast_total = 0;
        vdata->full_added_to_delete_list = 0;
        vdata->full_added_to_vacuum_fetch_list = 0;
        vdata->full_skipped = 0;
        vdata->full_error = 0;
        vdata->full_total = 0;
        vdata->delete_count = 0;
        vdata->delete_left = 0;
        vdata->delete_remote_error = 0;
        vdata->delete_local_error = 0;
        vdata->delete_skipped = 0;
        vdata->delete_deleted = 0;

        /* the list needs to be of length num_nodes */
        vdata->vacuum_fetch_list = talloc_zero_array(vdata,
                                        struct ctdb_marshall_buffer *,
                                        ctdb->num_nodes);
        if (vdata->vacuum_fetch_list == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                return -1;
        }
        for (i = 0; i < ctdb->num_nodes; i++) {
                vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
                        talloc_zero_size(vdata->vacuum_fetch_list,
                                         offsetof(struct ctdb_marshall_buffer, data));
                if (vdata->vacuum_fetch_list[i] == NULL) {
                        DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                        return -1;
                }
                vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
        }

        return 0;
}

/**
 * Vacuum a database:
 *  - Always do the fast vacuuming run, which traverses
 *    the in-memory delete queue: these records have been
 *    scheduled for deletion.
 *  - Only if explicitly requested, the database is traversed
 *    in order to use the traditional heuristics on empty records
 *    to trigger deletion.
 *    This is done only every VacuumFastPathCount'th vacuuming run.
 *
 * The traverse runs fill two lists:
 *
 * - The delete_list:
 *   This is the list of empty records the current
 *   node is lmaster and dmaster for. These records are later
 *   deleted first on other nodes and then locally.
 *
 *   The fast vacuuming run has a short cut for those records
 *   that have never been migrated with data: these records
 *   are immediately deleted locally, since they have left
 *   no trace on other nodes.
 *
 * - The vacuum_fetch lists
 *   (one for each other lmaster node):
 *   The records in this list are sent for deletion to
 *   their lmaster in a bulk VACUUM_FETCH message.
 *
 *   The lmaster then migrates all these records to itself
 *   so that they can be vacuumed there.
 *
 * This executes in the child context.
 */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
                          struct vacuum_data *vdata,
                          bool full_vacuum_run)
{
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        int ret, pnn;

        DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
                           "%s db_id[0x%08x]\n",
                           full_vacuum_run ? "full" : "fast",
                           ctdb_db->db_name, ctdb_db->db_id));

        ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
        if (ret != 0) {
                DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
                return ret;
        }

        pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
        if (pnn == -1) {
                DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
                return -1;
        }

        ctdb->pnn = pnn;

        ret = ctdb_vacuum_init_vacuum_data(ctdb_db, vdata);
        if (ret != 0) {
                return ret;
        }

        ctdb_vacuum_db_fast(ctdb_db, vdata);

        ret = ctdb_vacuum_db_full(ctdb_db, vdata, full_vacuum_run);
        if (ret != 0) {
                return ret;
        }

        ret = ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);
        if (ret != 0) {
                return ret;
        }

        ret = ctdb_process_delete_list(ctdb_db, vdata);
        if (ret != 0) {
                return ret;
        }

        /* this ensures we run our event queue */
        ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

        return 0;
}

/*
 * traverse function for repacking
 */
static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private)
{
        struct vacuum_data *vdata = (struct vacuum_data *)private;

        if (vdata->vacuum) {
                uint32_t hash = ctdb_hash(&key);
                struct delete_record_data *kd;
                /*
                 * check if we can ignore this record because it's in the delete_list
                 */
                kd = (struct delete_record_data *)trbt_lookup32(vdata->delete_list, hash);
                /*
                 * there might be hash collisions so we have to compare the keys here to be sure
                 */
                if (kd && kd->key.dsize == key.dsize && memcmp(kd->key.dptr, key.dptr, key.dsize) == 0) {
                        struct ctdb_ltdb_header *hdr = (struct ctdb_ltdb_header *)data.dptr;
                        /*
                         * we have to check if the record hasn't changed in the meantime in order to
                         * safely remove it from the database
                         */
                        if (data.dsize == sizeof(struct ctdb_ltdb_header) &&
                            hdr->dmaster == kd->ctdb->pnn &&
                            ctdb_lmaster(kd->ctdb, &(kd->key)) == kd->ctdb->pnn &&
                            kd->hdr.rsn == hdr->rsn) {
                                vdata->vacuumed++;
                                return 0;
                        }
                }
        }
        if (tdb_store(vdata->dest_db, key, data, TDB_INSERT) != 0) {
                vdata->traverse_error = true;
                return -1;
        }
        vdata->copied++;
        return 0;
}

static int ctdb_repack_tdb(struct tdb_context *tdb, TALLOC_CTX *mem_ctx, struct vacuum_data *vdata)
{
        struct tdb_context *tmp_db;

        if (tdb_transaction_start(tdb) != 0) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to start transaction\n"));
                return -1;
        }

        tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb),
                          TDB_INTERNAL|TDB_DISALLOW_NESTING,
                          O_RDWR|O_CREAT, 0);
        if (tmp_db == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to create tmp_db\n"));
                tdb_transaction_cancel(tdb);
                return -1;
        }

        vdata->traverse_error = false;
        vdata->dest_db = tmp_db;
        vdata->vacuum = true;
        vdata->vacuumed = 0;

        /*
         * repack and vacuum on-the-fly by not writing the records that are
         * going to be deleted
         */
        if (tdb_traverse_read(tdb, repack_traverse, vdata) == -1) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying out\n"));
                tdb_transaction_cancel(tdb);
                tdb_close(tmp_db);
                return -1;
        }

        DEBUG(DEBUG_INFO,(__location__ " %u records vacuumed\n", vdata->vacuumed));

        if (vdata->traverse_error) {
                DEBUG(DEBUG_ERR,(__location__ " Error during traversal\n"));
                tdb_transaction_cancel(tdb);
                tdb_close(tmp_db);
                return -1;
        }

        if (tdb_wipe_all(tdb) != 0) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to wipe database\n"));
                tdb_transaction_cancel(tdb);
                tdb_close(tmp_db);
                return -1;
        }

        vdata->traverse_error = false;
        vdata->dest_db = tdb;
        vdata->vacuum = false;
        vdata->copied = 0;

        if (tdb_traverse_read(tmp_db, repack_traverse, vdata) == -1) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying back\n"));
                tdb_transaction_cancel(tdb);
                tdb_close(tmp_db);
                return -1;
        }

        if (vdata->traverse_error) {
                DEBUG(DEBUG_ERR,(__location__ " Error during second traversal\n"));
                tdb_transaction_cancel(tdb);
                tdb_close(tmp_db);
                return -1;
        }

        tdb_close(tmp_db);

        if (tdb_transaction_commit(tdb) != 0) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to commit\n"));
                return -1;
        }
        DEBUG(DEBUG_INFO,(__location__ " %u records copied\n", vdata->copied));

        return 0;
}

/*
 * repack and vacuum a db
 * called from the child context
 */
static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
                                     TALLOC_CTX *mem_ctx,
                                     bool full_vacuum_run)
{
        uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
        uint32_t vacuum_limit = ctdb_db->ctdb->tunable.vacuum_limit;
        const char *name = ctdb_db->db_name;
        int freelist_size;
        struct vacuum_data *vdata;

        freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
        if (freelist_size == -1) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
                return -1;
        }

        vdata = talloc_zero(mem_ctx, struct vacuum_data);
        if (vdata == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                return -1;
        }

        vdata->ctdb = ctdb_db->ctdb;
        vdata->vacuum_limit = vacuum_limit;
        vdata->repack_limit = repack_limit;
        vdata->delete_list = trbt_create(vdata, 0);
        vdata->ctdb_db = ctdb_db;
        if (vdata->delete_list == NULL) {
                DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
                talloc_free(vdata);
                return -1;
        }

        vdata->start = timeval_current();

        /*
         * gather all records that can be deleted in vdata
         */
        if (ctdb_vacuum_db(ctdb_db, vdata, full_vacuum_run) != 0) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
        }

        /*
         * decide if a repack is necessary
         */
        if (freelist_size < repack_limit && vdata->delete_left < vacuum_limit)
        {
                talloc_free(vdata);
                return 0;
        }

        DEBUG(DEBUG_INFO,("Repacking %s with %u freelist entries and %u records to delete\n",
                          name, freelist_size, vdata->delete_left));

        /*
         * repack and implicitly get rid of the records we can delete
         */
        if (ctdb_repack_tdb(ctdb_db->ltdb->tdb, mem_ctx, vdata) != 0) {
                DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
                talloc_free(vdata);
                return -1;
        }
        talloc_free(vdata);

        return 0;
}
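
/*
 * Return the vacuuming interval (in seconds) for the database, taken from
 * ctdb_db->ctdb->tunable.vacuum_interval.
 */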
static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
{
        uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;

        return interval;
}
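
/*
 * Destructor for the vacuum child context: kill the child if it is still
 * running, otherwise count the completed run towards the fast-path counter,
 * unlink the context from the list of vacuumers and schedule the next
 * vacuum event.
 */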
static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
        double l = timeval_elapsed(&child_ctx->start_time);
        struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;

        DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

        if (child_ctx->child_pid != -1) {
                ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
        } else {
                /* Bump the number of successful fast-path runs. */
                child_ctx->vacuum_handle->fast_path_count++;
        }

        DLIST_REMOVE(ctdb->vacuumers, child_ctx);

        event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
                        timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                        ctdb_vacuum_event, child_ctx->vacuum_handle);

        return 0;
}

/*
 * this event is generated when a vacuum child process times out
 */
static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
                                 struct timeval t, void *private_data)
{
        struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);

        DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));

        child_ctx->status = VACUUM_TIMEOUT;

        talloc_free(child_ctx);
}

/*
 * this event is generated when a vacuum child process has completed
 */
static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
                                 uint16_t flags, void *private_data)
{
        struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
        char c = 0;
        int ret;

        DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
        child_ctx->child_pid = -1;

        ret = read(child_ctx->fd[0], &c, 1);
        if (ret != 1 || c != 0) {
                child_ctx->status = VACUUM_ERROR;
                DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
        } else {
                child_ctx->status = VACUUM_OK;
        }

        talloc_free(child_ctx);
}

/*
 * this event is called every time we need to start a new vacuum process
 */
static void
ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
                  struct timeval t, void *private_data)
{
        struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
        struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
        struct ctdb_context *ctdb = ctdb_db->ctdb;
        struct ctdb_vacuum_child_context *child_ctx;
        struct tevent_fd *fde;
        int ret;

        /* we don't vacuum if we are in recovery mode, or db frozen */
        if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
            ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
                DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
                                   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
                                   : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
                                   ? "freeze pending" : "frozen"));

                event_add_timed(ctdb->ev, vacuum_handle,
                                timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                                ctdb_vacuum_event, vacuum_handle);
                return;
        }

        child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
        if (child_ctx == NULL) {
                DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
                ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
        }

        ret = pipe(child_ctx->fd);
        if (ret != 0) {
                talloc_free(child_ctx);
                DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
                event_add_timed(ctdb->ev, vacuum_handle,
                                timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                                ctdb_vacuum_event, vacuum_handle);
                return;
        }

        if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
                vacuum_handle->fast_path_count = 0;
        }

        child_ctx->child_pid = ctdb_fork(ctdb);
        if (child_ctx->child_pid == (pid_t)-1) {
                close(child_ctx->fd[0]);
                close(child_ctx->fd[1]);
                talloc_free(child_ctx);
                DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
                event_add_timed(ctdb->ev, vacuum_handle,
                                timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                                ctdb_vacuum_event, vacuum_handle);
                return;
        }


        if (child_ctx->child_pid == 0) {
                char cc = 0;
                bool full_vacuum_run = false;
                close(child_ctx->fd[0]);

                DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
                ctdb_set_process_name("ctdb_vacuum");
                if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
                        DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
                        _exit(1);
                }

                if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
                    (vacuum_handle->fast_path_count == 0))
                {
                        full_vacuum_run = true;
                }
                cc = ctdb_vacuum_and_repack_db(ctdb_db, child_ctx,
                                               full_vacuum_run);

                write(child_ctx->fd[1], &cc, 1);
                _exit(0);
        }

        set_close_on_exec(child_ctx->fd[0]);
        close(child_ctx->fd[1]);

        child_ctx->status = VACUUM_RUNNING;
        child_ctx->start_time = timeval_current();

        DLIST_ADD(ctdb->vacuumers, child_ctx);
        talloc_set_destructor(child_ctx, vacuum_child_destructor);

        /*
         * Clear the fastpath vacuuming list in the parent.
         */
        talloc_free(ctdb_db->delete_queue);
        ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
        if (ctdb_db->delete_queue == NULL) {
                /* fatal here? ... */
                ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
                                 "in parent context. Shutting down\n");
        }

        event_add_timed(ctdb->ev, child_ctx,
                        timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
                        vacuum_child_timeout, child_ctx);

        DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

        fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
                           EVENT_FD_READ, vacuum_child_handler, child_ctx);
        tevent_fd_set_auto_close(fde);

        vacuum_handle->child_ctx = child_ctx;
        child_ctx->vacuum_handle = vacuum_handle;
}

void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
{
        /* Simply free them all. */
        while (ctdb->vacuumers) {
                DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
                           ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
                           (int)ctdb->vacuumers->child_pid));
                /* vacuum_child_destructor kills it, removes from list */
                talloc_free(ctdb->vacuumers);
        }
}

/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
        if (ctdb_db->persistent != 0) {
                DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
                return 0;
        }

        ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
        CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);

        ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
        ctdb_db->vacuum_handle->fast_path_count = 0;

        event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
                        timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
                        ctdb_vacuum_event, ctdb_db->vacuum_handle);

        return 0;
}
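
/*
 * Remove a record from the fast-path delete queue if the queued entry
 * matches the given key, guarding against hash collisions.
 */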
static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
                                            const struct ctdb_ltdb_header *hdr,
                                            const TDB_DATA key)
{
        struct delete_record_data *kd;
        uint32_t hash;

        hash = (uint32_t)ctdb_hash(&key);

        DEBUG(DEBUG_DEBUG, (__location__
                            " remove_record_from_delete_queue: "
                            "db[%s] "
                            "db_id[0x%08x] "
                            "key_hash[0x%08x] "
                            "lmaster[%u] "
                            "migrated_with_data[%s]\n",
                             ctdb_db->db_name, ctdb_db->db_id,
                             hash,
                             ctdb_lmaster(ctdb_db->ctdb, &key),
                             hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

        kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
        if (kd == NULL) {
                DEBUG(DEBUG_DEBUG, (__location__
                                    " remove_record_from_delete_queue: "
                                    "record not in queue (hash[0x%08x])\n.",
                                    hash));
                return;
        }

        if ((kd->key.dsize != key.dsize) ||
            (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
        {
                DEBUG(DEBUG_DEBUG, (__location__
                                    " remove_record_from_delete_queue: "
                                    "hash collision for key with hash[0x%08x] "
                                    "in db[%s] - skipping\n",
                                    hash, ctdb_db->db_name));
                return;
        }

        DEBUG(DEBUG_DEBUG, (__location__
                            " remove_record_from_delete_queue: "
                            "removing key with hash[0x%08x]\n",
                             hash));

        talloc_free(kd);

        return;
}

/**
 * Insert a record into the ctdb_db context's delete queue,
 * handling hash collisions.
 */
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
                                           const struct ctdb_ltdb_header *hdr,
                                           TDB_DATA key)
{
        struct delete_record_data *kd;
        uint32_t hash;
        int ret;

        hash = (uint32_t)ctdb_hash(&key);

        DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
                           "db_id[0x%08x] "
                           "key_hash[0x%08x] "
                           "lmaster[%u] "
                           "migrated_with_data[%s]\n",
                            ctdb_db->db_name, ctdb_db->db_id,
                            hash,
                            ctdb_lmaster(ctdb_db->ctdb, &key),
                            hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

        kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
        if (kd != NULL) {
                if ((kd->key.dsize != key.dsize) ||
                    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
                {
                        DEBUG(DEBUG_INFO,
                              (__location__ " schedule for deletion: "
                               "hash collision for key hash [0x%08x]. "
                               "Skipping the record.\n", hash));
                        return 0;
                } else {
                        DEBUG(DEBUG_DEBUG,
                              (__location__ " schedule for deletion: "
                               "updating entry for key with hash [0x%08x].\n",
                               hash));
                }
        }

        ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
                                                  ctdb_db->delete_queue,
                                                  hdr, key);
        if (ret != 0) {
                DEBUG(DEBUG_INFO,
                      (__location__ " schedule for deletion: error "
                       "inserting key with hash [0x%08x] into delete queue\n",
                       hash));
                return -1;
        }

        return 0;
}

/**
 * Schedule a record for deletion.
 * Called from the parent context.
 */
int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
                                           TDB_DATA indata)
{
        struct ctdb_control_schedule_for_deletion *dd;
        struct ctdb_db_context *ctdb_db;
        int ret;
        TDB_DATA key;

        dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;

        ctdb_db = find_ctdb_db(ctdb, dd->db_id);
        if (ctdb_db == NULL) {
                DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
                                  dd->db_id));
                return -1;
        }

        key.dsize = dd->keylen;
        key.dptr = dd->key;

        ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);

        return ret;
}

int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
                                         const struct ctdb_ltdb_header *hdr,
                                         TDB_DATA key)
{
        int ret;
        struct ctdb_control_schedule_for_deletion *dd;
        TDB_DATA indata;
        int32_t status;

        if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
                /* main daemon - directly queue */
                ret = insert_record_into_delete_queue(ctdb_db, hdr, key);

                return ret;
        }

        /* if we don't have a connection to the daemon we can not send
           a control. For example sometimes from update_record control child
           process.
        */
        if (!ctdb_db->ctdb->can_send_controls) {
                return -1;
        }

        /* child process: send the main daemon a control */
        indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
        indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
        if (indata.dptr == NULL) {
                DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
                return -1;
        }
        dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
        dd->db_id = ctdb_db->db_id;
        dd->hdr = *hdr;
        dd->keylen = key.dsize;
        memcpy(dd->key, key.dptr, key.dsize);

        ret = ctdb_control(ctdb_db->ctdb,
                           CTDB_CURRENT_NODE,
                           0, /* srvid */
                           CTDB_CONTROL_SCHEDULE_FOR_DELETION,
                           CTDB_CTRL_FLAG_NOREPLY, /* flags */
                           indata,
                           NULL, /* mem_ctx */
                           NULL, /* outdata */
                           &status,
                           NULL, /* timeout : NULL == wait forever */
                           NULL); /* error message */

        talloc_free(indata.dptr);

        if (ret != 0 || status != 0) {
                DEBUG(DEBUG_ERR, (__location__ " Error sending "
                                  "SCHEDULE_FOR_DELETION "
                                  "control.\n"));
                if (ret == 0) {
                        ret = -1;
                }
        }

        return ret;
}

void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
                                         const struct ctdb_ltdb_header *hdr,
                                         const TDB_DATA key)
{
        if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
                /*
                 * Only remove the record from the delete queue if called
                 * in the main daemon.
                 */
                return;
        }

        remove_record_from_delete_queue(ctdb_db, hdr, key);

        return;
}