/*
   Copyright (C) Ronnie Sahlberg 2009
   Copyright (C) Michael Adam 2010-2013
   Copyright (C) Stefan Metzmacher 2010-2011

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/

#include "system/network.h"
#include "system/filesys.h"
#include "system/dir.h"
#include "../include/ctdb_private.h"
#include "lib/util/dlinklist.h"
#include "../include/ctdb_private.h"
#include "../common/rb_tree.h"

#define TIMELIMIT() timeval_current_ofs(10, 0)
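
/*
 * Status of an asynchronous vacuuming child process as seen by the parent
 * daemon: VACUUM_RUNNING while the child is at work, and VACUUM_OK,
 * VACUUM_ERROR or VACUUM_TIMEOUT once the run has finished.
 */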
enum vacuum_child_status { VACUUM_RUNNING, VACUUM_OK, VACUUM_ERROR, VACUUM_TIMEOUT};

struct ctdb_vacuum_child_context {
	struct ctdb_vacuum_child_context *next, *prev;
	struct ctdb_vacuum_handle *vacuum_handle;
	/* fd child writes status to */
	int fd[2];
	pid_t child_pid;
	enum vacuum_child_status status;
	struct timeval start_time;
};

struct ctdb_vacuum_handle {
	struct ctdb_db_context *ctdb_db;
	struct ctdb_vacuum_child_context *child_ctx;
	uint32_t fast_path_count;
};

/* a list of records to possibly delete */
struct vacuum_data {
	uint32_t repack_limit;
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tdb_context *dest_db;
	trbt_tree_t *delete_list;
	struct ctdb_marshall_buffer **vacuum_fetch_list;
	struct timeval start;
	bool traverse_error;
	bool vacuum;
	struct {
		struct {
			uint32_t added_to_delete_list;
			uint32_t added_to_vacuum_fetch_list;
			uint32_t deleted;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} delete_queue;
		struct {
			uint32_t scheduled;
			uint32_t skipped;
			uint32_t error;
			uint32_t total;
		} db_traverse;
		struct {
			uint32_t total;
			uint32_t remote_error;
			uint32_t local_error;
			uint32_t skipped;
			uint32_t deleted;
			uint32_t left;
		} delete_list;
		struct {
			uint32_t vacuumed;
			uint32_t copied;
		} repack;
	} count;
};
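
/*
 * Note: vacuum_data carries the per-run state shared by the traverse
 * callbacks below, including the per-phase statistics that are checked
 * for consistency and logged at the end of each traversal.
 */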

/* this structure contains the information for one record to be deleted */
struct delete_record_data {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct ctdb_ltdb_header hdr;
	TDB_DATA key;
	uint8_t keydata[1];
};
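
/*
 * The key bytes live in the keydata tail of the structure itself
 * (see insert_delete_record_data_into_tree() below), so a single talloc
 * allocation holds both the record metadata and its key.
 */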

struct delete_records_list {
	struct ctdb_marshall_buffer *records;
	struct vacuum_data *vdata;
};

static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key);

/**
 * Store key and header in a tree, indexed by the key hash.
 */
static int insert_delete_record_data_into_tree(struct ctdb_context *ctdb,
					       struct ctdb_db_context *ctdb_db,
					       trbt_tree_t *tree,
					       const struct ctdb_ltdb_header *hdr,
					       TDB_DATA key)
{
	struct delete_record_data *dd;
	uint32_t hash;
	size_t len;

	len = offsetof(struct delete_record_data, keydata) + key.dsize;

	dd = (struct delete_record_data *)talloc_size(tree, len);
	if (dd == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	talloc_set_name_const(dd, "struct delete_record_data");

	dd->ctdb      = ctdb;
	dd->ctdb_db   = ctdb_db;
	dd->key.dsize = key.dsize;
	dd->key.dptr  = dd->keydata;
	memcpy(dd->keydata, key.dptr, key.dsize);

	dd->hdr = *hdr;

	hash = ctdb_hash(&key);

	trbt_insert32(tree, hash, dd);

	return 0;
}
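
/*
 * The tree is indexed by the 32-bit key hash only, so two different keys
 * can collide. The callers handle this: add_record_to_delete_list() skips
 * a record whose hash is already present, and insert_record_into_delete_queue()
 * compares the stored key and only updates the entry when the keys match.
 */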

static int add_record_to_delete_list(struct vacuum_data *vdata, TDB_DATA key,
				     struct ctdb_ltdb_header *hdr)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t hash;
	int ret;

	hash = ctdb_hash(&key);

	if (trbt_lookup32(vdata->delete_list, hash)) {
		DEBUG(DEBUG_INFO, (__location__ " Hash collision when vacuuming, skipping this record.\n"));
		return 0;
	}

	ret = insert_delete_record_data_into_tree(ctdb, ctdb_db,
						  vdata->delete_list,
						  hdr, key);
	if (ret != 0) {
		return -1;
	}

	vdata->count.delete_list.total++;

	return 0;
}

/**
 * Add a record to the list of records to be sent
 * to their lmaster with VACUUM_FETCH.
 */
static int add_record_to_vacuum_fetch_list(struct vacuum_data *vdata,
					   TDB_DATA key)
{
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_rec_data *rec;
	uint32_t lmaster;
	size_t old_size;
	struct ctdb_marshall_buffer *vfl;

	lmaster = ctdb_lmaster(ctdb, &key);

	vfl = vdata->vacuum_fetch_list[lmaster];

	rec = ctdb_marshall_record(vfl, ctdb->pnn, key, NULL, tdb_null);
	if (rec == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		vdata->traverse_error = true;
		return -1;
	}

	old_size = talloc_get_size(vfl);
	vfl = talloc_realloc_size(NULL, vfl, old_size + rec->length);
	if (vfl == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
		vdata->traverse_error = true;
		return -1;
	}
	vdata->vacuum_fetch_list[lmaster] = vfl;

	vfl->count++;
	memcpy(old_size+(uint8_t *)vfl, rec, rec->length);

	return 0;
}

static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
			      struct timeval t, void *private_data);

static int vacuum_record_parser(TDB_DATA key, TDB_DATA data, void *private_data)
{
	struct ctdb_ltdb_header *header =
		(struct ctdb_ltdb_header *)private_data;

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		return -1;
	}

	*header = *(struct ctdb_ltdb_header *)data.dptr;

	return 0;
}
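
/*
 * A record is considered empty (and hence a deletion candidate) when its
 * tdb data consists of exactly one ctdb_ltdb_header with no user payload.
 * The parser above copies that header out so the callers can re-check the
 * dmaster and RSN under the chainlock before acting on the record.
 */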

/**
 * traverse function for gathering the records that can be deleted
 */
static int vacuum_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct vacuum_data *vdata = talloc_get_type(private_data,
						    struct vacuum_data);
	struct ctdb_context *ctdb = vdata->ctdb;
	struct ctdb_db_context *ctdb_db = vdata->ctdb_db;
	uint32_t lmaster;
	struct ctdb_ltdb_header *hdr;
	int res;

	vdata->count.db_traverse.total++;

	lmaster = ctdb_lmaster(ctdb, &key);
	if (lmaster >= ctdb->num_nodes) {
		vdata->count.db_traverse.error++;
		DEBUG(DEBUG_CRIT, (__location__
				   " lmaster[%u] >= ctdb->num_nodes[%u] for key"
				   " with hash[%u]!\n",
				   (unsigned)lmaster,
				   (unsigned)ctdb->num_nodes,
				   (unsigned)ctdb_hash(&key)));
		return -1;
	}

	if (data.dsize != sizeof(struct ctdb_ltdb_header)) {
		/* it is not a deleted record */
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	hdr = (struct ctdb_ltdb_header *)data.dptr;

	if (hdr->dmaster != ctdb->pnn) {
		vdata->count.db_traverse.skipped++;
		return 0;
	}

	/*
	 * Add the record to this process's delete_queue for processing
	 * in the subsequent traverse in the fast vacuum run.
	 */
	res = insert_record_into_delete_queue(ctdb_db, hdr, key);
	if (res != 0) {
		vdata->count.db_traverse.error++;
	} else {
		vdata->count.db_traverse.scheduled++;
	}

	return 0;
}

/**
 * traverse the tree of records to delete and marshall them into
 * a blob
 */
static int delete_marshall_traverse(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_rec_data *rec;
	size_t old_size;

	rec = ctdb_marshall_record(dd, recs->records->db_id, dd->key, &dd->hdr, tdb_null);
	if (rec == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " failed to marshall record\n"));
		return 0;
	}

	old_size = talloc_get_size(recs->records);
	recs->records = talloc_realloc_size(NULL, recs->records, old_size + rec->length);
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to expand\n"));
		return 0;
	}
	recs->records->count++;
	memcpy(old_size+(uint8_t *)(recs->records), rec, rec->length);

	return 0;
}

/**
 * Variant of delete_marshall_traverse() that bumps the
 * RSN of each traversed record in the database.
 *
 * This is needed to ensure that when rolling out our
 * empty record copy before remote deletion, we as the
 * record's dmaster keep a higher RSN than the non-dmaster
 * nodes. This is needed to prevent old copies from
 * resurrection in recoveries.
 */
static int delete_marshall_traverse_first(void *param, void *data)
{
	struct delete_record_data *dd = talloc_get_type(data, struct delete_record_data);
	struct delete_records_list *recs = talloc_get_type(param, struct delete_records_list);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));
	int res;

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		recs->vdata->count.delete_list.skipped++;
		recs->vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */
	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	/*
	 * Increment the record's RSN to ensure the dmaster (i.e. the current
	 * node) has the highest RSN of the record in the cluster.
	 * This is to prevent old record copies from resurrecting in recoveries
	 * if something should fail during the deletion process.
	 * Note that ctdb_ltdb_store_server() increments the RSN if called
	 * on the record's dmaster.
	 */
	res = ctdb_ltdb_store(ctdb_db, dd->key, &header, tdb_null);
	if (res != 0) {
		DEBUG(DEBUG_ERR, (__location__ ": Failed to store record with "
				  "key hash [0x%08x] on database db[%s].\n",
				  hash, ctdb_db->db_name));
		goto skip;
	}

	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	goto done;

skip:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	recs->vdata->count.delete_list.skipped++;
	recs->vdata->count.delete_list.left--;
	talloc_free(dd);
	return 0;

done:
	return delete_marshall_traverse(param, data);
}

/**
 * traverse function for the traversal of the delete_queue,
 * the fast-path vacuuming list.
 *
 *  - If the record has been migrated off the node
 *    or has been revived (filled with data) on the node,
 *    then skip the record.
 *
 *  - If the current node is the record's lmaster and it is
 *    a record that has never been migrated with data, then
 *    delete the record from the local tdb.
 *
 *  - If the current node is the record's lmaster and it has
 *    been migrated with data, then schedule it for the normal
 *    vacuuming procedure (i.e. add it to the delete_list).
 *
 *  - If the current node is NOT the record's lmaster then
 *    add it to the list of records that are to be sent to
 *    the lmaster with the VACUUM_FETCH message.
 */
static int delete_queue_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb; /* or dd->ctdb ??? */
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	vdata->count.delete_queue.total++;

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_queue.error++;
		return 0;
	}

	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skipped;
	}

	if (header.dmaster != ctdb->pnn) {
		/* The record has been migrated off the node. Skip. */
		goto skipped;
	}

	if (header.rsn != dd->hdr.rsn) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 */
		goto skipped;
	}

	/*
	 * We are dmaster, and the record has no data, and it has
	 * not been migrated after it has been queued for deletion.
	 *
	 * At this stage, the record could still have been revived locally
	 * and last been written with empty data. This can only be
	 * fixed with the addition of an active or delete flag. (TODO)
	 */

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		res = add_record_to_vacuum_fetch_list(vdata, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records to send to lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_vacuum_fetch_list++;
		}
		goto done;
	}

	/* use header->flags or dd->hdr.flags ?? */
	if (dd->hdr.flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA) {
		res = add_record_to_delete_list(vdata, dd->key, &dd->hdr);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error adding record to list "
			       "of records for deletion on lmaster.\n"));
			vdata->count.delete_queue.error++;
		} else {
			vdata->count.delete_queue.added_to_delete_list++;
		}
	} else {
		res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

		if (res != 0) {
			DEBUG(DEBUG_ERR,
			      (__location__ " Error deleting record with key "
			       "hash [0x%08x] from local data base db[%s].\n",
			       hash, ctdb_db->db_name));
			vdata->count.delete_queue.error++;
			goto done;
		}

		DEBUG(DEBUG_DEBUG,
		      (__location__ " Deleted record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_queue.deleted++;
	}

	goto done;

skipped:
	vdata->count.delete_queue.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	return 0;
}
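
/*
 * Note that all of the checks and the deletion above are performed while
 * holding the record's chainlock, so the header cannot change between the
 * empty/dmaster/RSN checks and the actual tdb_delete().
 */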

/**
 * Delete the records that we are lmaster and dmaster for and
 * that could be deleted on all other nodes via the TRY_DELETE_RECORDS
 * control.
 */
static int delete_record_traverse(void *param, void *data)
{
	struct delete_record_data *dd =
		talloc_get_type(data, struct delete_record_data);
	struct vacuum_data *vdata = talloc_get_type(param, struct vacuum_data);
	struct ctdb_db_context *ctdb_db = dd->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int res;
	struct ctdb_ltdb_header header;
	uint32_t lmaster;
	uint32_t hash = ctdb_hash(&(dd->key));

	res = tdb_chainlock(ctdb_db->ltdb->tdb, dd->key);
	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error getting chainlock on record with "
		       "key hash [0x%08x] on database db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		vdata->count.delete_list.left--;
		talloc_free(dd);
		return 0;
	}

	/*
	 * Verify that the record is still empty, its RSN has not
	 * changed and that we are still its lmaster and dmaster.
	 */
	res = tdb_parse_record(ctdb_db->ltdb->tdb, dd->key,
			       vacuum_record_parser, &header);
	if (res != 0) {
		goto skip;
	}

	if (header.flags & CTDB_REC_RO_FLAGS) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has read-only flags. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.dmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] has been migrated away. "
				   "skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	if (header.rsn != dd->hdr.rsn + 1) {
		/*
		 * The record has been migrated off the node and back again.
		 * But not requeued for deletion. Skip it.
		 * (Note that the first marshall traverse has bumped the RSN
		 *  on disk.)
		 */
		DEBUG(DEBUG_INFO, (__location__ ": record with hash [0x%08x] "
				   "on database db[%s] seems to have been "
				   "migrated away and back again (with empty "
				   "data). skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	lmaster = ctdb_lmaster(ctdb_db->ctdb, &dd->key);

	if (lmaster != ctdb->pnn) {
		DEBUG(DEBUG_INFO, (__location__ ": not lmaster for record in "
				   "delete list (key hash [0x%08x], db[%s]). "
				   "Strange! skipping.\n",
				   hash, ctdb_db->db_name));
		goto skip;
	}

	res = tdb_delete(ctdb_db->ltdb->tdb, dd->key);

	if (res != 0) {
		DEBUG(DEBUG_ERR,
		      (__location__ " Error deleting record with key hash "
		       "[0x%08x] from local data base db[%s].\n",
		       hash, ctdb_db->db_name));
		vdata->count.delete_list.local_error++;
		goto done;
	}

	DEBUG(DEBUG_DEBUG,
	      (__location__ " Deleted record with key hash [0x%08x] from "
	       "local data base db[%s].\n", hash, ctdb_db->db_name));

	vdata->count.delete_list.deleted++;
	goto done;

skip:
	vdata->count.delete_list.skipped++;

done:
	tdb_chainunlock(ctdb_db->ltdb->tdb, dd->key);

	talloc_free(dd);
	vdata->count.delete_list.left--;

	return 0;
}

/**
 * Traverse the delete_queue.
 * Records are either deleted directly or filled
 * into the delete list or the vacuum fetch lists
 * for further processing.
 */
static void ctdb_process_delete_queue(struct ctdb_db_context *ctdb_db,
				      struct vacuum_data *vdata)
{
	uint32_t sum;
	int ret;

	ret = trbt_traversearray32(ctdb_db->delete_queue, 1,
				   delete_queue_traverse, vdata);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing "
		      "the delete queue.\n"));
	}

	sum = vdata->count.delete_queue.deleted
	    + vdata->count.delete_queue.skipped
	    + vdata->count.delete_queue.error
	    + vdata->count.delete_queue.added_to_delete_list
	    + vdata->count.delete_queue.added_to_vacuum_fetch_list;

	if (vdata->count.delete_queue.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in fast vacuum "
		      "counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_queue.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_queue.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " fast vacuuming delete_queue traverse statistics: "
		       "db[%s] total[%u] del[%u] skp[%u] err[%u] "
		       "adl[%u] avf[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_queue.total,
		       (unsigned)vdata->count.delete_queue.deleted,
		       (unsigned)vdata->count.delete_queue.skipped,
		       (unsigned)vdata->count.delete_queue.error,
		       (unsigned)vdata->count.delete_queue.added_to_delete_list,
		       (unsigned)vdata->count.delete_queue.added_to_vacuum_fetch_list));
	}
}
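
/*
 * The consistency check above verifies that every record seen in the
 * delete_queue traverse ended up in exactly one of the outcome counters
 * (deleted, skipped, error, added to the delete list, or added to a
 * vacuum fetch list).
 */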

/**
 * read-only traverse of the database, looking for records that
 * might be able to be vacuumed.
 *
 * This is not done each time but only every tunable
 * VacuumFastPathCount times.
 */
static void ctdb_vacuum_traverse_db(struct ctdb_db_context *ctdb_db,
				    struct vacuum_data *vdata)
{
	int ret;

	ret = tdb_traverse_read(ctdb_db->ltdb->tdb, vacuum_traverse, vdata);
	if (ret == -1 || vdata->traverse_error) {
		DEBUG(DEBUG_ERR, (__location__ " Traverse error in vacuuming "
				  "'%s'\n", ctdb_db->db_name));
		return;
	}

	if (vdata->count.db_traverse.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " full vacuuming db traverse statistics: "
		       "db[%s] total[%u] skp[%u] err[%u] sched[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.db_traverse.total,
		       (unsigned)vdata->count.db_traverse.skipped,
		       (unsigned)vdata->count.db_traverse.error,
		       (unsigned)vdata->count.db_traverse.scheduled));
	}
}

/**
 * Process the vacuum fetch lists:
 * For records for which we are not the lmaster, tell the lmaster to
 * fetch the record.
 */
static void ctdb_process_vacuum_fetch_lists(struct ctdb_db_context *ctdb_db,
					    struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	for (i = 0; i < ctdb->num_nodes; i++) {
		TDB_DATA data;
		struct ctdb_marshall_buffer *vfl = vdata->vacuum_fetch_list[i];

		if (ctdb->nodes[i]->pnn == ctdb->pnn) {
			continue;
		}

		if (vfl->count == 0) {
			continue;
		}

		DEBUG(DEBUG_INFO, ("Found %u records for lmaster %u in '%s'\n",
				   vfl->count, ctdb->nodes[i]->pnn,
				   ctdb_db->db_name));

		data.dsize = talloc_get_size(vfl);
		data.dptr  = (void *)vfl;
		if (ctdb_client_send_message(ctdb, ctdb->nodes[i]->pnn,
					     CTDB_SRVID_VACUUM_FETCH,
					     data) != 0)
		{
			DEBUG(DEBUG_ERR, (__location__ " Failed to send vacuum "
					  "fetch message to %u\n",
					  ctdb->nodes[i]->pnn));
		}
	}
}

/**
 * Process the delete list:
 *
 * This is the last step of vacuuming that consistently deletes
 * those records that have been migrated with data and can hence
 * not be deleted when leaving a node.
 *
 * In this step, the lmaster does the final deletion of those empty
 * records that it is also dmaster for. It has usually received
 * at least some of these records previously from the former dmasters
 * with the vacuum fetch message.
 *
 * This last step is implemented as a 3-phase process to protect from
 * races leading to data corruption:
 *
 *  1) Send the lmaster's copy to all other active nodes with the
 *     RECEIVE_RECORDS control: The remote nodes store the lmaster's copy.
 *  2) Send the records that could successfully be stored remotely
 *     in step #1 to all active nodes with the TRY_DELETE_RECORDS
 *     control. The remote nodes delete their local copy.
 *  3) The lmaster locally deletes its copies of all records that
 *     could successfully be deleted remotely in step #2.
 */
static void ctdb_process_delete_list(struct ctdb_db_context *ctdb_db,
				     struct vacuum_data *vdata)
{
	int ret, i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct delete_records_list *recs;
	TDB_DATA indata;
	struct ctdb_node_map *nodemap;
	uint32_t *active_nodes;
	int num_active_nodes;
	TALLOC_CTX *tmp_ctx;
	uint32_t sum;

	if (vdata->count.delete_list.total == 0) {
		return;
	}

	tmp_ctx = talloc_new(vdata);
	if (tmp_ctx == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return;
	}

	vdata->count.delete_list.left = vdata->count.delete_list.total;

	/*
	 * get the list of currently active nodes
	 */
	ret = ctdb_ctrl_getnodemap(ctdb, TIMELIMIT(),
				   CTDB_CURRENT_NODE,
				   tmp_ctx,
				   &nodemap);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " unable to get node map\n"));
		goto done;
	}

	active_nodes = list_of_active_nodes(ctdb, nodemap,
					    nodemap, /* talloc context */
					    false /* include self */);
	num_active_nodes = talloc_get_size(active_nodes)/sizeof(*active_nodes);

	/*
	 * Now delete the records on all active nodes in a three-phase process:
	 * 1) send all active remote nodes the current empty copy with this
	 *    node as DMASTER
	 * 2) if all nodes could store the new copy,
	 *    tell all the active remote nodes to delete all their copy
	 * 3) if all remote nodes deleted their record copy, delete it locally
	 */

	/*
	 * Step 1:
	 * Send currently empty record copy to all active nodes for storing.
	 */

	recs = talloc_zero(tmp_ctx, struct delete_records_list);
	if (recs == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;
	recs->vdata = vdata;

	/*
	 * traverse the tree of all records we want to delete and
	 * create a blob we can send to the other nodes.
	 *
	 * We call delete_marshall_traverse_first() to bump the
	 * records' RSNs in the database, to ensure we (as dmaster)
	 * keep the highest RSN of the records in the cluster.
	 */
	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse_first, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for first marshalling.\n"));
		goto done;
	}

	indata.dsize = talloc_get_size(recs->records);
	indata.dptr  = (void *)recs->records;

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				   CTDB_CONTROL_RECEIVE_RECORDS, 0,
				   indata, recs, &outdata, &res,
				   NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Error storing record copies on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not store. We remove these from
		 * the list to process further.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not store the record
				 * copy and it is the first node that failed.
				 * So we should remove it from the tree and
				 * update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from RECEIVE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}

	/*
	 * Step 2:
	 * Send the remaining records to all active nodes for deletion.
	 *
	 * The lmaster's (i.e. our) copies of these records have been stored
	 * successfully on the other nodes.
	 */

	/*
	 * Create a marshall blob from the remaining list of records to delete.
	 */

	talloc_free(recs->records);

	recs->records = (struct ctdb_marshall_buffer *)
		talloc_zero_size(recs,
				 offsetof(struct ctdb_marshall_buffer, data));
	if (recs->records == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		goto done;
	}
	recs->records->db_id = ctdb_db->db_id;

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_marshall_traverse, recs);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for second marshalling.\n"));
		goto done;
	}

	indata.dsize = talloc_get_size(recs->records);
	indata.dptr  = (void *)recs->records;

	for (i = 0; i < num_active_nodes; i++) {
		struct ctdb_marshall_buffer *records;
		struct ctdb_rec_data *rec;
		int32_t res;
		TDB_DATA outdata;

		ret = ctdb_control(ctdb, active_nodes[i], 0,
				   CTDB_CONTROL_TRY_DELETE_RECORDS, 0,
				   indata, recs, &outdata, &res,
				   NULL, NULL);
		if (ret != 0 || res != 0) {
			DEBUG(DEBUG_ERR, ("Failed to delete records on "
					  "node %u: ret[%d] res[%d]\n",
					  active_nodes[i], ret, res));
			goto done;
		}

		/*
		 * outdata contains the list of records coming back
		 * from the node: These are the records that the
		 * remote node could not delete. We remove these from
		 * the list to delete locally.
		 */
		records = (struct ctdb_marshall_buffer *)outdata.dptr;
		rec = (struct ctdb_rec_data *)&records->data[0];
		while (records->count-- > 1) {
			TDB_DATA reckey, recdata;
			struct ctdb_ltdb_header *rechdr;
			struct delete_record_data *dd;

			reckey.dptr = &rec->data[0];
			reckey.dsize = rec->keylen;
			recdata.dptr = &rec->data[reckey.dsize];
			recdata.dsize = rec->datalen;

			if (recdata.dsize < sizeof(struct ctdb_ltdb_header)) {
				DEBUG(DEBUG_CRIT,(__location__ " bad ltdb record\n"));
				goto done;
			}
			rechdr = (struct ctdb_ltdb_header *)recdata.dptr;
			recdata.dptr += sizeof(*rechdr);
			recdata.dsize -= sizeof(*rechdr);

			dd = (struct delete_record_data *)trbt_lookup32(
					vdata->delete_list,
					ctdb_hash(&reckey));
			if (dd != NULL) {
				/*
				 * The other node could not delete the
				 * record and it is the first node that
				 * failed. So we should remove it from
				 * the tree and update statistics.
				 */
				talloc_free(dd);
				vdata->count.delete_list.remote_error++;
				vdata->count.delete_list.left--;
			} else {
				DEBUG(DEBUG_ERR, (__location__ " Failed to "
				      "find record with hash 0x%08x coming "
				      "back from TRY_DELETE_RECORDS "
				      "control in delete list.\n",
				      ctdb_hash(&reckey)));
				vdata->count.delete_list.local_error++;
				vdata->count.delete_list.left--;
			}

			rec = (struct ctdb_rec_data *)(rec->length + (uint8_t *)rec);
		}
	}

	if (vdata->count.delete_list.left == 0) {
		goto success;
	}

	/*
	 * Step 3:
	 * Delete the remaining records locally.
	 *
	 * These records have successfully been deleted on all
	 * active remote nodes.
	 */

	ret = trbt_traversearray32(vdata->delete_list, 1,
				   delete_record_traverse, vdata);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error traversing the "
		      "delete list for deletion.\n"));
	}

success:

	if (vdata->count.delete_list.left != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Vacuum db[%s] error: "
		      "there are %u records left for deletion after "
		      "processing delete list\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.left));
	}

	sum = vdata->count.delete_list.deleted
	    + vdata->count.delete_list.skipped
	    + vdata->count.delete_list.remote_error
	    + vdata->count.delete_list.local_error
	    + vdata->count.delete_list.left;

	if (vdata->count.delete_list.total != sum) {
		DEBUG(DEBUG_ERR, (__location__ " Inconsistency in vacuum "
		      "delete list counts for db[%s]: total[%u] != sum[%u]\n",
		      ctdb_db->db_name,
		      (unsigned)vdata->count.delete_list.total,
		      (unsigned)sum));
	}

	if (vdata->count.delete_list.total > 0) {
		DEBUG(DEBUG_INFO,
		      (__location__
		       " vacuum delete list statistics: "
		       "db[%s] total[%u] del[%u] skip[%u] "
		       "rem.err[%u] loc.err[%u] left[%u]\n",
		       ctdb_db->db_name,
		       (unsigned)vdata->count.delete_list.total,
		       (unsigned)vdata->count.delete_list.deleted,
		       (unsigned)vdata->count.delete_list.skipped,
		       (unsigned)vdata->count.delete_list.remote_error,
		       (unsigned)vdata->count.delete_list.local_error,
		       (unsigned)vdata->count.delete_list.left));
	}

done:
	talloc_free(tmp_ctx);
}

/**
 * initialize the vacuum_data
 */
static int ctdb_vacuum_init_vacuum_data(struct ctdb_db_context *ctdb_db,
					struct vacuum_data *vdata)
{
	int i;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	vdata->count.delete_queue.added_to_delete_list = 0;
	vdata->count.delete_queue.added_to_vacuum_fetch_list = 0;
	vdata->count.delete_queue.deleted = 0;
	vdata->count.delete_queue.skipped = 0;
	vdata->count.delete_queue.error = 0;
	vdata->count.delete_queue.total = 0;
	vdata->count.db_traverse.scheduled = 0;
	vdata->count.db_traverse.skipped = 0;
	vdata->count.db_traverse.error = 0;
	vdata->count.db_traverse.total = 0;
	vdata->count.delete_list.total = 0;
	vdata->count.delete_list.left = 0;
	vdata->count.delete_list.remote_error = 0;
	vdata->count.delete_list.local_error = 0;
	vdata->count.delete_list.skipped = 0;
	vdata->count.delete_list.deleted = 0;

	/* the list needs to be of length num_nodes */
	vdata->vacuum_fetch_list = talloc_zero_array(vdata,
						     struct ctdb_marshall_buffer *,
						     ctdb->num_nodes);
	if (vdata->vacuum_fetch_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}
	for (i = 0; i < ctdb->num_nodes; i++) {
		vdata->vacuum_fetch_list[i] = (struct ctdb_marshall_buffer *)
			talloc_zero_size(vdata->vacuum_fetch_list,
					 offsetof(struct ctdb_marshall_buffer, data));
		if (vdata->vacuum_fetch_list[i] == NULL) {
			DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
			return -1;
		}
		vdata->vacuum_fetch_list[i]->db_id = ctdb_db->db_id;
	}

	return 0;
}
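
/*
 * The vacuum fetch lists are indexed by node number, so each non-lmaster
 * record gathered during the traverses can be appended directly to the
 * marshall buffer destined for its lmaster.
 */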

/**
 *  - Always do the fast vacuuming run, which traverses
 *    the in-memory delete queue: these records have been
 *    scheduled for deletion.
 *  - Only if explicitly requested, the database is traversed
 *    in order to use the traditional heuristics on empty records
 *    to trigger deletion.
 *    This is done only every VacuumFastPathCount'th vacuuming run.
 *
 * The traverse runs fill two lists:
 *
 * - The delete_list:
 *   This is the list of empty records the current
 *   node is lmaster and dmaster for. These records are later
 *   deleted first on other nodes and then locally.
 *
 *   The fast vacuuming run has a short cut for those records
 *   that have never been migrated with data: these records
 *   are immediately deleted locally, since they have left
 *   no trace on other nodes.
 *
 * - The vacuum_fetch lists
 *   (one for each other lmaster node):
 *   The records in this list are sent for deletion to
 *   their lmaster in a bulk VACUUM_FETCH message.
 *
 *   The lmaster then migrates all these records to itself
 *   so that they can be vacuumed there.
 *
 * This executes in the child context.
 */
static int ctdb_vacuum_db(struct ctdb_db_context *ctdb_db,
			  struct vacuum_data *vdata,
			  bool full_vacuum_run)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret, pnn;

	DEBUG(DEBUG_INFO, (__location__ " Entering %s vacuum run for db "
			   "%s db_id[0x%08x]\n",
			   full_vacuum_run ? "full" : "fast",
			   ctdb_db->db_name, ctdb_db->db_id));

	ret = ctdb_ctrl_getvnnmap(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE, ctdb, &ctdb->vnn_map);
	if (ret != 0) {
		DEBUG(DEBUG_ERR, ("Unable to get vnnmap from local node\n"));
		return ret;
	}

	pnn = ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);
	if (pnn == -1) {
		DEBUG(DEBUG_ERR, ("Unable to get pnn from local node\n"));
		return -1;
	}

	ctdb->pnn = pnn;

	ret = ctdb_vacuum_init_vacuum_data(ctdb_db, vdata);
	if (ret != 0) {
		return ret;
	}

	if (full_vacuum_run) {
		ctdb_vacuum_traverse_db(ctdb_db, vdata);
	}

	ctdb_process_delete_queue(ctdb_db, vdata);

	ctdb_process_vacuum_fetch_lists(ctdb_db, vdata);

	ctdb_process_delete_list(ctdb_db, vdata);

	/* this ensures we run our event queue */
	ctdb_ctrl_getpnn(ctdb, TIMELIMIT(), CTDB_CURRENT_NODE);

	return 0;
}

/**
 * traverse function for repacking
 */
static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
			   void *private_data)
{
	struct vacuum_data *vdata = (struct vacuum_data *)private_data;

	if (vdata->vacuum) {
		uint32_t hash = ctdb_hash(&key);
		struct delete_record_data *kd;
		/*
		 * check if we can ignore this record because it's in the delete_list
		 */
		kd = (struct delete_record_data *)trbt_lookup32(vdata->delete_list, hash);
		/*
		 * there might be hash collisions so we have to compare the keys here to be sure
		 */
		if (kd && kd->key.dsize == key.dsize && memcmp(kd->key.dptr, key.dptr, key.dsize) == 0) {
			struct ctdb_ltdb_header *hdr = (struct ctdb_ltdb_header *)data.dptr;
			/*
			 * we have to check if the record hasn't changed in the meantime in order to
			 * safely remove it from the database
			 */
			if (data.dsize == sizeof(struct ctdb_ltdb_header) &&
			    hdr->dmaster == kd->ctdb->pnn &&
			    ctdb_lmaster(kd->ctdb, &(kd->key)) == kd->ctdb->pnn &&
			    kd->hdr.rsn == hdr->rsn) {
				vdata->count.repack.vacuumed++;
				return 0;
			}
		}
	}

	if (tdb_store(vdata->dest_db, key, data, TDB_INSERT) != 0) {
		vdata->traverse_error = true;
		return -1;
	}
	vdata->count.repack.copied++;

	return 0;
}

static int ctdb_repack_tdb(struct tdb_context *tdb, TALLOC_CTX *mem_ctx, struct vacuum_data *vdata)
{
	struct tdb_context *tmp_db;

	if (tdb_transaction_start(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to start transaction\n"));
		return -1;
	}

	tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb),
			  TDB_INTERNAL|TDB_DISALLOW_NESTING,
			  O_RDWR|O_CREAT, 0);
	if (tmp_db == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to create tmp_db\n"));
		tdb_transaction_cancel(tdb);
		return -1;
	}

	vdata->traverse_error = false;
	vdata->dest_db = tmp_db;
	vdata->vacuum = true;
	vdata->count.repack.vacuumed = 0;
	vdata->count.repack.copied = 0;

	/*
	 * repack and vacuum on-the-fly by not writing the records that are
	 * to be deleted
	 */
	if (tdb_traverse_read(tdb, repack_traverse, vdata) == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying out\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	DEBUG(DEBUG_INFO,(__location__ " %u records vacuumed\n",
			  vdata->count.repack.vacuumed));

	if (vdata->traverse_error) {
		DEBUG(DEBUG_ERR,(__location__ " Error during traversal\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	if (tdb_wipe_all(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to wipe database\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	vdata->traverse_error = false;
	vdata->dest_db = tdb;
	vdata->vacuum = false;
	vdata->count.repack.copied = 0;

	if (tdb_traverse_read(tmp_db, repack_traverse, vdata) == -1) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to traverse copying back\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	if (vdata->traverse_error) {
		DEBUG(DEBUG_ERR,(__location__ " Error during second traversal\n"));
		tdb_transaction_cancel(tdb);
		tdb_close(tmp_db);
		return -1;
	}

	tdb_close(tmp_db);

	if (tdb_transaction_commit(tdb) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to commit\n"));
		return -1;
	}

	DEBUG(DEBUG_INFO,(__location__ " %u records copied\n",
			  vdata->count.repack.copied));

	return 0;
}
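
/*
 * The repack copies all surviving records into an in-memory TDB_INTERNAL
 * database, wipes the original inside a transaction and then copies them
 * back, so the freelist is rebuilt without ever exposing a partially
 * written database to other processes.
 */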

/**
 * repack and vacuum a db
 * called from the child context
 */
static int ctdb_vacuum_and_repack_db(struct ctdb_db_context *ctdb_db,
				     TALLOC_CTX *mem_ctx,
				     bool full_vacuum_run)
{
	uint32_t repack_limit = ctdb_db->ctdb->tunable.repack_limit;
	const char *name = ctdb_db->db_name;
	int freelist_size = 0;
	struct vacuum_data *vdata;

	vdata = talloc_zero(mem_ctx, struct vacuum_data);
	if (vdata == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		return -1;
	}

	vdata->ctdb = ctdb_db->ctdb;
	vdata->repack_limit = repack_limit;
	vdata->delete_list = trbt_create(vdata, 0);
	vdata->ctdb_db = ctdb_db;
	if (vdata->delete_list == NULL) {
		DEBUG(DEBUG_ERR,(__location__ " Out of memory\n"));
		talloc_free(vdata);
		return -1;
	}

	vdata->start = timeval_current();

	/*
	 * gather all records that can be deleted in vdata
	 */
	if (ctdb_vacuum_db(ctdb_db, vdata, full_vacuum_run) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to vacuum '%s'\n", name));
	}

	if (repack_limit != 0) {
		freelist_size = tdb_freelist_size(ctdb_db->ltdb->tdb);
		if (freelist_size == -1) {
			DEBUG(DEBUG_ERR,(__location__ " Failed to get freelist size for '%s'\n", name));
			talloc_free(vdata);
			return -1;
		}
	}

	/*
	 * decide if a repack is necessary
	 */
	if ((repack_limit == 0 || (uint32_t)freelist_size < repack_limit))
	{
		talloc_free(vdata);
		return 0;
	}

	DEBUG(DEBUG_INFO,("Repacking %s with %u freelist entries and %u records to delete\n",
			  name, freelist_size, vdata->count.delete_list.left));

	/*
	 * repack and implicitly get rid of the records we can delete
	 */
	if (ctdb_repack_tdb(ctdb_db->ltdb->tdb, mem_ctx, vdata) != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to repack '%s'\n", name));
		talloc_free(vdata);
		return -1;
	}

	talloc_free(vdata);

	return 0;
}

static uint32_t get_vacuum_interval(struct ctdb_db_context *ctdb_db)
{
	uint32_t interval = ctdb_db->ctdb->tunable.vacuum_interval;

	return interval;
}
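
/*
 * The VacuumInterval tunable is re-read every time a vacuuming run is
 * (re-)scheduled, so changes to the tunable take effect the next time the
 * vacuum event for the database is armed.
 */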

static int vacuum_child_destructor(struct ctdb_vacuum_child_context *child_ctx)
{
	double l = timeval_elapsed(&child_ctx->start_time);
	struct ctdb_db_context *ctdb_db = child_ctx->vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;

	DEBUG(DEBUG_INFO,("Vacuuming took %.3f seconds for database %s\n", l, ctdb_db->db_name));

	if (child_ctx->child_pid != -1) {
		ctdb_kill(ctdb, child_ctx->child_pid, SIGKILL);
	} else {
		/* Bump the number of successful fast-path runs. */
		child_ctx->vacuum_handle->fast_path_count++;
	}

	DLIST_REMOVE(ctdb->vacuumers, child_ctx);

	event_add_timed(ctdb->ev, child_ctx->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, child_ctx->vacuum_handle);

	return 0;
}

/**
 * this event is generated when a vacuum child process times out
 */
static void vacuum_child_timeout(struct event_context *ev, struct timed_event *te,
				 struct timeval t, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);

	DEBUG(DEBUG_ERR,("Vacuuming child process timed out for db %s\n", child_ctx->vacuum_handle->ctdb_db->db_name));

	child_ctx->status = VACUUM_TIMEOUT;

	talloc_free(child_ctx);
}

/**
 * this event is generated when a vacuum child process has completed
 */
static void vacuum_child_handler(struct event_context *ev, struct fd_event *fde,
				 uint16_t flags, void *private_data)
{
	struct ctdb_vacuum_child_context *child_ctx = talloc_get_type(private_data, struct ctdb_vacuum_child_context);
	char c = 0;
	int ret;

	DEBUG(DEBUG_INFO,("Vacuuming child process %d finished for db %s\n", child_ctx->child_pid, child_ctx->vacuum_handle->ctdb_db->db_name));
	child_ctx->child_pid = -1;

	ret = read(child_ctx->fd[0], &c, 1);
	if (ret != 1 || c != 0) {
		child_ctx->status = VACUUM_ERROR;
		DEBUG(DEBUG_ERR, ("A vacuum child process failed with an error for database %s. ret=%d c=%d\n", child_ctx->vacuum_handle->ctdb_db->db_name, ret, c));
	} else {
		child_ctx->status = VACUUM_OK;
	}

	talloc_free(child_ctx);
}

/**
 * this event is called every time we need to start a new vacuum process
 */
static void ctdb_vacuum_event(struct event_context *ev, struct timed_event *te,
			      struct timeval t, void *private_data)
{
	struct ctdb_vacuum_handle *vacuum_handle = talloc_get_type(private_data, struct ctdb_vacuum_handle);
	struct ctdb_db_context *ctdb_db = vacuum_handle->ctdb_db;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_vacuum_child_context *child_ctx;
	struct tevent_fd *fde;
	int ret;

	/* we don't vacuum if we are in recovery mode, or db frozen */
	if (ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ||
	    ctdb->freeze_mode[ctdb_db->priority] != CTDB_FREEZE_NONE) {
		DEBUG(DEBUG_INFO, ("Not vacuuming %s (%s)\n", ctdb_db->db_name,
				   ctdb->recovery_mode == CTDB_RECOVERY_ACTIVE ? "in recovery"
				   : ctdb->freeze_mode[ctdb_db->priority] == CTDB_FREEZE_PENDING
				   ? "freeze pending" : "frozen"));

		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	child_ctx = talloc(vacuum_handle, struct ctdb_vacuum_child_context);
	if (child_ctx == NULL) {
		DEBUG(DEBUG_CRIT, (__location__ " Failed to allocate child context for vacuuming of %s\n", ctdb_db->db_name));
		ctdb_fatal(ctdb, "Out of memory when creating vacuum child context. Shutting down\n");
	}

	ret = pipe(child_ctx->fd);
	if (ret != 0) {
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to create pipe for vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (vacuum_handle->fast_path_count > ctdb->tunable.vacuum_fast_path_count) {
		vacuum_handle->fast_path_count = 0;
	}

	child_ctx->child_pid = ctdb_fork(ctdb);
	if (child_ctx->child_pid == (pid_t)-1) {
		close(child_ctx->fd[0]);
		close(child_ctx->fd[1]);
		talloc_free(child_ctx);
		DEBUG(DEBUG_ERR, ("Failed to fork vacuum child process.\n"));
		event_add_timed(ctdb->ev, vacuum_handle,
				timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
				ctdb_vacuum_event, vacuum_handle);
		return;
	}

	if (child_ctx->child_pid == 0) {
		char cc = 0;
		bool full_vacuum_run = false;

		close(child_ctx->fd[0]);

		DEBUG(DEBUG_INFO,("Vacuuming child process %d for db %s started\n", getpid(), ctdb_db->db_name));
		ctdb_set_process_name("ctdb_vacuum");
		if (switch_from_server_to_client(ctdb, "vacuum-%s", ctdb_db->db_name) != 0) {
			DEBUG(DEBUG_CRIT, (__location__ "ERROR: failed to switch vacuum daemon into client mode. Shutting down.\n"));
			_exit(1);
		}

		if ((ctdb->tunable.vacuum_fast_path_count > 0) &&
		    (vacuum_handle->fast_path_count == 0))
		{
			full_vacuum_run = true;
		}
		cc = ctdb_vacuum_and_repack_db(ctdb_db, child_ctx,
					       full_vacuum_run);

		write(child_ctx->fd[1], &cc, 1);
		_exit(0);
	}

	set_close_on_exec(child_ctx->fd[0]);
	close(child_ctx->fd[1]);

	child_ctx->status = VACUUM_RUNNING;
	child_ctx->start_time = timeval_current();

	DLIST_ADD(ctdb->vacuumers, child_ctx);
	talloc_set_destructor(child_ctx, vacuum_child_destructor);

	/*
	 * Clear the fastpath vacuuming list in the parent.
	 */
	talloc_free(ctdb_db->delete_queue);
	ctdb_db->delete_queue = trbt_create(ctdb_db, 0);
	if (ctdb_db->delete_queue == NULL) {
		/* fatal here? ... */
		ctdb_fatal(ctdb, "Out of memory when re-creating vacuum tree "
				 "in parent context. Shutting down\n");
	}

	event_add_timed(ctdb->ev, child_ctx,
			timeval_current_ofs(ctdb->tunable.vacuum_max_run_time, 0),
			vacuum_child_timeout, child_ctx);

	DEBUG(DEBUG_DEBUG, (__location__ " Created PIPE FD:%d to child vacuum process\n", child_ctx->fd[0]));

	fde = event_add_fd(ctdb->ev, child_ctx, child_ctx->fd[0],
			   EVENT_FD_READ, vacuum_child_handler, child_ctx);
	tevent_fd_set_auto_close(fde);

	vacuum_handle->child_ctx = child_ctx;
	child_ctx->vacuum_handle = vacuum_handle;
}

void ctdb_stop_vacuuming(struct ctdb_context *ctdb)
{
	/* Simply free them all. */
	while (ctdb->vacuumers) {
		DEBUG(DEBUG_INFO, ("Aborting vacuuming for %s (%i)\n",
				   ctdb->vacuumers->vacuum_handle->ctdb_db->db_name,
				   (int)ctdb->vacuumers->child_pid));
		/* vacuum_child_destructor kills it, removes from list */
		talloc_free(ctdb->vacuumers);
	}
}

/* this function initializes the vacuuming context for a database
 * and starts the vacuuming events
 */
int ctdb_vacuum_init(struct ctdb_db_context *ctdb_db)
{
	if (ctdb_db->persistent != 0) {
		DEBUG(DEBUG_ERR,("Vacuuming is disabled for persistent database %s\n", ctdb_db->db_name));
		return 0;
	}

	ctdb_db->vacuum_handle = talloc(ctdb_db, struct ctdb_vacuum_handle);
	CTDB_NO_MEMORY(ctdb_db->ctdb, ctdb_db->vacuum_handle);

	ctdb_db->vacuum_handle->ctdb_db = ctdb_db;
	ctdb_db->vacuum_handle->fast_path_count = 0;

	event_add_timed(ctdb_db->ctdb->ev, ctdb_db->vacuum_handle,
			timeval_current_ofs(get_vacuum_interval(ctdb_db), 0),
			ctdb_vacuum_event, ctdb_db->vacuum_handle);

	return 0;
}

static void remove_record_from_delete_queue(struct ctdb_db_context *ctdb_db,
					    const struct ctdb_ltdb_header *hdr,
					    const TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "db[%s] db_id[0x%08x] "
			    "key_hash[0x%08x] lmaster[%u] "
			    "migrated_with_data[%s]\n",
			    ctdb_db->db_name, ctdb_db->db_id,
			    hash,
			    ctdb_lmaster(ctdb_db->ctdb, &key),
			    hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd == NULL) {
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "record not in queue (hash[0x%08x]).\n",
				    hash));
		return;
	}

	if ((kd->key.dsize != key.dsize) ||
	    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
	{
		DEBUG(DEBUG_DEBUG, (__location__
				    " remove_record_from_delete_queue: "
				    "hash collision for key with hash[0x%08x] "
				    "in db[%s] - skipping\n",
				    hash, ctdb_db->db_name));
		return;
	}

	DEBUG(DEBUG_DEBUG, (__location__
			    " remove_record_from_delete_queue: "
			    "removing key with hash[0x%08x]\n",
			    hash));

	talloc_free(kd);
}

/**
 * Insert a record into the ctdb_db context's delete queue,
 * handling hash collisions.
 */
static int insert_record_into_delete_queue(struct ctdb_db_context *ctdb_db,
					   const struct ctdb_ltdb_header *hdr,
					   TDB_DATA key)
{
	struct delete_record_data *kd;
	uint32_t hash;
	int ret;

	hash = (uint32_t)ctdb_hash(&key);

	DEBUG(DEBUG_INFO, (__location__ " schedule for deletion: db[%s] "
			   "db_id[0x%08x] "
			   "key_hash[0x%08x] lmaster[%u] "
			   "migrated_with_data[%s]\n",
			   ctdb_db->db_name, ctdb_db->db_id,
			   hash,
			   ctdb_lmaster(ctdb_db->ctdb, &key),
			   hdr->flags & CTDB_REC_FLAG_MIGRATED_WITH_DATA ? "yes" : "no"));

	kd = (struct delete_record_data *)trbt_lookup32(ctdb_db->delete_queue, hash);
	if (kd != NULL) {
		if ((kd->key.dsize != key.dsize) ||
		    (memcmp(kd->key.dptr, key.dptr, key.dsize) != 0))
		{
			DEBUG(DEBUG_INFO,
			      (__location__ " schedule for deletion: "
			       "hash collision for key hash [0x%08x]. "
			       "Skipping the record.\n", hash));
			return 0;
		} else {
			DEBUG(DEBUG_DEBUG,
			      (__location__ " schedule for deletion: "
			       "updating entry for key with hash [0x%08x].\n",
			       hash));
		}
	}

	ret = insert_delete_record_data_into_tree(ctdb_db->ctdb, ctdb_db,
						  ctdb_db->delete_queue,
						  hdr, key);
	if (ret != 0) {
		DEBUG(DEBUG_INFO,
		      (__location__ " schedule for deletion: error "
		       "inserting key with hash [0x%08x] into delete queue\n",
		       hash));
		return -1;
	}

	return 0;
}

/**
 * Schedule a record for deletion.
 * Called from the parent context.
 */
int32_t ctdb_control_schedule_for_deletion(struct ctdb_context *ctdb,
					   TDB_DATA indata)
{
	struct ctdb_control_schedule_for_deletion *dd;
	struct ctdb_db_context *ctdb_db;
	int ret;
	TDB_DATA key;

	dd = (struct ctdb_control_schedule_for_deletion *)indata.dptr;

	ctdb_db = find_ctdb_db(ctdb, dd->db_id);
	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " Unknown db id 0x%08x\n",
				  dd->db_id));
		return -1;
	}

	key.dsize = dd->keylen;
	key.dptr = dd->key;

	ret = insert_record_into_delete_queue(ctdb_db, &dd->hdr, key);

	return ret;
}

int32_t ctdb_local_schedule_for_deletion(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 TDB_DATA key)
{
	int ret;
	struct ctdb_control_schedule_for_deletion *dd;
	TDB_DATA indata;
	int32_t status;

	if (ctdb_db->ctdb->ctdbd_pid == getpid()) {
		/* main daemon - directly queue */
		ret = insert_record_into_delete_queue(ctdb_db, hdr, key);

		return ret;
	}

	/* if we don't have a connection to the daemon we can not send
	   a control. For example sometimes from update_record control child
	   process. */
	if (!ctdb_db->ctdb->can_send_controls) {
		return -1;
	}

	/* child process: send the main daemon a control */
	indata.dsize = offsetof(struct ctdb_control_schedule_for_deletion, key) + key.dsize;
	indata.dptr = talloc_zero_array(ctdb_db, uint8_t, indata.dsize);
	if (indata.dptr == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " out of memory\n"));
		return -1;
	}
	dd = (struct ctdb_control_schedule_for_deletion *)(void *)indata.dptr;
	dd->db_id = ctdb_db->db_id;
	dd->hdr = *hdr;
	dd->keylen = key.dsize;
	memcpy(dd->key, key.dptr, key.dsize);

	ret = ctdb_control(ctdb_db->ctdb,
			   CTDB_CURRENT_NODE,
			   ctdb_db->db_id,
			   CTDB_CONTROL_SCHEDULE_FOR_DELETION,
			   CTDB_CTRL_FLAG_NOREPLY, /* flags */
			   indata,
			   NULL, /* mem_ctx */
			   NULL, /* outdata */
			   &status,
			   NULL, /* timeout : NULL == wait forever */
			   NULL); /* error message */

	talloc_free(indata.dptr);

	if (ret != 0 || status != 0) {
		DEBUG(DEBUG_ERR, (__location__ " Error sending "
				  "SCHEDULE_FOR_DELETION "
				  "control.\n"));
		return -1;
	}

	return 0;
}

void ctdb_local_remove_from_delete_queue(struct ctdb_db_context *ctdb_db,
					 const struct ctdb_ltdb_header *hdr,
					 const TDB_DATA key)
{
	if (ctdb_db->ctdb->ctdbd_pid != getpid()) {
		/*
		 * Only remove the record from the delete queue if called
		 * in the main daemon.
		 */
		return;
	}

	remove_record_from_delete_queue(ctdb_db, hdr, key);
}