HAMMER 58B/Many: Revamp ioctls, add non-monotonic timestamps, mirroring
[dragonfly.git] / sys / vfs / hammer / hammer_prune.c
blob469e447e7d7081141a9d91742e2ab7f7d3fb566b
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_prune.c,v 1.7 2008/06/24 17:38:17 dillon Exp $
 */

#include "hammer.h"
40 * Iterate through the specified range of object ids and remove any
41 * deleted records that fall entirely within a prune modulo.
43 * A reverse iteration is used to prevent overlapping records from being
44 * created during the iteration due to alignments. This also allows us
45 * to adjust alignments without blowing up the B-Tree.
47 static int check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
48 int *realign_cre, int *realign_del);
49 static int realign_prune(struct hammer_ioc_prune *prune, hammer_cursor_t cursor,
50 int realign_cre, int realign_del);
52 int
53 hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
54 struct hammer_ioc_prune *prune)
56 struct hammer_cursor cursor;
57 hammer_btree_elm_t elm;
58 struct hammer_ioc_prune_elm *copy_elms;
59 struct hammer_ioc_prune_elm *user_elms;
60 int error;
61 int isdir;
62 int realign_cre;
63 int realign_del;
64 int elm_array_size;
66 if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
67 return(EINVAL);
68 if ((prune->key_beg.localization | prune->key_end.localization) &
69 HAMMER_LOCALIZE_PSEUDOFS_MASK) {
70 return(EINVAL);
72 if (prune->key_beg.localization > prune->key_end.localization)
73 return(EINVAL);
74 if (prune->key_beg.localization == prune->key_end.localization) {
75 if (prune->key_beg.obj_id > prune->key_end.obj_id)
76 return(EINVAL);
77 /* key-space limitations - no check needed */
79 if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
80 return(EINVAL);
82 prune->key_cur.localization = prune->key_end.localization +
83 ip->obj_localization;
84 prune->key_cur.obj_id = prune->key_end.obj_id;
85 prune->key_cur.key = HAMMER_MAX_KEY;
88 * Copy element array from userland
90 elm_array_size = sizeof(*copy_elms) * prune->nelms;
91 user_elms = prune->elms;
92 copy_elms = kmalloc(elm_array_size, M_TEMP, M_WAITOK);
93 if ((error = copyin(user_elms, copy_elms, elm_array_size)) != 0)
94 goto failed;
95 prune->elms = copy_elms;
98 * Scan backwards. Retries typically occur if a deadlock is detected.
100 retry:
101 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
102 if (error) {
103 hammer_done_cursor(&cursor);
104 goto failed;
106 cursor.key_beg.localization = prune->key_beg.localization +
107 ip->obj_localization;
108 cursor.key_beg.obj_id = prune->key_beg.obj_id;
109 cursor.key_beg.key = HAMMER_MIN_KEY;
110 cursor.key_beg.create_tid = 1;
111 cursor.key_beg.delete_tid = 0;
112 cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
113 cursor.key_beg.obj_type = 0;
115 cursor.key_end.localization = prune->key_cur.localization;
116 cursor.key_end.obj_id = prune->key_cur.obj_id;
117 cursor.key_end.key = prune->key_cur.key;
118 cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
119 cursor.key_end.delete_tid = 0;
120 cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
121 cursor.key_end.obj_type = 0;
123 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
124 cursor.flags |= HAMMER_CURSOR_BACKEND;
127 * This flag allows the B-Tree code to clean up loose ends.
129 cursor.flags |= HAMMER_CURSOR_PRUNING;
131 hammer_sync_lock_sh(trans);
132 error = hammer_btree_last(&cursor);
134 while (error == 0) {
136 * Yield to more important tasks
138 if ((error = hammer_signal_check(trans->hmp)) != 0)
139 break;
140 if (trans->hmp->sync_lock.wanted) {
141 hammer_sync_unlock(trans);
142 tsleep(trans, 0, "hmrslo", hz / 10);
143 hammer_sync_lock_sh(trans);
145 if (trans->hmp->locked_dirty_count +
146 trans->hmp->io_running_count > hammer_limit_dirtybufs) {
147 hammer_sync_unlock(trans);
148 hammer_flusher_async(trans->hmp);
149 tsleep(trans, 0, "hmrslo", hz / 10);
150 hammer_sync_lock_sh(trans);
154 * Check for work
156 elm = &cursor.node->ondisk->elms[cursor.index];
157 prune->key_cur = elm->base;
159 if (prune->stat_oldest_tid > elm->leaf.base.create_tid)
160 prune->stat_oldest_tid = elm->leaf.base.create_tid;
162 if (hammer_debug_general & 0x0200) {
163 kprintf("check %016llx %016llx cre=%016llx del=%016llx\n",
164 elm->base.obj_id,
165 elm->base.key,
166 elm->base.create_tid,
167 elm->base.delete_tid);
170 if (check_prune(prune, elm, &realign_cre, &realign_del) == 0) {
171 if (hammer_debug_general & 0x0200) {
172 kprintf("check %016llx %016llx: DELETE\n",
173 elm->base.obj_id, elm->base.key);
177 * NOTE: This can return EDEADLK
179 * Acquiring the sync lock guarantees that the
180 * operation will not cross a synchronization
181 * boundary (see the flusher).
183 isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);
185 error = hammer_delete_at_cursor(&cursor,
186 &prune->stat_bytes);
187 if (error)
188 break;
190 if (isdir)
191 ++prune->stat_dirrecords;
192 else
193 ++prune->stat_rawrecords;
196 * The current record might now be the one after
197 * the one we deleted, set ATEDISK to force us
198 * to skip it (since we are iterating backwards).
200 cursor.flags |= HAMMER_CURSOR_ATEDISK;
201 } else if (realign_cre >= 0 || realign_del >= 0) {
202 error = realign_prune(prune, &cursor,
203 realign_cre, realign_del);
204 if (error == 0) {
205 cursor.flags |= HAMMER_CURSOR_ATEDISK;
206 if (hammer_debug_general & 0x0200) {
207 kprintf("check %016llx %016llx: "
208 "REALIGN\n",
209 elm->base.obj_id,
210 elm->base.key);
213 } else {
214 cursor.flags |= HAMMER_CURSOR_ATEDISK;
215 if (hammer_debug_general & 0x0100) {
216 kprintf("check %016llx %016llx: SKIP\n",
217 elm->base.obj_id, elm->base.key);
220 ++prune->stat_scanrecords;
221 error = hammer_btree_iterate_reverse(&cursor);
223 hammer_sync_unlock(trans);
224 if (error == ENOENT)
225 error = 0;
226 hammer_done_cursor(&cursor);
227 if (error == EDEADLK)
228 goto retry;
229 if (error == EINTR) {
230 prune->head.flags |= HAMMER_IOC_HEAD_INTR;
231 error = 0;
233 failed:
234 prune->key_cur.localization &= HAMMER_LOCALIZE_MASK;
235 prune->elms = user_elms;
236 kfree(copy_elms, M_TEMP);
237 return(error);
241 * Check pruning list. The list must be sorted in descending order.
243 static int
244 check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
245 int *realign_cre, int *realign_del)
247 struct hammer_ioc_prune_elm *scan;
248 int i;
250 *realign_cre = -1;
251 *realign_del = -1;
254 * If pruning everything remove all records with a non-zero
255 * delete_tid.
257 if (prune->head.flags & HAMMER_IOC_PRUNE_ALL) {
258 if (elm->base.delete_tid != 0)
259 return(0);
260 return(-1);
263 for (i = 0; i < prune->nelms; ++i) {
264 scan = &prune->elms[i];
267 * Locate the scan index covering the create and delete TIDs.
269 if (*realign_cre < 0 &&
270 elm->base.create_tid >= scan->beg_tid &&
271 elm->base.create_tid < scan->end_tid) {
272 *realign_cre = i;
274 if (*realign_del < 0 && elm->base.delete_tid &&
275 elm->base.delete_tid > scan->beg_tid &&
276 elm->base.delete_tid <= scan->end_tid) {
277 *realign_del = i;
281 * Now check for loop termination.
283 if (elm->base.create_tid >= scan->end_tid ||
284 elm->base.delete_tid > scan->end_tid) {
285 break;
289 * Now determine if we can delete the record.
291 if (elm->base.delete_tid &&
292 elm->base.create_tid >= scan->beg_tid &&
293 elm->base.delete_tid <= scan->end_tid &&
294 (elm->base.create_tid - scan->beg_tid) / scan->mod_tid ==
295 (elm->base.delete_tid - scan->beg_tid) / scan->mod_tid) {
296 return(0);
299 return(-1);
303 * Align the record to cover any gaps created through the deletion of
304 * records within the pruning space. If we were to just delete the records
305 * there would be gaps which in turn would cause a snapshot that is NOT on
306 * a pruning boundary to appear corrupt to the user. Forcing alignment
307 * of the create_tid and delete_tid for retained records 'reconnects'
308 * the previously contiguous space, making it contiguous again after the
309 * deletions.
311 * The use of a reverse iteration allows us to safely align the records and
312 * related elements without creating temporary overlaps. XXX we should
313 * add ordering dependancies for record buffers to guarantee consistency
314 * during recovery.
316 static int
317 realign_prune(struct hammer_ioc_prune *prune,
318 hammer_cursor_t cursor, int realign_cre, int realign_del)
320 struct hammer_ioc_prune_elm *scan;
321 hammer_btree_elm_t elm;
322 hammer_tid_t delta;
323 hammer_tid_t tid;
324 int error;
326 hammer_cursor_downgrade(cursor);
328 elm = &cursor->node->ondisk->elms[cursor->index];
329 ++prune->stat_realignments;
332 * Align the create_tid. By doing a reverse iteration we guarantee
333 * that all records after our current record have already been
334 * aligned, allowing us to safely correct the right-hand-boundary
335 * (because no record to our right is otherwise exactly matching
336 * will have a create_tid to the left of our aligned create_tid).
338 error = 0;
339 if (realign_cre >= 0) {
340 scan = &prune->elms[realign_cre];
342 delta = (elm->leaf.base.create_tid - scan->beg_tid) %
343 scan->mod_tid;
344 if (delta) {
345 tid = elm->leaf.base.create_tid - delta + scan->mod_tid;
347 /* can EDEADLK */
348 error = hammer_btree_correct_rhb(cursor, tid + 1);
349 if (error == 0) {
350 error = hammer_btree_extract(cursor,
351 HAMMER_CURSOR_GET_LEAF);
353 if (error == 0) {
354 /* can EDEADLK */
355 error = hammer_cursor_upgrade(cursor);
357 if (error == 0) {
358 hammer_modify_node(cursor->trans, cursor->node,
359 &elm->leaf.base.create_tid,
360 sizeof(elm->leaf.base.create_tid));
361 elm->leaf.base.create_tid = tid;
362 hammer_modify_node_done(cursor->node);
368 * Align the delete_tid. This only occurs if the record is historical
369 * was deleted at some point. Realigning the delete_tid does not
370 * move the record within the B-Tree but may cause it to temporarily
371 * overlap a record that has not yet been pruned.
373 if (error == 0 && realign_del >= 0) {
374 scan = &prune->elms[realign_del];
376 delta = (elm->leaf.base.delete_tid - scan->beg_tid) %
377 scan->mod_tid;
378 if (delta) {
379 error = hammer_btree_extract(cursor,
380 HAMMER_CURSOR_GET_LEAF);
381 if (error == 0) {
382 hammer_modify_node(cursor->trans, cursor->node,
383 &elm->leaf.base.delete_tid,
384 sizeof(elm->leaf.base.delete_tid));
385 elm->leaf.base.delete_tid =
386 elm->leaf.base.delete_tid -
387 delta + scan->mod_tid;
388 hammer_modify_node_done(cursor->node);
392 return (error);