hammer - Disallow modifying ioctls when filesystem is read-only
sys/vfs/hammer/hammer_prune.c

/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_prune.c,v 1.19 2008/09/23 21:03:52 dillon Exp $
 */

#include "hammer.h"
/*
 * Iterate through the specified range of object ids and remove any
 * deleted records that fall entirely within a prune modulo.
 *
 * A reverse iteration is used to prevent overlapping records from being
 * created during the iteration due to alignments.  This also allows us
 * to adjust alignments without blowing up the B-Tree.
 */
static int prune_should_delete(struct hammer_ioc_prune *prune,
			hammer_btree_leaf_elm_t elm);
static void prune_check_nlinks(hammer_cursor_t cursor,
			hammer_btree_leaf_elm_t elm);
int
hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_prune *prune)
{
	struct hammer_cursor cursor;
	hammer_btree_leaf_elm_t elm;
	struct hammer_ioc_prune_elm *copy_elms;
	struct hammer_ioc_prune_elm *user_elms;
	int error;
	int isdir;
	int elm_array_size;
	int seq;
	int64_t bytes;
	uint32_t key_beg_localization;

	if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
		return(EINVAL);
	if ((prune->key_beg.localization | prune->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (prune->key_beg.localization > prune->key_end.localization)
		return(EINVAL);
	if (prune->key_beg.localization == prune->key_end.localization) {
		if (prune->key_beg.obj_id > prune->key_end.obj_id)
			return(EINVAL);
		/* key-space limitations - no check needed */
	}

	if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
		return(EINVAL);

	/*
	 * The ioctl caller has only set the localization type in the
	 * prune keys.  Initialize the cursor key localization by
	 * combining it with ip's localization (the PFS portion).
	 */
	key_beg_localization = prune->key_beg.localization;
	key_beg_localization &= HAMMER_LOCALIZE_MASK;
	key_beg_localization |= ip->obj_localization;

	prune->key_cur.localization = prune->key_end.localization;
	prune->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	prune->key_cur.localization |= ip->obj_localization;

	prune->key_cur.obj_id = prune->key_end.obj_id;
	prune->key_cur.key = HAMMER_MAX_KEY;

	/*
	 * Copy element array from userland
	 */
	elm_array_size = sizeof(*copy_elms) * prune->nelms;
	user_elms = prune->elms;
	copy_elms = kmalloc(elm_array_size, M_TEMP, M_WAITOK);
	if ((error = copyin(user_elms, copy_elms, elm_array_size)) != 0)
		goto failed;
	prune->elms = copy_elms;

	seq = trans->hmp->flusher.done;

	/*
	 * Scan backwards.  Retries typically occur if a deadlock is detected.
	 */
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = key_beg_localization;
	cursor.key_beg.obj_id = prune->key_beg.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = prune->key_cur.localization;
	cursor.key_end.obj_id = prune->key_cur.obj_id;
	cursor.key_end.key = prune->key_cur.key;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag allows the B-Tree code to clean up loose ends.  At
	 * the moment (XXX) it also means we have to hold the sync lock
	 * through the iteration.
	 */
	cursor.flags |= HAMMER_CURSOR_PRUNING;

	hammer_sync_lock_sh(trans);
	error = hammer_btree_last(&cursor);
	hammer_sync_unlock(trans);

	while (error == 0) {
		/*
		 * Check for work
		 */
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		prune->key_cur = elm->base;

		/*
		 * Filesystem went read-only during the prune
		 */
		if (trans->hmp->ronly) {
			error = EROFS;
			break;
		}

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		if (prune->stat_oldest_tid > elm->base.create_tid)
			prune->stat_oldest_tid = elm->base.create_tid;

		if (hammer_debug_general & 0x0200) {
			hdkprintf("check %016jx %016jx cre=%016jx del=%016jx\n",
				(intmax_t)elm->base.obj_id,
				(intmax_t)elm->base.key,
				(intmax_t)elm->base.create_tid,
				(intmax_t)elm->base.delete_tid);
		}

		if (prune_should_delete(prune, elm)) {
			if (hammer_debug_general & 0x0200) {
				hdkprintf("check %016jx %016jx: DELETE\n",
					(intmax_t)elm->base.obj_id,
					(intmax_t)elm->base.key);
			}

			/*
			 * NOTE: This can return EDEADLK
			 *
			 * Acquiring the sync lock guarantees that the
			 * operation will not cross a synchronization
			 * boundary (see the flusher).
			 *
			 * We don't need to track inodes or next_tid when
			 * we are destroying deleted records.
			 */
			isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);

			hammer_sync_lock_sh(trans);
			error = hammer_delete_at_cursor(&cursor,
							HAMMER_DELETE_DESTROY,
							cursor.trans->tid,
							cursor.trans->time32,
							0, &bytes);
			hammer_sync_unlock(trans);
			if (error)
				break;

			if (isdir)
				++prune->stat_dirrecords;
			else
				++prune->stat_rawrecords;
			prune->stat_bytes += bytes;

			/*
			 * The current record might now be the one after
			 * the one we deleted, set ATEDISK to force us
			 * to skip it (since we are iterating backwards).
			 */
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
		} else {
			/*
			 * Nothing to delete, but we may have to check other
			 * things.
			 */
			prune_check_nlinks(&cursor, elm);
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x0100) {
				hdkprintf("check %016jx %016jx: SKIP\n",
					(intmax_t)elm->base.obj_id,
					(intmax_t)elm->base.key);
			}
		}
		++prune->stat_scanrecords;

		/*
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}
		hammer_sync_lock_sh(trans);
		error = hammer_btree_iterate_reverse(&cursor);
		hammer_sync_unlock(trans);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		prune->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	prune->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	prune->elms = user_elms;
	kfree(copy_elms, M_TEMP);
	return(error);
}
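
/*
 * Editor's note -- illustrative only, not part of the original file:
 * this function is reached through the HAMMERIOC_PRUNE ioctl (dispatched
 * in hammer_ioctl.c), which hammer(8) uses for "hammer prune".  A rough,
 * hypothetical userland sketch of a prune-everything call, with error
 * handling elided; the bound constants are assumptions taken from
 * hammer_disk.h, and the localization values must keep the PFS bits
 * clear or the EINVAL checks above will reject the call:
 *
 *	struct hammer_ioc_prune prune;
 *
 *	bzero(&prune, sizeof(prune));
 *	prune.key_beg.localization = HAMMER_MIN_LOCALIZATION;
 *	prune.key_beg.obj_id = HAMMER_MIN_OBJID;
 *	prune.key_end.localization = HAMMER_MAX_LOCALIZATION;
 *	prune.key_end.obj_id = HAMMER_MAX_OBJID;
 *	prune.head.flags |= HAMMER_IOC_PRUNE_ALL;	(nelms must remain 0)
 *	if (ioctl(fd, HAMMERIOC_PRUNE, &prune) < 0)
 *		err(1, "HAMMERIOC_PRUNE");
 */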

/*
 * Check pruning list.  The list must be sorted in descending order.
 *
 * Return non-zero if the record should be deleted.
 */
static int
prune_should_delete(struct hammer_ioc_prune *prune, hammer_btree_leaf_elm_t elm)
{
	struct hammer_ioc_prune_elm *scan;
	int i;

	/*
	 * If pruning everything remove all records with a non-zero
	 * delete_tid.
	 */
	if (prune->head.flags & HAMMER_IOC_PRUNE_ALL) {
		if (elm->base.delete_tid != 0)
			return(1);
		return(0);
	}

	for (i = 0; i < prune->nelms; ++i) {
		scan = &prune->elms[i];

		/*
		 * Check for loop termination.
		 */
		if (elm->base.create_tid >= scan->end_tid ||
		    elm->base.delete_tid > scan->end_tid) {
			break;
		}

		/*
		 * Determine if we can delete the record.
		 */
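		/*
		 * Editor's illustration (hypothetical numbers): with
		 * beg_tid=1000, end_tid=2000, mod_tid=100, a record with
		 * create_tid=1210 and delete_tid=1230 lies in the same
		 * modulo slot ((210/100) == (230/100) == 2), so no retained
		 * pruning boundary can ever see it and it is deleted.  A
		 * record with create_tid=1290 and delete_tid=1310 straddles
		 * the boundary at 1300 (slots 2 != 3) and is kept, since a
		 * snapshot at tid 1300 still needs it.
		 */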
		if (elm->base.delete_tid &&
		    elm->base.create_tid >= scan->beg_tid &&
		    elm->base.delete_tid <= scan->end_tid &&
		    (elm->base.create_tid - scan->beg_tid) / scan->mod_tid ==
		    (elm->base.delete_tid - scan->beg_tid) / scan->mod_tid) {
			return(1);
		}
	}
	return(0);
}

/*
 * Dangling inodes can occur if processes are holding open descriptors on
 * deleted files as-of when a machine crashes.  When we find one simply
 * acquire the inode and release it.  The inode handling code will then
 * do the right thing.
 */
static
void
prune_check_nlinks(hammer_cursor_t cursor, hammer_btree_leaf_elm_t elm)
{
	hammer_inode_t ip;
	int error;

	if (elm->base.rec_type != HAMMER_RECTYPE_INODE)
		return;
	if (elm->base.delete_tid != 0)
		return;
	if (hammer_btree_extract_data(cursor))
		return;
	if (cursor->data->inode.nlinks)
		return;
	hammer_cursor_downgrade(cursor);
	ip = hammer_get_inode(cursor->trans, NULL, elm->base.obj_id,
		      HAMMER_MAX_TID,
		      elm->base.localization & HAMMER_LOCALIZE_PSEUDOFS_MASK,
		      0, &error);
	if (ip) {
		if (hammer_debug_general & 0x0001) {
			hdkprintf("pruning disconnected inode %016jx\n",
				(intmax_t)elm->base.obj_id);
		}
		hammer_rel_inode(ip, 0);
		hammer_inode_waitreclaims(cursor->trans);
	} else {
		hkprintf("unable to prune disconnected inode %016jx\n",
			(intmax_t)elm->base.obj_id);
	}
}

#if 0

/*
 * NOTE: THIS CODE HAS BEEN REMOVED!  Pruning no longer attempts to realign
 *	 adjacent records because it seriously interferes with every
 *	 mirroring algorithm I could come up with.
 *
 *	 This means that historical accesses beyond the first snapshot
 *	 softlink should be on snapshot boundaries only.  Historical
 *	 accesses from "now" to the first snapshot softlink continue to
 *	 be fine-grained.
 *
 * NOTE: It also looks like there's a bug in the removed code.  It is believed
 *	 that create_tid can sometimes get set to 0xffffffffffffffff.  Just as
 *	 well we no longer try to do this fancy shit.  Probably the attempt to
 *	 correct the rhb is blowing up the cursor's indexing or addressing
 *	 mapping.
 *
 * Align the record to cover any gaps created through the deletion of
 * records within the pruning space.  If we were to just delete the records
 * there would be gaps which in turn would cause a snapshot that is NOT on
 * a pruning boundary to appear corrupt to the user.  Forcing alignment
 * of the create_tid and delete_tid for retained records 'reconnects'
 * the previously contiguous space, making it contiguous again after the
 * deletions.
 *
 * The use of a reverse iteration allows us to safely align the records and
 * related elements without creating temporary overlaps.  XXX we should
 * add ordering dependencies for record buffers to guarantee consistency
 * during recovery.
 */
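/*
 * Editor's illustration (hypothetical numbers): with beg_tid=1000 and
 * mod_tid=100, a retained record whose create_tid=1234 yields
 * delta = (1234 - 1000) % 100 = 34, so the removed code below would
 * round the create_tid up to 1234 - 34 + 100 = 1300, the next pruning
 * boundary.  delete_tid is rounded up the same way, closing the gaps
 * left by the destroyed records.
 */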
static int
realign_prune(struct hammer_ioc_prune *prune,
	      hammer_cursor_t cursor, int realign_cre, int realign_del)
{
	struct hammer_ioc_prune_elm *scan;
	hammer_btree_elm_t elm;
	hammer_tid_t delta;
	hammer_tid_t tid;
	int error;

	hammer_cursor_downgrade(cursor);

	elm = &cursor->node->ondisk->elms[cursor->index];
	++prune->stat_realignments;

	/*
	 * Align the create_tid.  By doing a reverse iteration we guarantee
	 * that all records after our current record have already been
	 * aligned, allowing us to safely correct the right-hand-boundary
	 * (because no otherwise exactly matching record to our right will
	 * have a create_tid to the left of our aligned create_tid).
	 */
	error = 0;
	if (realign_cre >= 0) {
		scan = &prune->elms[realign_cre];

		delta = (elm->leaf.base.create_tid - scan->beg_tid) %
			scan->mod_tid;
		if (delta) {
			tid = elm->leaf.base.create_tid - delta + scan->mod_tid;

			/* can EDEADLK */
			error = hammer_btree_correct_rhb(cursor, tid + 1);
			if (error == 0) {
				error = hammer_btree_extract_leaf(cursor);
			}
			if (error == 0) {
				/* can EDEADLK */
				error = hammer_cursor_upgrade(cursor);
			}
			if (error == 0) {
				hammer_modify_node(cursor->trans, cursor->node,
					    &elm->leaf.base.create_tid,
					    sizeof(elm->leaf.base.create_tid));
				elm->leaf.base.create_tid = tid;
				hammer_modify_node_done(cursor->node);
			}
		}
	}

	/*
	 * Align the delete_tid.  This only occurs if the record is
	 * historical, i.e. was deleted at some point.  Realigning the
	 * delete_tid does not move the record within the B-Tree but may
	 * cause it to temporarily overlap a record that has not yet been
	 * pruned.
	 */
	if (error == 0 && realign_del >= 0) {
		scan = &prune->elms[realign_del];

		delta = (elm->leaf.base.delete_tid - scan->beg_tid) %
			scan->mod_tid;
		if (delta) {
			error = hammer_btree_extract_leaf(cursor);
			if (error == 0) {
				hammer_modify_node(cursor->trans, cursor->node,
					    &elm->leaf.base.delete_tid,
					    sizeof(elm->leaf.base.delete_tid));
				elm->leaf.base.delete_tid =
					    elm->leaf.base.delete_tid -
					    delta + scan->mod_tid;
				hammer_modify_node_done(cursor->node);
			}
		}
	}
	return (error);
}

#endif