/*
 * HAMMER utilities: scan feedback ioctls.
 * sys/vfs/hammer/hammer_ioctl.c (DragonFly BSD)
 */
1 /*
2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.14 2008/05/10 22:56:36 dillon Exp $
 */
37 #include "hammer.h"
39 static int hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
40 struct hammer_ioc_prune *prune);
41 static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
42 struct hammer_ioc_history *hist);
44 int
45 hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
46 struct ucred *cred)
48 struct hammer_transaction trans;
49 int error;
51 error = suser_cred(cred, PRISON_ROOT);
53 hammer_start_transaction(&trans, ip->hmp);
55 switch(com) {
56 case HAMMERIOC_PRUNE:
57 if (error == 0) {
58 error = hammer_ioc_prune(&trans, ip,
59 (struct hammer_ioc_prune *)data);
61 break;
62 case HAMMERIOC_GETHISTORY:
63 error = hammer_ioc_gethistory(&trans, ip,
64 (struct hammer_ioc_history *)data);
65 break;
66 case HAMMERIOC_REBLOCK:
67 error = hammer_ioc_reblock(&trans, ip,
68 (struct hammer_ioc_reblock *)data);
69 break;
70 default:
71 error = EOPNOTSUPP;
72 break;
74 hammer_done_transaction(&trans);
75 return (error);
79 * Iterate through the specified range of object ids and remove any
80 * deleted records that fall entirely within a prune modulo.
82 * A reverse iteration is used to prevent overlapping records from being
83 * created during the iteration due to alignments. This also allows us
84 * to adjust alignments without blowing up the B-Tree.
86 static int check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
87 int *realign_cre, int *realign_del);
88 static int realign_prune(struct hammer_ioc_prune *prune, hammer_cursor_t cursor,
89 int realign_cre, int realign_del);
91 static int
92 hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
93 struct hammer_ioc_prune *prune)
95 struct hammer_cursor cursor;
96 hammer_btree_elm_t elm;
97 int error;
98 int isdir;
99 int realign_cre;
100 int realign_del;
102 if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
103 return(EINVAL);
104 if (prune->beg_obj_id >= prune->end_obj_id)
105 return(EINVAL);
106 if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
107 return(EINVAL);
109 retry:
110 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
111 if (error) {
112 hammer_done_cursor(&cursor);
113 return(error);
115 cursor.key_beg.obj_id = prune->beg_obj_id;
116 cursor.key_beg.key = HAMMER_MIN_KEY;
117 cursor.key_beg.create_tid = 1;
118 cursor.key_beg.delete_tid = 0;
119 cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
120 cursor.key_beg.obj_type = 0;
122 cursor.key_end.obj_id = prune->end_obj_id;
123 cursor.key_end.key = HAMMER_MAX_KEY;
124 cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
125 cursor.key_end.delete_tid = 0;
126 cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
127 cursor.key_end.obj_type = 0;
129 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
130 cursor.flags |= HAMMER_CURSOR_BACKEND;
132 prune->cur_obj_id = cursor.key_end.obj_id;
133 prune->cur_key = cursor.key_end.key;
135 error = hammer_btree_last(&cursor);
136 while (error == 0) {
137 elm = &cursor.node->ondisk->elms[cursor.index];
138 prune->cur_obj_id = elm->base.obj_id;
139 prune->cur_key = elm->base.key;
141 if (prune->stat_oldest_tid > elm->leaf.base.create_tid)
142 prune->stat_oldest_tid = elm->leaf.base.create_tid;
144 if (check_prune(prune, elm, &realign_cre, &realign_del) == 0) {
145 if (hammer_debug_general & 0x0200) {
146 kprintf("check %016llx %016llx: DELETE\n",
147 elm->base.obj_id, elm->base.key);
151 * NOTE: This can return EDEADLK
153 * Acquiring the sync lock guarantees that the
154 * operation will not cross a synchronization
155 * boundary (see the flusher).
157 isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);
159 hammer_lock_ex(&trans->hmp->sync_lock);
160 error = hammer_delete_at_cursor(&cursor,
161 &prune->stat_bytes);
162 hammer_unlock(&trans->hmp->sync_lock);
163 if (error)
164 break;
166 if (isdir)
167 ++prune->stat_dirrecords;
168 else
169 ++prune->stat_rawrecords;
170 } else if (realign_cre >= 0 || realign_del >= 0) {
171 hammer_lock_ex(&trans->hmp->sync_lock);
172 error = realign_prune(prune, &cursor,
173 realign_cre, realign_del);
174 hammer_unlock(&trans->hmp->sync_lock);
175 if (error == 0) {
176 cursor.flags |= HAMMER_CURSOR_ATEDISK;
177 if (hammer_debug_general & 0x0200) {
178 kprintf("check %016llx %016llx: "
179 "REALIGN\n",
180 elm->base.obj_id,
181 elm->base.key);
184 } else {
185 cursor.flags |= HAMMER_CURSOR_ATEDISK;
186 if (hammer_debug_general & 0x0100) {
187 kprintf("check %016llx %016llx: SKIP\n",
188 elm->base.obj_id, elm->base.key);
191 ++prune->stat_scanrecords;
194 * Bad hack for now, don't blow out the kernel's buffer
195 * cache. NOTE: We still hold locks on the cursor, we
196 * cannot call the flusher synchronously.
198 if (trans->hmp->locked_dirty_count > hammer_limit_dirtybufs) {
199 hammer_flusher_async(trans->hmp);
200 tsleep(trans, 0, "hmrslo", hz / 10);
202 error = hammer_signal_check(trans->hmp);
203 if (error == 0)
204 error = hammer_btree_iterate_reverse(&cursor);
206 if (error == ENOENT)
207 error = 0;
208 hammer_done_cursor(&cursor);
209 if (error == EDEADLK)
210 goto retry;
211 if (error == EINTR) {
212 prune->head.flags |= HAMMER_IOC_HEAD_INTR;
213 error = 0;
215 return(error);
219 * Check pruning list. The list must be sorted in descending order.
221 static int
222 check_prune(struct hammer_ioc_prune *prune, hammer_btree_elm_t elm,
223 int *realign_cre, int *realign_del)
225 struct hammer_ioc_prune_elm *scan;
226 int i;
228 *realign_cre = -1;
229 *realign_del = -1;
232 * If pruning everything remove all records with a non-zero
233 * delete_tid.
235 if (prune->head.flags & HAMMER_IOC_PRUNE_ALL) {
236 if (elm->base.delete_tid != 0)
237 return(0);
238 return(-1);
241 for (i = 0; i < prune->nelms; ++i) {
242 scan = &prune->elms[i];
245 * Locate the scan index covering the create and delete TIDs.
247 if (*realign_cre < 0 &&
248 elm->base.create_tid >= scan->beg_tid &&
249 elm->base.create_tid < scan->end_tid) {
250 *realign_cre = i;
252 if (*realign_del < 0 && elm->base.delete_tid &&
253 elm->base.delete_tid > scan->beg_tid &&
254 elm->base.delete_tid <= scan->end_tid) {
255 *realign_del = i;
259 * Now check for loop termination.
261 if (elm->base.create_tid >= scan->end_tid ||
262 elm->base.delete_tid > scan->end_tid) {
263 break;
267 * Now determine if we can delete the record.
269 if (elm->base.delete_tid &&
270 elm->base.create_tid >= scan->beg_tid &&
271 elm->base.delete_tid <= scan->end_tid &&
272 elm->base.create_tid / scan->mod_tid ==
273 elm->base.delete_tid / scan->mod_tid) {
274 return(0);
277 return(-1);
281 * Align the record to cover any gaps created through the deletion of
282 * records within the pruning space. If we were to just delete the records
283 * there would be gaps which in turn would cause a snapshot that is NOT on
284 * a pruning boundary to appear corrupt to the user. Forcing alignment
285 * of the create_tid and delete_tid for retained records 'reconnects'
286 * the previously contiguous space, making it contiguous again after the
287 * deletions.
289 * The use of a reverse iteration allows us to safely align the records and
290 * related elements without creating temporary overlaps. XXX we should
291 * add ordering dependancies for record buffers to guarantee consistency
292 * during recovery.
294 static int
295 realign_prune(struct hammer_ioc_prune *prune,
296 hammer_cursor_t cursor, int realign_cre, int realign_del)
298 hammer_btree_elm_t elm;
299 hammer_tid_t delta;
300 hammer_tid_t mod;
301 hammer_tid_t tid;
302 int error;
304 hammer_cursor_downgrade(cursor);
306 elm = &cursor->node->ondisk->elms[cursor->index];
307 ++prune->stat_realignments;
310 * Align the create_tid. By doing a reverse iteration we guarantee
311 * that all records after our current record have already been
312 * aligned, allowing us to safely correct the right-hand-boundary
313 * (because no record to our right if otherwise exactly matching
314 * will have a create_tid to the left of our aligned create_tid).
316 * Ordering is important here XXX but disk write ordering for
317 * inter-cluster corrections is not currently guaranteed.
319 error = 0;
320 if (realign_cre >= 0) {
321 mod = prune->elms[realign_cre].mod_tid;
322 delta = elm->leaf.base.create_tid % mod;
323 if (delta) {
324 tid = elm->leaf.base.create_tid - delta + mod;
326 /* can EDEADLK */
327 error = hammer_btree_correct_rhb(cursor, tid + 1);
328 if (error == 0) {
329 error = hammer_btree_extract(cursor,
330 HAMMER_CURSOR_GET_RECORD);
332 if (error == 0) {
333 /* can EDEADLK */
334 error = hammer_cursor_upgrade(cursor);
336 if (error == 0) {
337 hammer_modify_record_field(cursor->trans,
338 cursor->record_buffer,
339 cursor->record,
340 base.base.create_tid, 0);
341 cursor->record->base.base.create_tid = tid;
342 hammer_modify_record_done(
343 cursor->record_buffer,
344 cursor->record);
345 hammer_modify_node(cursor->trans, cursor->node,
346 &elm->leaf.base.create_tid,
347 sizeof(elm->leaf.base.create_tid));
348 elm->leaf.base.create_tid = tid;
349 hammer_modify_node_done(cursor->node);
355 * Align the delete_tid. This only occurs if the record is historical
356 * was deleted at some point. Realigning the delete_tid does not
357 * move the record within the B-Tree but may cause it to temporarily
358 * overlap a record that has not yet been pruned.
360 if (error == 0 && realign_del >= 0) {
361 mod = prune->elms[realign_del].mod_tid;
362 delta = elm->leaf.base.delete_tid % mod;
363 if (delta) {
364 error = hammer_btree_extract(cursor,
365 HAMMER_CURSOR_GET_RECORD);
366 if (error == 0) {
367 hammer_modify_node(cursor->trans, cursor->node,
368 &elm->leaf.base.delete_tid,
369 sizeof(elm->leaf.base.delete_tid));
370 elm->leaf.base.delete_tid =
371 elm->leaf.base.delete_tid -
372 delta + mod;
373 hammer_modify_node_done(cursor->node);
374 hammer_modify_record_field(cursor->trans,
375 cursor->record_buffer,
376 cursor->record,
377 base.base.delete_tid, 0);
378 cursor->record->base.base.delete_tid =
379 elm->leaf.base.delete_tid;
380 hammer_modify_record_done(cursor->record_buffer,
381 cursor->record);
385 return (error);
389 * Iterate through an object's inode or an object's records and record
390 * modification TIDs.
392 static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
393 hammer_btree_elm_t elm);
395 static
397 hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
398 struct hammer_ioc_history *hist)
400 struct hammer_cursor cursor;
401 hammer_btree_elm_t elm;
402 int error;
405 * Validate the structure and initialize for return.
407 if (hist->beg_tid > hist->end_tid)
408 return(EINVAL);
409 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
410 if (hist->key > hist->nxt_key)
411 return(EINVAL);
414 hist->obj_id = ip->obj_id;
415 hist->count = 0;
416 hist->nxt_tid = hist->end_tid;
417 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
418 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
419 hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
420 hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
421 if ((ip->flags & HAMMER_INODE_MODMASK) & ~HAMMER_INODE_ITIMES)
422 hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
425 * Setup the cursor. We can't handle undeletable records
426 * (create_tid of 0) at the moment. A create_tid of 0 has
427 * a special meaning and cannot be specified in the cursor.
429 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
430 if (error) {
431 hammer_done_cursor(&cursor);
432 return(error);
435 cursor.key_beg.obj_id = hist->obj_id;
436 cursor.key_beg.create_tid = hist->beg_tid;
437 cursor.key_beg.delete_tid = 0;
438 cursor.key_beg.obj_type = 0;
439 if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
440 cursor.key_beg.create_tid = 1;
442 cursor.key_end.obj_id = hist->obj_id;
443 cursor.key_end.create_tid = hist->end_tid;
444 cursor.key_end.delete_tid = 0;
445 cursor.key_end.obj_type = 0;
447 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;
449 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
451 * key-range within the file. For a regular file the
452 * on-disk key represents BASE+LEN, not BASE, so the
453 * first possible record containing the offset 'key'
454 * has an on-disk key of (key + 1).
456 cursor.key_beg.key = hist->key;
457 cursor.key_end.key = HAMMER_MAX_KEY;
459 switch(ip->ino_rec.base.base.obj_type) {
460 case HAMMER_OBJTYPE_REGFILE:
461 ++cursor.key_beg.key;
462 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
463 break;
464 case HAMMER_OBJTYPE_DIRECTORY:
465 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
466 break;
467 case HAMMER_OBJTYPE_DBFILE:
468 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
469 break;
470 default:
471 error = EINVAL;
472 break;
474 cursor.key_end.rec_type = cursor.key_beg.rec_type;
475 } else {
477 * The inode itself.
479 cursor.key_beg.key = 0;
480 cursor.key_end.key = 0;
481 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
482 cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
485 error = hammer_btree_first(&cursor);
486 while (error == 0) {
487 elm = &cursor.node->ondisk->elms[cursor.index];
489 add_history(ip, hist, elm);
490 if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
491 HAMMER_IOC_HISTORY_NEXT_KEY |
492 HAMMER_IOC_HISTORY_EOF)) {
493 break;
495 error = hammer_btree_iterate(&cursor);
497 if (error == ENOENT) {
498 hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
499 error = 0;
501 hammer_done_cursor(&cursor);
502 return(error);
506 * Add the scanned element to the ioctl return structure. Some special
507 * casing is required for regular files to accomodate how data ranges are
508 * stored on-disk.
510 static void
511 add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
512 hammer_btree_elm_t elm)
514 if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
515 return;
516 if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
517 ip->ino_rec.base.base.obj_type == HAMMER_OBJTYPE_REGFILE) {
519 * Adjust nxt_key
521 if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
522 hist->key < elm->leaf.base.key - elm->leaf.data_len) {
523 hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
525 if (hist->nxt_key > elm->leaf.base.key)
526 hist->nxt_key = elm->leaf.base.key;
529 * Record is beyond MAXPHYS, there won't be any more records
530 * in the iteration covering the requested offset (key).
532 if (elm->leaf.base.key >= MAXPHYS &&
533 elm->leaf.base.key - MAXPHYS > hist->key) {
534 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
538 * Data-range of record does not cover the key.
540 if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
541 return;
543 } else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
545 * Adjust nxt_key
547 if (hist->nxt_key > elm->leaf.base.key &&
548 hist->key < elm->leaf.base.key) {
549 hist->nxt_key = elm->leaf.base.key;
553 * Record is beyond the requested key.
555 if (elm->leaf.base.key > hist->key)
556 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
560 * Add create_tid if it is in-bounds.
562 if ((hist->count == 0 ||
563 elm->leaf.base.create_tid != hist->tid_ary[hist->count - 1]) &&
564 elm->leaf.base.create_tid >= hist->beg_tid &&
565 elm->leaf.base.create_tid < hist->end_tid) {
566 if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
567 hist->nxt_tid = elm->leaf.base.create_tid;
568 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
569 return;
571 hist->tid_ary[hist->count++] = elm->leaf.base.create_tid;
575 * Add delete_tid if it is in-bounds. Note that different portions
576 * of the history may have overlapping data ranges with different
577 * delete_tid's. If this case occurs the delete_tid may match the
578 * create_tid of a following record. XXX
580 * [ ]
581 * [ ]
583 if (elm->leaf.base.delete_tid &&
584 elm->leaf.base.delete_tid >= hist->beg_tid &&
585 elm->leaf.base.delete_tid < hist->end_tid) {
586 if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
587 hist->nxt_tid = elm->leaf.base.delete_tid;
588 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
589 return;
591 hist->tid_ary[hist->count++] = elm->leaf.base.delete_tid;