/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.14 2008/05/10 22:56:36 dillon Exp $
 */
39 static int hammer_ioc_prune(hammer_transaction_t trans
, hammer_inode_t ip
,
40 struct hammer_ioc_prune
*prune
);
41 static int hammer_ioc_gethistory(hammer_transaction_t trans
, hammer_inode_t ip
,
42 struct hammer_ioc_history
*hist
);
45 hammer_ioctl(hammer_inode_t ip
, u_long com
, caddr_t data
, int fflag
,
48 struct hammer_transaction trans
;
51 error
= suser_cred(cred
, PRISON_ROOT
);
53 hammer_start_transaction(&trans
, ip
->hmp
);
58 error
= hammer_ioc_prune(&trans
, ip
,
59 (struct hammer_ioc_prune
*)data
);
62 case HAMMERIOC_GETHISTORY
:
63 error
= hammer_ioc_gethistory(&trans
, ip
,
64 (struct hammer_ioc_history
*)data
);
66 case HAMMERIOC_REBLOCK
:
67 error
= hammer_ioc_reblock(&trans
, ip
,
68 (struct hammer_ioc_reblock
*)data
);
74 hammer_done_transaction(&trans
);
/*
 * Iterate through the specified range of object ids and remove any
 * deleted records that fall entirely within a prune modulo.
 *
 * A reverse iteration is used to prevent overlapping records from being
 * created during the iteration due to alignments.  This also allows us
 * to adjust alignments without blowing up the B-Tree.
 */
86 static int check_prune(struct hammer_ioc_prune
*prune
, hammer_btree_elm_t elm
,
87 int *realign_cre
, int *realign_del
);
88 static int realign_prune(struct hammer_ioc_prune
*prune
, hammer_cursor_t cursor
,
89 int realign_cre
, int realign_del
);
92 hammer_ioc_prune(hammer_transaction_t trans
, hammer_inode_t ip
,
93 struct hammer_ioc_prune
*prune
)
95 struct hammer_cursor cursor
;
96 hammer_btree_elm_t elm
;
102 if (prune
->nelms
< 0 || prune
->nelms
> HAMMER_MAX_PRUNE_ELMS
)
104 if (prune
->beg_obj_id
>= prune
->end_obj_id
)
106 if ((prune
->head
.flags
& HAMMER_IOC_PRUNE_ALL
) && prune
->nelms
)
110 error
= hammer_init_cursor(trans
, &cursor
, NULL
, NULL
);
112 hammer_done_cursor(&cursor
);
115 cursor
.key_beg
.obj_id
= prune
->beg_obj_id
;
116 cursor
.key_beg
.key
= HAMMER_MIN_KEY
;
117 cursor
.key_beg
.create_tid
= 1;
118 cursor
.key_beg
.delete_tid
= 0;
119 cursor
.key_beg
.rec_type
= HAMMER_MIN_RECTYPE
;
120 cursor
.key_beg
.obj_type
= 0;
122 cursor
.key_end
.obj_id
= prune
->end_obj_id
;
123 cursor
.key_end
.key
= HAMMER_MAX_KEY
;
124 cursor
.key_end
.create_tid
= HAMMER_MAX_TID
- 1;
125 cursor
.key_end
.delete_tid
= 0;
126 cursor
.key_end
.rec_type
= HAMMER_MAX_RECTYPE
;
127 cursor
.key_end
.obj_type
= 0;
129 cursor
.flags
|= HAMMER_CURSOR_END_INCLUSIVE
;
130 cursor
.flags
|= HAMMER_CURSOR_BACKEND
;
132 prune
->cur_obj_id
= cursor
.key_end
.obj_id
;
133 prune
->cur_key
= cursor
.key_end
.key
;
135 error
= hammer_btree_last(&cursor
);
137 elm
= &cursor
.node
->ondisk
->elms
[cursor
.index
];
138 prune
->cur_obj_id
= elm
->base
.obj_id
;
139 prune
->cur_key
= elm
->base
.key
;
141 if (prune
->stat_oldest_tid
> elm
->leaf
.base
.create_tid
)
142 prune
->stat_oldest_tid
= elm
->leaf
.base
.create_tid
;
144 if (check_prune(prune
, elm
, &realign_cre
, &realign_del
) == 0) {
145 if (hammer_debug_general
& 0x0200) {
146 kprintf("check %016llx %016llx: DELETE\n",
147 elm
->base
.obj_id
, elm
->base
.key
);
151 * NOTE: This can return EDEADLK
153 * Acquiring the sync lock guarantees that the
154 * operation will not cross a synchronization
155 * boundary (see the flusher).
157 isdir
= (elm
->base
.rec_type
== HAMMER_RECTYPE_DIRENTRY
);
159 hammer_lock_ex(&trans
->hmp
->sync_lock
);
160 error
= hammer_delete_at_cursor(&cursor
,
162 hammer_unlock(&trans
->hmp
->sync_lock
);
167 ++prune
->stat_dirrecords
;
169 ++prune
->stat_rawrecords
;
170 } else if (realign_cre
>= 0 || realign_del
>= 0) {
171 hammer_lock_ex(&trans
->hmp
->sync_lock
);
172 error
= realign_prune(prune
, &cursor
,
173 realign_cre
, realign_del
);
174 hammer_unlock(&trans
->hmp
->sync_lock
);
176 cursor
.flags
|= HAMMER_CURSOR_ATEDISK
;
177 if (hammer_debug_general
& 0x0200) {
178 kprintf("check %016llx %016llx: "
185 cursor
.flags
|= HAMMER_CURSOR_ATEDISK
;
186 if (hammer_debug_general
& 0x0100) {
187 kprintf("check %016llx %016llx: SKIP\n",
188 elm
->base
.obj_id
, elm
->base
.key
);
191 ++prune
->stat_scanrecords
;
194 * Bad hack for now, don't blow out the kernel's buffer
195 * cache. NOTE: We still hold locks on the cursor, we
196 * cannot call the flusher synchronously.
198 if (trans
->hmp
->locked_dirty_count
> hammer_limit_dirtybufs
) {
199 hammer_flusher_async(trans
->hmp
);
200 tsleep(trans
, 0, "hmrslo", hz
/ 10);
202 error
= hammer_signal_check(trans
->hmp
);
204 error
= hammer_btree_iterate_reverse(&cursor
);
208 hammer_done_cursor(&cursor
);
209 if (error
== EDEADLK
)
211 if (error
== EINTR
) {
212 prune
->head
.flags
|= HAMMER_IOC_HEAD_INTR
;
/*
 * Check pruning list.  The list must be sorted in descending order.
 */
222 check_prune(struct hammer_ioc_prune
*prune
, hammer_btree_elm_t elm
,
223 int *realign_cre
, int *realign_del
)
225 struct hammer_ioc_prune_elm
*scan
;
232 * If pruning everything remove all records with a non-zero
235 if (prune
->head
.flags
& HAMMER_IOC_PRUNE_ALL
) {
236 if (elm
->base
.delete_tid
!= 0)
241 for (i
= 0; i
< prune
->nelms
; ++i
) {
242 scan
= &prune
->elms
[i
];
245 * Locate the scan index covering the create and delete TIDs.
247 if (*realign_cre
< 0 &&
248 elm
->base
.create_tid
>= scan
->beg_tid
&&
249 elm
->base
.create_tid
< scan
->end_tid
) {
252 if (*realign_del
< 0 && elm
->base
.delete_tid
&&
253 elm
->base
.delete_tid
> scan
->beg_tid
&&
254 elm
->base
.delete_tid
<= scan
->end_tid
) {
259 * Now check for loop termination.
261 if (elm
->base
.create_tid
>= scan
->end_tid
||
262 elm
->base
.delete_tid
> scan
->end_tid
) {
267 * Now determine if we can delete the record.
269 if (elm
->base
.delete_tid
&&
270 elm
->base
.create_tid
>= scan
->beg_tid
&&
271 elm
->base
.delete_tid
<= scan
->end_tid
&&
272 elm
->base
.create_tid
/ scan
->mod_tid
==
273 elm
->base
.delete_tid
/ scan
->mod_tid
) {
/*
 * Align the record to cover any gaps created through the deletion of
 * records within the pruning space.  If we were to just delete the records
 * there would be gaps which in turn would cause a snapshot that is NOT on
 * a pruning boundary to appear corrupt to the user.  Forcing alignment
 * of the create_tid and delete_tid for retained records 'reconnects'
 * the previously contiguous space, making it contiguous again after the
 * deletions.
 *
 * The use of a reverse iteration allows us to safely align the records and
 * related elements without creating temporary overlaps.  XXX we should
 * add ordering dependancies for record buffers to guarantee consistency
 * during recovery.
 */
295 realign_prune(struct hammer_ioc_prune
*prune
,
296 hammer_cursor_t cursor
, int realign_cre
, int realign_del
)
298 hammer_btree_elm_t elm
;
304 hammer_cursor_downgrade(cursor
);
306 elm
= &cursor
->node
->ondisk
->elms
[cursor
->index
];
307 ++prune
->stat_realignments
;
310 * Align the create_tid. By doing a reverse iteration we guarantee
311 * that all records after our current record have already been
312 * aligned, allowing us to safely correct the right-hand-boundary
313 * (because no record to our right if otherwise exactly matching
314 * will have a create_tid to the left of our aligned create_tid).
316 * Ordering is important here XXX but disk write ordering for
317 * inter-cluster corrections is not currently guaranteed.
320 if (realign_cre
>= 0) {
321 mod
= prune
->elms
[realign_cre
].mod_tid
;
322 delta
= elm
->leaf
.base
.create_tid
% mod
;
324 tid
= elm
->leaf
.base
.create_tid
- delta
+ mod
;
327 error
= hammer_btree_correct_rhb(cursor
, tid
+ 1);
329 error
= hammer_btree_extract(cursor
,
330 HAMMER_CURSOR_GET_RECORD
);
334 error
= hammer_cursor_upgrade(cursor
);
337 hammer_modify_record_field(cursor
->trans
,
338 cursor
->record_buffer
,
340 base
.base
.create_tid
, 0);
341 cursor
->record
->base
.base
.create_tid
= tid
;
342 hammer_modify_record_done(
343 cursor
->record_buffer
,
345 hammer_modify_node(cursor
->trans
, cursor
->node
,
346 &elm
->leaf
.base
.create_tid
,
347 sizeof(elm
->leaf
.base
.create_tid
));
348 elm
->leaf
.base
.create_tid
= tid
;
349 hammer_modify_node_done(cursor
->node
);
355 * Align the delete_tid. This only occurs if the record is historical
356 * was deleted at some point. Realigning the delete_tid does not
357 * move the record within the B-Tree but may cause it to temporarily
358 * overlap a record that has not yet been pruned.
360 if (error
== 0 && realign_del
>= 0) {
361 mod
= prune
->elms
[realign_del
].mod_tid
;
362 delta
= elm
->leaf
.base
.delete_tid
% mod
;
364 error
= hammer_btree_extract(cursor
,
365 HAMMER_CURSOR_GET_RECORD
);
367 hammer_modify_node(cursor
->trans
, cursor
->node
,
368 &elm
->leaf
.base
.delete_tid
,
369 sizeof(elm
->leaf
.base
.delete_tid
));
370 elm
->leaf
.base
.delete_tid
=
371 elm
->leaf
.base
.delete_tid
-
373 hammer_modify_node_done(cursor
->node
);
374 hammer_modify_record_field(cursor
->trans
,
375 cursor
->record_buffer
,
377 base
.base
.delete_tid
, 0);
378 cursor
->record
->base
.base
.delete_tid
=
379 elm
->leaf
.base
.delete_tid
;
380 hammer_modify_record_done(cursor
->record_buffer
,
/*
 * Iterate through an object's inode or an object's records and record
 * the requested history information.
 */
392 static void add_history(hammer_inode_t ip
, struct hammer_ioc_history
*hist
,
393 hammer_btree_elm_t elm
);
397 hammer_ioc_gethistory(hammer_transaction_t trans
, hammer_inode_t ip
,
398 struct hammer_ioc_history
*hist
)
400 struct hammer_cursor cursor
;
401 hammer_btree_elm_t elm
;
405 * Validate the structure and initialize for return.
407 if (hist
->beg_tid
> hist
->end_tid
)
409 if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
410 if (hist
->key
> hist
->nxt_key
)
414 hist
->obj_id
= ip
->obj_id
;
416 hist
->nxt_tid
= hist
->end_tid
;
417 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_NEXT_TID
;
418 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_NEXT_KEY
;
419 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_EOF
;
420 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_UNSYNCED
;
421 if ((ip
->flags
& HAMMER_INODE_MODMASK
) & ~HAMMER_INODE_ITIMES
)
422 hist
->head
.flags
|= HAMMER_IOC_HISTORY_UNSYNCED
;
425 * Setup the cursor. We can't handle undeletable records
426 * (create_tid of 0) at the moment. A create_tid of 0 has
427 * a special meaning and cannot be specified in the cursor.
429 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
431 hammer_done_cursor(&cursor
);
435 cursor
.key_beg
.obj_id
= hist
->obj_id
;
436 cursor
.key_beg
.create_tid
= hist
->beg_tid
;
437 cursor
.key_beg
.delete_tid
= 0;
438 cursor
.key_beg
.obj_type
= 0;
439 if (cursor
.key_beg
.create_tid
== HAMMER_MIN_TID
)
440 cursor
.key_beg
.create_tid
= 1;
442 cursor
.key_end
.obj_id
= hist
->obj_id
;
443 cursor
.key_end
.create_tid
= hist
->end_tid
;
444 cursor
.key_end
.delete_tid
= 0;
445 cursor
.key_end
.obj_type
= 0;
447 cursor
.flags
|= HAMMER_CURSOR_END_EXCLUSIVE
;
449 if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
451 * key-range within the file. For a regular file the
452 * on-disk key represents BASE+LEN, not BASE, so the
453 * first possible record containing the offset 'key'
454 * has an on-disk key of (key + 1).
456 cursor
.key_beg
.key
= hist
->key
;
457 cursor
.key_end
.key
= HAMMER_MAX_KEY
;
459 switch(ip
->ino_rec
.base
.base
.obj_type
) {
460 case HAMMER_OBJTYPE_REGFILE
:
461 ++cursor
.key_beg
.key
;
462 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DATA
;
464 case HAMMER_OBJTYPE_DIRECTORY
:
465 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DIRENTRY
;
467 case HAMMER_OBJTYPE_DBFILE
:
468 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DB
;
474 cursor
.key_end
.rec_type
= cursor
.key_beg
.rec_type
;
479 cursor
.key_beg
.key
= 0;
480 cursor
.key_end
.key
= 0;
481 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_INODE
;
482 cursor
.key_end
.rec_type
= HAMMER_RECTYPE_INODE
;
485 error
= hammer_btree_first(&cursor
);
487 elm
= &cursor
.node
->ondisk
->elms
[cursor
.index
];
489 add_history(ip
, hist
, elm
);
490 if (hist
->head
.flags
& (HAMMER_IOC_HISTORY_NEXT_TID
|
491 HAMMER_IOC_HISTORY_NEXT_KEY
|
492 HAMMER_IOC_HISTORY_EOF
)) {
495 error
= hammer_btree_iterate(&cursor
);
497 if (error
== ENOENT
) {
498 hist
->head
.flags
|= HAMMER_IOC_HISTORY_EOF
;
501 hammer_done_cursor(&cursor
);
/*
 * Add the scanned element to the ioctl return structure.  Some special
 * casing is required for regular files to accomodate how data ranges are
 * stored on-disk.
 */
511 add_history(hammer_inode_t ip
, struct hammer_ioc_history
*hist
,
512 hammer_btree_elm_t elm
)
514 if (elm
->base
.btype
!= HAMMER_BTREE_TYPE_RECORD
)
516 if ((hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) &&
517 ip
->ino_rec
.base
.base
.obj_type
== HAMMER_OBJTYPE_REGFILE
) {
521 if (hist
->nxt_key
> elm
->leaf
.base
.key
- elm
->leaf
.data_len
&&
522 hist
->key
< elm
->leaf
.base
.key
- elm
->leaf
.data_len
) {
523 hist
->nxt_key
= elm
->leaf
.base
.key
- elm
->leaf
.data_len
;
525 if (hist
->nxt_key
> elm
->leaf
.base
.key
)
526 hist
->nxt_key
= elm
->leaf
.base
.key
;
529 * Record is beyond MAXPHYS, there won't be any more records
530 * in the iteration covering the requested offset (key).
532 if (elm
->leaf
.base
.key
>= MAXPHYS
&&
533 elm
->leaf
.base
.key
- MAXPHYS
> hist
->key
) {
534 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_KEY
;
538 * Data-range of record does not cover the key.
540 if (elm
->leaf
.base
.key
- elm
->leaf
.data_len
> hist
->key
)
543 } else if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
547 if (hist
->nxt_key
> elm
->leaf
.base
.key
&&
548 hist
->key
< elm
->leaf
.base
.key
) {
549 hist
->nxt_key
= elm
->leaf
.base
.key
;
553 * Record is beyond the requested key.
555 if (elm
->leaf
.base
.key
> hist
->key
)
556 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_KEY
;
560 * Add create_tid if it is in-bounds.
562 if ((hist
->count
== 0 ||
563 elm
->leaf
.base
.create_tid
!= hist
->tid_ary
[hist
->count
- 1]) &&
564 elm
->leaf
.base
.create_tid
>= hist
->beg_tid
&&
565 elm
->leaf
.base
.create_tid
< hist
->end_tid
) {
566 if (hist
->count
== HAMMER_MAX_HISTORY_ELMS
) {
567 hist
->nxt_tid
= elm
->leaf
.base
.create_tid
;
568 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_TID
;
571 hist
->tid_ary
[hist
->count
++] = elm
->leaf
.base
.create_tid
;
575 * Add delete_tid if it is in-bounds. Note that different portions
576 * of the history may have overlapping data ranges with different
577 * delete_tid's. If this case occurs the delete_tid may match the
578 * create_tid of a following record. XXX
583 if (elm
->leaf
.base
.delete_tid
&&
584 elm
->leaf
.base
.delete_tid
>= hist
->beg_tid
&&
585 elm
->leaf
.base
.delete_tid
< hist
->end_tid
) {
586 if (hist
->count
== HAMMER_MAX_HISTORY_ELMS
) {
587 hist
->nxt_tid
= elm
->leaf
.base
.delete_tid
;
588 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_TID
;
591 hist
->tid_ary
[hist
->count
++] = elm
->leaf
.base
.delete_tid
;