/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
 */
39 static int hammer_ioc_gethistory(hammer_transaction_t trans
, hammer_inode_t ip
,
40 struct hammer_ioc_history
*hist
);
41 static int hammer_ioc_synctid(hammer_transaction_t trans
, hammer_inode_t ip
,
42 struct hammer_ioc_synctid
*std
);
43 static int hammer_ioc_get_version(hammer_transaction_t trans
,
45 struct hammer_ioc_version
*ver
);
46 static int hammer_ioc_set_version(hammer_transaction_t trans
,
48 struct hammer_ioc_version
*ver
);
49 static int hammer_ioc_get_info(hammer_transaction_t trans
,
50 struct hammer_ioc_info
*info
);
51 static int hammer_ioc_add_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
52 struct hammer_ioc_snapshot
*snap
);
53 static int hammer_ioc_del_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
54 struct hammer_ioc_snapshot
*snap
);
55 static int hammer_ioc_get_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
56 struct hammer_ioc_snapshot
*snap
);
57 static int hammer_ioc_get_config(hammer_transaction_t trans
, hammer_inode_t ip
,
58 struct hammer_ioc_config
*snap
);
59 static int hammer_ioc_set_config(hammer_transaction_t trans
, hammer_inode_t ip
,
60 struct hammer_ioc_config
*snap
);
63 hammer_ioctl(hammer_inode_t ip
, u_long com
, caddr_t data
, int fflag
,
66 struct hammer_transaction trans
;
69 error
= priv_check_cred(cred
, PRIV_HAMMER_IOCTL
, 0);
71 hammer_start_transaction(&trans
, ip
->hmp
);
76 error
= hammer_ioc_prune(&trans
, ip
,
77 (struct hammer_ioc_prune
*)data
);
80 case HAMMERIOC_GETHISTORY
:
81 error
= hammer_ioc_gethistory(&trans
, ip
,
82 (struct hammer_ioc_history
*)data
);
84 case HAMMERIOC_REBLOCK
:
86 error
= hammer_ioc_reblock(&trans
, ip
,
87 (struct hammer_ioc_reblock
*)data
);
90 case HAMMERIOC_REBALANCE
:
92 * Rebalancing needs to lock a lot of B-Tree nodes. The
93 * children and children's children. Systems with very
94 * little memory will not be able to do it.
96 if (error
== 0 && nbuf
< HAMMER_REBALANCE_MIN_BUFS
) {
97 kprintf("hammer: System has insufficient buffers "
98 "to rebalance the tree. nbuf < %d\n",
99 HAMMER_REBALANCE_MIN_BUFS
);
103 error
= hammer_ioc_rebalance(&trans
, ip
,
104 (struct hammer_ioc_rebalance
*)data
);
107 case HAMMERIOC_SYNCTID
:
108 error
= hammer_ioc_synctid(&trans
, ip
,
109 (struct hammer_ioc_synctid
*)data
);
111 case HAMMERIOC_GET_PSEUDOFS
:
112 error
= hammer_ioc_get_pseudofs(&trans
, ip
,
113 (struct hammer_ioc_pseudofs_rw
*)data
);
115 case HAMMERIOC_SET_PSEUDOFS
:
117 error
= hammer_ioc_set_pseudofs(&trans
, ip
, cred
,
118 (struct hammer_ioc_pseudofs_rw
*)data
);
121 case HAMMERIOC_UPG_PSEUDOFS
:
123 error
= hammer_ioc_upgrade_pseudofs(&trans
, ip
,
124 (struct hammer_ioc_pseudofs_rw
*)data
);
127 case HAMMERIOC_DGD_PSEUDOFS
:
129 error
= hammer_ioc_downgrade_pseudofs(&trans
, ip
,
130 (struct hammer_ioc_pseudofs_rw
*)data
);
133 case HAMMERIOC_RMR_PSEUDOFS
:
135 error
= hammer_ioc_destroy_pseudofs(&trans
, ip
,
136 (struct hammer_ioc_pseudofs_rw
*)data
);
139 case HAMMERIOC_WAI_PSEUDOFS
:
141 error
= hammer_ioc_wait_pseudofs(&trans
, ip
,
142 (struct hammer_ioc_pseudofs_rw
*)data
);
145 case HAMMERIOC_MIRROR_READ
:
147 error
= hammer_ioc_mirror_read(&trans
, ip
,
148 (struct hammer_ioc_mirror_rw
*)data
);
151 case HAMMERIOC_MIRROR_WRITE
:
153 error
= hammer_ioc_mirror_write(&trans
, ip
,
154 (struct hammer_ioc_mirror_rw
*)data
);
157 case HAMMERIOC_GET_VERSION
:
158 error
= hammer_ioc_get_version(&trans
, ip
,
159 (struct hammer_ioc_version
*)data
);
161 case HAMMERIOC_GET_INFO
:
162 error
= hammer_ioc_get_info(&trans
,
163 (struct hammer_ioc_info
*)data
);
165 case HAMMERIOC_SET_VERSION
:
167 error
= hammer_ioc_set_version(&trans
, ip
,
168 (struct hammer_ioc_version
*)data
);
171 case HAMMERIOC_ADD_VOLUME
:
173 error
= priv_check_cred(cred
, PRIV_HAMMER_VOLUME
, 0);
175 error
= hammer_ioc_volume_add(&trans
, ip
,
176 (struct hammer_ioc_volume
*)data
);
179 case HAMMERIOC_DEL_VOLUME
:
181 error
= priv_check_cred(cred
, PRIV_HAMMER_VOLUME
, 0);
183 error
= hammer_ioc_volume_del(&trans
, ip
,
184 (struct hammer_ioc_volume
*)data
);
187 case HAMMERIOC_ADD_SNAPSHOT
:
189 error
= hammer_ioc_add_snapshot(
190 &trans
, ip
, (struct hammer_ioc_snapshot
*)data
);
193 case HAMMERIOC_DEL_SNAPSHOT
:
195 error
= hammer_ioc_del_snapshot(
196 &trans
, ip
, (struct hammer_ioc_snapshot
*)data
);
199 case HAMMERIOC_GET_SNAPSHOT
:
200 error
= hammer_ioc_get_snapshot(
201 &trans
, ip
, (struct hammer_ioc_snapshot
*)data
);
203 case HAMMERIOC_GET_CONFIG
:
204 error
= hammer_ioc_get_config(
205 &trans
, ip
, (struct hammer_ioc_config
*)data
);
207 case HAMMERIOC_SET_CONFIG
:
209 error
= hammer_ioc_set_config(
210 &trans
, ip
, (struct hammer_ioc_config
*)data
);
217 hammer_done_transaction(&trans
);
222 * Iterate through an object's inode or an object's records and record
225 static void add_history(hammer_inode_t ip
, struct hammer_ioc_history
*hist
,
226 hammer_btree_elm_t elm
);
230 hammer_ioc_gethistory(hammer_transaction_t trans
, hammer_inode_t ip
,
231 struct hammer_ioc_history
*hist
)
233 struct hammer_cursor cursor
;
234 hammer_btree_elm_t elm
;
238 * Validate the structure and initialize for return.
240 if (hist
->beg_tid
> hist
->end_tid
)
242 if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
243 if (hist
->key
> hist
->nxt_key
)
247 hist
->obj_id
= ip
->obj_id
;
249 hist
->nxt_tid
= hist
->end_tid
;
250 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_NEXT_TID
;
251 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_NEXT_KEY
;
252 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_EOF
;
253 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_UNSYNCED
;
254 if ((ip
->flags
& HAMMER_INODE_MODMASK
) &
255 ~(HAMMER_INODE_ATIME
| HAMMER_INODE_MTIME
)) {
256 hist
->head
.flags
|= HAMMER_IOC_HISTORY_UNSYNCED
;
260 * Setup the cursor. We can't handle undeletable records
261 * (create_tid of 0) at the moment. A create_tid of 0 has
262 * a special meaning and cannot be specified in the cursor.
264 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
266 hammer_done_cursor(&cursor
);
270 cursor
.key_beg
.obj_id
= hist
->obj_id
;
271 cursor
.key_beg
.create_tid
= hist
->beg_tid
;
272 cursor
.key_beg
.delete_tid
= 0;
273 cursor
.key_beg
.obj_type
= 0;
274 if (cursor
.key_beg
.create_tid
== HAMMER_MIN_TID
)
275 cursor
.key_beg
.create_tid
= 1;
277 cursor
.key_end
.obj_id
= hist
->obj_id
;
278 cursor
.key_end
.create_tid
= hist
->end_tid
;
279 cursor
.key_end
.delete_tid
= 0;
280 cursor
.key_end
.obj_type
= 0;
282 cursor
.flags
|= HAMMER_CURSOR_END_EXCLUSIVE
;
284 if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
286 * key-range within the file. For a regular file the
287 * on-disk key represents BASE+LEN, not BASE, so the
288 * first possible record containing the offset 'key'
289 * has an on-disk key of (key + 1).
291 cursor
.key_beg
.key
= hist
->key
;
292 cursor
.key_end
.key
= HAMMER_MAX_KEY
;
293 cursor
.key_beg
.localization
= ip
->obj_localization
+
294 HAMMER_LOCALIZE_MISC
;
295 cursor
.key_end
.localization
= ip
->obj_localization
+
296 HAMMER_LOCALIZE_MISC
;
298 switch(ip
->ino_data
.obj_type
) {
299 case HAMMER_OBJTYPE_REGFILE
:
300 ++cursor
.key_beg
.key
;
301 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DATA
;
303 case HAMMER_OBJTYPE_DIRECTORY
:
304 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DIRENTRY
;
305 cursor
.key_beg
.localization
= ip
->obj_localization
+
306 hammer_dir_localization(ip
);
307 cursor
.key_end
.localization
= ip
->obj_localization
+
308 hammer_dir_localization(ip
);
310 case HAMMER_OBJTYPE_DBFILE
:
311 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DB
;
317 cursor
.key_end
.rec_type
= cursor
.key_beg
.rec_type
;
322 cursor
.key_beg
.key
= 0;
323 cursor
.key_end
.key
= 0;
324 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_INODE
;
325 cursor
.key_end
.rec_type
= HAMMER_RECTYPE_INODE
;
326 cursor
.key_beg
.localization
= ip
->obj_localization
+
327 HAMMER_LOCALIZE_INODE
;
328 cursor
.key_end
.localization
= ip
->obj_localization
+
329 HAMMER_LOCALIZE_INODE
;
332 error
= hammer_btree_first(&cursor
);
334 elm
= &cursor
.node
->ondisk
->elms
[cursor
.index
];
336 add_history(ip
, hist
, elm
);
337 if (hist
->head
.flags
& (HAMMER_IOC_HISTORY_NEXT_TID
|
338 HAMMER_IOC_HISTORY_NEXT_KEY
|
339 HAMMER_IOC_HISTORY_EOF
)) {
342 error
= hammer_btree_iterate(&cursor
);
344 if (error
== ENOENT
) {
345 hist
->head
.flags
|= HAMMER_IOC_HISTORY_EOF
;
348 hammer_done_cursor(&cursor
);
353 * Add the scanned element to the ioctl return structure. Some special
354 * casing is required for regular files to accomodate how data ranges are
358 add_history(hammer_inode_t ip
, struct hammer_ioc_history
*hist
,
359 hammer_btree_elm_t elm
)
363 if (elm
->base
.btype
!= HAMMER_BTREE_TYPE_RECORD
)
365 if ((hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) &&
366 ip
->ino_data
.obj_type
== HAMMER_OBJTYPE_REGFILE
) {
370 if (hist
->nxt_key
> elm
->leaf
.base
.key
- elm
->leaf
.data_len
&&
371 hist
->key
< elm
->leaf
.base
.key
- elm
->leaf
.data_len
) {
372 hist
->nxt_key
= elm
->leaf
.base
.key
- elm
->leaf
.data_len
;
374 if (hist
->nxt_key
> elm
->leaf
.base
.key
)
375 hist
->nxt_key
= elm
->leaf
.base
.key
;
378 * Record is beyond MAXPHYS, there won't be any more records
379 * in the iteration covering the requested offset (key).
381 if (elm
->leaf
.base
.key
>= MAXPHYS
&&
382 elm
->leaf
.base
.key
- MAXPHYS
> hist
->key
) {
383 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_KEY
;
387 * Data-range of record does not cover the key.
389 if (elm
->leaf
.base
.key
- elm
->leaf
.data_len
> hist
->key
)
392 } else if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
396 if (hist
->nxt_key
> elm
->leaf
.base
.key
&&
397 hist
->key
< elm
->leaf
.base
.key
) {
398 hist
->nxt_key
= elm
->leaf
.base
.key
;
402 * Record is beyond the requested key.
404 if (elm
->leaf
.base
.key
> hist
->key
)
405 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_KEY
;
409 * Add create_tid if it is in-bounds.
413 elm
->leaf
.base
.create_tid
!= hist
->hist_ary
[i
- 1].tid
) &&
414 elm
->leaf
.base
.create_tid
>= hist
->beg_tid
&&
415 elm
->leaf
.base
.create_tid
< hist
->end_tid
) {
416 if (hist
->count
== HAMMER_MAX_HISTORY_ELMS
) {
417 hist
->nxt_tid
= elm
->leaf
.base
.create_tid
;
418 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_TID
;
421 hist
->hist_ary
[i
].tid
= elm
->leaf
.base
.create_tid
;
422 hist
->hist_ary
[i
].time32
= elm
->leaf
.create_ts
;
427 * Add delete_tid if it is in-bounds. Note that different portions
428 * of the history may have overlapping data ranges with different
429 * delete_tid's. If this case occurs the delete_tid may match the
430 * create_tid of a following record. XXX
436 if (elm
->leaf
.base
.delete_tid
&&
437 elm
->leaf
.base
.delete_tid
>= hist
->beg_tid
&&
438 elm
->leaf
.base
.delete_tid
< hist
->end_tid
) {
439 if (i
== HAMMER_MAX_HISTORY_ELMS
) {
440 hist
->nxt_tid
= elm
->leaf
.base
.delete_tid
;
441 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_TID
;
444 hist
->hist_ary
[i
].tid
= elm
->leaf
.base
.delete_tid
;
445 hist
->hist_ary
[i
].time32
= elm
->leaf
.delete_ts
;
451 * Acquire synchronization TID
455 hammer_ioc_synctid(hammer_transaction_t trans
, hammer_inode_t ip
,
456 struct hammer_ioc_synctid
*std
)
458 hammer_mount_t hmp
= ip
->hmp
;
462 case HAMMER_SYNCTID_NONE
:
463 std
->tid
= hmp
->flusher
.tid
; /* inaccurate */
465 case HAMMER_SYNCTID_ASYNC
:
466 hammer_queue_inodes_flusher(hmp
, MNT_NOWAIT
);
467 hammer_flusher_async(hmp
, NULL
);
468 std
->tid
= hmp
->flusher
.tid
; /* inaccurate */
470 case HAMMER_SYNCTID_SYNC1
:
471 hammer_queue_inodes_flusher(hmp
, MNT_WAIT
);
472 hammer_flusher_sync(hmp
);
473 std
->tid
= hmp
->flusher
.tid
;
475 case HAMMER_SYNCTID_SYNC2
:
476 hammer_queue_inodes_flusher(hmp
, MNT_WAIT
);
477 hammer_flusher_sync(hmp
);
478 std
->tid
= hmp
->flusher
.tid
;
479 hammer_flusher_sync(hmp
);
489 * Retrieve version info.
491 * Load min_version, wip_version, and max_versino. If cur_version is passed
492 * as 0 then load the current version into cur_version. Load the description
493 * for cur_version into the description array.
495 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
500 hammer_ioc_get_version(hammer_transaction_t trans
, hammer_inode_t ip
,
501 struct hammer_ioc_version
*ver
)
505 ver
->min_version
= HAMMER_VOL_VERSION_MIN
;
506 ver
->wip_version
= HAMMER_VOL_VERSION_WIP
;
507 ver
->max_version
= HAMMER_VOL_VERSION_MAX
;
508 if (ver
->cur_version
== 0)
509 ver
->cur_version
= trans
->hmp
->version
;
510 switch(ver
->cur_version
) {
512 ksnprintf(ver
->description
, sizeof(ver
->description
),
513 "First HAMMER release (DragonFly 2.0+)");
516 ksnprintf(ver
->description
, sizeof(ver
->description
),
517 "New directory entry layout (DragonFly 2.3+)");
520 ksnprintf(ver
->description
, sizeof(ver
->description
),
521 "New snapshot management (DragonFly 2.5+)");
524 ksnprintf(ver
->description
, sizeof(ver
->description
),
525 "New undo/flush, faster flush/sync (DragonFly 2.5+)");
528 ksnprintf(ver
->description
, sizeof(ver
->description
),
541 hammer_ioc_set_version(hammer_transaction_t trans
, hammer_inode_t ip
,
542 struct hammer_ioc_version
*ver
)
544 hammer_mount_t hmp
= trans
->hmp
;
545 struct hammer_cursor cursor
;
546 hammer_volume_t volume
;
548 int over
= hmp
->version
;
551 * Generally do not allow downgrades. However, version 4 can
552 * be downgraded to version 3.
554 if (ver
->cur_version
< hmp
->version
) {
555 if (!(ver
->cur_version
== 3 && hmp
->version
== 4))
558 if (ver
->cur_version
== hmp
->version
)
560 if (ver
->cur_version
> HAMMER_VOL_VERSION_MAX
)
566 * Update the root volume header and the version cached in
567 * the hammer_mount structure.
569 error
= hammer_init_cursor(trans
, &cursor
, NULL
, NULL
);
572 hammer_lock_ex(&hmp
->flusher
.finalize_lock
);
573 hammer_sync_lock_ex(trans
);
574 hmp
->version
= ver
->cur_version
;
577 * If upgrading from version < 4 to version >= 4 the UNDO FIFO
578 * must be reinitialized.
580 if (over
< HAMMER_VOL_VERSION_FOUR
&&
581 ver
->cur_version
>= HAMMER_VOL_VERSION_FOUR
) {
582 kprintf("upgrade undo to version 4\n");
583 error
= hammer_upgrade_undo_4(trans
);
589 * Adjust the version in the volume header
591 volume
= hammer_get_root_volume(hmp
, &error
);
592 KKASSERT(error
== 0);
593 hammer_modify_volume_field(cursor
.trans
, volume
, vol_version
);
594 volume
->ondisk
->vol_version
= ver
->cur_version
;
595 hammer_modify_volume_done(volume
);
596 hammer_rel_volume(volume
, 0);
598 hammer_sync_unlock(trans
);
599 hammer_unlock(&hmp
->flusher
.finalize_lock
);
601 ver
->head
.error
= error
;
602 hammer_done_cursor(&cursor
);
611 hammer_ioc_get_info(hammer_transaction_t trans
, struct hammer_ioc_info
*info
) {
613 struct hammer_volume_ondisk
*od
= trans
->hmp
->rootvol
->ondisk
;
614 struct hammer_mount
*hm
= trans
->hmp
;
616 /* Fill the structure with the necessary information */
617 _hammer_checkspace(hm
, HAMMER_CHKSPC_WRITE
, &info
->rsvbigblocks
);
618 info
->rsvbigblocks
= info
->rsvbigblocks
>> HAMMER_LARGEBLOCK_BITS
;
619 strlcpy(info
->vol_name
, od
->vol_name
, sizeof(od
->vol_name
));
621 info
->vol_fsid
= hm
->fsid
;
622 info
->vol_fstype
= od
->vol_fstype
;
623 info
->version
= hm
->version
;
625 info
->inodes
= od
->vol0_stat_inodes
;
626 info
->bigblocks
= od
->vol0_stat_bigblocks
;
627 info
->freebigblocks
= od
->vol0_stat_freebigblocks
;
628 info
->nvolumes
= hm
->nvolumes
;
634 * Add a snapshot transction id(s) to the list of snapshots.
636 * NOTE: Records are created with an allocated TID. If a flush cycle
637 * is in progress the record may be synced in the current flush
638 * cycle and the volume header will reflect the allocation of the
639 * TID, but the synchronization point may not catch up to the
640 * TID until the next flush cycle.
644 hammer_ioc_add_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
645 struct hammer_ioc_snapshot
*snap
)
647 hammer_mount_t hmp
= ip
->hmp
;
648 struct hammer_btree_leaf_elm leaf
;
649 struct hammer_cursor cursor
;
655 if (snap
->count
> HAMMER_SNAPS_PER_IOCTL
)
657 if (snap
->index
> snap
->count
)
660 hammer_lock_ex(&hmp
->snapshot_lock
);
663 * Look for keys starting after the previous iteration, or at
664 * the beginning if snap->count is 0.
666 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
668 hammer_done_cursor(&cursor
);
672 cursor
.asof
= HAMMER_MAX_TID
;
673 cursor
.flags
|= HAMMER_CURSOR_BACKEND
| HAMMER_CURSOR_ASOF
;
675 bzero(&leaf
, sizeof(leaf
));
676 leaf
.base
.obj_id
= HAMMER_OBJID_ROOT
;
677 leaf
.base
.rec_type
= HAMMER_RECTYPE_SNAPSHOT
;
678 leaf
.base
.create_tid
= hammer_alloc_tid(hmp
, 1);
679 leaf
.base
.btype
= HAMMER_BTREE_TYPE_RECORD
;
680 leaf
.base
.localization
= ip
->obj_localization
+ HAMMER_LOCALIZE_INODE
;
681 leaf
.data_len
= sizeof(struct hammer_snapshot_data
);
683 while (snap
->index
< snap
->count
) {
684 leaf
.base
.key
= (int64_t)snap
->snaps
[snap
->index
].tid
;
685 cursor
.key_beg
= leaf
.base
;
686 error
= hammer_btree_lookup(&cursor
);
692 cursor
.flags
&= ~HAMMER_CURSOR_ASOF
;
693 error
= hammer_create_at_cursor(&cursor
, &leaf
,
694 &snap
->snaps
[snap
->index
],
695 HAMMER_CREATE_MODE_SYS
);
696 if (error
== EDEADLK
) {
697 hammer_done_cursor(&cursor
);
700 cursor
.flags
|= HAMMER_CURSOR_ASOF
;
705 snap
->head
.error
= error
;
706 hammer_done_cursor(&cursor
);
707 hammer_unlock(&hmp
->snapshot_lock
);
712 * Delete snapshot transaction id(s) from the list of snapshots.
716 hammer_ioc_del_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
717 struct hammer_ioc_snapshot
*snap
)
719 hammer_mount_t hmp
= ip
->hmp
;
720 struct hammer_cursor cursor
;
726 if (snap
->count
> HAMMER_SNAPS_PER_IOCTL
)
728 if (snap
->index
> snap
->count
)
731 hammer_lock_ex(&hmp
->snapshot_lock
);
734 * Look for keys starting after the previous iteration, or at
735 * the beginning if snap->count is 0.
737 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
739 hammer_done_cursor(&cursor
);
743 cursor
.key_beg
.obj_id
= HAMMER_OBJID_ROOT
;
744 cursor
.key_beg
.create_tid
= 0;
745 cursor
.key_beg
.delete_tid
= 0;
746 cursor
.key_beg
.obj_type
= 0;
747 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_SNAPSHOT
;
748 cursor
.key_beg
.localization
= ip
->obj_localization
+ HAMMER_LOCALIZE_INODE
;
749 cursor
.asof
= HAMMER_MAX_TID
;
750 cursor
.flags
|= HAMMER_CURSOR_ASOF
;
752 while (snap
->index
< snap
->count
) {
753 cursor
.key_beg
.key
= (int64_t)snap
->snaps
[snap
->index
].tid
;
754 error
= hammer_btree_lookup(&cursor
);
757 error
= hammer_btree_extract(&cursor
, HAMMER_CURSOR_GET_LEAF
);
760 error
= hammer_delete_at_cursor(&cursor
, HAMMER_DELETE_DESTROY
,
762 if (error
== EDEADLK
) {
763 hammer_done_cursor(&cursor
);
770 snap
->head
.error
= error
;
771 hammer_done_cursor(&cursor
);
772 hammer_unlock(&hmp
->snapshot_lock
);
777 * Retrieve as many snapshot ids as possible or until the array is
778 * full, starting after the last transction id passed in. If count
779 * is 0 we retrieve starting at the beginning.
781 * NOTE: Because the b-tree key field is signed but transaction ids
782 * are unsigned the returned list will be signed-sorted instead
783 * of unsigned sorted. The Caller must still sort the aggregate
788 hammer_ioc_get_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
789 struct hammer_ioc_snapshot
*snap
)
791 struct hammer_cursor cursor
;
797 if (snap
->index
!= 0)
799 if (snap
->count
> HAMMER_SNAPS_PER_IOCTL
)
803 * Look for keys starting after the previous iteration, or at
804 * the beginning if snap->count is 0.
806 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
808 hammer_done_cursor(&cursor
);
812 cursor
.key_beg
.obj_id
= HAMMER_OBJID_ROOT
;
813 cursor
.key_beg
.create_tid
= 0;
814 cursor
.key_beg
.delete_tid
= 0;
815 cursor
.key_beg
.obj_type
= 0;
816 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_SNAPSHOT
;
817 cursor
.key_beg
.localization
= ip
->obj_localization
+ HAMMER_LOCALIZE_INODE
;
818 if (snap
->count
== 0)
819 cursor
.key_beg
.key
= HAMMER_MIN_KEY
;
821 cursor
.key_beg
.key
= (int64_t)snap
->snaps
[snap
->count
- 1].tid
+ 1;
823 cursor
.key_end
= cursor
.key_beg
;
824 cursor
.key_end
.key
= HAMMER_MAX_KEY
;
825 cursor
.asof
= HAMMER_MAX_TID
;
826 cursor
.flags
|= HAMMER_CURSOR_END_EXCLUSIVE
| HAMMER_CURSOR_ASOF
;
830 error
= hammer_btree_first(&cursor
);
831 while (error
== 0 && snap
->count
< HAMMER_SNAPS_PER_IOCTL
) {
832 error
= hammer_btree_extract(&cursor
, HAMMER_CURSOR_GET_LEAF
);
835 if (cursor
.leaf
->base
.rec_type
== HAMMER_RECTYPE_SNAPSHOT
) {
836 error
= hammer_btree_extract(
837 &cursor
, HAMMER_CURSOR_GET_LEAF
|
838 HAMMER_CURSOR_GET_DATA
);
839 snap
->snaps
[snap
->count
] = cursor
.data
->snap
;
842 * The snap data tid should match the key but might
843 * not due to a bug in the HAMMER v3 conversion code.
845 * This error will work itself out over time but we
846 * have to force a match or the snapshot will not
849 if (cursor
.data
->snap
.tid
!=
850 (hammer_tid_t
)cursor
.leaf
->base
.key
) {
851 kprintf("HAMMER: lo=%08x snapshot key "
852 "0x%016jx data mismatch 0x%016jx\n",
853 cursor
.key_beg
.localization
,
854 (uintmax_t)cursor
.data
->snap
.tid
,
855 cursor
.leaf
->base
.key
);
856 kprintf("HAMMER: Probably left over from the "
857 "original v3 conversion, hammer "
858 "cleanup should get it eventually\n");
859 snap
->snaps
[snap
->count
].tid
=
860 cursor
.leaf
->base
.key
;
864 error
= hammer_btree_iterate(&cursor
);
867 if (error
== ENOENT
) {
868 snap
->head
.flags
|= HAMMER_IOC_SNAPSHOT_EOF
;
871 snap
->head
.error
= error
;
872 hammer_done_cursor(&cursor
);
877 * Retrieve the PFS hammer cleanup utility config record. This is
878 * different (newer than) the PFS config.
882 hammer_ioc_get_config(hammer_transaction_t trans
, hammer_inode_t ip
,
883 struct hammer_ioc_config
*config
)
885 struct hammer_cursor cursor
;
888 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
890 hammer_done_cursor(&cursor
);
894 cursor
.key_beg
.obj_id
= HAMMER_OBJID_ROOT
;
895 cursor
.key_beg
.create_tid
= 0;
896 cursor
.key_beg
.delete_tid
= 0;
897 cursor
.key_beg
.obj_type
= 0;
898 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_CONFIG
;
899 cursor
.key_beg
.localization
= ip
->obj_localization
+ HAMMER_LOCALIZE_INODE
;
900 cursor
.key_beg
.key
= 0; /* config space page 0 */
902 cursor
.asof
= HAMMER_MAX_TID
;
903 cursor
.flags
|= HAMMER_CURSOR_ASOF
;
905 error
= hammer_btree_lookup(&cursor
);
907 error
= hammer_btree_extract(&cursor
, HAMMER_CURSOR_GET_LEAF
|
908 HAMMER_CURSOR_GET_DATA
);
910 config
->config
= cursor
.data
->config
;
912 /* error can be ENOENT */
913 config
->head
.error
= error
;
914 hammer_done_cursor(&cursor
);
919 * Retrieve the PFS hammer cleanup utility config record. This is
920 * different (newer than) the PFS config.
922 * This is kinda a hack.
926 hammer_ioc_set_config(hammer_transaction_t trans
, hammer_inode_t ip
,
927 struct hammer_ioc_config
*config
)
929 struct hammer_btree_leaf_elm leaf
;
930 struct hammer_cursor cursor
;
931 hammer_mount_t hmp
= ip
->hmp
;
935 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
937 hammer_done_cursor(&cursor
);
941 bzero(&leaf
, sizeof(leaf
));
942 leaf
.base
.obj_id
= HAMMER_OBJID_ROOT
;
943 leaf
.base
.rec_type
= HAMMER_RECTYPE_CONFIG
;
944 leaf
.base
.create_tid
= hammer_alloc_tid(hmp
, 1);
945 leaf
.base
.btype
= HAMMER_BTREE_TYPE_RECORD
;
946 leaf
.base
.localization
= ip
->obj_localization
+ HAMMER_LOCALIZE_INODE
;
947 leaf
.base
.key
= 0; /* page 0 */
948 leaf
.data_len
= sizeof(struct hammer_config_data
);
950 cursor
.key_beg
= leaf
.base
;
952 cursor
.asof
= HAMMER_MAX_TID
;
953 cursor
.flags
|= HAMMER_CURSOR_BACKEND
| HAMMER_CURSOR_ASOF
;
955 error
= hammer_btree_lookup(&cursor
);
957 error
= hammer_btree_extract(&cursor
, HAMMER_CURSOR_GET_LEAF
|
958 HAMMER_CURSOR_GET_DATA
);
959 error
= hammer_delete_at_cursor(&cursor
, HAMMER_DELETE_DESTROY
,
961 if (error
== EDEADLK
) {
962 hammer_done_cursor(&cursor
);
969 cursor
.flags
&= ~HAMMER_CURSOR_ASOF
;
970 cursor
.key_beg
= leaf
.base
;
971 error
= hammer_create_at_cursor(&cursor
, &leaf
,
973 HAMMER_CREATE_MODE_SYS
);
974 if (error
== EDEADLK
) {
975 hammer_done_cursor(&cursor
);
979 config
->head
.error
= error
;
980 hammer_done_cursor(&cursor
);