2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
34 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
39 static int hammer_ioc_gethistory(hammer_transaction_t trans
, hammer_inode_t ip
,
40 struct hammer_ioc_history
*hist
);
41 static int hammer_ioc_synctid(hammer_transaction_t trans
, hammer_inode_t ip
,
42 struct hammer_ioc_synctid
*std
);
43 static int hammer_ioc_get_version(hammer_transaction_t trans
,
45 struct hammer_ioc_version
*ver
);
46 static int hammer_ioc_set_version(hammer_transaction_t trans
,
48 struct hammer_ioc_version
*ver
);
49 static int hammer_ioc_get_info(hammer_transaction_t trans
,
50 struct hammer_ioc_info
*info
);
51 static int hammer_ioc_add_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
52 struct hammer_ioc_snapshot
*snap
);
53 static int hammer_ioc_del_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
54 struct hammer_ioc_snapshot
*snap
);
55 static int hammer_ioc_get_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
56 struct hammer_ioc_snapshot
*snap
);
57 static int hammer_ioc_get_config(hammer_transaction_t trans
, hammer_inode_t ip
,
58 struct hammer_ioc_config
*snap
);
59 static int hammer_ioc_set_config(hammer_transaction_t trans
, hammer_inode_t ip
,
60 struct hammer_ioc_config
*snap
);
61 static int hammer_ioc_get_data(hammer_transaction_t trans
, hammer_inode_t ip
,
62 struct hammer_ioc_data
*data
);
65 hammer_ioctl(hammer_inode_t ip
, u_long com
, caddr_t data
, int fflag
,
68 struct hammer_transaction trans
;
69 struct hammer_mount
*hmp
;
72 error
= priv_check_cred(cred
, PRIV_HAMMER_IOCTL
, 0);
75 hammer_start_transaction(&trans
, hmp
);
79 if (error
== 0 && hmp
->ronly
)
82 error
= hammer_ioc_prune(&trans
, ip
,
83 (struct hammer_ioc_prune
*)data
);
86 case HAMMERIOC_GETHISTORY
:
87 error
= hammer_ioc_gethistory(&trans
, ip
,
88 (struct hammer_ioc_history
*)data
);
90 case HAMMERIOC_REBLOCK
:
91 if (error
== 0 && hmp
->ronly
)
94 error
= hammer_ioc_reblock(&trans
, ip
,
95 (struct hammer_ioc_reblock
*)data
);
98 case HAMMERIOC_REBALANCE
:
100 * Rebalancing needs to lock a lot of B-Tree nodes. The
101 * children and children's children. Systems with very
102 * little memory will not be able to do it.
104 if (error
== 0 && hmp
->ronly
)
106 if (error
== 0 && nbuf
< HAMMER_REBALANCE_MIN_BUFS
) {
107 hkprintf("System has insufficient buffers "
108 "to rebalance the tree. nbuf < %d\n",
109 HAMMER_REBALANCE_MIN_BUFS
);
113 error
= hammer_ioc_rebalance(&trans
, ip
,
114 (struct hammer_ioc_rebalance
*)data
);
117 case HAMMERIOC_SYNCTID
:
118 error
= hammer_ioc_synctid(&trans
, ip
,
119 (struct hammer_ioc_synctid
*)data
);
121 case HAMMERIOC_GET_PSEUDOFS
:
122 error
= hammer_ioc_get_pseudofs(&trans
, ip
,
123 (struct hammer_ioc_pseudofs_rw
*)data
);
125 case HAMMERIOC_SET_PSEUDOFS
:
126 if (error
== 0 && hmp
->ronly
)
129 error
= hammer_ioc_set_pseudofs(&trans
, ip
, cred
,
130 (struct hammer_ioc_pseudofs_rw
*)data
);
133 case HAMMERIOC_UPG_PSEUDOFS
:
134 if (error
== 0 && hmp
->ronly
)
137 error
= hammer_ioc_upgrade_pseudofs(&trans
, ip
,
138 (struct hammer_ioc_pseudofs_rw
*)data
);
141 case HAMMERIOC_DGD_PSEUDOFS
:
142 if (error
== 0 && hmp
->ronly
)
145 error
= hammer_ioc_downgrade_pseudofs(&trans
, ip
,
146 (struct hammer_ioc_pseudofs_rw
*)data
);
149 case HAMMERIOC_RMR_PSEUDOFS
:
150 if (error
== 0 && hmp
->ronly
)
153 error
= hammer_ioc_destroy_pseudofs(&trans
, ip
,
154 (struct hammer_ioc_pseudofs_rw
*)data
);
157 case HAMMERIOC_WAI_PSEUDOFS
:
159 error
= hammer_ioc_wait_pseudofs(&trans
, ip
,
160 (struct hammer_ioc_pseudofs_rw
*)data
);
163 case HAMMERIOC_MIRROR_READ
:
165 error
= hammer_ioc_mirror_read(&trans
, ip
,
166 (struct hammer_ioc_mirror_rw
*)data
);
169 case HAMMERIOC_MIRROR_WRITE
:
170 if (error
== 0 && hmp
->ronly
)
173 error
= hammer_ioc_mirror_write(&trans
, ip
,
174 (struct hammer_ioc_mirror_rw
*)data
);
177 case HAMMERIOC_GET_VERSION
:
178 error
= hammer_ioc_get_version(&trans
, ip
,
179 (struct hammer_ioc_version
*)data
);
181 case HAMMERIOC_GET_INFO
:
182 error
= hammer_ioc_get_info(&trans
,
183 (struct hammer_ioc_info
*)data
);
185 case HAMMERIOC_SET_VERSION
:
186 if (error
== 0 && hmp
->ronly
)
189 error
= hammer_ioc_set_version(&trans
, ip
,
190 (struct hammer_ioc_version
*)data
);
193 case HAMMERIOC_ADD_VOLUME
:
194 if (error
== 0 && hmp
->ronly
)
197 error
= priv_check_cred(cred
, PRIV_HAMMER_VOLUME
, 0);
199 error
= hammer_ioc_volume_add(&trans
, ip
,
200 (struct hammer_ioc_volume
*)data
);
203 case HAMMERIOC_DEL_VOLUME
:
204 if (error
== 0 && hmp
->ronly
)
207 error
= priv_check_cred(cred
, PRIV_HAMMER_VOLUME
, 0);
209 error
= hammer_ioc_volume_del(&trans
, ip
,
210 (struct hammer_ioc_volume
*)data
);
213 case HAMMERIOC_LIST_VOLUMES
:
214 error
= hammer_ioc_volume_list(&trans
, ip
,
215 (struct hammer_ioc_volume_list
*)data
);
217 case HAMMERIOC_ADD_SNAPSHOT
:
218 if (error
== 0 && hmp
->ronly
)
221 error
= hammer_ioc_add_snapshot(
222 &trans
, ip
, (struct hammer_ioc_snapshot
*)data
);
225 case HAMMERIOC_DEL_SNAPSHOT
:
226 if (error
== 0 && hmp
->ronly
)
229 error
= hammer_ioc_del_snapshot(
230 &trans
, ip
, (struct hammer_ioc_snapshot
*)data
);
233 case HAMMERIOC_GET_SNAPSHOT
:
234 error
= hammer_ioc_get_snapshot(
235 &trans
, ip
, (struct hammer_ioc_snapshot
*)data
);
237 case HAMMERIOC_GET_CONFIG
:
238 error
= hammer_ioc_get_config(
239 &trans
, ip
, (struct hammer_ioc_config
*)data
);
241 case HAMMERIOC_SET_CONFIG
:
242 if (error
== 0 && hmp
->ronly
)
245 error
= hammer_ioc_set_config(
246 &trans
, ip
, (struct hammer_ioc_config
*)data
);
249 case HAMMERIOC_DEDUP
:
250 if (error
== 0 && hmp
->ronly
)
253 error
= hammer_ioc_dedup(
254 &trans
, ip
, (struct hammer_ioc_dedup
*)data
);
257 case HAMMERIOC_GET_DATA
:
259 error
= hammer_ioc_get_data(
260 &trans
, ip
, (struct hammer_ioc_data
*)data
);
263 case HAMMERIOC_SCAN_PSEUDOFS
:
264 error
= hammer_ioc_scan_pseudofs(
265 &trans
, ip
, (struct hammer_ioc_pseudofs_rw
*)data
);
271 hammer_done_transaction(&trans
);
276 * Iterate through an object's inode or an object's records and record
279 static void add_history(hammer_inode_t ip
, struct hammer_ioc_history
*hist
,
280 hammer_btree_elm_t elm
);
284 hammer_ioc_gethistory(hammer_transaction_t trans
, hammer_inode_t ip
,
285 struct hammer_ioc_history
*hist
)
287 struct hammer_cursor cursor
;
288 hammer_btree_elm_t elm
;
292 * Validate the structure and initialize for return.
294 if (hist
->beg_tid
> hist
->end_tid
)
296 if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
297 if (hist
->key
> hist
->nxt_key
)
301 hist
->obj_id
= ip
->obj_id
;
303 hist
->nxt_tid
= hist
->end_tid
;
304 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_NEXT_TID
;
305 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_NEXT_KEY
;
306 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_EOF
;
307 hist
->head
.flags
&= ~HAMMER_IOC_HISTORY_UNSYNCED
;
308 if ((ip
->flags
& HAMMER_INODE_MODMASK
) &
309 ~(HAMMER_INODE_ATIME
| HAMMER_INODE_MTIME
)) {
310 hist
->head
.flags
|= HAMMER_IOC_HISTORY_UNSYNCED
;
314 * Setup the cursor. We can't handle undeletable records
315 * (create_tid of 0) at the moment. A create_tid of 0 has
316 * a special meaning and cannot be specified in the cursor.
318 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
320 hammer_done_cursor(&cursor
);
324 cursor
.key_beg
.obj_id
= hist
->obj_id
;
325 cursor
.key_beg
.create_tid
= hist
->beg_tid
;
326 cursor
.key_beg
.delete_tid
= 0;
327 cursor
.key_beg
.obj_type
= 0;
328 if (cursor
.key_beg
.create_tid
== HAMMER_MIN_TID
)
329 cursor
.key_beg
.create_tid
= 1;
331 cursor
.key_end
.obj_id
= hist
->obj_id
;
332 cursor
.key_end
.create_tid
= hist
->end_tid
;
333 cursor
.key_end
.delete_tid
= 0;
334 cursor
.key_end
.obj_type
= 0;
336 cursor
.flags
|= HAMMER_CURSOR_END_EXCLUSIVE
;
338 if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
340 * key-range within the file. For a regular file the
341 * on-disk key represents BASE+LEN, not BASE, so the
342 * first possible record containing the offset 'key'
343 * has an on-disk key of (key + 1).
345 cursor
.key_beg
.key
= hist
->key
;
346 cursor
.key_end
.key
= HAMMER_MAX_KEY
;
347 cursor
.key_beg
.localization
= ip
->obj_localization
|
348 HAMMER_LOCALIZE_MISC
;
349 cursor
.key_end
.localization
= ip
->obj_localization
|
350 HAMMER_LOCALIZE_MISC
;
352 switch(ip
->ino_data
.obj_type
) {
353 case HAMMER_OBJTYPE_REGFILE
:
354 ++cursor
.key_beg
.key
;
355 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DATA
;
357 case HAMMER_OBJTYPE_DIRECTORY
:
358 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DIRENTRY
;
359 cursor
.key_beg
.localization
= ip
->obj_localization
|
360 hammer_dir_localization(ip
);
361 cursor
.key_end
.localization
= ip
->obj_localization
|
362 hammer_dir_localization(ip
);
364 case HAMMER_OBJTYPE_DBFILE
:
365 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_DB
;
371 cursor
.key_end
.rec_type
= cursor
.key_beg
.rec_type
;
376 cursor
.key_beg
.key
= 0;
377 cursor
.key_end
.key
= 0;
378 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_INODE
;
379 cursor
.key_end
.rec_type
= HAMMER_RECTYPE_INODE
;
380 cursor
.key_beg
.localization
= ip
->obj_localization
|
381 HAMMER_LOCALIZE_INODE
;
382 cursor
.key_end
.localization
= ip
->obj_localization
|
383 HAMMER_LOCALIZE_INODE
;
386 error
= hammer_btree_first(&cursor
);
388 elm
= &cursor
.node
->ondisk
->elms
[cursor
.index
];
390 add_history(ip
, hist
, elm
);
391 if (hist
->head
.flags
& (HAMMER_IOC_HISTORY_NEXT_TID
|
392 HAMMER_IOC_HISTORY_NEXT_KEY
|
393 HAMMER_IOC_HISTORY_EOF
)) {
396 error
= hammer_btree_iterate(&cursor
);
398 if (error
== ENOENT
) {
399 hist
->head
.flags
|= HAMMER_IOC_HISTORY_EOF
;
402 hammer_done_cursor(&cursor
);
407 * Add the scanned element to the ioctl return structure. Some special
408 * casing is required for regular files to accomodate how data ranges are
412 add_history(hammer_inode_t ip
, struct hammer_ioc_history
*hist
,
413 hammer_btree_elm_t elm
)
417 if (elm
->base
.btype
!= HAMMER_BTREE_TYPE_RECORD
)
419 if ((hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) &&
420 ip
->ino_data
.obj_type
== HAMMER_OBJTYPE_REGFILE
) {
424 if (hist
->nxt_key
> elm
->leaf
.base
.key
- elm
->leaf
.data_len
&&
425 hist
->key
< elm
->leaf
.base
.key
- elm
->leaf
.data_len
) {
426 hist
->nxt_key
= elm
->leaf
.base
.key
- elm
->leaf
.data_len
;
428 if (hist
->nxt_key
> elm
->leaf
.base
.key
)
429 hist
->nxt_key
= elm
->leaf
.base
.key
;
432 * Record is beyond MAXPHYS, there won't be any more records
433 * in the iteration covering the requested offset (key).
435 if (elm
->leaf
.base
.key
>= MAXPHYS
&&
436 elm
->leaf
.base
.key
- MAXPHYS
> hist
->key
) {
437 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_KEY
;
441 * Data-range of record does not cover the key.
443 if (elm
->leaf
.base
.key
- elm
->leaf
.data_len
> hist
->key
)
446 } else if (hist
->head
.flags
& HAMMER_IOC_HISTORY_ATKEY
) {
450 if (hist
->nxt_key
> elm
->leaf
.base
.key
&&
451 hist
->key
< elm
->leaf
.base
.key
) {
452 hist
->nxt_key
= elm
->leaf
.base
.key
;
456 * Record is beyond the requested key.
458 if (elm
->leaf
.base
.key
> hist
->key
)
459 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_KEY
;
463 * Add create_tid if it is in-bounds.
467 elm
->leaf
.base
.create_tid
!= hist
->hist_ary
[i
- 1].tid
) &&
468 elm
->leaf
.base
.create_tid
>= hist
->beg_tid
&&
469 elm
->leaf
.base
.create_tid
< hist
->end_tid
) {
470 if (hist
->count
== HAMMER_MAX_HISTORY_ELMS
) {
471 hist
->nxt_tid
= elm
->leaf
.base
.create_tid
;
472 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_TID
;
475 hist
->hist_ary
[i
].tid
= elm
->leaf
.base
.create_tid
;
476 hist
->hist_ary
[i
].time32
= elm
->leaf
.create_ts
;
481 * Add delete_tid if it is in-bounds. Note that different portions
482 * of the history may have overlapping data ranges with different
483 * delete_tid's. If this case occurs the delete_tid may match the
484 * create_tid of a following record. XXX
490 if (elm
->leaf
.base
.delete_tid
&&
491 elm
->leaf
.base
.delete_tid
>= hist
->beg_tid
&&
492 elm
->leaf
.base
.delete_tid
< hist
->end_tid
) {
493 if (i
== HAMMER_MAX_HISTORY_ELMS
) {
494 hist
->nxt_tid
= elm
->leaf
.base
.delete_tid
;
495 hist
->head
.flags
|= HAMMER_IOC_HISTORY_NEXT_TID
;
498 hist
->hist_ary
[i
].tid
= elm
->leaf
.base
.delete_tid
;
499 hist
->hist_ary
[i
].time32
= elm
->leaf
.delete_ts
;
505 * Acquire synchronization TID
509 hammer_ioc_synctid(hammer_transaction_t trans
, hammer_inode_t ip
,
510 struct hammer_ioc_synctid
*std
)
512 hammer_mount_t hmp
= ip
->hmp
;
516 case HAMMER_SYNCTID_NONE
:
517 std
->tid
= hmp
->flusher
.tid
; /* inaccurate */
519 case HAMMER_SYNCTID_ASYNC
:
520 hammer_queue_inodes_flusher(hmp
, MNT_NOWAIT
);
521 hammer_flusher_async(hmp
, NULL
);
522 std
->tid
= hmp
->flusher
.tid
; /* inaccurate */
524 case HAMMER_SYNCTID_SYNC1
:
525 hammer_queue_inodes_flusher(hmp
, MNT_WAIT
);
526 hammer_flusher_sync(hmp
);
527 std
->tid
= hmp
->flusher
.tid
;
529 case HAMMER_SYNCTID_SYNC2
:
530 hammer_queue_inodes_flusher(hmp
, MNT_WAIT
);
531 hammer_flusher_sync(hmp
);
532 std
->tid
= hmp
->flusher
.tid
;
533 hammer_flusher_sync(hmp
);
543 * Retrieve version info.
545 * Load min_version, wip_version, and max_versino. If cur_version is passed
546 * as 0 then load the current version into cur_version. Load the description
547 * for cur_version into the description array.
549 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
554 hammer_ioc_get_version(hammer_transaction_t trans
, hammer_inode_t ip
,
555 struct hammer_ioc_version
*ver
)
559 ver
->min_version
= HAMMER_VOL_VERSION_MIN
;
560 ver
->wip_version
= HAMMER_VOL_VERSION_WIP
;
561 ver
->max_version
= HAMMER_VOL_VERSION_MAX
;
562 if (ver
->cur_version
== 0)
563 ver
->cur_version
= trans
->hmp
->version
;
564 switch(ver
->cur_version
) {
566 ksnprintf(ver
->description
, sizeof(ver
->description
),
567 "First HAMMER release (DragonFly 2.0+)");
570 ksnprintf(ver
->description
, sizeof(ver
->description
),
571 "New directory entry layout (DragonFly 2.3+)");
574 ksnprintf(ver
->description
, sizeof(ver
->description
),
575 "New snapshot management (DragonFly 2.5+)");
578 ksnprintf(ver
->description
, sizeof(ver
->description
),
579 "New undo/flush, faster flush/sync (DragonFly 2.5+)");
582 ksnprintf(ver
->description
, sizeof(ver
->description
),
583 "Adjustments for dedup support (DragonFly 2.9+)");
586 ksnprintf(ver
->description
, sizeof(ver
->description
),
587 "Directory Hash ALG1 (tmp/rename resistance)");
590 ksnprintf(ver
->description
, sizeof(ver
->description
),
603 hammer_ioc_set_version(hammer_transaction_t trans
, hammer_inode_t ip
,
604 struct hammer_ioc_version
*ver
)
606 hammer_mount_t hmp
= trans
->hmp
;
607 struct hammer_cursor cursor
;
608 hammer_volume_t volume
;
610 int over
= hmp
->version
;
613 * Generally do not allow downgrades. However, version 4 can
614 * be downgraded to version 3.
616 if (ver
->cur_version
< hmp
->version
) {
617 if (!(ver
->cur_version
== 3 && hmp
->version
== 4))
620 if (ver
->cur_version
== hmp
->version
)
622 if (ver
->cur_version
> HAMMER_VOL_VERSION_MAX
)
628 * Update the root volume header and the version cached in
629 * the hammer_mount structure.
631 error
= hammer_init_cursor(trans
, &cursor
, NULL
, NULL
);
634 hammer_lock_ex(&hmp
->flusher
.finalize_lock
);
635 hammer_sync_lock_ex(trans
);
636 hmp
->version
= ver
->cur_version
;
639 * If upgrading from version < 4 to version >= 4 the UNDO FIFO
640 * must be reinitialized.
642 if (over
< HAMMER_VOL_VERSION_FOUR
&&
643 ver
->cur_version
>= HAMMER_VOL_VERSION_FOUR
) {
644 hkprintf("upgrade undo to version 4\n");
645 error
= hammer_upgrade_undo_4(trans
);
651 * Adjust the version in the volume header
653 volume
= hammer_get_root_volume(hmp
, &error
);
654 KKASSERT(error
== 0);
655 hammer_modify_volume_field(cursor
.trans
, volume
, vol_version
);
656 volume
->ondisk
->vol_version
= ver
->cur_version
;
657 hammer_modify_volume_done(volume
);
658 hammer_rel_volume(volume
, 0);
660 hammer_sync_unlock(trans
);
661 hammer_unlock(&hmp
->flusher
.finalize_lock
);
663 ver
->head
.error
= error
;
664 hammer_done_cursor(&cursor
);
673 hammer_ioc_get_info(hammer_transaction_t trans
, struct hammer_ioc_info
*info
)
675 hammer_volume_ondisk_t ondisk
= trans
->hmp
->rootvol
->ondisk
;
676 hammer_mount_t hmp
= trans
->hmp
;
678 /* Fill the structure with the necessary information */
679 _hammer_checkspace(hmp
, HAMMER_CHKSPC_WRITE
, &info
->rsvbigblocks
);
680 info
->rsvbigblocks
= info
->rsvbigblocks
>> HAMMER_BIGBLOCK_BITS
;
681 strlcpy(info
->vol_label
, ondisk
->vol_label
, sizeof(ondisk
->vol_label
));
683 info
->vol_fsid
= hmp
->fsid
;
684 info
->vol_fstype
= ondisk
->vol_fstype
;
685 info
->version
= hmp
->version
;
687 info
->inodes
= ondisk
->vol0_stat_inodes
;
688 info
->bigblocks
= ondisk
->vol0_stat_bigblocks
;
689 info
->freebigblocks
= ondisk
->vol0_stat_freebigblocks
;
690 info
->nvolumes
= hmp
->nvolumes
;
691 info
->rootvol
= ondisk
->vol_rootvol
;
697 * Add a snapshot transaction id(s) to the list of snapshots.
699 * NOTE: Records are created with an allocated TID. If a flush cycle
700 * is in progress the record may be synced in the current flush
701 * cycle and the volume header will reflect the allocation of the
702 * TID, but the synchronization point may not catch up to the
703 * TID until the next flush cycle.
707 hammer_ioc_add_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
708 struct hammer_ioc_snapshot
*snap
)
710 hammer_mount_t hmp
= ip
->hmp
;
711 struct hammer_btree_leaf_elm leaf
;
712 struct hammer_cursor cursor
;
718 if (snap
->count
> HAMMER_SNAPS_PER_IOCTL
)
720 if (snap
->index
>= snap
->count
)
723 hammer_lock_ex(&hmp
->snapshot_lock
);
726 * Look for keys starting after the previous iteration, or at
727 * the beginning if snap->count is 0.
729 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
731 hammer_done_cursor(&cursor
);
735 cursor
.asof
= HAMMER_MAX_TID
;
736 cursor
.flags
|= HAMMER_CURSOR_BACKEND
| HAMMER_CURSOR_ASOF
;
738 bzero(&leaf
, sizeof(leaf
));
739 leaf
.base
.obj_id
= HAMMER_OBJID_ROOT
;
740 leaf
.base
.rec_type
= HAMMER_RECTYPE_SNAPSHOT
;
741 leaf
.base
.create_tid
= hammer_alloc_tid(hmp
, 1);
742 leaf
.base
.btype
= HAMMER_BTREE_TYPE_RECORD
;
743 leaf
.base
.localization
= ip
->obj_localization
| HAMMER_LOCALIZE_INODE
;
744 leaf
.data_len
= sizeof(struct hammer_snapshot_data
);
746 while (snap
->index
< snap
->count
) {
747 leaf
.base
.key
= (int64_t)snap
->snaps
[snap
->index
].tid
;
748 cursor
.key_beg
= leaf
.base
;
749 error
= hammer_btree_lookup(&cursor
);
756 * NOTE: Must reload key_beg after an ASOF search because
757 * the create_tid may have been modified during the
760 cursor
.flags
&= ~HAMMER_CURSOR_ASOF
;
761 cursor
.key_beg
= leaf
.base
;
762 error
= hammer_create_at_cursor(&cursor
, &leaf
,
763 &snap
->snaps
[snap
->index
],
764 HAMMER_CREATE_MODE_SYS
);
765 if (error
== EDEADLK
) {
766 hammer_done_cursor(&cursor
);
769 cursor
.flags
|= HAMMER_CURSOR_ASOF
;
774 snap
->head
.error
= error
;
775 hammer_done_cursor(&cursor
);
776 hammer_unlock(&hmp
->snapshot_lock
);
781 * Delete snapshot transaction id(s) from the list of snapshots.
785 hammer_ioc_del_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
786 struct hammer_ioc_snapshot
*snap
)
788 hammer_mount_t hmp
= ip
->hmp
;
789 struct hammer_cursor cursor
;
795 if (snap
->count
> HAMMER_SNAPS_PER_IOCTL
)
797 if (snap
->index
>= snap
->count
)
800 hammer_lock_ex(&hmp
->snapshot_lock
);
803 * Look for keys starting after the previous iteration, or at
804 * the beginning if snap->count is 0.
806 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
808 hammer_done_cursor(&cursor
);
812 cursor
.key_beg
.obj_id
= HAMMER_OBJID_ROOT
;
813 cursor
.key_beg
.create_tid
= 0;
814 cursor
.key_beg
.delete_tid
= 0;
815 cursor
.key_beg
.obj_type
= 0;
816 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_SNAPSHOT
;
817 cursor
.key_beg
.localization
= ip
->obj_localization
| HAMMER_LOCALIZE_INODE
;
818 cursor
.asof
= HAMMER_MAX_TID
;
819 cursor
.flags
|= HAMMER_CURSOR_ASOF
;
821 while (snap
->index
< snap
->count
) {
822 cursor
.key_beg
.key
= (int64_t)snap
->snaps
[snap
->index
].tid
;
823 error
= hammer_btree_lookup(&cursor
);
826 error
= hammer_btree_extract_leaf(&cursor
);
829 error
= hammer_delete_at_cursor(&cursor
, HAMMER_DELETE_DESTROY
,
831 if (error
== EDEADLK
) {
832 hammer_done_cursor(&cursor
);
839 snap
->head
.error
= error
;
840 hammer_done_cursor(&cursor
);
841 hammer_unlock(&hmp
->snapshot_lock
);
846 * Retrieve as many snapshot ids as possible or until the array is
847 * full, starting after the last transaction id passed in. If count
848 * is 0 we retrieve starting at the beginning.
850 * NOTE: Because the b-tree key field is signed but transaction ids
851 * are unsigned the returned list will be signed-sorted instead
852 * of unsigned sorted. The Caller must still sort the aggregate
857 hammer_ioc_get_snapshot(hammer_transaction_t trans
, hammer_inode_t ip
,
858 struct hammer_ioc_snapshot
*snap
)
860 struct hammer_cursor cursor
;
866 if (snap
->index
!= 0)
868 if (snap
->count
> HAMMER_SNAPS_PER_IOCTL
)
872 * Look for keys starting after the previous iteration, or at
873 * the beginning if snap->count is 0.
875 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
877 hammer_done_cursor(&cursor
);
881 cursor
.key_beg
.obj_id
= HAMMER_OBJID_ROOT
;
882 cursor
.key_beg
.create_tid
= 0;
883 cursor
.key_beg
.delete_tid
= 0;
884 cursor
.key_beg
.obj_type
= 0;
885 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_SNAPSHOT
;
886 cursor
.key_beg
.localization
= ip
->obj_localization
| HAMMER_LOCALIZE_INODE
;
887 if (snap
->count
== 0)
888 cursor
.key_beg
.key
= HAMMER_MIN_KEY
;
890 cursor
.key_beg
.key
= (int64_t)snap
->snaps
[snap
->count
- 1].tid
+ 1;
892 cursor
.key_end
= cursor
.key_beg
;
893 cursor
.key_end
.key
= HAMMER_MAX_KEY
;
894 cursor
.asof
= HAMMER_MAX_TID
;
895 cursor
.flags
|= HAMMER_CURSOR_END_EXCLUSIVE
| HAMMER_CURSOR_ASOF
;
899 error
= hammer_btree_first(&cursor
);
900 while (error
== 0 && snap
->count
< HAMMER_SNAPS_PER_IOCTL
) {
901 error
= hammer_btree_extract_leaf(&cursor
);
904 if (cursor
.leaf
->base
.rec_type
== HAMMER_RECTYPE_SNAPSHOT
) {
905 error
= hammer_btree_extract_data(&cursor
);
906 snap
->snaps
[snap
->count
] = cursor
.data
->snap
;
909 * The snap data tid should match the key but might
910 * not due to a bug in the HAMMER v3 conversion code.
912 * This error will work itself out over time but we
913 * have to force a match or the snapshot will not
916 if (cursor
.data
->snap
.tid
!=
917 (hammer_tid_t
)cursor
.leaf
->base
.key
) {
918 hkprintf("lo=%08x snapshot key "
919 "0x%016jx data mismatch 0x%016jx\n",
920 cursor
.key_beg
.localization
,
921 (uintmax_t)cursor
.data
->snap
.tid
,
922 cursor
.leaf
->base
.key
);
923 hkprintf("Probably left over from the "
924 "original v3 conversion, hammer "
925 "cleanup should get it eventually\n");
926 snap
->snaps
[snap
->count
].tid
=
927 cursor
.leaf
->base
.key
;
931 error
= hammer_btree_iterate(&cursor
);
934 if (error
== ENOENT
) {
935 snap
->head
.flags
|= HAMMER_IOC_SNAPSHOT_EOF
;
938 snap
->head
.error
= error
;
939 hammer_done_cursor(&cursor
);
944 * Retrieve the PFS hammer cleanup utility config record. This is
945 * different (newer than) the PFS config.
949 hammer_ioc_get_config(hammer_transaction_t trans
, hammer_inode_t ip
,
950 struct hammer_ioc_config
*config
)
952 struct hammer_cursor cursor
;
955 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
957 hammer_done_cursor(&cursor
);
961 cursor
.key_beg
.obj_id
= HAMMER_OBJID_ROOT
;
962 cursor
.key_beg
.create_tid
= 0;
963 cursor
.key_beg
.delete_tid
= 0;
964 cursor
.key_beg
.obj_type
= 0;
965 cursor
.key_beg
.rec_type
= HAMMER_RECTYPE_CONFIG
;
966 cursor
.key_beg
.localization
= ip
->obj_localization
| HAMMER_LOCALIZE_INODE
;
967 cursor
.key_beg
.key
= 0; /* config space page 0 */
969 cursor
.asof
= HAMMER_MAX_TID
;
970 cursor
.flags
|= HAMMER_CURSOR_ASOF
;
972 error
= hammer_btree_lookup(&cursor
);
974 error
= hammer_btree_extract_data(&cursor
);
976 config
->config
= cursor
.data
->config
;
978 /* error can be ENOENT */
979 config
->head
.error
= error
;
980 hammer_done_cursor(&cursor
);
985 * Retrieve the PFS hammer cleanup utility config record. This is
986 * different (newer than) the PFS config.
988 * This is kinda a hack.
992 hammer_ioc_set_config(hammer_transaction_t trans
, hammer_inode_t ip
,
993 struct hammer_ioc_config
*config
)
995 struct hammer_btree_leaf_elm leaf
;
996 struct hammer_cursor cursor
;
997 hammer_mount_t hmp
= ip
->hmp
;
1001 error
= hammer_init_cursor(trans
, &cursor
, &ip
->cache
[0], NULL
);
1003 hammer_done_cursor(&cursor
);
1007 bzero(&leaf
, sizeof(leaf
));
1008 leaf
.base
.obj_id
= HAMMER_OBJID_ROOT
;
1009 leaf
.base
.rec_type
= HAMMER_RECTYPE_CONFIG
;
1010 leaf
.base
.create_tid
= hammer_alloc_tid(hmp
, 1);
1011 leaf
.base
.btype
= HAMMER_BTREE_TYPE_RECORD
;
1012 leaf
.base
.localization
= ip
->obj_localization
| HAMMER_LOCALIZE_INODE
;
1013 leaf
.base
.key
= 0; /* page 0 */
1014 leaf
.data_len
= sizeof(struct hammer_config_data
);
1016 cursor
.key_beg
= leaf
.base
;
1018 cursor
.asof
= HAMMER_MAX_TID
;
1019 cursor
.flags
|= HAMMER_CURSOR_BACKEND
| HAMMER_CURSOR_ASOF
;
1021 error
= hammer_btree_lookup(&cursor
);
1023 error
= hammer_btree_extract_data(&cursor
);
1024 error
= hammer_delete_at_cursor(&cursor
, HAMMER_DELETE_DESTROY
,
1026 if (error
== EDEADLK
) {
1027 hammer_done_cursor(&cursor
);
1031 if (error
== ENOENT
)
1035 * NOTE: Must reload key_beg after an ASOF search because
1036 * the create_tid may have been modified during the
1039 cursor
.flags
&= ~HAMMER_CURSOR_ASOF
;
1040 cursor
.key_beg
= leaf
.base
;
1041 error
= hammer_create_at_cursor(&cursor
, &leaf
,
1043 HAMMER_CREATE_MODE_SYS
);
1044 if (error
== EDEADLK
) {
1045 hammer_done_cursor(&cursor
);
1049 config
->head
.error
= error
;
1050 hammer_done_cursor(&cursor
);
1056 hammer_ioc_get_data(hammer_transaction_t trans
, hammer_inode_t ip
,
1057 struct hammer_ioc_data
*data
)
1059 struct hammer_cursor cursor
;
1063 /* XXX cached inode ? */
1064 error
= hammer_init_cursor(trans
, &cursor
, NULL
, NULL
);
1068 cursor
.key_beg
= data
->elm
;
1069 cursor
.flags
|= HAMMER_CURSOR_BACKEND
;
1071 error
= hammer_btree_lookup(&cursor
);
1073 error
= hammer_btree_extract_data(&cursor
);
1075 data
->leaf
= *cursor
.leaf
;
1076 bytes
= cursor
.leaf
->data_len
;
1077 if (bytes
> data
->size
)
1079 error
= copyout(cursor
.data
, data
->ubuf
, bytes
);
1084 hammer_done_cursor(&cursor
);