hammer - Disallow modifying ioctls when filesystem is read-only
[dragonfly.git] / sys / vfs / hammer / hammer_ioctl.c
blobea54f8574942810cb8ef39ad05dd6cb2c70781db
1 /*
2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
37 #include "hammer.h"
39 static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
40 struct hammer_ioc_history *hist);
41 static int hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
42 struct hammer_ioc_synctid *std);
43 static int hammer_ioc_get_version(hammer_transaction_t trans,
44 hammer_inode_t ip,
45 struct hammer_ioc_version *ver);
46 static int hammer_ioc_set_version(hammer_transaction_t trans,
47 hammer_inode_t ip,
48 struct hammer_ioc_version *ver);
49 static int hammer_ioc_get_info(hammer_transaction_t trans,
50 struct hammer_ioc_info *info);
51 static int hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
52 struct hammer_ioc_snapshot *snap);
53 static int hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
54 struct hammer_ioc_snapshot *snap);
55 static int hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
56 struct hammer_ioc_snapshot *snap);
57 static int hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
58 struct hammer_ioc_config *snap);
59 static int hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
60 struct hammer_ioc_config *snap);
61 static int hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
62 struct hammer_ioc_data *data);
64 int
65 hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
66 struct ucred *cred)
68 struct hammer_transaction trans;
69 struct hammer_mount *hmp;
70 int error;
72 error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);
73 hmp = ip->hmp;
75 hammer_start_transaction(&trans, hmp);
77 switch(com) {
78 case HAMMERIOC_PRUNE:
79 if (error == 0 && hmp->ronly)
80 error = EROFS;
81 if (error == 0) {
82 error = hammer_ioc_prune(&trans, ip,
83 (struct hammer_ioc_prune *)data);
85 break;
86 case HAMMERIOC_GETHISTORY:
87 error = hammer_ioc_gethistory(&trans, ip,
88 (struct hammer_ioc_history *)data);
89 break;
90 case HAMMERIOC_REBLOCK:
91 if (error == 0 && hmp->ronly)
92 error = EROFS;
93 if (error == 0) {
94 error = hammer_ioc_reblock(&trans, ip,
95 (struct hammer_ioc_reblock *)data);
97 break;
98 case HAMMERIOC_REBALANCE:
100 * Rebalancing needs to lock a lot of B-Tree nodes. The
101 * children and children's children. Systems with very
102 * little memory will not be able to do it.
104 if (error == 0 && hmp->ronly)
105 error = EROFS;
106 if (error == 0 && nbuf < HAMMER_REBALANCE_MIN_BUFS) {
107 hkprintf("System has insufficient buffers "
108 "to rebalance the tree. nbuf < %d\n",
109 HAMMER_REBALANCE_MIN_BUFS);
110 error = ENOSPC;
112 if (error == 0) {
113 error = hammer_ioc_rebalance(&trans, ip,
114 (struct hammer_ioc_rebalance *)data);
116 break;
117 case HAMMERIOC_SYNCTID:
118 error = hammer_ioc_synctid(&trans, ip,
119 (struct hammer_ioc_synctid *)data);
120 break;
121 case HAMMERIOC_GET_PSEUDOFS:
122 error = hammer_ioc_get_pseudofs(&trans, ip,
123 (struct hammer_ioc_pseudofs_rw *)data);
124 break;
125 case HAMMERIOC_SET_PSEUDOFS:
126 if (error == 0 && hmp->ronly)
127 error = EROFS;
128 if (error == 0) {
129 error = hammer_ioc_set_pseudofs(&trans, ip, cred,
130 (struct hammer_ioc_pseudofs_rw *)data);
132 break;
133 case HAMMERIOC_UPG_PSEUDOFS:
134 if (error == 0 && hmp->ronly)
135 error = EROFS;
136 if (error == 0) {
137 error = hammer_ioc_upgrade_pseudofs(&trans, ip,
138 (struct hammer_ioc_pseudofs_rw *)data);
140 break;
141 case HAMMERIOC_DGD_PSEUDOFS:
142 if (error == 0 && hmp->ronly)
143 error = EROFS;
144 if (error == 0) {
145 error = hammer_ioc_downgrade_pseudofs(&trans, ip,
146 (struct hammer_ioc_pseudofs_rw *)data);
148 break;
149 case HAMMERIOC_RMR_PSEUDOFS:
150 if (error == 0 && hmp->ronly)
151 error = EROFS;
152 if (error == 0) {
153 error = hammer_ioc_destroy_pseudofs(&trans, ip,
154 (struct hammer_ioc_pseudofs_rw *)data);
156 break;
157 case HAMMERIOC_WAI_PSEUDOFS:
158 if (error == 0) {
159 error = hammer_ioc_wait_pseudofs(&trans, ip,
160 (struct hammer_ioc_pseudofs_rw *)data);
162 break;
163 case HAMMERIOC_MIRROR_READ:
164 if (error == 0) {
165 error = hammer_ioc_mirror_read(&trans, ip,
166 (struct hammer_ioc_mirror_rw *)data);
168 break;
169 case HAMMERIOC_MIRROR_WRITE:
170 if (error == 0 && hmp->ronly)
171 error = EROFS;
172 if (error == 0) {
173 error = hammer_ioc_mirror_write(&trans, ip,
174 (struct hammer_ioc_mirror_rw *)data);
176 break;
177 case HAMMERIOC_GET_VERSION:
178 error = hammer_ioc_get_version(&trans, ip,
179 (struct hammer_ioc_version *)data);
180 break;
181 case HAMMERIOC_GET_INFO:
182 error = hammer_ioc_get_info(&trans,
183 (struct hammer_ioc_info *)data);
184 break;
185 case HAMMERIOC_SET_VERSION:
186 if (error == 0 && hmp->ronly)
187 error = EROFS;
188 if (error == 0) {
189 error = hammer_ioc_set_version(&trans, ip,
190 (struct hammer_ioc_version *)data);
192 break;
193 case HAMMERIOC_ADD_VOLUME:
194 if (error == 0 && hmp->ronly)
195 error = EROFS;
196 if (error == 0) {
197 error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
198 if (error == 0)
199 error = hammer_ioc_volume_add(&trans, ip,
200 (struct hammer_ioc_volume *)data);
202 break;
203 case HAMMERIOC_DEL_VOLUME:
204 if (error == 0 && hmp->ronly)
205 error = EROFS;
206 if (error == 0) {
207 error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
208 if (error == 0)
209 error = hammer_ioc_volume_del(&trans, ip,
210 (struct hammer_ioc_volume *)data);
212 break;
213 case HAMMERIOC_LIST_VOLUMES:
214 error = hammer_ioc_volume_list(&trans, ip,
215 (struct hammer_ioc_volume_list *)data);
216 break;
217 case HAMMERIOC_ADD_SNAPSHOT:
218 if (error == 0 && hmp->ronly)
219 error = EROFS;
220 if (error == 0) {
221 error = hammer_ioc_add_snapshot(
222 &trans, ip, (struct hammer_ioc_snapshot *)data);
224 break;
225 case HAMMERIOC_DEL_SNAPSHOT:
226 if (error == 0 && hmp->ronly)
227 error = EROFS;
228 if (error == 0) {
229 error = hammer_ioc_del_snapshot(
230 &trans, ip, (struct hammer_ioc_snapshot *)data);
232 break;
233 case HAMMERIOC_GET_SNAPSHOT:
234 error = hammer_ioc_get_snapshot(
235 &trans, ip, (struct hammer_ioc_snapshot *)data);
236 break;
237 case HAMMERIOC_GET_CONFIG:
238 error = hammer_ioc_get_config(
239 &trans, ip, (struct hammer_ioc_config *)data);
240 break;
241 case HAMMERIOC_SET_CONFIG:
242 if (error == 0 && hmp->ronly)
243 error = EROFS;
244 if (error == 0) {
245 error = hammer_ioc_set_config(
246 &trans, ip, (struct hammer_ioc_config *)data);
248 break;
249 case HAMMERIOC_DEDUP:
250 if (error == 0 && hmp->ronly)
251 error = EROFS;
252 if (error == 0) {
253 error = hammer_ioc_dedup(
254 &trans, ip, (struct hammer_ioc_dedup *)data);
256 break;
257 case HAMMERIOC_GET_DATA:
258 if (error == 0) {
259 error = hammer_ioc_get_data(
260 &trans, ip, (struct hammer_ioc_data *)data);
262 break;
263 case HAMMERIOC_SCAN_PSEUDOFS:
264 error = hammer_ioc_scan_pseudofs(
265 &trans, ip, (struct hammer_ioc_pseudofs_rw *)data);
266 break;
267 default:
268 error = EOPNOTSUPP;
269 break;
271 hammer_done_transaction(&trans);
272 return (error);
276 * Iterate through an object's inode or an object's records and record
277 * modification TIDs.
279 static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
280 hammer_btree_elm_t elm);
282 static
284 hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
285 struct hammer_ioc_history *hist)
287 struct hammer_cursor cursor;
288 hammer_btree_elm_t elm;
289 int error;
292 * Validate the structure and initialize for return.
294 if (hist->beg_tid > hist->end_tid)
295 return(EINVAL);
296 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
297 if (hist->key > hist->nxt_key)
298 return(EINVAL);
301 hist->obj_id = ip->obj_id;
302 hist->count = 0;
303 hist->nxt_tid = hist->end_tid;
304 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
305 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
306 hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
307 hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
308 if ((ip->flags & HAMMER_INODE_MODMASK) &
309 ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
310 hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
314 * Setup the cursor. We can't handle undeletable records
315 * (create_tid of 0) at the moment. A create_tid of 0 has
316 * a special meaning and cannot be specified in the cursor.
318 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
319 if (error) {
320 hammer_done_cursor(&cursor);
321 return(error);
324 cursor.key_beg.obj_id = hist->obj_id;
325 cursor.key_beg.create_tid = hist->beg_tid;
326 cursor.key_beg.delete_tid = 0;
327 cursor.key_beg.obj_type = 0;
328 if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
329 cursor.key_beg.create_tid = 1;
331 cursor.key_end.obj_id = hist->obj_id;
332 cursor.key_end.create_tid = hist->end_tid;
333 cursor.key_end.delete_tid = 0;
334 cursor.key_end.obj_type = 0;
336 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;
338 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
340 * key-range within the file. For a regular file the
341 * on-disk key represents BASE+LEN, not BASE, so the
342 * first possible record containing the offset 'key'
343 * has an on-disk key of (key + 1).
345 cursor.key_beg.key = hist->key;
346 cursor.key_end.key = HAMMER_MAX_KEY;
347 cursor.key_beg.localization = ip->obj_localization |
348 HAMMER_LOCALIZE_MISC;
349 cursor.key_end.localization = ip->obj_localization |
350 HAMMER_LOCALIZE_MISC;
352 switch(ip->ino_data.obj_type) {
353 case HAMMER_OBJTYPE_REGFILE:
354 ++cursor.key_beg.key;
355 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
356 break;
357 case HAMMER_OBJTYPE_DIRECTORY:
358 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
359 cursor.key_beg.localization = ip->obj_localization |
360 hammer_dir_localization(ip);
361 cursor.key_end.localization = ip->obj_localization |
362 hammer_dir_localization(ip);
363 break;
364 case HAMMER_OBJTYPE_DBFILE:
365 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
366 break;
367 default:
368 error = EINVAL;
369 break;
371 cursor.key_end.rec_type = cursor.key_beg.rec_type;
372 } else {
374 * The inode itself.
376 cursor.key_beg.key = 0;
377 cursor.key_end.key = 0;
378 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
379 cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
380 cursor.key_beg.localization = ip->obj_localization |
381 HAMMER_LOCALIZE_INODE;
382 cursor.key_end.localization = ip->obj_localization |
383 HAMMER_LOCALIZE_INODE;
386 error = hammer_btree_first(&cursor);
387 while (error == 0) {
388 elm = &cursor.node->ondisk->elms[cursor.index];
390 add_history(ip, hist, elm);
391 if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
392 HAMMER_IOC_HISTORY_NEXT_KEY |
393 HAMMER_IOC_HISTORY_EOF)) {
394 break;
396 error = hammer_btree_iterate(&cursor);
398 if (error == ENOENT) {
399 hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
400 error = 0;
402 hammer_done_cursor(&cursor);
403 return(error);
/*
 * Add the scanned element to the ioctl return structure.  Some special
 * casing is required for regular files to accommodate how data ranges are
 * stored on-disk.
 */
411 static void
412 add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
413 hammer_btree_elm_t elm)
415 int i;
417 if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
418 return;
419 if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
420 ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE) {
422 * Adjust nxt_key
424 if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
425 hist->key < elm->leaf.base.key - elm->leaf.data_len) {
426 hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
428 if (hist->nxt_key > elm->leaf.base.key)
429 hist->nxt_key = elm->leaf.base.key;
432 * Record is beyond MAXPHYS, there won't be any more records
433 * in the iteration covering the requested offset (key).
435 if (elm->leaf.base.key >= MAXPHYS &&
436 elm->leaf.base.key - MAXPHYS > hist->key) {
437 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
441 * Data-range of record does not cover the key.
443 if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
444 return;
446 } else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
448 * Adjust nxt_key
450 if (hist->nxt_key > elm->leaf.base.key &&
451 hist->key < elm->leaf.base.key) {
452 hist->nxt_key = elm->leaf.base.key;
456 * Record is beyond the requested key.
458 if (elm->leaf.base.key > hist->key)
459 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
463 * Add create_tid if it is in-bounds.
465 i = hist->count;
466 if ((i == 0 ||
467 elm->leaf.base.create_tid != hist->hist_ary[i - 1].tid) &&
468 elm->leaf.base.create_tid >= hist->beg_tid &&
469 elm->leaf.base.create_tid < hist->end_tid) {
470 if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
471 hist->nxt_tid = elm->leaf.base.create_tid;
472 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
473 return;
475 hist->hist_ary[i].tid = elm->leaf.base.create_tid;
476 hist->hist_ary[i].time32 = elm->leaf.create_ts;
477 ++hist->count;
481 * Add delete_tid if it is in-bounds. Note that different portions
482 * of the history may have overlapping data ranges with different
483 * delete_tid's. If this case occurs the delete_tid may match the
484 * create_tid of a following record. XXX
486 * [ ]
487 * [ ]
489 i = hist->count;
490 if (elm->leaf.base.delete_tid &&
491 elm->leaf.base.delete_tid >= hist->beg_tid &&
492 elm->leaf.base.delete_tid < hist->end_tid) {
493 if (i == HAMMER_MAX_HISTORY_ELMS) {
494 hist->nxt_tid = elm->leaf.base.delete_tid;
495 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
496 return;
498 hist->hist_ary[i].tid = elm->leaf.base.delete_tid;
499 hist->hist_ary[i].time32 = elm->leaf.delete_ts;
500 ++hist->count;
505 * Acquire synchronization TID
507 static
509 hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
510 struct hammer_ioc_synctid *std)
512 hammer_mount_t hmp = ip->hmp;
513 int error = 0;
515 switch(std->op) {
516 case HAMMER_SYNCTID_NONE:
517 std->tid = hmp->flusher.tid; /* inaccurate */
518 break;
519 case HAMMER_SYNCTID_ASYNC:
520 hammer_queue_inodes_flusher(hmp, MNT_NOWAIT);
521 hammer_flusher_async(hmp, NULL);
522 std->tid = hmp->flusher.tid; /* inaccurate */
523 break;
524 case HAMMER_SYNCTID_SYNC1:
525 hammer_queue_inodes_flusher(hmp, MNT_WAIT);
526 hammer_flusher_sync(hmp);
527 std->tid = hmp->flusher.tid;
528 break;
529 case HAMMER_SYNCTID_SYNC2:
530 hammer_queue_inodes_flusher(hmp, MNT_WAIT);
531 hammer_flusher_sync(hmp);
532 std->tid = hmp->flusher.tid;
533 hammer_flusher_sync(hmp);
534 break;
535 default:
536 error = EOPNOTSUPP;
537 break;
539 return(error);
/*
 * Retrieve version info.
 *
 * Load min_version, wip_version, and max_version.  If cur_version is passed
 * as 0 then load the current version into cur_version.  Load the description
 * for cur_version into the description array.
 *
 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
 * unsupported value.
 */
552 static
554 hammer_ioc_get_version(hammer_transaction_t trans, hammer_inode_t ip,
555 struct hammer_ioc_version *ver)
557 int error = 0;
559 ver->min_version = HAMMER_VOL_VERSION_MIN;
560 ver->wip_version = HAMMER_VOL_VERSION_WIP;
561 ver->max_version = HAMMER_VOL_VERSION_MAX;
562 if (ver->cur_version == 0)
563 ver->cur_version = trans->hmp->version;
564 switch(ver->cur_version) {
565 case 1:
566 ksnprintf(ver->description, sizeof(ver->description),
567 "First HAMMER release (DragonFly 2.0+)");
568 break;
569 case 2:
570 ksnprintf(ver->description, sizeof(ver->description),
571 "New directory entry layout (DragonFly 2.3+)");
572 break;
573 case 3:
574 ksnprintf(ver->description, sizeof(ver->description),
575 "New snapshot management (DragonFly 2.5+)");
576 break;
577 case 4:
578 ksnprintf(ver->description, sizeof(ver->description),
579 "New undo/flush, faster flush/sync (DragonFly 2.5+)");
580 break;
581 case 5:
582 ksnprintf(ver->description, sizeof(ver->description),
583 "Adjustments for dedup support (DragonFly 2.9+)");
584 break;
585 case 6:
586 ksnprintf(ver->description, sizeof(ver->description),
587 "Directory Hash ALG1 (tmp/rename resistance)");
588 break;
589 default:
590 ksnprintf(ver->description, sizeof(ver->description),
591 "Unknown");
592 error = EINVAL;
593 break;
595 return(error);
599 * Set version info
601 static
603 hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip,
604 struct hammer_ioc_version *ver)
606 hammer_mount_t hmp = trans->hmp;
607 struct hammer_cursor cursor;
608 hammer_volume_t volume;
609 int error;
610 int over = hmp->version;
613 * Generally do not allow downgrades. However, version 4 can
614 * be downgraded to version 3.
616 if (ver->cur_version < hmp->version) {
617 if (!(ver->cur_version == 3 && hmp->version == 4))
618 return(EINVAL);
620 if (ver->cur_version == hmp->version)
621 return(0);
622 if (ver->cur_version > HAMMER_VOL_VERSION_MAX)
623 return(EINVAL);
624 if (hmp->ronly)
625 return(EROFS);
628 * Update the root volume header and the version cached in
629 * the hammer_mount structure.
631 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
632 if (error)
633 goto failed;
634 hammer_lock_ex(&hmp->flusher.finalize_lock);
635 hammer_sync_lock_ex(trans);
636 hmp->version = ver->cur_version;
639 * If upgrading from version < 4 to version >= 4 the UNDO FIFO
640 * must be reinitialized.
642 if (over < HAMMER_VOL_VERSION_FOUR &&
643 ver->cur_version >= HAMMER_VOL_VERSION_FOUR) {
644 hkprintf("upgrade undo to version 4\n");
645 error = hammer_upgrade_undo_4(trans);
646 if (error)
647 goto failed;
651 * Adjust the version in the volume header
653 volume = hammer_get_root_volume(hmp, &error);
654 KKASSERT(error == 0);
655 hammer_modify_volume_field(cursor.trans, volume, vol_version);
656 volume->ondisk->vol_version = ver->cur_version;
657 hammer_modify_volume_done(volume);
658 hammer_rel_volume(volume, 0);
660 hammer_sync_unlock(trans);
661 hammer_unlock(&hmp->flusher.finalize_lock);
662 failed:
663 ver->head.error = error;
664 hammer_done_cursor(&cursor);
665 return(0);
669 * Get information
671 static
673 hammer_ioc_get_info(hammer_transaction_t trans, struct hammer_ioc_info *info)
675 hammer_volume_ondisk_t ondisk = trans->hmp->rootvol->ondisk;
676 hammer_mount_t hmp = trans->hmp;
678 /* Fill the structure with the necessary information */
679 _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks);
680 info->rsvbigblocks = info->rsvbigblocks >> HAMMER_BIGBLOCK_BITS;
681 strlcpy(info->vol_label, ondisk->vol_label, sizeof(ondisk->vol_label));
683 info->vol_fsid = hmp->fsid;
684 info->vol_fstype = ondisk->vol_fstype;
685 info->version = hmp->version;
687 info->inodes = ondisk->vol0_stat_inodes;
688 info->bigblocks = ondisk->vol0_stat_bigblocks;
689 info->freebigblocks = ondisk->vol0_stat_freebigblocks;
690 info->nvolumes = hmp->nvolumes;
691 info->rootvol = ondisk->vol_rootvol;
693 return 0;
697 * Add a snapshot transaction id(s) to the list of snapshots.
699 * NOTE: Records are created with an allocated TID. If a flush cycle
700 * is in progress the record may be synced in the current flush
701 * cycle and the volume header will reflect the allocation of the
702 * TID, but the synchronization point may not catch up to the
703 * TID until the next flush cycle.
705 static
707 hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
708 struct hammer_ioc_snapshot *snap)
710 hammer_mount_t hmp = ip->hmp;
711 struct hammer_btree_leaf_elm leaf;
712 struct hammer_cursor cursor;
713 int error;
716 * Validate structure
718 if (snap->count > HAMMER_SNAPS_PER_IOCTL)
719 return (EINVAL);
720 if (snap->index >= snap->count)
721 return (EINVAL);
723 hammer_lock_ex(&hmp->snapshot_lock);
724 again:
726 * Look for keys starting after the previous iteration, or at
727 * the beginning if snap->count is 0.
729 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
730 if (error) {
731 hammer_done_cursor(&cursor);
732 return(error);
735 cursor.asof = HAMMER_MAX_TID;
736 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;
738 bzero(&leaf, sizeof(leaf));
739 leaf.base.obj_id = HAMMER_OBJID_ROOT;
740 leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT;
741 leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
742 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
743 leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
744 leaf.data_len = sizeof(struct hammer_snapshot_data);
746 while (snap->index < snap->count) {
747 leaf.base.key = (int64_t)snap->snaps[snap->index].tid;
748 cursor.key_beg = leaf.base;
749 error = hammer_btree_lookup(&cursor);
750 if (error == 0) {
751 error = EEXIST;
752 break;
756 * NOTE: Must reload key_beg after an ASOF search because
757 * the create_tid may have been modified during the
758 * search.
760 cursor.flags &= ~HAMMER_CURSOR_ASOF;
761 cursor.key_beg = leaf.base;
762 error = hammer_create_at_cursor(&cursor, &leaf,
763 &snap->snaps[snap->index],
764 HAMMER_CREATE_MODE_SYS);
765 if (error == EDEADLK) {
766 hammer_done_cursor(&cursor);
767 goto again;
769 cursor.flags |= HAMMER_CURSOR_ASOF;
770 if (error)
771 break;
772 ++snap->index;
774 snap->head.error = error;
775 hammer_done_cursor(&cursor);
776 hammer_unlock(&hmp->snapshot_lock);
777 return(0);
781 * Delete snapshot transaction id(s) from the list of snapshots.
783 static
785 hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
786 struct hammer_ioc_snapshot *snap)
788 hammer_mount_t hmp = ip->hmp;
789 struct hammer_cursor cursor;
790 int error;
793 * Validate structure
795 if (snap->count > HAMMER_SNAPS_PER_IOCTL)
796 return (EINVAL);
797 if (snap->index >= snap->count)
798 return (EINVAL);
800 hammer_lock_ex(&hmp->snapshot_lock);
801 again:
803 * Look for keys starting after the previous iteration, or at
804 * the beginning if snap->count is 0.
806 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
807 if (error) {
808 hammer_done_cursor(&cursor);
809 return(error);
812 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
813 cursor.key_beg.create_tid = 0;
814 cursor.key_beg.delete_tid = 0;
815 cursor.key_beg.obj_type = 0;
816 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
817 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
818 cursor.asof = HAMMER_MAX_TID;
819 cursor.flags |= HAMMER_CURSOR_ASOF;
821 while (snap->index < snap->count) {
822 cursor.key_beg.key = (int64_t)snap->snaps[snap->index].tid;
823 error = hammer_btree_lookup(&cursor);
824 if (error)
825 break;
826 error = hammer_btree_extract_leaf(&cursor);
827 if (error)
828 break;
829 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
830 0, 0, 0, NULL);
831 if (error == EDEADLK) {
832 hammer_done_cursor(&cursor);
833 goto again;
835 if (error)
836 break;
837 ++snap->index;
839 snap->head.error = error;
840 hammer_done_cursor(&cursor);
841 hammer_unlock(&hmp->snapshot_lock);
842 return(0);
846 * Retrieve as many snapshot ids as possible or until the array is
847 * full, starting after the last transaction id passed in. If count
848 * is 0 we retrieve starting at the beginning.
850 * NOTE: Because the b-tree key field is signed but transaction ids
851 * are unsigned the returned list will be signed-sorted instead
852 * of unsigned sorted. The Caller must still sort the aggregate
853 * results.
855 static
857 hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
858 struct hammer_ioc_snapshot *snap)
860 struct hammer_cursor cursor;
861 int error;
864 * Validate structure
866 if (snap->index != 0)
867 return (EINVAL);
868 if (snap->count > HAMMER_SNAPS_PER_IOCTL)
869 return (EINVAL);
872 * Look for keys starting after the previous iteration, or at
873 * the beginning if snap->count is 0.
875 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
876 if (error) {
877 hammer_done_cursor(&cursor);
878 return(error);
881 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
882 cursor.key_beg.create_tid = 0;
883 cursor.key_beg.delete_tid = 0;
884 cursor.key_beg.obj_type = 0;
885 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
886 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
887 if (snap->count == 0)
888 cursor.key_beg.key = HAMMER_MIN_KEY;
889 else
890 cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1;
892 cursor.key_end = cursor.key_beg;
893 cursor.key_end.key = HAMMER_MAX_KEY;
894 cursor.asof = HAMMER_MAX_TID;
895 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF;
897 snap->count = 0;
899 error = hammer_btree_first(&cursor);
900 while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) {
901 error = hammer_btree_extract_leaf(&cursor);
902 if (error)
903 break;
904 if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) {
905 error = hammer_btree_extract_data(&cursor);
906 snap->snaps[snap->count] = cursor.data->snap;
909 * The snap data tid should match the key but might
910 * not due to a bug in the HAMMER v3 conversion code.
912 * This error will work itself out over time but we
913 * have to force a match or the snapshot will not
914 * be deletable.
916 if (cursor.data->snap.tid !=
917 (hammer_tid_t)cursor.leaf->base.key) {
918 hkprintf("lo=%08x snapshot key "
919 "0x%016jx data mismatch 0x%016jx\n",
920 cursor.key_beg.localization,
921 (uintmax_t)cursor.data->snap.tid,
922 cursor.leaf->base.key);
923 hkprintf("Probably left over from the "
924 "original v3 conversion, hammer "
925 "cleanup should get it eventually\n");
926 snap->snaps[snap->count].tid =
927 cursor.leaf->base.key;
929 ++snap->count;
931 error = hammer_btree_iterate(&cursor);
934 if (error == ENOENT) {
935 snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF;
936 error = 0;
938 snap->head.error = error;
939 hammer_done_cursor(&cursor);
940 return(0);
944 * Retrieve the PFS hammer cleanup utility config record. This is
945 * different (newer than) the PFS config.
947 static
949 hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
950 struct hammer_ioc_config *config)
952 struct hammer_cursor cursor;
953 int error;
955 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
956 if (error) {
957 hammer_done_cursor(&cursor);
958 return(error);
961 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
962 cursor.key_beg.create_tid = 0;
963 cursor.key_beg.delete_tid = 0;
964 cursor.key_beg.obj_type = 0;
965 cursor.key_beg.rec_type = HAMMER_RECTYPE_CONFIG;
966 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
967 cursor.key_beg.key = 0; /* config space page 0 */
969 cursor.asof = HAMMER_MAX_TID;
970 cursor.flags |= HAMMER_CURSOR_ASOF;
972 error = hammer_btree_lookup(&cursor);
973 if (error == 0) {
974 error = hammer_btree_extract_data(&cursor);
975 if (error == 0)
976 config->config = cursor.data->config;
978 /* error can be ENOENT */
979 config->head.error = error;
980 hammer_done_cursor(&cursor);
981 return(0);
/*
 * Set the PFS hammer cleanup utility config record.  This is
 * distinct from (and newer than) the PFS config.
 *
 * This is kinda a hack.
 */
990 static
992 hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
993 struct hammer_ioc_config *config)
995 struct hammer_btree_leaf_elm leaf;
996 struct hammer_cursor cursor;
997 hammer_mount_t hmp = ip->hmp;
998 int error;
1000 again:
1001 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
1002 if (error) {
1003 hammer_done_cursor(&cursor);
1004 return(error);
1007 bzero(&leaf, sizeof(leaf));
1008 leaf.base.obj_id = HAMMER_OBJID_ROOT;
1009 leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
1010 leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
1011 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
1012 leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
1013 leaf.base.key = 0; /* page 0 */
1014 leaf.data_len = sizeof(struct hammer_config_data);
1016 cursor.key_beg = leaf.base;
1018 cursor.asof = HAMMER_MAX_TID;
1019 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;
1021 error = hammer_btree_lookup(&cursor);
1022 if (error == 0) {
1023 error = hammer_btree_extract_data(&cursor);
1024 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
1025 0, 0, 0, NULL);
1026 if (error == EDEADLK) {
1027 hammer_done_cursor(&cursor);
1028 goto again;
1031 if (error == ENOENT)
1032 error = 0;
1033 if (error == 0) {
1035 * NOTE: Must reload key_beg after an ASOF search because
1036 * the create_tid may have been modified during the
1037 * search.
1039 cursor.flags &= ~HAMMER_CURSOR_ASOF;
1040 cursor.key_beg = leaf.base;
1041 error = hammer_create_at_cursor(&cursor, &leaf,
1042 &config->config,
1043 HAMMER_CREATE_MODE_SYS);
1044 if (error == EDEADLK) {
1045 hammer_done_cursor(&cursor);
1046 goto again;
1049 config->head.error = error;
1050 hammer_done_cursor(&cursor);
1051 return(0);
1054 static
1056 hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
1057 struct hammer_ioc_data *data)
1059 struct hammer_cursor cursor;
1060 int bytes;
1061 int error;
1063 /* XXX cached inode ? */
1064 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
1065 if (error)
1066 goto failed;
1068 cursor.key_beg = data->elm;
1069 cursor.flags |= HAMMER_CURSOR_BACKEND;
1071 error = hammer_btree_lookup(&cursor);
1072 if (error == 0) {
1073 error = hammer_btree_extract_data(&cursor);
1074 if (error == 0) {
1075 data->leaf = *cursor.leaf;
1076 bytes = cursor.leaf->data_len;
1077 if (bytes > data->size)
1078 bytes = data->size;
1079 error = copyout(cursor.data, data->ubuf, bytes);
1083 failed:
1084 hammer_done_cursor(&cursor);
1085 return (error);