/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * WARNING! The ioctl functions which manipulate the connection state need
 *	    to be able to run without deadlock on the volume's chain lock.
 *	    Most of these functions use a separate lock.
 */
45 static int hammer2_ioctl_version_get(hammer2_inode_t
*ip
, void *data
);
46 static int hammer2_ioctl_recluster(hammer2_inode_t
*ip
, void *data
);
47 static int hammer2_ioctl_remote_scan(hammer2_inode_t
*ip
, void *data
);
48 static int hammer2_ioctl_remote_add(hammer2_inode_t
*ip
, void *data
);
49 static int hammer2_ioctl_remote_del(hammer2_inode_t
*ip
, void *data
);
50 static int hammer2_ioctl_remote_rep(hammer2_inode_t
*ip
, void *data
);
51 static int hammer2_ioctl_socket_get(hammer2_inode_t
*ip
, void *data
);
52 static int hammer2_ioctl_socket_set(hammer2_inode_t
*ip
, void *data
);
53 static int hammer2_ioctl_pfs_get(hammer2_inode_t
*ip
, void *data
);
54 static int hammer2_ioctl_pfs_lookup(hammer2_inode_t
*ip
, void *data
);
55 static int hammer2_ioctl_pfs_create(hammer2_inode_t
*ip
, void *data
);
56 static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t
*ip
, void *data
);
57 static int hammer2_ioctl_pfs_delete(hammer2_inode_t
*ip
, void *data
);
58 static int hammer2_ioctl_inode_get(hammer2_inode_t
*ip
, void *data
);
59 static int hammer2_ioctl_inode_set(hammer2_inode_t
*ip
, void *data
);
60 static int hammer2_ioctl_debug_dump(hammer2_inode_t
*ip
);
61 //static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
62 //static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
63 //static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
64 static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t
*ip
, void *data
);
67 hammer2_ioctl(hammer2_inode_t
*ip
, u_long com
, void *data
, int fflag
,
73 * Standard root cred checks, will be selectively ignored below
74 * for ioctls that do not require root creds.
76 error
= priv_check_cred(cred
, PRIV_HAMMER_IOCTL
, 0);
79 case HAMMER2IOC_VERSION_GET
:
80 error
= hammer2_ioctl_version_get(ip
, data
);
82 case HAMMER2IOC_RECLUSTER
:
84 error
= hammer2_ioctl_recluster(ip
, data
);
86 case HAMMER2IOC_REMOTE_SCAN
:
88 error
= hammer2_ioctl_remote_scan(ip
, data
);
90 case HAMMER2IOC_REMOTE_ADD
:
92 error
= hammer2_ioctl_remote_add(ip
, data
);
94 case HAMMER2IOC_REMOTE_DEL
:
96 error
= hammer2_ioctl_remote_del(ip
, data
);
98 case HAMMER2IOC_REMOTE_REP
:
100 error
= hammer2_ioctl_remote_rep(ip
, data
);
102 case HAMMER2IOC_SOCKET_GET
:
104 error
= hammer2_ioctl_socket_get(ip
, data
);
106 case HAMMER2IOC_SOCKET_SET
:
108 error
= hammer2_ioctl_socket_set(ip
, data
);
110 case HAMMER2IOC_PFS_GET
:
112 error
= hammer2_ioctl_pfs_get(ip
, data
);
114 case HAMMER2IOC_PFS_LOOKUP
:
116 error
= hammer2_ioctl_pfs_lookup(ip
, data
);
118 case HAMMER2IOC_PFS_CREATE
:
120 error
= hammer2_ioctl_pfs_create(ip
, data
);
122 case HAMMER2IOC_PFS_DELETE
:
124 error
= hammer2_ioctl_pfs_delete(ip
, data
);
126 case HAMMER2IOC_PFS_SNAPSHOT
:
128 error
= hammer2_ioctl_pfs_snapshot(ip
, data
);
130 case HAMMER2IOC_INODE_GET
:
131 error
= hammer2_ioctl_inode_get(ip
, data
);
133 case HAMMER2IOC_INODE_SET
:
135 error
= hammer2_ioctl_inode_set(ip
, data
);
137 case HAMMER2IOC_BULKFREE_SCAN
:
138 error
= hammer2_ioctl_bulkfree_scan(ip
, data
);
140 case HAMMER2IOC_BULKFREE_ASYNC
:
141 error
= hammer2_ioctl_bulkfree_scan(ip
, NULL
);
143 /*case HAMMER2IOC_INODE_COMP_SET:
144 error = hammer2_ioctl_inode_comp_set(ip, data);
146 case HAMMER2IOC_INODE_COMP_REC_SET:
147 error = hammer2_ioctl_inode_comp_rec_set(ip, data);
149 case HAMMER2IOC_INODE_COMP_REC_SET2:
150 error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
152 case HAMMER2IOC_DEBUG_DUMP
:
153 error
= hammer2_ioctl_debug_dump(ip
);
163 * Retrieve version and basic info
166 hammer2_ioctl_version_get(hammer2_inode_t
*ip
, void *data
)
168 hammer2_ioc_version_t
*version
= data
;
171 hmp
= ip
->pmp
->pfs_hmps
[0];
173 version
->version
= hmp
->voldata
.version
;
175 version
->version
= -1;
180 hammer2_ioctl_recluster(hammer2_inode_t
*ip
, void *data
)
182 hammer2_ioc_recluster_t
*recl
= data
;
183 struct vnode
*vproot
;
185 hammer2_cluster_t
*cluster
;
188 fp
= holdfp(curproc
->p_fd
, recl
->fd
, -1);
190 error
= VFS_ROOT(ip
->pmp
->mp
, &vproot
);
192 cluster
= &ip
->pmp
->iroot
->cluster
;
193 kprintf("reconnect to cluster: nc=%d focus=%p\n",
194 cluster
->nchains
, cluster
->focus
);
195 if (cluster
->nchains
!= 1 || cluster
->focus
== NULL
) {
196 kprintf("not a local device mount\n");
199 hammer2_cluster_reconnect(cluster
->focus
->hmp
,
213 * Retrieve information about a remote
216 hammer2_ioctl_remote_scan(hammer2_inode_t
*ip
, void *data
)
219 hammer2_ioc_remote_t
*remote
= data
;
220 int copyid
= remote
->copyid
;
222 hmp
= ip
->pmp
->pfs_hmps
[0];
226 if (copyid
< 0 || copyid
>= HAMMER2_COPYID_COUNT
)
229 hammer2_voldata_lock(hmp
);
230 remote
->copy1
= hmp
->voldata
.copyinfo
[copyid
];
231 hammer2_voldata_unlock(hmp
);
234 * Adjust nextid (GET only)
236 while (++copyid
< HAMMER2_COPYID_COUNT
&&
237 hmp
->voldata
.copyinfo
[copyid
].copyid
== 0) {
240 if (copyid
== HAMMER2_COPYID_COUNT
)
243 remote
->nextid
= copyid
;
249 * Add new remote entry
252 hammer2_ioctl_remote_add(hammer2_inode_t
*ip
, void *data
)
254 hammer2_ioc_remote_t
*remote
= data
;
255 hammer2_pfs_t
*pmp
= ip
->pmp
;
257 int copyid
= remote
->copyid
;
260 hmp
= pmp
->pfs_hmps
[0];
263 if (copyid
>= HAMMER2_COPYID_COUNT
)
266 hammer2_voldata_lock(hmp
);
268 for (copyid
= 1; copyid
< HAMMER2_COPYID_COUNT
; ++copyid
) {
269 if (hmp
->voldata
.copyinfo
[copyid
].copyid
== 0)
272 if (copyid
== HAMMER2_COPYID_COUNT
) {
277 hammer2_voldata_modify(hmp
);
278 remote
->copy1
.copyid
= copyid
;
279 hmp
->voldata
.copyinfo
[copyid
] = remote
->copy1
;
280 hammer2_volconf_update(hmp
, copyid
);
282 hammer2_voldata_unlock(hmp
);
287 * Delete existing remote entry
290 hammer2_ioctl_remote_del(hammer2_inode_t
*ip
, void *data
)
292 hammer2_ioc_remote_t
*remote
= data
;
293 hammer2_pfs_t
*pmp
= ip
->pmp
;
295 int copyid
= remote
->copyid
;
298 hmp
= pmp
->pfs_hmps
[0];
301 if (copyid
>= HAMMER2_COPYID_COUNT
)
303 remote
->copy1
.path
[sizeof(remote
->copy1
.path
) - 1] = 0;
304 hammer2_voldata_lock(hmp
);
306 for (copyid
= 1; copyid
< HAMMER2_COPYID_COUNT
; ++copyid
) {
307 if (hmp
->voldata
.copyinfo
[copyid
].copyid
== 0)
309 if (strcmp(remote
->copy1
.path
,
310 hmp
->voldata
.copyinfo
[copyid
].path
) == 0) {
314 if (copyid
== HAMMER2_COPYID_COUNT
) {
319 hammer2_voldata_modify(hmp
);
320 hmp
->voldata
.copyinfo
[copyid
].copyid
= 0;
321 hammer2_volconf_update(hmp
, copyid
);
323 hammer2_voldata_unlock(hmp
);
328 * Replace existing remote entry
331 hammer2_ioctl_remote_rep(hammer2_inode_t
*ip
, void *data
)
333 hammer2_ioc_remote_t
*remote
= data
;
335 int copyid
= remote
->copyid
;
337 hmp
= ip
->pmp
->pfs_hmps
[0];
340 if (copyid
< 0 || copyid
>= HAMMER2_COPYID_COUNT
)
343 hammer2_voldata_lock(hmp
);
344 hammer2_voldata_modify(hmp
);
345 /*hammer2_volconf_update(hmp, copyid);*/
346 hammer2_voldata_unlock(hmp
);
352 * Retrieve communications socket
355 hammer2_ioctl_socket_get(hammer2_inode_t
*ip
, void *data
)
361 * Set communications socket for connection
364 hammer2_ioctl_socket_set(hammer2_inode_t
*ip
, void *data
)
366 hammer2_ioc_remote_t
*remote
= data
;
368 int copyid
= remote
->copyid
;
370 hmp
= ip
->pmp
->pfs_hmps
[0];
373 if (copyid
< 0 || copyid
>= HAMMER2_COPYID_COUNT
)
376 hammer2_voldata_lock(hmp
);
377 hammer2_voldata_unlock(hmp
);
383 * Used to scan and retrieve PFS information. PFS's are directories under
386 * To scan PFSs pass name_key=0. The function will scan for the next
387 * PFS and set all fields, as well as set name_next to the next key.
388 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
390 * To retrieve a particular PFS by key, specify the key but note that
391 * the ioctl will return the lowest key >= specified_key, so the caller
392 * must verify the key.
394 * To retrieve the PFS associated with the file descriptor, pass
395 * name_key set to (hammer2_key_t)-1.
398 hammer2_ioctl_pfs_get(hammer2_inode_t
*ip
, void *data
)
400 const hammer2_inode_data_t
*ripdata
;
402 hammer2_ioc_pfs_t
*pfs
;
403 hammer2_chain_t
*parent
;
404 hammer2_chain_t
*chain
;
405 hammer2_key_t key_next
;
406 hammer2_key_t save_key
;
407 int cache_index
= -1;
410 hmp
= ip
->pmp
->pfs_hmps
[0];
415 save_key
= pfs
->name_key
;
421 if (save_key
== (hammer2_key_t
)-1) {
422 hammer2_inode_lock(ip
->pmp
->iroot
, 0);
424 chain
= hammer2_inode_chain(ip
->pmp
->iroot
, 0,
425 HAMMER2_RESOLVE_ALWAYS
|
426 HAMMER2_RESOLVE_SHARED
);
428 hammer2_inode_lock(hmp
->spmp
->iroot
, 0);
429 parent
= hammer2_inode_chain(hmp
->spmp
->iroot
, 0,
430 HAMMER2_RESOLVE_ALWAYS
|
431 HAMMER2_RESOLVE_SHARED
);
432 chain
= hammer2_chain_lookup(&parent
, &key_next
,
433 pfs
->name_key
, HAMMER2_KEY_MAX
,
435 HAMMER2_LOOKUP_SHARED
);
442 if (chain
->bref
.type
== HAMMER2_BREF_TYPE_INODE
)
444 if (parent
== NULL
) {
445 hammer2_chain_unlock(chain
);
446 hammer2_chain_drop(chain
);
450 chain
= hammer2_chain_next(&parent
, chain
, &key_next
,
451 key_next
, HAMMER2_KEY_MAX
,
453 HAMMER2_LOOKUP_SHARED
);
457 * Load the data being returned by the ioctl.
460 ripdata
= &chain
->data
->ipdata
;
461 pfs
->name_key
= ripdata
->meta
.name_key
;
462 pfs
->pfs_type
= ripdata
->meta
.pfs_type
;
463 pfs
->pfs_subtype
= ripdata
->meta
.pfs_subtype
;
464 pfs
->pfs_clid
= ripdata
->meta
.pfs_clid
;
465 pfs
->pfs_fsid
= ripdata
->meta
.pfs_fsid
;
466 KKASSERT(ripdata
->meta
.name_len
< sizeof(pfs
->name
));
467 bcopy(ripdata
->filename
, pfs
->name
, ripdata
->meta
.name_len
);
468 pfs
->name
[ripdata
->meta
.name_len
] = 0;
469 ripdata
= NULL
; /* safety */
472 * Calculate name_next, if any.
474 if (parent
== NULL
) {
475 pfs
->name_next
= (hammer2_key_t
)-1;
477 chain
= hammer2_chain_next(&parent
, chain
, &key_next
,
478 key_next
, HAMMER2_KEY_MAX
,
480 HAMMER2_LOOKUP_SHARED
);
482 pfs
->name_next
= chain
->bref
.key
;
484 pfs
->name_next
= (hammer2_key_t
)-1;
487 pfs
->name_next
= (hammer2_key_t
)-1;
495 hammer2_chain_unlock(chain
);
496 hammer2_chain_drop(chain
);
499 hammer2_chain_unlock(parent
);
500 hammer2_chain_drop(parent
);
502 if (save_key
== (hammer2_key_t
)-1) {
503 hammer2_inode_unlock(ip
->pmp
->iroot
);
505 hammer2_inode_unlock(hmp
->spmp
->iroot
);
512 * Find a specific PFS by name
515 hammer2_ioctl_pfs_lookup(hammer2_inode_t
*ip
, void *data
)
517 const hammer2_inode_data_t
*ripdata
;
519 hammer2_ioc_pfs_t
*pfs
;
520 hammer2_chain_t
*parent
;
521 hammer2_chain_t
*chain
;
522 hammer2_key_t key_next
;
524 int cache_index
= -1;
528 hmp
= ip
->pmp
->pfs_hmps
[0];
535 hammer2_inode_lock(hmp
->spmp
->iroot
, HAMMER2_RESOLVE_SHARED
);
536 parent
= hammer2_inode_chain(hmp
->spmp
->iroot
, 0,
537 HAMMER2_RESOLVE_ALWAYS
|
538 HAMMER2_RESOLVE_SHARED
);
540 pfs
->name
[sizeof(pfs
->name
) - 1] = 0;
541 len
= strlen(pfs
->name
);
542 lhc
= hammer2_dirhash(pfs
->name
, len
);
544 chain
= hammer2_chain_lookup(&parent
, &key_next
,
545 lhc
, lhc
+ HAMMER2_DIRHASH_LOMASK
,
547 HAMMER2_LOOKUP_SHARED
);
549 if (hammer2_chain_dirent_test(chain
, pfs
->name
, len
))
551 chain
= hammer2_chain_next(&parent
, chain
, &key_next
,
553 lhc
+ HAMMER2_DIRHASH_LOMASK
,
555 HAMMER2_LOOKUP_SHARED
);
559 * Load the data being returned by the ioctl.
562 KKASSERT(chain
->bref
.type
== HAMMER2_BREF_TYPE_INODE
);
563 ripdata
= &chain
->data
->ipdata
;
564 pfs
->name_key
= ripdata
->meta
.name_key
;
565 pfs
->pfs_type
= ripdata
->meta
.pfs_type
;
566 pfs
->pfs_subtype
= ripdata
->meta
.pfs_subtype
;
567 pfs
->pfs_clid
= ripdata
->meta
.pfs_clid
;
568 pfs
->pfs_fsid
= ripdata
->meta
.pfs_fsid
;
571 hammer2_chain_unlock(chain
);
572 hammer2_chain_drop(chain
);
577 hammer2_chain_unlock(parent
);
578 hammer2_chain_drop(parent
);
580 hammer2_inode_unlock(hmp
->spmp
->iroot
);
586 * Create a new PFS under the super-root
589 hammer2_ioctl_pfs_create(hammer2_inode_t
*ip
, void *data
)
591 hammer2_inode_data_t
*nipdata
;
592 hammer2_chain_t
*nchain
;
594 hammer2_dev_t
*force_local
;
595 hammer2_ioc_pfs_t
*pfs
;
596 hammer2_inode_t
*nip
;
600 hmp
= ip
->pmp
->pfs_hmps
[0]; /* XXX */
607 if (pfs
->name
[0] == 0)
609 pfs
->name
[sizeof(pfs
->name
) - 1] = 0; /* ensure 0-termination */
611 if (hammer2_ioctl_pfs_lookup(ip
, pfs
) == 0)
614 hammer2_trans_init(hmp
->spmp
, 0);
615 mtid
= hammer2_trans_sub(hmp
->spmp
);
616 nip
= hammer2_inode_create(hmp
->spmp
->iroot
, hmp
->spmp
->iroot
,
618 pfs
->name
, strlen(pfs
->name
), 0,
619 1, HAMMER2_OBJTYPE_DIRECTORY
, 0,
620 HAMMER2_INSERT_PFSROOT
, &error
);
622 hammer2_inode_modify(nip
);
623 nchain
= hammer2_inode_chain(nip
, 0, HAMMER2_RESOLVE_ALWAYS
);
624 hammer2_chain_modify(nchain
, mtid
, 0, 0);
625 nipdata
= &nchain
->data
->ipdata
;
627 nip
->meta
.pfs_type
= pfs
->pfs_type
;
628 nip
->meta
.pfs_subtype
= pfs
->pfs_subtype
;
629 nip
->meta
.pfs_clid
= pfs
->pfs_clid
;
630 nip
->meta
.pfs_fsid
= pfs
->pfs_fsid
;
631 nip
->meta
.op_flags
|= HAMMER2_OPFLAG_PFSROOT
;
634 * Set default compression and check algorithm. This
635 * can be changed later.
637 * Do not allow compression on PFS's with the special name
638 * "boot", the boot loader can't decompress (yet).
640 nip
->meta
.comp_algo
=
641 HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT
);
642 nip
->meta
.check_algo
=
643 HAMMER2_ENC_ALGO( HAMMER2_CHECK_XXHASH64
);
645 if (strcasecmp(pfs
->name
, "boot") == 0) {
646 nip
->meta
.comp_algo
=
647 HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO
);
651 * Super-root isn't mounted, fsync it
653 hammer2_chain_unlock(nchain
);
654 hammer2_inode_ref(nip
);
655 hammer2_inode_unlock(nip
);
656 hammer2_inode_chain_sync(nip
);
657 hammer2_inode_drop(nip
);
660 * We still have a ref on the chain, relock and associate
661 * with an appropriate PFS.
663 force_local
= (hmp
->hflags
& HMNT2_LOCAL
) ? hmp
: NULL
;
665 hammer2_chain_lock(nchain
, HAMMER2_RESOLVE_ALWAYS
);
666 nipdata
= &nchain
->data
->ipdata
;
667 kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata
->filename
);
668 hammer2_pfsalloc(nchain
, nipdata
,
669 nchain
->bref
.modify_tid
, force_local
);
671 hammer2_chain_unlock(nchain
);
672 hammer2_chain_drop(nchain
);
675 hammer2_trans_done(hmp
->spmp
);
681 * Destroy an existing PFS under the super-root
684 hammer2_ioctl_pfs_delete(hammer2_inode_t
*ip
, void *data
)
686 hammer2_ioc_pfs_t
*pfs
= data
;
690 hammer2_xop_unlink_t
*xop
;
691 hammer2_inode_t
*dip
;
692 hammer2_inode_t
*iroot
;
697 * The PFS should be probed, so we should be able to
698 * locate it. We only delete the PFS from the
699 * specific H2 block device (hmp), not all of
700 * them. We must remove the PFS from the cluster
701 * before we can destroy it.
703 hmp
= ip
->pmp
->pfs_hmps
[0];
707 pfs
->name
[sizeof(pfs
->name
) - 1] = 0; /* ensure termination */
709 lockmgr(&hammer2_mntlk
, LK_EXCLUSIVE
);
711 TAILQ_FOREACH(pmp
, &hammer2_pfslist
, mntentry
) {
712 for (i
= 0; i
< HAMMER2_MAXCLUSTER
; ++i
) {
713 if (pmp
->pfs_hmps
[i
] != hmp
)
715 if (pmp
->pfs_names
[i
] &&
716 strcmp(pmp
->pfs_names
[i
], pfs
->name
) == 0) {
720 if (i
!= HAMMER2_MAXCLUSTER
)
725 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
730 * Ok, we found the pmp and we have the index. Permanently remove
731 * the PFS from the cluster
734 kprintf("FOUND PFS %s CLINDEX %d\n", pfs
->name
, i
);
735 hammer2_pfsdealloc(pmp
, i
, 1);
737 lockmgr(&hammer2_mntlk
, LK_RELEASE
);
740 * Now destroy the PFS under its device using the per-device
745 hammer2_trans_init(spmp
, 0);
746 hammer2_inode_lock(dip
, 0);
748 xop
= hammer2_xop_alloc(dip
, HAMMER2_XOP_MODIFYING
);
749 hammer2_xop_setname(&xop
->head
, pfs
->name
, strlen(pfs
->name
));
751 xop
->dopermanent
= 2 | 1; /* FORCE | PERMANENT */
752 hammer2_xop_start(&xop
->head
, hammer2_xop_unlink
);
754 error
= hammer2_xop_collect(&xop
->head
, 0);
756 hammer2_inode_unlock(dip
);
760 ip
= hammer2_inode_get(dip
->pmp
, dip
, &xop
->head
.cluster
, -1);
761 hammer2_xop_retire(&xop
->head
, HAMMER2_XOPMASK_VOP
);
763 hammer2_inode_unlink_finisher(ip
, 0);
764 hammer2_inode_unlock(ip
);
767 hammer2_xop_retire(&xop
->head
, HAMMER2_XOPMASK_VOP
);
770 hammer2_xop_retire(&xop
->head
, HAMMER2_XOPMASK_VOP
);
772 hammer2_trans_done(spmp
);
778 hammer2_ioctl_pfs_snapshot(hammer2_inode_t
*ip
, void *data
)
780 hammer2_ioc_pfs_t
*pfs
= data
;
783 hammer2_chain_t
*chain
;
787 if (pfs
->name
[0] == 0)
789 if (pfs
->name
[sizeof(pfs
->name
)-1] != 0)
795 hmp
= pmp
->pfs_hmps
[0];
799 lockmgr(&hmp
->bulklk
, LK_EXCLUSIVE
);
801 hammer2_vfs_sync(pmp
->mp
, MNT_WAIT
);
803 hammer2_trans_init(pmp
, HAMMER2_TRANS_ISFLUSH
);
804 mtid
= hammer2_trans_sub(pmp
);
805 hammer2_inode_lock(ip
, 0);
806 hammer2_inode_modify(ip
);
807 ip
->meta
.pfs_lsnap_tid
= mtid
;
809 /* XXX cluster it! */
810 chain
= hammer2_inode_chain(ip
, 0, HAMMER2_RESOLVE_ALWAYS
);
811 error
= hammer2_chain_snapshot(chain
, pfs
, mtid
);
812 hammer2_chain_unlock(chain
);
813 hammer2_chain_drop(chain
);
815 hammer2_inode_unlock(ip
);
816 hammer2_trans_done(pmp
);
818 lockmgr(&hmp
->bulklk
, LK_RELEASE
);
824 * Retrieve the raw inode structure, non-inclusive of node-specific data.
827 hammer2_ioctl_inode_get(hammer2_inode_t
*ip
, void *data
)
829 hammer2_ioc_inode_t
*ino
;
830 hammer2_chain_t
*chain
;
837 hammer2_inode_lock(ip
, HAMMER2_RESOLVE_SHARED
);
839 ino
->inode_count
= 0;
840 for (i
= 0; i
< ip
->cluster
.nchains
; ++i
) {
841 if ((chain
= ip
->cluster
.array
[i
].chain
) != NULL
) {
842 if (ino
->data_count
<
843 chain
->bref
.embed
.stats
.data_count
) {
845 chain
->bref
.embed
.stats
.data_count
;
847 if (ino
->inode_count
<
848 chain
->bref
.embed
.stats
.inode_count
) {
850 chain
->bref
.embed
.stats
.inode_count
;
854 bzero(&ino
->ip_data
, sizeof(ino
->ip_data
));
855 ino
->ip_data
.meta
= ip
->meta
;
857 hammer2_inode_unlock(ip
);
863 * Set various parameters in an inode which cannot be set through
864 * normal filesystem VNOPS.
867 hammer2_ioctl_inode_set(hammer2_inode_t
*ip
, void *data
)
869 hammer2_ioc_inode_t
*ino
= data
;
872 hammer2_trans_init(ip
->pmp
, 0);
873 hammer2_inode_lock(ip
, 0);
875 if ((ino
->flags
& HAMMER2IOC_INODE_FLAG_CHECK
) &&
876 ip
->meta
.check_algo
!= ino
->ip_data
.meta
.check_algo
) {
877 hammer2_inode_modify(ip
);
878 ip
->meta
.check_algo
= ino
->ip_data
.meta
.check_algo
;
880 if ((ino
->flags
& HAMMER2IOC_INODE_FLAG_COMP
) &&
881 ip
->meta
.comp_algo
!= ino
->ip_data
.meta
.comp_algo
) {
882 hammer2_inode_modify(ip
);
883 ip
->meta
.comp_algo
= ino
->ip_data
.meta
.comp_algo
;
887 /* Ignore these flags for now...*/
888 if ((ino
->flags
& HAMMER2IOC_INODE_FLAG_IQUOTA
) &&
889 ip
->meta
.inode_quota
!= ino
->ip_data
.meta
.inode_quota
) {
890 hammer2_inode_modify(ip
);
891 ip
->meta
.inode_quota
= ino
->ip_data
.meta
.inode_quota
;
893 if ((ino
->flags
& HAMMER2IOC_INODE_FLAG_DQUOTA
) &&
894 ip
->meta
.data_quota
!= ino
->ip_data
.meta
.data_quota
) {
895 hammer2_inode_modify(ip
);
896 ip
->meta
.data_quota
= ino
->ip_data
.meta
.data_quota
;
898 if ((ino
->flags
& HAMMER2IOC_INODE_FLAG_COPIES
) &&
899 ip
->meta
.ncopies
!= ino
->ip_data
.meta
.ncopies
) {
900 hammer2_inode_modify(ip
);
901 ip
->meta
.ncopies
= ino
->ip_data
.meta
.ncopies
;
903 hammer2_inode_unlock(ip
);
904 hammer2_trans_done(ip
->pmp
);
911 hammer2_ioctl_debug_dump(hammer2_inode_t
*ip
)
913 hammer2_chain_t
*chain
;
917 for (i
= 0; i
< ip
->cluster
.nchains
; ++i
) {
918 chain
= ip
->cluster
.array
[i
].chain
;
921 hammer2_dump_chain(chain
, 0, &count
, 'i');
927 * Executes one flush/free pass per call. If trying to recover
928 * data we just freed up a moment ago it can take up to six passes
929 * to fully free the blocks. Note that passes occur automatically based
930 * on free space as the storage fills up, but manual passes may be needed
931 * if storage becomes almost completely full.
935 hammer2_ioctl_bulkfree_scan(hammer2_inode_t
*ip
, void *data
)
937 hammer2_ioc_bulkfree_t
*bfi
= data
;
941 hmp
= ip
->pmp
->pfs_hmps
[0];
946 * Negotiate for manual access. The hammer2_bulkfree_pass() itself
947 * also has its own lock and will deal with a manual override when
948 * an automatic bulkfree is already running.
950 error
= lockmgr(&hmp
->bflock
, LK_EXCLUSIVE
| LK_PCATCH
);
954 hammer2_thr_freeze(&hmp
->bfthr
);
955 error
= hammer2_bulkfree_pass(hmp
, bfi
);
956 hammer2_thr_unfreeze(&hmp
->bfthr
);
958 hammer2_thr_remaster(&hmp
->bfthr
);
960 lockmgr(&hmp
->bflock
, LK_RELEASE
);