hammer2 - Add directive to destroy bad directory entries
[dragonfly.git] / sys / vfs / hammer2 / hammer2_ioctl.c
blob 5e077829fc9bff5399b0a66cd313b1ac8e7f7216
/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ioctl Functions.
 *
 * WARNING! The ioctl functions which manipulate the connection state need
 *          to be able to run without deadlock on the volume's chain lock.
 *          Most of these functions use a separate lock.
 */

#include "hammer2.h"

static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip);
//static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data);

int
hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
              struct ucred *cred)
{
        int error;

        /*
         * Standard root cred checks, will be selectively ignored below
         * for ioctls that do not require root creds.
         */
        error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

        switch(com) {
        case HAMMER2IOC_VERSION_GET:
                error = hammer2_ioctl_version_get(ip, data);
                break;
        case HAMMER2IOC_RECLUSTER:
                if (error == 0)
                        error = hammer2_ioctl_recluster(ip, data);
                break;
        case HAMMER2IOC_REMOTE_SCAN:
                if (error == 0)
                        error = hammer2_ioctl_remote_scan(ip, data);
                break;
        case HAMMER2IOC_REMOTE_ADD:
                if (error == 0)
                        error = hammer2_ioctl_remote_add(ip, data);
                break;
        case HAMMER2IOC_REMOTE_DEL:
                if (error == 0)
                        error = hammer2_ioctl_remote_del(ip, data);
                break;
        case HAMMER2IOC_REMOTE_REP:
                if (error == 0)
                        error = hammer2_ioctl_remote_rep(ip, data);
                break;
        case HAMMER2IOC_SOCKET_GET:
                if (error == 0)
                        error = hammer2_ioctl_socket_get(ip, data);
                break;
        case HAMMER2IOC_SOCKET_SET:
                if (error == 0)
                        error = hammer2_ioctl_socket_set(ip, data);
                break;
        case HAMMER2IOC_PFS_GET:
                if (error == 0)
                        error = hammer2_ioctl_pfs_get(ip, data);
                break;
        case HAMMER2IOC_PFS_LOOKUP:
                if (error == 0)
                        error = hammer2_ioctl_pfs_lookup(ip, data);
                break;
        case HAMMER2IOC_PFS_CREATE:
                if (error == 0)
                        error = hammer2_ioctl_pfs_create(ip, data);
                break;
        case HAMMER2IOC_PFS_DELETE:
                if (error == 0)
                        error = hammer2_ioctl_pfs_delete(ip, data);
                break;
        case HAMMER2IOC_PFS_SNAPSHOT:
                if (error == 0)
                        error = hammer2_ioctl_pfs_snapshot(ip, data);
                break;
        case HAMMER2IOC_INODE_GET:
                error = hammer2_ioctl_inode_get(ip, data);
                break;
        case HAMMER2IOC_INODE_SET:
                if (error == 0)
                        error = hammer2_ioctl_inode_set(ip, data);
                break;
        case HAMMER2IOC_BULKFREE_SCAN:
                error = hammer2_ioctl_bulkfree_scan(ip, data);
                break;
        case HAMMER2IOC_BULKFREE_ASYNC:
                error = hammer2_ioctl_bulkfree_scan(ip, NULL);
                break;
        /*case HAMMER2IOC_INODE_COMP_SET:
                error = hammer2_ioctl_inode_comp_set(ip, data);
                break;
        case HAMMER2IOC_INODE_COMP_REC_SET:
                error = hammer2_ioctl_inode_comp_rec_set(ip, data);
                break;
        case HAMMER2IOC_INODE_COMP_REC_SET2:
                error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
                break;*/
        case HAMMER2IOC_DESTROY:
                if (error == 0)
                        error = hammer2_ioctl_destroy(ip, data);
                break;
        case HAMMER2IOC_DEBUG_DUMP:
                error = hammer2_ioctl_debug_dump(ip);
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return (error);
}
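
/*
 * Illustrative userland sketch (not part of this file): these ioctls are
 * normally issued by hammer2(8) against a file descriptor inside the target
 * PFS.  A minimal caller, assuming the installed header
 * <vfs/hammer2/hammer2_ioctl.h> provides the request codes and structures,
 * might look like:
 *
 *      #include <sys/ioctl.h>
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <vfs/hammer2/hammer2_ioctl.h>
 *
 *      int
 *      main(void)
 *      {
 *              hammer2_ioc_version_t ver = { 0 };
 *              int fd = open("/mnt", O_RDONLY);   // any path in the PFS
 *
 *              if (fd < 0 || ioctl(fd, HAMMER2IOC_VERSION_GET, &ver) < 0)
 *                      return (1);
 *              printf("hammer2 version %d\n", ver.version);
 *              return (0);
 *      }
 *
 * HAMMER2IOC_VERSION_GET is one of the few requests dispatched above without
 * requiring PRIV_HAMMER_IOCTL.
 */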

/*
 * Retrieve version and basic info
 */
static int
hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_version_t *version = data;
        hammer2_dev_t *hmp;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp)
                version->version = hmp->voldata.version;
        else
                version->version = -1;
        return 0;
}

static int
hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_recluster_t *recl = data;
        struct vnode *vproot;
        struct file *fp;
        hammer2_cluster_t *cluster;
        int error;

        fp = holdfp(curproc->p_fd, recl->fd, -1);
        if (fp) {
                error = VFS_ROOT(ip->pmp->mp, &vproot);
                if (error == 0) {
                        cluster = &ip->pmp->iroot->cluster;
                        kprintf("reconnect to cluster: nc=%d focus=%p\n",
                                cluster->nchains, cluster->focus);
                        if (cluster->nchains != 1 || cluster->focus == NULL) {
                                kprintf("not a local device mount\n");
                                error = EINVAL;
                        } else {
                                hammer2_cluster_reconnect(cluster->focus->hmp,
                                                          fp);
                                kprintf("ok\n");
                                error = 0;
                        }
                        vput(vproot);
                }
        } else {
                error = EINVAL;
        }
        return error;
}

/*
 * Retrieve information about a remote
 */
static int
hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
{
        hammer2_dev_t *hmp;
        hammer2_ioc_remote_t *remote = data;
        int copyid = remote->copyid;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        remote->copy1 = hmp->voldata.copyinfo[copyid];
        hammer2_voldata_unlock(hmp);

        /*
         * Adjust nextid (GET only)
         */
        while (++copyid < HAMMER2_COPYID_COUNT &&
               hmp->voldata.copyinfo[copyid].copyid == 0) {
                ;
        }
        if (copyid == HAMMER2_COPYID_COUNT)
                remote->nextid = -1;
        else
                remote->nextid = copyid;

        return(0);
}

/*
 * Add new remote entry
 */
static int
hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_pfs_t *pmp = ip->pmp;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;
        int error = 0;

        hmp = pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        if (copyid < 0) {
                for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
                        if (hmp->voldata.copyinfo[copyid].copyid == 0)
                                break;
                }
                if (copyid == HAMMER2_COPYID_COUNT) {
                        error = ENOSPC;
                        goto failed;
                }
        }
        hammer2_voldata_modify(hmp);
        remote->copy1.copyid = copyid;
        hmp->voldata.copyinfo[copyid] = remote->copy1;
        hammer2_volconf_update(hmp, copyid);
failed:
        hammer2_voldata_unlock(hmp);
        return (error);
}

/*
 * Delete existing remote entry
 */
static int
hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_pfs_t *pmp = ip->pmp;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;
        int error = 0;

        hmp = pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);
        remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
        hammer2_voldata_lock(hmp);
        if (copyid < 0) {
                for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
                        if (hmp->voldata.copyinfo[copyid].copyid == 0)
                                continue;
                        if (strcmp(remote->copy1.path,
                            hmp->voldata.copyinfo[copyid].path) == 0) {
                                break;
                        }
                }
                if (copyid == HAMMER2_COPYID_COUNT) {
                        error = ENOENT;
                        goto failed;
                }
        }
        hammer2_voldata_modify(hmp);
        hmp->voldata.copyinfo[copyid].copyid = 0;
        hammer2_volconf_update(hmp, copyid);
failed:
        hammer2_voldata_unlock(hmp);
        return (error);
}

/*
 * Replace existing remote entry
 */
static int
hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        hammer2_voldata_modify(hmp);
        /*hammer2_volconf_update(hmp, copyid);*/
        hammer2_voldata_unlock(hmp);

        return(0);
}

/*
 * Retrieve communications socket
 */
static int
hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
{
        return (EOPNOTSUPP);
}

/*
 * Set communications socket for connection
 */
static int
hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        hammer2_voldata_unlock(hmp);

        return(0);
}

/*
 * Used to scan and retrieve PFS information.  PFS's are directories under
 * the super-root.
 *
 * To scan PFSs pass name_key=0.  The function will scan for the next
 * PFS and set all fields, as well as set name_next to the next key.
 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
 *
 * To retrieve a particular PFS by key, specify the key but note that
 * the ioctl will return the lowest key >= specified_key, so the caller
 * must verify the key.
 *
 * To retrieve the PFS associated with the file descriptor, pass
 * name_key set to (hammer2_key_t)-1.
 */
static int
hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
{
        const hammer2_inode_data_t *ripdata;
        hammer2_dev_t *hmp;
        hammer2_ioc_pfs_t *pfs;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        hammer2_key_t save_key;
        int error;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        pfs = data;
        save_key = pfs->name_key;
        error = 0;

        /*
         * Setup
         */
        if (save_key == (hammer2_key_t)-1) {
                hammer2_inode_lock(ip->pmp->iroot, 0);
                parent = NULL;
                chain = hammer2_inode_chain(ip->pmp->iroot, 0,
                                            HAMMER2_RESOLVE_ALWAYS |
                                            HAMMER2_RESOLVE_SHARED);
        } else {
                hammer2_inode_lock(hmp->spmp->iroot, 0);
                parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
                                            HAMMER2_RESOLVE_ALWAYS |
                                            HAMMER2_RESOLVE_SHARED);
                chain = hammer2_chain_lookup(&parent, &key_next,
                                            pfs->name_key, HAMMER2_KEY_MAX,
                                            &error,
                                            HAMMER2_LOOKUP_SHARED);
        }

        /*
         * Locate next PFS
         */
        while (chain) {
                if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
                        break;
                if (parent == NULL) {
                        hammer2_chain_unlock(chain);
                        hammer2_chain_drop(chain);
                        chain = NULL;
                        break;
                }
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                            key_next, HAMMER2_KEY_MAX,
                                            &error,
                                            HAMMER2_LOOKUP_SHARED);
        }
        error = hammer2_error_to_errno(error);

        /*
         * Load the data being returned by the ioctl.
         */
        if (chain && chain->error == 0) {
                ripdata = &chain->data->ipdata;
                pfs->name_key = ripdata->meta.name_key;
                pfs->pfs_type = ripdata->meta.pfs_type;
                pfs->pfs_subtype = ripdata->meta.pfs_subtype;
                pfs->pfs_clid = ripdata->meta.pfs_clid;
                pfs->pfs_fsid = ripdata->meta.pfs_fsid;
                KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
                bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
                pfs->name[ripdata->meta.name_len] = 0;
                ripdata = NULL; /* safety */

                /*
                 * Calculate name_next, if any.  We are only accessing
                 * chain->bref so we can ignore chain->error (if the key
                 * is used later it will error then).
                 */
                if (parent == NULL) {
                        pfs->name_next = (hammer2_key_t)-1;
                } else {
                        chain = hammer2_chain_next(&parent, chain, &key_next,
                                                    key_next, HAMMER2_KEY_MAX,
                                                    &error,
                                                    HAMMER2_LOOKUP_SHARED);
                        if (chain)
                                pfs->name_next = chain->bref.key;
                        else
                                pfs->name_next = (hammer2_key_t)-1;
                }
        } else {
                pfs->name_next = (hammer2_key_t)-1;
                error = ENOENT;
        }

        /*
         * Cleanup
         */
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (save_key == (hammer2_key_t)-1) {
                hammer2_inode_unlock(ip->pmp->iroot);
        } else {
                hammer2_inode_unlock(hmp->spmp->iroot);
        }

        return (error);
}
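
/*
 * Illustrative userland sketch (not part of this file): the scan protocol
 * described above can be driven from a privileged program roughly as
 * follows, assuming <vfs/hammer2/hammer2_ioctl.h> is available:
 *
 *      hammer2_ioc_pfs_t pfs;
 *
 *      bzero(&pfs, sizeof(pfs));
 *      pfs.name_key = 0;               // start of scan
 *      while (ioctl(fd, HAMMER2IOC_PFS_GET, &pfs) == 0) {
 *              printf("%s\n", pfs.name);
 *              if (pfs.name_next == (hammer2_key_t)-1)
 *                      break;
 *              pfs.name_key = pfs.name_next;
 *      }
 *
 * fd may reference any file in a PFS mounted from the target device; the
 * request requires PRIV_HAMMER_IOCTL (root).
 */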

/*
 * Find a specific PFS by name
 */
static int
hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
{
        const hammer2_inode_data_t *ripdata;
        hammer2_dev_t *hmp;
        hammer2_ioc_pfs_t *pfs;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        hammer2_key_t lhc;
        int error;
        size_t len;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        pfs = data;
        error = 0;

        hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
        parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
                                     HAMMER2_RESOLVE_ALWAYS |
                                     HAMMER2_RESOLVE_SHARED);

        pfs->name[sizeof(pfs->name) - 1] = 0;
        len = strlen(pfs->name);
        lhc = hammer2_dirhash(pfs->name, len);

        chain = hammer2_chain_lookup(&parent, &key_next,
                                     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
                                     &error, HAMMER2_LOOKUP_SHARED);
        while (chain) {
                if (hammer2_chain_dirent_test(chain, pfs->name, len))
                        break;
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next,
                                           lhc + HAMMER2_DIRHASH_LOMASK,
                                           &error, HAMMER2_LOOKUP_SHARED);
        }
        error = hammer2_error_to_errno(error);

        /*
         * Load the data being returned by the ioctl.
         */
        if (chain && chain->error == 0) {
                KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
                ripdata = &chain->data->ipdata;
                pfs->name_key = ripdata->meta.name_key;
                pfs->pfs_type = ripdata->meta.pfs_type;
                pfs->pfs_subtype = ripdata->meta.pfs_subtype;
                pfs->pfs_clid = ripdata->meta.pfs_clid;
                pfs->pfs_fsid = ripdata->meta.pfs_fsid;
                ripdata = NULL;

                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        } else if (error == 0) {
                error = ENOENT;
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_inode_unlock(hmp->spmp->iroot);

        return (error);
}

/*
 * Create a new PFS under the super-root
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
        hammer2_inode_data_t *nipdata;
        hammer2_chain_t *nchain;
        hammer2_dev_t *hmp;
        hammer2_dev_t *force_local;
        hammer2_ioc_pfs_t *pfs;
        hammer2_inode_t *nip;
        hammer2_tid_t mtid;
        int error;

        hmp = ip->pmp->pfs_hmps[0];     /* XXX */
        if (hmp == NULL)
                return (EINVAL);

        pfs = data;
        nip = NULL;

        if (pfs->name[0] == 0)
                return(EINVAL);
        pfs->name[sizeof(pfs->name) - 1] = 0;   /* ensure 0-termination */

        if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
                return(EEXIST);

        hammer2_trans_init(hmp->spmp, 0);
        mtid = hammer2_trans_sub(hmp->spmp);
        nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
                                   NULL, NULL,
                                   pfs->name, strlen(pfs->name), 0,
                                   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
                                   HAMMER2_INSERT_PFSROOT, &error);
        if (error == 0) {
                hammer2_inode_modify(nip);
                nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
                hammer2_chain_modify(nchain, mtid, 0, 0);
                nipdata = &nchain->data->ipdata;

                nip->meta.pfs_type = pfs->pfs_type;
                nip->meta.pfs_subtype = pfs->pfs_subtype;
                nip->meta.pfs_clid = pfs->pfs_clid;
                nip->meta.pfs_fsid = pfs->pfs_fsid;
                nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

                /*
                 * Set default compression and check algorithm.  This
                 * can be changed later.
                 *
                 * Do not allow compression on PFS's with the special name
                 * "boot", the boot loader can't decompress (yet).
                 */
                nip->meta.comp_algo =
                        HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
                nip->meta.check_algo =
                        HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);

                if (strcasecmp(pfs->name, "boot") == 0) {
                        nip->meta.comp_algo =
                                HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
                }

                /*
                 * Super-root isn't mounted, fsync it
                 */
                hammer2_chain_unlock(nchain);
                hammer2_inode_ref(nip);
                hammer2_inode_unlock(nip);
                hammer2_inode_chain_sync(nip);
                hammer2_inode_drop(nip);

                /*
                 * We still have a ref on the chain, relock and associate
                 * with an appropriate PFS.
                 */
                force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

                hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
                nipdata = &nchain->data->ipdata;
                kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
                hammer2_pfsalloc(nchain, nipdata,
                                 nchain->bref.modify_tid, force_local);

                hammer2_chain_unlock(nchain);
                hammer2_chain_drop(nchain);
        }
        hammer2_trans_done(hmp->spmp);

        return (error);
}
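
/*
 * Illustrative userland sketch (not part of this file): creating a PFS
 * through this ioctl amounts to filling in a hammer2_ioc_pfs_t and issuing
 * HAMMER2IOC_PFS_CREATE on a file descriptor within an existing mount.
 * uuid_create(3) and HAMMER2_PFSTYPE_MASTER come from libc and
 * hammer2_disk.h respectively; the rest mirrors the fields consumed above:
 *
 *      hammer2_ioc_pfs_t pfs;
 *      uint32_t status;
 *
 *      bzero(&pfs, sizeof(pfs));
 *      strlcpy(pfs.name, "NEWPFS", sizeof(pfs.name));
 *      pfs.pfs_type = HAMMER2_PFSTYPE_MASTER;
 *      uuid_create(&pfs.pfs_clid, &status);
 *      uuid_create(&pfs.pfs_fsid, &status);
 *      if (ioctl(fd, HAMMER2IOC_PFS_CREATE, &pfs) < 0)
 *              err(1, "HAMMER2IOC_PFS_CREATE");
 */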

/*
 * Destroy an existing PFS under the super-root
 */
static int
hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_pfs_t *pfs = data;
        hammer2_dev_t *hmp;
        hammer2_pfs_t *spmp;
        hammer2_pfs_t *pmp;
        hammer2_xop_unlink_t *xop;
        hammer2_inode_t *dip;
        hammer2_inode_t *iroot;
        int error;
        int i;

        /*
         * The PFS should be probed, so we should be able to
         * locate it.  We only delete the PFS from the
         * specific H2 block device (hmp), not all of
         * them.  We must remove the PFS from the cluster
         * before we can destroy it.
         */
        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        pfs->name[sizeof(pfs->name) - 1] = 0;   /* ensure termination */

        lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

        TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
                for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
                        if (pmp->pfs_hmps[i] != hmp)
                                continue;
                        if (pmp->pfs_names[i] &&
                            strcmp(pmp->pfs_names[i], pfs->name) == 0) {
                                break;
                        }
                }
                if (i != HAMMER2_MAXCLUSTER)
                        break;
        }

        if (pmp == NULL) {
                lockmgr(&hammer2_mntlk, LK_RELEASE);
                return ENOENT;
        }

        /*
         * Ok, we found the pmp and we have the index.  Permanently remove
         * the PFS from the cluster
         */
        iroot = pmp->iroot;
        kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
        hammer2_pfsdealloc(pmp, i, 1);

        lockmgr(&hammer2_mntlk, LK_RELEASE);

        /*
         * Now destroy the PFS under its device using the per-device
         * super-root.
         */
        spmp = hmp->spmp;
        dip = spmp->iroot;
        hammer2_trans_init(spmp, 0);
        hammer2_inode_lock(dip, 0);

        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
        xop->isdir = 2;
        xop->dopermanent = H2DOPERM_PERMANENT | H2DOPERM_FORCE;
        hammer2_xop_start(&xop->head, hammer2_xop_unlink);

        error = hammer2_xop_collect(&xop->head, 0);

        hammer2_inode_unlock(dip);

#if 0
        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
                        hammer2_inode_unlink_finisher(ip, 0);
                        hammer2_inode_unlock(ip);
                }
        } else {
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }
#endif
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        hammer2_trans_done(spmp);

        return (hammer2_error_to_errno(error));
}

static int
hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_pfs_t *pfs = data;
        hammer2_dev_t *hmp;
        hammer2_pfs_t *pmp;
        hammer2_chain_t *chain;
        hammer2_tid_t mtid;
        int error;

        if (pfs->name[0] == 0)
                return(EINVAL);
        if (pfs->name[sizeof(pfs->name)-1] != 0)
                return(EINVAL);

        pmp = ip->pmp;
        ip = pmp->iroot;

        hmp = pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        lockmgr(&hmp->bulklk, LK_EXCLUSIVE);

        hammer2_vfs_sync(pmp->mp, MNT_WAIT);

        hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
        mtid = hammer2_trans_sub(pmp);
        hammer2_inode_lock(ip, 0);
        hammer2_inode_modify(ip);
        ip->meta.pfs_lsnap_tid = mtid;

        /* XXX cluster it! */
        chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
        error = hammer2_chain_snapshot(chain, pfs, mtid);
        hammer2_chain_unlock(chain);
        hammer2_chain_drop(chain);

        hammer2_inode_unlock(ip);
        hammer2_trans_done(pmp);

        lockmgr(&hmp->bulklk, LK_RELEASE);

        return (hammer2_error_to_errno(error));
}
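
/*
 * Illustrative userland sketch (not part of this file): a snapshot request
 * only needs the snapshot label in pfs.name; the PFS being snapshotted is
 * implied by the file descriptor the ioctl is issued on (pmp->iroot above).
 * Assuming <vfs/hammer2/hammer2_ioctl.h>:
 *
 *      hammer2_ioc_pfs_t pfs;
 *
 *      bzero(&pfs, sizeof(pfs));
 *      strlcpy(pfs.name, "ROOT.20190101", sizeof(pfs.name));
 *      if (ioctl(fd, HAMMER2IOC_PFS_SNAPSHOT, &pfs) < 0)
 *              err(1, "HAMMER2IOC_PFS_SNAPSHOT");
 */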

/*
 * Retrieve the raw inode structure, non-inclusive of node-specific data.
 */
static int
hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_inode_t *ino;
        hammer2_chain_t *chain;
        int error;
        int i;

        ino = data;
        error = 0;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
        ino->data_count = 0;
        ino->inode_count = 0;
        for (i = 0; i < ip->cluster.nchains; ++i) {
                if ((chain = ip->cluster.array[i].chain) != NULL) {
                        if (ino->data_count <
                            chain->bref.embed.stats.data_count) {
                                ino->data_count =
                                        chain->bref.embed.stats.data_count;
                        }
                        if (ino->inode_count <
                            chain->bref.embed.stats.inode_count) {
                                ino->inode_count =
                                        chain->bref.embed.stats.inode_count;
                        }
                }
        }
        bzero(&ino->ip_data, sizeof(ino->ip_data));
        ino->ip_data.meta = ip->meta;
        ino->kdata = ip;
        hammer2_inode_unlock(ip);

        return hammer2_error_to_errno(error);
}

/*
 * Set various parameters in an inode which cannot be set through
 * normal filesystem VNOPS.
 */
static int
hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_inode_t *ino = data;
        int error = 0;

        hammer2_trans_init(ip->pmp, 0);
        hammer2_inode_lock(ip, 0);

        if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
            ip->meta.check_algo != ino->ip_data.meta.check_algo) {
                hammer2_inode_modify(ip);
                ip->meta.check_algo = ino->ip_data.meta.check_algo;
        }
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
            ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
                hammer2_inode_modify(ip);
                ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
        }
        ino->kdata = ip;

        /* Ignore these flags for now...*/
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
            ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
                hammer2_inode_modify(ip);
                ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
        }
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
            ip->meta.data_quota != ino->ip_data.meta.data_quota) {
                hammer2_inode_modify(ip);
                ip->meta.data_quota = ino->ip_data.meta.data_quota;
        }
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
            ip->meta.ncopies != ino->ip_data.meta.ncopies) {
                hammer2_inode_modify(ip);
                ip->meta.ncopies = ino->ip_data.meta.ncopies;
        }
        hammer2_inode_unlock(ip);
        hammer2_trans_done(ip->pmp);

        return (hammer2_error_to_errno(error));
}
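
/*
 * Illustrative userland sketch (not part of this file): the usual pattern
 * is a HAMMER2IOC_INODE_GET / modify / HAMMER2IOC_INODE_SET round trip,
 * with ino.flags selecting which fields the kernel should apply.  For
 * example, switching a file's check algorithm to xxhash64:
 *
 *      hammer2_ioc_inode_t ino;
 *
 *      bzero(&ino, sizeof(ino));
 *      if (ioctl(fd, HAMMER2IOC_INODE_GET, &ino) < 0)
 *              err(1, "HAMMER2IOC_INODE_GET");
 *      ino.flags = HAMMER2IOC_INODE_FLAG_CHECK;
 *      ino.ip_data.meta.check_algo =
 *              HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);
 *      if (ioctl(fd, HAMMER2IOC_INODE_SET, &ino) < 0)
 *              err(1, "HAMMER2IOC_INODE_SET");
 *
 * New data written to fd is then checked with xxhash64, while existing
 * blocks keep the check method recorded in their blockrefs until rewritten.
 */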

static
int
hammer2_ioctl_debug_dump(hammer2_inode_t *ip)
{
        hammer2_chain_t *chain;
        int count = 1000;
        int i;

        for (i = 0; i < ip->cluster.nchains; ++i) {
                chain = ip->cluster.array[i].chain;
                if (chain == NULL)
                        continue;
                hammer2_dump_chain(chain, 0, &count, 'i');
        }
        return 0;
}

/*
 * Executes one flush/free pass per call.  If trying to recover
 * data we just freed up a moment ago it can take up to six passes
 * to fully free the blocks.  Note that passes occur automatically based
 * on free space as the storage fills up, but manual passes may be needed
 * if storage becomes almost completely full.
 */
static
int
hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_bulkfree_t *bfi = data;
        hammer2_dev_t *hmp;
        hammer2_pfs_t *pmp;
        hammer2_chain_t *vchain;
        int error;
        int didsnap;

        pmp = ip->pmp;
        ip = pmp->iroot;

        hmp = pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (bfi == NULL)
                return (EINVAL);

        /*
         * Bulkfree has to be serialized to guarantee at least one sync
         * inbetween bulkfrees.
         */
        error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
        if (error)
                return error;

        /*
         * sync the filesystem and obtain a snapshot of the synchronized
         * hmp volume header.  We treat the snapshot as an independent
         * entity.
         *
         * If ENOSPC occurs we should continue, because bulkfree is the only
         * way to fix that.  The flush will have flushed everything it could
         * and not left any modified chains.  Otherwise an error is fatal.
         */
        error = hammer2_vfs_sync(pmp->mp, MNT_WAIT);
        if (error && error != ENOSPC)
                goto failed;

        /*
         * If we have an ENOSPC error we have to bulkfree on the live
         * topology.  Otherwise we can bulkfree on a snapshot.
         */
        if (error) {
                kprintf("hammer2: WARNING! Bulkfree forced to use live "
                        "topology\n");
                vchain = &hmp->vchain;
                hammer2_chain_ref(vchain);
                didsnap = 0;
        } else {
                vchain = hammer2_chain_bulksnap(hmp);
                didsnap = 1;
        }

        /*
         * Bulkfree on a snapshot does not need a transaction, which allows
         * it to run concurrently with any operation other than another
         * bulkfree.
         *
         * If we are running bulkfree on the live topology we have to be
         * in a FLUSH transaction.
         */
        if (didsnap == 0)
                hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);

        if (bfi) {
                hammer2_thr_freeze(&hmp->bfthr);
                error = hammer2_bulkfree_pass(hmp, vchain, bfi);
                hammer2_thr_unfreeze(&hmp->bfthr);
        }
        if (didsnap) {
                hammer2_chain_bulkdrop(vchain);
        } else {
                hammer2_chain_drop(vchain);
                hammer2_trans_done(pmp);
        }
        error = hammer2_error_to_errno(error);

failed:
        lockmgr(&hmp->bflock, LK_RELEASE);
        return error;
}

/*
 * Unconditionally delete meta-data in a hammer2 filesystem
 */
static
int
hammer2_ioctl_destroy(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_destroy_t *iocd = data;
        hammer2_pfs_t *pmp = ip->pmp;
        int error;

        if (pmp->ronly) {
                error = EROFS;
                return error;
        }

        switch(iocd->cmd) {
        case HAMMER2_DELETE_FILE:
                /*
                 * Destroy a bad directory entry by name.  Caller must
                 * pass the directory as fd.
                 */
                {
                hammer2_xop_unlink_t *xop;

                if (iocd->path[sizeof(iocd->path)-1]) {
                        error = EINVAL;
                        break;
                }
                if (ip->meta.type != HAMMER2_OBJTYPE_DIRECTORY) {
                        error = EINVAL;
                        break;
                }
                hammer2_pfs_memory_wait(pmp);
                hammer2_trans_init(pmp, 0);
                hammer2_inode_lock(ip, 0);

                xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
                hammer2_xop_setname(&xop->head, iocd->path, strlen(iocd->path));
                xop->isdir = -1;
                xop->dopermanent = H2DOPERM_PERMANENT |
                                   H2DOPERM_FORCE |
                                   H2DOPERM_IGNINO;
                hammer2_xop_start(&xop->head, hammer2_xop_unlink);

                error = hammer2_xop_collect(&xop->head, 0);
                error = hammer2_error_to_errno(error);
                hammer2_inode_unlock(ip);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                hammer2_trans_done(pmp);
                }
                break;
        case HAMMER2_DELETE_INUM:
                /*
                 * Destroy a bad inode by inode number.
                 */
                {
                hammer2_xop_lookup_t *xop;

                if (iocd->inum < 1) {
                        error = EINVAL;
                        break;
                }
                hammer2_pfs_memory_wait(pmp);
                hammer2_trans_init(pmp, 0);

                xop = hammer2_xop_alloc(pmp->iroot, 0);
                xop->lhc = iocd->inum;
                hammer2_xop_start(&xop->head, hammer2_xop_lookup);
                error = hammer2_xop_collect(&xop->head, 0);
                if (error == 0) {
                        ip = hammer2_inode_get(pmp, NULL,
                                               &xop->head.cluster, -1);
                        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                        if (ip) {
                                ip->meta.nlinks = 1;
                                hammer2_inode_unlink_finisher(ip, 0);
                                hammer2_inode_unlock(ip);
                        }
                } else {
                        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                }
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return error;
}
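
/*
 * Illustrative userland sketch (not part of this file): this is the recovery
 * directive added by this commit, normally driven through hammer2(8).  The
 * raw ioctl usage for removing a corrupt directory entry by name is:
 *
 *      hammer2_ioc_destroy_t iocd;
 *      int dirfd = open("/mnt/baddir", O_RDONLY);      // parent directory
 *
 *      bzero(&iocd, sizeof(iocd));
 *      iocd.cmd = HAMMER2_DELETE_FILE;
 *      strlcpy(iocd.path, "badentry", sizeof(iocd.path));
 *      if (dirfd < 0 || ioctl(dirfd, HAMMER2IOC_DESTROY, &iocd) < 0)
 *              err(1, "HAMMER2IOC_DESTROY");
 *
 * HAMMER2_DELETE_INUM works the same way with iocd.inum set instead of
 * iocd.path, and may be issued on any file descriptor within the PFS since
 * the lookup above starts from pmp->iroot.
 */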