hammer2 - Add kernel-thread-based async bulk free
sys/vfs/hammer2/hammer2_ioctl.c
/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ioctl Functions.
 *
 * WARNING! The ioctl functions which manipulate the connection state need
 *          to be able to run without deadlock on the volume's chain lock.
 *          Most of these functions use a separate lock.
 */
#include "hammer2.h"
static int hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_debug_dump(hammer2_inode_t *ip);
//static int hammer2_ioctl_inode_comp_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set(hammer2_inode_t *ip, void *data);
//static int hammer2_ioctl_inode_comp_rec_set2(hammer2_inode_t *ip, void *data);
static int hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data);
int
hammer2_ioctl(hammer2_inode_t *ip, u_long com, void *data, int fflag,
              struct ucred *cred)
{
        int error;

        /*
         * Standard root cred checks, will be selectively ignored below
         * for ioctls that do not require root creds.
         */
        error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);

        switch(com) {
        case HAMMER2IOC_VERSION_GET:
                error = hammer2_ioctl_version_get(ip, data);
                break;
        case HAMMER2IOC_RECLUSTER:
                if (error == 0)
                        error = hammer2_ioctl_recluster(ip, data);
                break;
        case HAMMER2IOC_REMOTE_SCAN:
                if (error == 0)
                        error = hammer2_ioctl_remote_scan(ip, data);
                break;
        case HAMMER2IOC_REMOTE_ADD:
                if (error == 0)
                        error = hammer2_ioctl_remote_add(ip, data);
                break;
        case HAMMER2IOC_REMOTE_DEL:
                if (error == 0)
                        error = hammer2_ioctl_remote_del(ip, data);
                break;
        case HAMMER2IOC_REMOTE_REP:
                if (error == 0)
                        error = hammer2_ioctl_remote_rep(ip, data);
                break;
        case HAMMER2IOC_SOCKET_GET:
                if (error == 0)
                        error = hammer2_ioctl_socket_get(ip, data);
                break;
        case HAMMER2IOC_SOCKET_SET:
                if (error == 0)
                        error = hammer2_ioctl_socket_set(ip, data);
                break;
        case HAMMER2IOC_PFS_GET:
                if (error == 0)
                        error = hammer2_ioctl_pfs_get(ip, data);
                break;
        case HAMMER2IOC_PFS_LOOKUP:
                if (error == 0)
                        error = hammer2_ioctl_pfs_lookup(ip, data);
                break;
        case HAMMER2IOC_PFS_CREATE:
                if (error == 0)
                        error = hammer2_ioctl_pfs_create(ip, data);
                break;
        case HAMMER2IOC_PFS_DELETE:
                if (error == 0)
                        error = hammer2_ioctl_pfs_delete(ip, data);
                break;
        case HAMMER2IOC_PFS_SNAPSHOT:
                if (error == 0)
                        error = hammer2_ioctl_pfs_snapshot(ip, data);
                break;
        case HAMMER2IOC_INODE_GET:
                error = hammer2_ioctl_inode_get(ip, data);
                break;
        case HAMMER2IOC_INODE_SET:
                if (error == 0)
                        error = hammer2_ioctl_inode_set(ip, data);
                break;
        case HAMMER2IOC_BULKFREE_SCAN:
                error = hammer2_ioctl_bulkfree_scan(ip, data);
                break;
        case HAMMER2IOC_BULKFREE_ASYNC:
                error = hammer2_ioctl_bulkfree_scan(ip, NULL);
                break;
        /*case HAMMER2IOC_INODE_COMP_SET:
                error = hammer2_ioctl_inode_comp_set(ip, data);
                break;
        case HAMMER2IOC_INODE_COMP_REC_SET:
                error = hammer2_ioctl_inode_comp_rec_set(ip, data);
                break;
        case HAMMER2IOC_INODE_COMP_REC_SET2:
                error = hammer2_ioctl_inode_comp_rec_set2(ip, data);
                break;*/
        case HAMMER2IOC_DEBUG_DUMP:
                error = hammer2_ioctl_debug_dump(ip);
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return (error);
}
/*
 * Retrieve version and basic info
 */
static int
hammer2_ioctl_version_get(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_version_t *version = data;
        hammer2_dev_t *hmp;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp)
                version->version = hmp->voldata.version;
        else
                version->version = -1;
        return 0;
}
static int
hammer2_ioctl_recluster(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_recluster_t *recl = data;
        struct file *fp;
        hammer2_cluster_t *cluster;
        int error;

        fp = holdfp(curproc->p_fd, recl->fd, -1);
        if (fp) {
                kprintf("reconnect to cluster: XXX ");
                cluster = &ip->pmp->iroot->cluster;
                if (cluster->nchains != 1 || cluster->focus == NULL) {
                        kprintf("not a local device mount\n");
                        error = EINVAL;
                } else {
                        hammer2_cluster_reconnect(cluster->focus->hmp, fp);
                        kprintf("ok\n");
                        error = 0;
                }
        } else {
                error = EINVAL;
        }
        return error;
}
/*
 * Retrieve information about a remote
 */
static int
hammer2_ioctl_remote_scan(hammer2_inode_t *ip, void *data)
{
        hammer2_dev_t *hmp;
        hammer2_ioc_remote_t *remote = data;
        int copyid = remote->copyid;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        remote->copy1 = hmp->voldata.copyinfo[copyid];
        hammer2_voldata_unlock(hmp);

        /*
         * Adjust nextid (GET only)
         */
        while (++copyid < HAMMER2_COPYID_COUNT &&
               hmp->voldata.copyinfo[copyid].copyid == 0) {
                ;
        }
        if (copyid == HAMMER2_COPYID_COUNT)
                remote->nextid = -1;
        else
                remote->nextid = copyid;

        return(0);
}
/*
 * Add new remote entry
 */
static int
hammer2_ioctl_remote_add(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_pfs_t *pmp = ip->pmp;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;
        int error = 0;

        hmp = pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        if (copyid < 0) {
                for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
                        if (hmp->voldata.copyinfo[copyid].copyid == 0)
                                break;
                }
                if (copyid == HAMMER2_COPYID_COUNT) {
                        error = ENOSPC;
                        goto failed;
                }
        }
        hammer2_voldata_modify(hmp);
        remote->copy1.copyid = copyid;
        hmp->voldata.copyinfo[copyid] = remote->copy1;
        hammer2_volconf_update(hmp, copyid);
failed:
        hammer2_voldata_unlock(hmp);
        return (error);
}
/*
 * Delete existing remote entry
 */
static int
hammer2_ioctl_remote_del(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_pfs_t *pmp = ip->pmp;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;
        int error = 0;

        hmp = pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);
        remote->copy1.path[sizeof(remote->copy1.path) - 1] = 0;
        hammer2_voldata_lock(hmp);
        if (copyid < 0) {
                for (copyid = 1; copyid < HAMMER2_COPYID_COUNT; ++copyid) {
                        if (hmp->voldata.copyinfo[copyid].copyid == 0)
                                continue;
                        if (strcmp(remote->copy1.path,
                            hmp->voldata.copyinfo[copyid].path) == 0) {
                                break;
                        }
                }
                if (copyid == HAMMER2_COPYID_COUNT) {
                        error = ENOENT;
                        goto failed;
                }
        }
        hammer2_voldata_modify(hmp);
        hmp->voldata.copyinfo[copyid].copyid = 0;
        hammer2_volconf_update(hmp, copyid);
failed:
        hammer2_voldata_unlock(hmp);
        return (error);
}
/*
 * Replace existing remote entry
 */
static int
hammer2_ioctl_remote_rep(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        hammer2_voldata_modify(hmp);
        /*hammer2_volconf_update(hmp, copyid);*/
        hammer2_voldata_unlock(hmp);

        return(0);
}
/*
 * Retrieve communications socket
 */
static int
hammer2_ioctl_socket_get(hammer2_inode_t *ip, void *data)
{
        return (EOPNOTSUPP);
}
/*
 * Set communications socket for connection
 */
static int
hammer2_ioctl_socket_set(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_remote_t *remote = data;
        hammer2_dev_t *hmp;
        int copyid = remote->copyid;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);
        if (copyid < 0 || copyid >= HAMMER2_COPYID_COUNT)
                return (EINVAL);

        hammer2_voldata_lock(hmp);
        hammer2_voldata_unlock(hmp);

        return(0);
}
/*
 * Used to scan and retrieve PFS information.  PFS's are directories under
 * the super-root.
 *
 * To scan PFSs pass name_key=0.  The function will scan for the next
 * PFS and set all fields, as well as set name_next to the next key.
 * When no PFSs remain, name_next is set to (hammer2_key_t)-1.
 *
 * To retrieve a particular PFS by key, specify the key but note that
 * the ioctl will return the lowest key >= specified_key, so the caller
 * must verify the key.
 *
 * To retrieve the PFS associated with the file descriptor, pass
 * name_key set to (hammer2_key_t)-1.
 */
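/*
 * Illustrative userland sketch (not kernel code): scanning all PFSs via
 * HAMMER2IOC_PFS_GET.  Assumes "fd" is an open descriptor on a hammer2
 * mount and that the hammer2 ioctl definitions are in scope (e.g. via
 * <vfs/hammer2/hammer2_ioctl.h>):
 *
 *	hammer2_ioc_pfs_t pfs;
 *
 *	bzero(&pfs, sizeof(pfs));
 *	pfs.name_key = 0;
 *	while (pfs.name_key != (hammer2_key_t)-1) {
 *		if (ioctl(fd, HAMMER2IOC_PFS_GET, &pfs) < 0)
 *			break;
 *		printf("%s key=%016jx\n", pfs.name, (uintmax_t)pfs.name_key);
 *		pfs.name_key = pfs.name_next;
 *	}
 */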
static int
hammer2_ioctl_pfs_get(hammer2_inode_t *ip, void *data)
{
        const hammer2_inode_data_t *ripdata;
        hammer2_dev_t *hmp;
        hammer2_ioc_pfs_t *pfs;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        hammer2_key_t save_key;
        int cache_index = -1;
        int error;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        pfs = data;
        save_key = pfs->name_key;
        error = 0;

        /*
         * Setup
         */
        if (save_key == (hammer2_key_t)-1) {
                hammer2_inode_lock(ip->pmp->iroot, 0);
                parent = NULL;
                chain = hammer2_inode_chain(hmp->spmp->iroot, 0,
                                            HAMMER2_RESOLVE_ALWAYS |
                                            HAMMER2_RESOLVE_SHARED);
        } else {
                hammer2_inode_lock(hmp->spmp->iroot, 0);
                parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
                                            HAMMER2_RESOLVE_ALWAYS |
                                            HAMMER2_RESOLVE_SHARED);
                chain = hammer2_chain_lookup(&parent, &key_next,
                                            pfs->name_key, HAMMER2_KEY_MAX,
                                            &cache_index,
                                            HAMMER2_LOOKUP_SHARED);
        }

        /*
         * Locate next PFS
         */
        while (chain) {
                if (chain->bref.type == HAMMER2_BREF_TYPE_INODE)
                        break;
                if (parent == NULL) {
                        hammer2_chain_unlock(chain);
                        hammer2_chain_drop(chain);
                        chain = NULL;
                        break;
                }
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next, HAMMER2_KEY_MAX,
                                           &cache_index,
                                           HAMMER2_LOOKUP_SHARED);
        }

        /*
         * Load the data being returned by the ioctl.
         */
        if (chain) {
                ripdata = &chain->data->ipdata;
                pfs->name_key = ripdata->meta.name_key;
                pfs->pfs_type = ripdata->meta.pfs_type;
                pfs->pfs_subtype = ripdata->meta.pfs_subtype;
                pfs->pfs_clid = ripdata->meta.pfs_clid;
                pfs->pfs_fsid = ripdata->meta.pfs_fsid;
                KKASSERT(ripdata->meta.name_len < sizeof(pfs->name));
                bcopy(ripdata->filename, pfs->name, ripdata->meta.name_len);
                pfs->name[ripdata->meta.name_len] = 0;
                ripdata = NULL; /* safety */

                /*
                 * Calculate name_next, if any.
                 */
                if (parent == NULL) {
                        pfs->name_next = (hammer2_key_t)-1;
                } else {
                        chain = hammer2_chain_next(&parent, chain, &key_next,
                                                   key_next, HAMMER2_KEY_MAX,
                                                   &cache_index,
                                                   HAMMER2_LOOKUP_SHARED);
                        if (chain)
                                pfs->name_next = chain->bref.key;
                        else
                                pfs->name_next = (hammer2_key_t)-1;
                }
        } else {
                pfs->name_next = (hammer2_key_t)-1;
                error = ENOENT;
        }

        /*
         * Cleanup
         */
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        if (save_key == (hammer2_key_t)-1) {
                hammer2_inode_unlock(ip->pmp->iroot);
        } else {
                hammer2_inode_unlock(hmp->spmp->iroot);
        }

        return (error);
}
/*
 * Find a specific PFS by name
 */
static int
hammer2_ioctl_pfs_lookup(hammer2_inode_t *ip, void *data)
{
        const hammer2_inode_data_t *ripdata;
        hammer2_dev_t *hmp;
        hammer2_ioc_pfs_t *pfs;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_key_t key_next;
        hammer2_key_t lhc;
        int cache_index = -1;
        int error;
        size_t len;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        pfs = data;
        error = 0;

        hammer2_inode_lock(hmp->spmp->iroot, HAMMER2_RESOLVE_SHARED);
        parent = hammer2_inode_chain(hmp->spmp->iroot, 0,
                                     HAMMER2_RESOLVE_ALWAYS |
                                     HAMMER2_RESOLVE_SHARED);

        pfs->name[sizeof(pfs->name) - 1] = 0;
        len = strlen(pfs->name);
        lhc = hammer2_dirhash(pfs->name, len);

        chain = hammer2_chain_lookup(&parent, &key_next,
                                     lhc, lhc + HAMMER2_DIRHASH_LOMASK,
                                     &cache_index,
                                     HAMMER2_LOOKUP_SHARED);
        while (chain) {
                if (hammer2_chain_dirent_test(chain, pfs->name, len))
                        break;
                chain = hammer2_chain_next(&parent, chain, &key_next,
                                           key_next,
                                           lhc + HAMMER2_DIRHASH_LOMASK,
                                           &cache_index,
                                           HAMMER2_LOOKUP_SHARED);
        }

        /*
         * Load the data being returned by the ioctl.
         */
        if (chain) {
                KKASSERT(chain->bref.type == HAMMER2_BREF_TYPE_INODE);
                ripdata = &chain->data->ipdata;
                pfs->name_key = ripdata->meta.name_key;
                pfs->pfs_type = ripdata->meta.pfs_type;
                pfs->pfs_subtype = ripdata->meta.pfs_subtype;
                pfs->pfs_clid = ripdata->meta.pfs_clid;
                pfs->pfs_fsid = ripdata->meta.pfs_fsid;
                ripdata = NULL;

                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        } else {
                error = ENOENT;
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        hammer2_inode_unlock(hmp->spmp->iroot);

        return (error);
}
/*
 * Create a new PFS under the super-root
 */
static int
hammer2_ioctl_pfs_create(hammer2_inode_t *ip, void *data)
{
        hammer2_inode_data_t *nipdata;
        hammer2_chain_t *nchain;
        hammer2_dev_t *hmp;
        hammer2_dev_t *force_local;
        hammer2_ioc_pfs_t *pfs;
        hammer2_inode_t *nip;
        hammer2_tid_t mtid;
        int error;

        hmp = ip->pmp->pfs_hmps[0];     /* XXX */
        if (hmp == NULL)
                return (EINVAL);

        pfs = data;
        nip = NULL;

        if (pfs->name[0] == 0)
                return(EINVAL);
        pfs->name[sizeof(pfs->name) - 1] = 0;   /* ensure 0-termination */

        if (hammer2_ioctl_pfs_lookup(ip, pfs) == 0)
                return(EEXIST);

        hammer2_trans_init(hmp->spmp, 0);
        mtid = hammer2_trans_sub(hmp->spmp);
        nip = hammer2_inode_create(hmp->spmp->iroot, hmp->spmp->iroot,
                                   NULL, NULL,
                                   pfs->name, strlen(pfs->name), 0,
                                   1, HAMMER2_OBJTYPE_DIRECTORY, 0,
                                   HAMMER2_INSERT_PFSROOT, &error);
        if (error == 0) {
                hammer2_inode_modify(nip);
                nchain = hammer2_inode_chain(nip, 0, HAMMER2_RESOLVE_ALWAYS);
                hammer2_chain_modify(nchain, mtid, 0, 0);
                nipdata = &nchain->data->ipdata;

                nip->meta.pfs_type = pfs->pfs_type;
                nip->meta.pfs_subtype = pfs->pfs_subtype;
                nip->meta.pfs_clid = pfs->pfs_clid;
                nip->meta.pfs_fsid = pfs->pfs_fsid;
                nip->meta.op_flags |= HAMMER2_OPFLAG_PFSROOT;

                /*
                 * Set default compression and check algorithm.  This
                 * can be changed later.
                 *
                 * Do not allow compression on PFS's with the special name
                 * "boot", the boot loader can't decompress (yet).
                 */
                nip->meta.comp_algo =
                        HAMMER2_ENC_ALGO(HAMMER2_COMP_NEWFS_DEFAULT);
                nip->meta.check_algo =
                        HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);

                if (strcasecmp(pfs->name, "boot") == 0) {
                        nip->meta.comp_algo =
                                HAMMER2_ENC_ALGO(HAMMER2_COMP_AUTOZERO);
                }

                /*
                 * Super-root isn't mounted, fsync it
                 */
                hammer2_chain_unlock(nchain);
                hammer2_inode_ref(nip);
                hammer2_inode_unlock(nip);
                hammer2_inode_chain_sync(nip);
                hammer2_inode_drop(nip);

                /*
                 * We still have a ref on the chain, relock and associate
                 * with an appropriate PFS.
                 */
                force_local = (hmp->hflags & HMNT2_LOCAL) ? hmp : NULL;

                hammer2_chain_lock(nchain, HAMMER2_RESOLVE_ALWAYS);
                nipdata = &nchain->data->ipdata;
                kprintf("ADD LOCAL PFS (IOCTL): %s\n", nipdata->filename);
                hammer2_pfsalloc(nchain, nipdata,
                                 nchain->bref.modify_tid, force_local);

                hammer2_chain_unlock(nchain);
                hammer2_chain_drop(nchain);
        }
        hammer2_trans_done(hmp->spmp);

        return (error);
}
/*
 * Destroy an existing PFS under the super-root
 */
static int
hammer2_ioctl_pfs_delete(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_pfs_t *pfs = data;
        hammer2_dev_t *hmp;
        hammer2_pfs_t *spmp;
        hammer2_pfs_t *pmp;
        hammer2_xop_unlink_t *xop;
        hammer2_inode_t *dip;
        hammer2_inode_t *iroot;
        int error;
        int i;

        /*
         * The PFS should be probed, so we should be able to
         * locate it.  We only delete the PFS from the
         * specific H2 block device (hmp), not all of
         * them.  We must remove the PFS from the cluster
         * before we can destroy it.
         */
        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        pfs->name[sizeof(pfs->name) - 1] = 0;   /* ensure termination */

        lockmgr(&hammer2_mntlk, LK_EXCLUSIVE);

        TAILQ_FOREACH(pmp, &hammer2_pfslist, mntentry) {
                for (i = 0; i < HAMMER2_MAXCLUSTER; ++i) {
                        if (pmp->pfs_hmps[i] != hmp)
                                continue;
                        if (pmp->pfs_names[i] &&
                            strcmp(pmp->pfs_names[i], pfs->name) == 0) {
                                break;
                        }
                }
                if (i != HAMMER2_MAXCLUSTER)
                        break;
        }

        if (pmp == NULL) {
                lockmgr(&hammer2_mntlk, LK_RELEASE);
                return ENOENT;
        }

        /*
         * Ok, we found the pmp and we have the index.  Permanently remove
         * the PFS from the cluster
         */
        iroot = pmp->iroot;
        kprintf("FOUND PFS %s CLINDEX %d\n", pfs->name, i);
        hammer2_pfsdealloc(pmp, i, 1);

        lockmgr(&hammer2_mntlk, LK_RELEASE);

        /*
         * Now destroy the PFS under its device using the per-device
         * super-root.
         */
        spmp = hmp->spmp;
        dip = spmp->iroot;
        hammer2_trans_init(spmp, 0);
        hammer2_inode_lock(dip, 0);

        xop = hammer2_xop_alloc(dip, HAMMER2_XOP_MODIFYING);
        hammer2_xop_setname(&xop->head, pfs->name, strlen(pfs->name));
        xop->isdir = 2;
        xop->dopermanent = 2 | 1;       /* FORCE | PERMANENT */
        hammer2_xop_start(&xop->head, hammer2_xop_unlink);

        error = hammer2_xop_collect(&xop->head, 0);

        hammer2_inode_unlock(dip);

#if 0
        if (error == 0) {
                ip = hammer2_inode_get(dip->pmp, dip, &xop->head.cluster, -1);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                if (ip) {
                        hammer2_inode_unlink_finisher(ip, 0);
                        hammer2_inode_unlock(ip);
                }
        } else {
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        }
#endif
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

        hammer2_trans_done(spmp);

        return (error);
}
static int
hammer2_ioctl_pfs_snapshot(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_pfs_t *pfs = data;
        hammer2_dev_t *hmp;
        hammer2_pfs_t *pmp;
        hammer2_chain_t *chain;
        hammer2_tid_t mtid;
        int error;

        if (pfs->name[0] == 0)
                return(EINVAL);
        if (pfs->name[sizeof(pfs->name)-1] != 0)
                return(EINVAL);

        pmp = ip->pmp;
        ip = pmp->iroot;

        hmp = pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        hammer2_vfs_sync(pmp->mp, MNT_WAIT);

        hammer2_trans_init(pmp, HAMMER2_TRANS_ISFLUSH);
        mtid = hammer2_trans_sub(pmp);
        hammer2_inode_lock(ip, 0);
        hammer2_inode_modify(ip);
        ip->meta.pfs_lsnap_tid = mtid;

        /* XXX cluster it! */
        chain = hammer2_inode_chain(ip, 0, HAMMER2_RESOLVE_ALWAYS);
        error = hammer2_chain_snapshot(chain, pfs, mtid);
        hammer2_chain_unlock(chain);
        hammer2_chain_drop(chain);

        hammer2_inode_unlock(ip);
        hammer2_trans_done(pmp);

        return (error);
}
/*
 * Retrieve the raw inode structure, non-inclusive of node-specific data.
 */
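/*
 * Illustrative userland sketch (not kernel code, an assumption about
 * typical usage): retrieve the inode meta-data and the aggregated counts
 * for a file or directory open as "fd":
 *
 *	hammer2_ioc_inode_t ino;
 *
 *	bzero(&ino, sizeof(ino));
 *	if (ioctl(fd, HAMMER2IOC_INODE_GET, &ino) == 0) {
 *		printf("inum=%016jx data_count=%ju inode_count=%ju\n",
 *		       (uintmax_t)ino.ip_data.meta.inum,
 *		       (uintmax_t)ino.data_count,
 *		       (uintmax_t)ino.inode_count);
 *	}
 */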
static int
hammer2_ioctl_inode_get(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_inode_t *ino;
        hammer2_chain_t *chain;
        int error;
        int i;

        ino = data;
        error = 0;

        hammer2_inode_lock(ip, HAMMER2_RESOLVE_SHARED);
        ino->data_count = 0;
        ino->inode_count = 0;
        for (i = 0; i < ip->cluster.nchains; ++i) {
                if ((chain = ip->cluster.array[i].chain) != NULL) {
                        if (ino->data_count <
                            chain->bref.embed.stats.data_count) {
                                ino->data_count =
                                        chain->bref.embed.stats.data_count;
                        }
                        if (ino->inode_count <
                            chain->bref.embed.stats.inode_count) {
                                ino->inode_count =
                                        chain->bref.embed.stats.inode_count;
                        }
                }
        }
        bzero(&ino->ip_data, sizeof(ino->ip_data));
        ino->ip_data.meta = ip->meta;
        ino->kdata = ip;
        hammer2_inode_unlock(ip);

        return error;
}
/*
 * Set various parameters in an inode which cannot be set through
 * normal filesystem VNOPS.
 */
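/*
 * Illustrative userland sketch (not kernel code): the usual pattern is
 * GET, modify the meta field of interest, set the matching flag, then SET.
 * For example, switching the check algorithm on "fd" (a sketch only, not
 * necessarily how hammer2(8) implements it):
 *
 *	hammer2_ioc_inode_t ino;
 *
 *	bzero(&ino, sizeof(ino));
 *	if (ioctl(fd, HAMMER2IOC_INODE_GET, &ino) < 0)
 *		err(1, "inode get");
 *	ino.flags = HAMMER2IOC_INODE_FLAG_CHECK;
 *	ino.ip_data.meta.check_algo =
 *		HAMMER2_ENC_ALGO(HAMMER2_CHECK_XXHASH64);
 *	if (ioctl(fd, HAMMER2IOC_INODE_SET, &ino) < 0)
 *		err(1, "inode set");
 */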
static int
hammer2_ioctl_inode_set(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_inode_t *ino = data;
        int error = 0;

        hammer2_trans_init(ip->pmp, 0);
        hammer2_inode_lock(ip, 0);

        if ((ino->flags & HAMMER2IOC_INODE_FLAG_CHECK) &&
            ip->meta.check_algo != ino->ip_data.meta.check_algo) {
                hammer2_inode_modify(ip);
                ip->meta.check_algo = ino->ip_data.meta.check_algo;
        }
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_COMP) &&
            ip->meta.comp_algo != ino->ip_data.meta.comp_algo) {
                hammer2_inode_modify(ip);
                ip->meta.comp_algo = ino->ip_data.meta.comp_algo;
        }
        ino->kdata = ip;

        /* Ignore these flags for now...*/
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_IQUOTA) &&
            ip->meta.inode_quota != ino->ip_data.meta.inode_quota) {
                hammer2_inode_modify(ip);
                ip->meta.inode_quota = ino->ip_data.meta.inode_quota;
        }
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_DQUOTA) &&
            ip->meta.data_quota != ino->ip_data.meta.data_quota) {
                hammer2_inode_modify(ip);
                ip->meta.data_quota = ino->ip_data.meta.data_quota;
        }
        if ((ino->flags & HAMMER2IOC_INODE_FLAG_COPIES) &&
            ip->meta.ncopies != ino->ip_data.meta.ncopies) {
                hammer2_inode_modify(ip);
                ip->meta.ncopies = ino->ip_data.meta.ncopies;
        }
        hammer2_inode_unlock(ip);
        hammer2_trans_done(ip->pmp);

        return (error);
}
static
int
hammer2_ioctl_debug_dump(hammer2_inode_t *ip)
{
        hammer2_chain_t *chain;
        int count = 1000;
        int i;

        for (i = 0; i < ip->cluster.nchains; ++i) {
                chain = ip->cluster.array[i].chain;
                if (chain == NULL)
                        continue;
                hammer2_dump_chain(chain, 0, &count, 'i');
        }
        return 0;
}
/*
 * Executes one flush/free pass per call.  If trying to recover
 * data we just freed up a moment ago it can take up to six passes
 * to fully free the blocks.  Note that passes occur automatically based
 * on free space as the storage fills up, but manual passes may be needed
 * if storage becomes almost completely full.
 */
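/*
 * Illustrative userland sketch (not kernel code): HAMMER2IOC_BULKFREE_SCAN
 * runs a synchronous pass described by a hammer2_ioc_bulkfree_t, while
 * HAMMER2IOC_BULKFREE_ASYNC is dispatched with a NULL bfi (see
 * hammer2_ioctl() above) and simply kicks the per-device bulkfree thread.
 * Filling in the scan parameters is left to the caller, e.g. hammer2(8):
 *
 *	hammer2_ioc_bulkfree_t bfi;
 *
 *	bzero(&bfi, sizeof(bfi));
 *	... caller fills in the scan parameters here ...
 *	if (ioctl(fd, HAMMER2IOC_BULKFREE_SCAN, &bfi) < 0)
 *		err(1, "bulkfree scan");
 *	if (ioctl(fd, HAMMER2IOC_BULKFREE_ASYNC, &bfi) < 0)
 *		err(1, "bulkfree async");
 */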
static
int
hammer2_ioctl_bulkfree_scan(hammer2_inode_t *ip, void *data)
{
        hammer2_ioc_bulkfree_t *bfi = data;
        hammer2_dev_t *hmp;
        int error;

        hmp = ip->pmp->pfs_hmps[0];
        if (hmp == NULL)
                return (EINVAL);

        /*
         * Negotiate for manual access.  The hammer2_bulkfree_pass() itself
         * also has its own lock and will deal with a manual override when
         * an automatic bulkfree is already running.
         */
        error = lockmgr(&hmp->bflock, LK_EXCLUSIVE | LK_PCATCH);
        if (error)
                return error;
        if (bfi) {
                hammer2_thr_freeze(&hmp->bfthr);
                error = hammer2_bulkfree_pass(hmp, bfi);
                hammer2_thr_unfreeze(&hmp->bfthr);
        } else {
                hammer2_thr_remaster(&hmp->bfthr);
        }
        lockmgr(&hmp->bflock, LK_RELEASE);

        return error;
}