(nnpfs_block_extend): fix typo, spotted by Robert Watson <rwatson@FreeBSD.org>
[arla.git] / nnpfs / bsd / nnpfs_blocks.c
blob 0dfd2112b744b886587dff40a096eace5877e9f9

/*
 * Copyright (c) 2005-2007, Stockholms Universitet
 * (Stockholm University, Stockholm Sweden)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the university nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* $Id$ */

#include <nnpfs/nnpfs_locl.h>
#include <nnpfs/nnpfs_fs.h>
#include <nnpfs/nnpfs_dev.h>
#include <nnpfs/nnpfs_deb.h>
#include <nnpfs/nnpfs_vnodeops.h>

#include <nnpfs/nnpfs_common.h>
#include <nnpfs/nnpfs_node.h>

/*
 * return true if block is in cache
 */

int
nnpfs_block_have_p(struct nnpfs_node *node, uint64_t offset)
{
    struct nnpfs_cache_handle *handle = &node->data;
    uint32_t index = nnpfs_block_index(offset);
    uint32_t maskno = nnpfs_block_masknumber(index);

    nnpfs_assert(nnpfs_offset(offset) == offset);

    if (handle->nmasks == 0)
        return 0;

    if (maskno >= handle->nmasks)
        return 0;

    if (handle->nmasks == 1)
        return (handle->masks.first & nnpfs_block_mask(index));

    return (handle->masks.list[maskno] & nnpfs_block_mask(index));
}
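
/*
 * Annotation (not part of the original source): judging from the 32-entry
 * loop in nnpfs_block_foreach_int() below, nnpfs_block_index() presumably
 * maps a byte offset to a block number (offset / nnpfs_blocksize),
 * nnpfs_block_masknumber() picks the 32-bit word holding that block's bit
 * (index / 32) and nnpfs_block_mask() the bit itself (1 << (index % 32)).
 * While only one word is needed the bits live inline in masks.first;
 * larger files get a dynamically allocated masks.list.
 *
 * Worked example, assuming a 64k blocksize: offset 0x30000 is block 3,
 * word 0, bit 0x8, so a node with masks.first == 0x9 caches blocks 0 and 3.
 */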

/*
 * mark block at offset as present in cache
 *
 * XXX assert on the bit being changed?
 */

static int
nnpfs_block_set_have(struct nnpfs_node *node, uint64_t offset, int val)
{
    struct nnpfs_cache_handle *handle = &node->data;
    uint32_t index = nnpfs_block_index(offset);
    uint32_t maskno = nnpfs_block_masknumber(index);
    uint32_t mask = nnpfs_block_mask(index);
    uint32_t *slot;

    nnpfs_assert(nnpfs_offset(offset) == offset);

    if (maskno == 0 && handle->nmasks <= 1) {
        handle->nmasks = 1;
        slot = &handle->masks.first;
    } else {
        if (maskno >= handle->nmasks) {
            int n = maskno + NNPFS_NMASKS - (maskno % NNPFS_NMASKS);
            int size = n * sizeof(uint32_t);
            uint32_t *new;

            nnpfs_assert(val);

            new = nnpfs_alloc(size, M_NNPFS_BLOCKS);
            nnpfs_debug_assert(new);
            if (!new)
                return ENOMEM;

            if (handle->nmasks == 1) {
                new[0] = handle->masks.first;
            } else if (handle->nmasks > 1) {
                memcpy(new, handle->masks.list,
                       handle->nmasks * sizeof(uint32_t));
                nnpfs_free(handle->masks.list, handle->nmasks * sizeof(uint32_t),
                           M_NNPFS_BLOCKS);
            }

            memset(&new[handle->nmasks], 0,
                   (n - handle->nmasks) * sizeof(uint32_t));
            handle->nmasks = n;
            handle->masks.list = new;
        }

        slot = &handle->masks.list[maskno];
    }

    if (val)
        *slot |= mask;
    else
        *slot &= ~mask;

    return 0;
}
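
/*
 * Annotation: the mask array only ever grows, and only while a bit is being
 * set; that is what the nnpfs_assert(val) above enforces.  On growth, n is
 * maskno rounded up to the next multiple of NNPFS_NMASKS, the old words are
 * copied over and the new tail is zero-filled before the list replaces
 * masks.first or the previous masks.list.
 */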

/*
 * mark block at offset as present in cache
 */

int
nnpfs_block_setvalid(struct nnpfs_node *node, uint64_t offset)
{
    return nnpfs_block_set_have(node, offset, TRUE);
}

/*
 * mark block at offset as not present in cache
 */

void
nnpfs_block_setinvalid(struct nnpfs_node *node, uint64_t offset)
{
    (void)nnpfs_block_set_have(node, offset, FALSE);
}

static void
nnpfs_block_foreach_int(struct nnpfs_node *node,
                        nnpfs_block_callback_t fun,
                        void *data,
                        uint64_t base_offset,
                        int32_t mask)
{
    uint32_t tmp_mask = 1;
    int i;

    if (!mask)
        return;

    for (i = 0; i < 32; i++) {
        if (mask & tmp_mask) {
            fun(node, base_offset + i * nnpfs_blocksize, data);
            mask -= tmp_mask;
            if (!mask)
                return;
        }

        tmp_mask = tmp_mask << 1;
    }
}

/*
 * call callback for every block present in cache
 */

void
nnpfs_block_foreach(struct nnpfs_node *node,
                    nnpfs_block_callback_t fun,
                    void *data)
{
    struct nnpfs_cache_handle *handle = &node->data;
    int i;

    if (handle->nmasks == 0)
        return;

    if (handle->nmasks == 1) {
        nnpfs_block_foreach_int(node, fun, data, 0, handle->masks.first);
        return;
    }

    for (i = 0; i < handle->nmasks; i++)
        nnpfs_block_foreach_int(node, fun, data, i * 32 * nnpfs_blocksize,
                                handle->masks.list[i]);
}
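
/*
 * A minimal caller sketch (illustration only; block_count_cb is not part
 * of this file).  truncate_callback() below is the in-tree user.
 *
 *	static void
 *	block_count_cb(struct nnpfs_node *node, uint64_t offset, void *data)
 *	{
 *	    (*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int nblocks = 0;
 *	nnpfs_block_foreach(node, block_count_cb, &nblocks);
 */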

/*
 * Foreach callback for nnpfs_block_truncate()
 */

static void
truncate_callback(struct nnpfs_node *node, uint64_t offset, void *data)
{
    uint64_t *size = (uint64_t *)data;

    if (*size <= offset && offset > 0)
        (void)nnpfs_block_set_have(node, offset, FALSE);
}

/*
 * Forget all blocks beyond `size' for `node'
 */

void
nnpfs_block_truncate(struct nnpfs_node *node, uint64_t size)
{
    nnpfs_block_foreach(node, truncate_callback, &size);
}

/*
 * free all handle internal resources
 */

void
nnpfs_block_free_all(struct nnpfs_node *node)
{
    struct nnpfs_cache_handle *handle = &node->data;

    if (handle->nmasks > 1) {
        nnpfs_free(handle->masks.list, handle->nmasks * sizeof(uint32_t),
                   M_NNPFS_BLOCKS);
        handle->masks.list = NULL;
    } else {
        handle->masks.first = 0;
    }

    handle->nmasks = 0;
}

/*
 * return true if we have no data
 */

int
nnpfs_block_empty(struct nnpfs_node *node)
{
    struct nnpfs_cache_handle *handle = &node->data;
    int i;

    if (handle->nmasks == 0)
        return 1;

    if (handle->nmasks == 1) {
        if (handle->masks.first == 0)
            return 1;
        return 0;
    }

    for (i = 0; i < handle->nmasks; i++)
        if (handle->masks.list[i] != 0)
            return 0;

    return 1;
}

static int
nnpfs_block_extend_int(struct nnpfs_node *node, struct vnode *vp, d_thread_t *p)
{
    struct nnpfs_vfs_vattr va;
    int ret;

    VATTR_INIT(&va);
    nnpfs_set_va_size(&va, nnpfs_blocksize);

    nnpfs_vfs_writelock(vp, p);
    /* printf("nnpfs extend_int(%p)\n", vp); */
    ret = nnpfs_vnode_setattr(vp, &va, NNPFS_FROM_XNODE(node)->ctx);
    nnpfs_vfs_unlock(vp, p);
    nnpfs_debug_assert(!ret);

    return ret;
}

/*
 * Extend an existing block to full block size.
 */

static int
nnpfs_block_extend(struct nnpfs_node *node, uint64_t offset)
{
    d_thread_t *p = nnpfs_curproc();
    struct vnode *vp;
    int ret;

    nnpfs_assert(nnpfs_block_have_p(node, offset));

    ret = nnpfs_block_open(node, offset, FREAD|FWRITE, &vp);
    if (!ret) {
        nnpfs_assert(vp);

#ifdef __FreeBSD__
        {
            struct mount *mp;

            (void)vn_start_write(vp, &mp, V_WAIT);
            VOP_LEASE(vp, p,
                      nnpfs_vfs_context_ucred(NNPFS_FROM_XNODE(node)->ctx),
                      LEASE_WRITE);

            ret = nnpfs_block_extend_int(node, vp, p);

            nnpfs_vfs_unlock(vp, p);
            vn_finished_write(mp);
        }
#else
        ret = nnpfs_block_extend_int(node, vp, p);
#endif

        nnpfs_block_close(node, vp, 1);
    }

    if (ret)
        printf("nnpfs_block_extend: failed at offset 0x%llx: %d\n",
               (unsigned long long)offset, ret);

    return ret;
}
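
/*
 * Annotation: on FreeBSD the setattr is bracketed by vn_start_write() /
 * vn_finished_write() so that extending a cache file cooperates with
 * filesystem write suspension; the other platforms call
 * nnpfs_block_extend_int() directly, which takes and drops the vnode lock
 * itself.
 */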

#ifndef __APPLE__

/*
 * namei() compatible alloc/free
 */

static long nnpfs_namei_allocs, nnpfs_namei_frees;

static void
nnpfs_namei_alloc(struct componentname *cnp)
{
    void *p = NULL;

    if (cnp->cn_flags & HASBUF) {
        printf("nnpfs_namei_alloc: cnp flags 0x%lx\n", cnp->cn_flags);
        return;
    }

#ifdef __FreeBSD__
    p = uma_zalloc(namei_zone, M_WAITOK);
#endif
#ifdef __OpenBSD__
    p = pool_get(&namei_pool, PR_WAITOK);
#endif
#ifdef __NetBSD__
    p = PNBUF_GET();
#endif

    if (p) {
        cnp->cn_pnbuf = p;
        cnp->cn_flags |= HASBUF;
        nnpfs_namei_allocs++;
    }
}

static void
nnpfs_namei_free(struct componentname *cnp)
{
    if ((cnp->cn_flags & HASBUF) == 0)
        return;

#ifdef __FreeBSD__
    uma_zfree(namei_zone, cnp->cn_pnbuf);
#endif
#ifdef __NetBSD__
    PNBUF_PUT(cnp->cn_pnbuf);
#endif
#ifdef __OpenBSD__
    pool_put(&namei_pool, cnp->cn_pnbuf);
#endif

    cnp->cn_flags &= ~HASBUF;
    nnpfs_namei_frees++;
}

#endif /* !__APPLE__ */
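
/*
 * Annotation: the wrappers above hide the per-platform pathname buffer
 * allocators (namei_zone on FreeBSD, namei_pool on OpenBSD,
 * PNBUF_GET()/PNBUF_PUT() on NetBSD) behind HASBUF bookkeeping, with
 * nnpfs_namei_allocs/nnpfs_namei_frees as simple leak counters.
 */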

/*
 * a handy implementation of open()
 *
 * the returned vnode should be released with
 * nnpfs_block_close() on success.
 */

static int
open_file(struct vnode *cachedir, char *name, int fmode,
          nnpfs_vfs_context ctx, struct vnode **vpp)
{
    int error;

#ifdef __APPLE__ /* XXX */
    error = vnode_open(name, fmode, S_IRUSR|S_IWUSR, 0, vpp, ctx);
#else
    {
        d_thread_t *p = nnpfs_curproc();
        nnpfs_kernel_cred cred = nnpfs_vfs_context_ucred(ctx);
        /* nnpfs_kernel_cred cred = nnpfs_proc_to_cred(p); */
        struct nameidata nd;

        memset(&nd, 0, sizeof(nd));

        if (fmode & O_CREAT) {
            NDINIT(&nd, CREATE,
                   FOLLOW | LOCKLEAF | LOCKPARENT | SAVENAME | NNPFS_MPSAFE,
                   UIO_SYSSPACE, name, p);
        } else {
            NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NNPFS_MPSAFE,
                   UIO_SYSSPACE, name, p);
        }

        nd.ni_cnd.cn_cred = cred;
        nd.ni_startdir = cachedir;

        nnpfs_namei_alloc(&nd.ni_cnd);
        nd.ni_cnd.cn_nameptr = nd.ni_cnd.cn_pnbuf;

        error = copystr(name, nd.ni_cnd.cn_pnbuf, MAXPATHLEN, &nd.ni_pathlen);
        if (error == 0 && nd.ni_pathlen == 1)
            error = ENOENT;

        if (error) {
            nnpfs_namei_free(&nd.ni_cnd);
            printf("nnpfs open_file(%p, %s) copystr -> %d\n",
                   cachedir, name, error);
            return error;
        }

#ifdef __FreeBSD__
        if ((fmode & O_ACCMODE) != FREAD)
            bwillwrite(); /* do this before getting devlock? */
#endif
        /* XXX vn_start_write() etc? */

        nnpfs_vref(cachedir);

        error = lookup(&nd);
        if (error) {
            nnpfs_namei_free(&nd.ni_cnd);
            printf("lookup(%s) -> %d\n", name, error);
            return error;
        }

        if (fmode & O_CREAT && nd.ni_vp) {
            fmode &= ~O_CREAT;
#ifndef __NetBSD__
            nnpfs_vfs_unlock(cachedir, p);
#endif
        }

        if (fmode & O_CREAT) {
            struct vattr vat;
            struct mount *mp;

            if ((nd.ni_cnd.cn_flags & SAVENAME) == 0) {
                nnpfs_namei_free(&nd.ni_cnd);
                printf("lookup: not SAVENAME, flags 0x%lx\n", nd.ni_cnd.cn_flags);
                return EINVAL;
            }

            VATTR_NULL(&vat);
            vat.va_type = VREG;
            vat.va_mode = S_IRUSR|S_IWUSR;

            if ((nd.ni_cnd.cn_flags & HASBUF) == 0)
                panic("HASBUF was cleared\n");
            /* nd.ni_cnd.cn_flags |= HASBUF; */

#ifdef __FreeBSD__
            (void)vn_start_write(cachedir, &mp, V_WAIT); /* V_NOWAIT? */
#endif
            VOP_LEASE(cachedir, p, cred, LEASE_WRITE);
            error = VOP_CREATE(cachedir, vpp, &nd.ni_cnd, &vat);

#ifdef __FreeBSD__
            nnpfs_namei_free(&nd.ni_cnd);
            nnpfs_vfs_unlock(cachedir, p);
            vn_finished_write(mp);
#else
            /* NetBSD and OpenBSD release the buf w/o clearing HASBUF */
            nd.ni_cnd.cn_flags &= ~HASBUF;
            nnpfs_namei_frees++;
#endif

            if (error) {
                printf("nnpfs open_file(%p, %s) create -> %d\n",
                       cachedir, name, error);
                return error;
            }
        } else {
            *vpp = nd.ni_vp;
            nnpfs_namei_free(&nd.ni_cnd);
        }

#if defined(__FreeBSD__) && 0
        if (nd.ni_vp
            && vn_canvmio(nd.ni_vp) == TRUE
            && ((nd.ni_cnd.cn_flags & (NOOBJ|LOCKLEAF)) == LOCKLEAF))
            vfs_object_create(nd.ni_vp, p, cred);
#endif

#ifdef __FreeBSD__
#ifdef HAVE_FINAL_ARG_FILE_VOP_OPEN
        error = VOP_OPEN(*vpp, fmode, cred, p, NULL);
#else
        error = VOP_OPEN(*vpp, fmode, cred, p, -1);
#endif
#else
        error = VOP_OPEN(*vpp, fmode, cred, p);
#endif

        if (error) {
            nnpfs_vput(*vpp);
        } else {
            if (fmode & FWRITE)
                (*vpp)->v_writecount++;

            nnpfs_vfs_unlock(*vpp, p);
        }
    }
#endif /* !__APPLE__ */

    NNPFSDEB(XDEBNODE, ("nnpfs open_file(%p, %s) -> %d (%p)\n",
                        cachedir, name, error, *vpp));
    return error;
}
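
/*
 * Usage sketch (illustration only; the file name is made up and the block
 * file is assumed to exist already):
 *
 *	struct vnode *vp;
 *	int error = open_file(node->cache_vn, "0000000000000001", FREAD,
 *			      NNPFS_FROM_XNODE(node)->ctx, &vp);
 *	if (error == 0)
 *	    nnpfs_block_close(node, vp, 0);
 *
 * Unlike a plain vn_open(), the lookup starts at the cache directory vnode
 * (nd.ni_startdir = cachedir) rather than at the process root or cwd,
 * which is why the namei() pathname buffer is managed by hand above.
 */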

/*
 * open indicated cache block file. needs to be closed by caller.
 */

int
nnpfs_block_open(struct nnpfs_node *node, uint64_t offset, int flags,
                 struct vnode **vpp)
{
    char cachename[NNPFS_CACHE_PATH_SIZE];
    uint64_t blockindex = nnpfs_block_index(offset);
    struct nnpfs *nnpfsp = NNPFS_FROM_XNODE(node);
    off_t eof = nnpfs_vattr_get_size(&node->attr);
    int ret;

    NNPFSDEB(XDEBNODE, ("nnpfs_block_open(0x%llx)\n", (unsigned long long)offset));

    nnpfs_assert(nnpfsp);
    nnpfs_assert(nnpfs_block_have_p(node, offset)
                 || (flags & O_CREAT));

    if (nnpfs_vnode_isdir(XNODE_TO_VNODE(node))) {
        nnpfs_assert((flags & O_CREAT) == 0);
        *vpp = node->cache_vn;
        ret = 0;
    } else {
#ifdef __APPLE__
        ret = snprintf(cachename, sizeof(cachename),
                       NNPFS_CACHE_FILE_PATH,
                       node->index / 0x100, node->index % 0x100,
                       (unsigned long long)blockindex);
#else
        ret = snprintf(cachename, sizeof(cachename),
                       NNPFS_CACHE_FILE_BLOCK_PATH,
                       (unsigned long long)blockindex);
#endif
        nnpfs_assert(ret > 0 && ret < sizeof(cachename)); /* XXX */

        ret = open_file(node->cache_vn, cachename, flags, nnpfsp->ctx, vpp);
        nnpfs_debug_assert(!ret);
        if (ret)
            return ret;
    }

    /* blocks in the middle of the file should be of full length */
    if ((flags & O_CREAT) && offset < nnpfs_offset(eof)) {
        ret = nnpfs_block_extend_int(node, *vpp, nnpfs_curproc());
        nnpfs_debug_assert(!ret);
        if (ret)
            nnpfs_block_close(node, *vpp,
                              ((flags & FWRITE) == FWRITE) ? 1 : 0);
    }

    NNPFSDEB(XDEBNODE, ("nnpfs_block_open -> %d\n", ret));

#if 0
    nnpfs_assert(node->cache_vn);
    if (VOP_ISLOCKED(node->cache_vn, nnpfs_curproc())) {
        printf("%p is locked at %d\n", node->cache_vn, __LINE__);
        panic("locked at block_open:exit");
    }
#endif

    return ret;
}

void
nnpfs_block_close(struct nnpfs_node *node, struct vnode *vp, int rw)
{
    NNPFSDEB(XDEBNODE, ("nnpfs_block_close(%p)\n", vp));

    if (nnpfs_vnode_isdir(XNODE_TO_VNODE(node)))
        return;

#ifdef __APPLE__
    vnode_close(vp, 0, NULL);
#else
    {
        d_thread_t *p = nnpfs_curproc();

        nnpfs_vfs_writelock(vp, p);
        if (rw)
            vp->v_writecount--;
        VOP_CLOSE(vp, rw ? FWRITE : FREAD, NULL, p);
        nnpfs_vput(vp);
    }
#endif /* !__APPLE__ */

    NNPFSDEB(XDEBNODE, ("nnpfs_block_close done\n"));
}

/*
 * Create the indicated block and mark it as present in cache.
 *
 * Intended for writes beyond EOF.
 */

int
nnpfs_block_create(struct nnpfs_node *node, uint64_t offset)
{
    struct nnpfs_message_appenddata msg;
    struct nnpfs *nnpfsp = NNPFS_FROM_XNODE(node);
    off_t eof = nnpfs_vattr_get_size(&node->attr);
    struct vnode *vp;
    int ret;

    nnpfs_assert(!nnpfs_block_have_p(node, offset));
    nnpfs_assert(!nnpfs_vnode_isdir(XNODE_TO_VNODE(node)));

    /* printf("nnpfs_block_create @0x%llx\n", (unsigned long long)offset); */

    NNPFSDEB(XDEBNODE, ("nnpfs_block_create: %lx @0x%llx\n",
                        (unsigned long)node, (unsigned long long)offset));

    ret = nnpfs_block_setvalid(node, offset);
    if (ret) {
        nnpfs_debug_assert(0);
        return ret;
    }

    ret = nnpfs_block_open(node, offset, O_CREAT|FWRITE, &vp);
    if (!ret) {
        nnpfs_assert(vp);
        nnpfs_block_close(node, vp, 1);
    }

    /* extend previously last block to full length */
    if (!ret && eof < offset) {
        uint64_t prevoff = nnpfs_offset(eof);

        if (nnpfs_block_have_p(node, prevoff))
            ret = nnpfs_block_extend(node, prevoff);
    }

    nnpfs_debug_assert(!ret);

    if (ret) {
        /* XXX roll back file changes? */
        nnpfs_block_setinvalid(node, offset);
        return ret;
    }

    while (nnpfsp->appendquota < nnpfs_blocksize
           && nnpfsp->status & CHANNEL_OPENED) {
        int waiting = (nnpfsp->status & NNPFS_QUOTAWAIT);

        nnpfsp->status |= NNPFS_QUOTAWAIT;
        /* XXX */
        (void)nnpfs_dev_msleep(nnpfsp, (caddr_t)&nnpfsp->appendquota,
                               (PZERO + 1), "nnpfsquota");
        if (!waiting)
            nnpfsp->status &= ~NNPFS_QUOTAWAIT;
    }

    if ((nnpfsp->status & CHANNEL_OPENED) == 0)
        return ENODEV;

    nnpfsp->appendquota -= nnpfs_blocksize;
    nnpfs_assert(nnpfsp->appendquota >= 0);

    msg.header.opcode = NNPFS_MSG_APPENDDATA;
    msg.handle = node->handle;
    msg.offset = offset;

    /* XXX currently no cleanup on failed send, hope it's just a devclose */
    return nnpfs_message_send(nnpfsp, &msg.header, sizeof(msg));
}
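
/*
 * Flow note (annotation): nnpfs_block_create() first marks the block
 * present and creates its backing cache file, then pads what used to be
 * the last block out to full blocksize, and finally charges one blocksize
 * against nnpfsp->appendquota (sleeping on &nnpfsp->appendquota while the
 * quota is exhausted and the channel is still open) before announcing the
 * new block to the daemon with an NNPFS_MSG_APPENDDATA message.
 */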