/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block detection, check codes, and buffer cache
 * operations for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/objcache.h>
#include <sys/event.h>

#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"
struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via a XOP.
 *
 * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
 *	    to the current cpu.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
 *     calls but it has in the past when multiple flushes are queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
				const char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
				char **datap, int pblksize);
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}
/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	return (EOPNOTSUPP);
}
/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/

/*
 * Callback used in read path in case a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression."
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
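
/*
 * Illustrative sketch (not compiled, not part of the driver): the framing
 * used by the compressed read/write paths in this file stores the compressed
 * length in a leading native int, with the LZ4 payload immediately following
 * it.  The helper name frame_peek_size() below is hypothetical; a userland
 * tool reading such a block could split the prefix from the payload like
 * this before handing the payload to LZ4_decompress_safe().
 */
#if 0
static int
frame_peek_size(const char *block, size_t block_bytes, const char **payloadp)
{
	int csize;

	if (block_bytes < sizeof(int))
		return (-1);			/* too short to hold prefix */
	memcpy(&csize, block, sizeof(int));	/* leading size prefix */
	*payloadp = block + sizeof(int);	/* compressed payload */
	return (csize);
}
#endif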
/*
 * Callback used in read path in case a block is compressed with ZLIB.
 * It is almost identical to the LZ4 callback, so in theory they could be
 * unified, but we didn't want to make changes to the bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);
	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
	/* asynchronous completion */

	return(0);
}
/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
void
hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_chain_t *focus;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	const char *data;
	int error;

	/*
	 * Note that we can race completion of the bio supplied by
	 * the front-end so we cannot access it until we determine
	 * that we are the ones finishing it up.
	 */
	lbase = xop->lbase;

	/*
	 * This is difficult to optimize.  The logical buffer might be
	 * partially dirty (contain dummy zero-fill pages), which would
	 * mess up our crc calculation if we were to try a direct read.
	 * So for now we always double-buffer through the underlying
	 * storage.
	 *
	 * If not for the above problem we could conditionalize on
	 * (1) 64KB buffer, (2) one chain (not multi-master) and
	 * (3) !hammer2_double_buffer, and issue a direct read into the
	 * logical buffer.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &error,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		if (chain)
			error = chain->error;
	} else {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend.  First-to-complete.  bio is only
	 * valid if we are determined to be the ones able to complete
	 * the operation.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}
	bio = xop->bio;
	bp = bio->bio_buf;

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 *
	 * Also note that even for compressed data we would rather the
	 * kernel cache/swapcache device buffers more and (decompressed)
	 * logical buffers less, since that will significantly improve
	 * the amount of end-user data that can be cached.
	 *
	 * NOTE: The chain->data for xop->head.cluster.focus will be
	 *	 synchronized to the current cpu by xop_collect(),
	 *	 but other chains in the cluster might not be.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		focus = xop->head.cluster.focus;
		data = hammer2_xop_gdata(&xop->head)->buf;
		hammer2_strategy_read_completion(focus, data, xop->bio);
		hammer2_xop_pdata(&xop->head);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		kprintf("xop_strategy_read: error %08x loff=%016jx\n",
			error, bp->b_loffset);
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}
static
void
hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Copy from in-memory inode structure.
		 */
		bcopy(((const hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.  Release the
		 * chain (try to free it) when done.  The data is still
		 * cached by both the buffer cache in front and the
		 * block device behind us.  This leaves more room in the
		 * LRU chain cache for meta-data chains which we really
		 * want to retain.
		 *
		 * NOTE: Deduplication cannot be safely recorded for
		 *	 records without a check code.
		 */
		hammer2_dedup_record(focus, NULL, data);
		atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, focus->bytes,
							bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, focus->bytes,
							 bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(focus->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, focus->bytes);
			if (focus->bytes < bp->b_bcount) {
				bzero(bp->b_data + focus->bytes,
				      bp->b_bcount - focus->bytes);
			}
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read: unknown bref type");
	}
}
/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_STRATEGY);
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}
/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
 */
void
hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;
	hammer2_off_t bio_offset;
	char *bio_data;

	/*
	 * We can only access the bp/bio if the frontend has not yet
	 * completed.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_sh(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	bio_data = scratch;

	bio = xop->bio;			/* ephemeral */
	bp = bio->bio_buf;		/* ephemeral */
	ip = xop->head.ip1;		/* retained by ref */
	bio_offset = bio->bio_offset;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);

	KKASSERT(lblksize <= MAXPHYS);
	bcopy(bp->b_data, bio_data, lblksize);

	hammer2_mtx_unlock(&xop->lock);
	bp = NULL;	/* safety, illegal to access after unlock */
	bio = NULL;	/* safety, illegal to access after unlock */

	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bio_data, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	hammer2_xop_feed(&xop->head, NULL, clindex, error);

	/*
	 * Try to complete the operation on behalf of the front-end.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	if (error == HAMMER2_ERROR_EINPROGRESS) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has completed.
	 */
	xop->finished = 1;
	hammer2_mtx_unlock(&xop->lock);

	bio = xop->bio;		/* now owned by us */
	bp = bio->bio_buf;	/* now owned by us */

	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		kprintf("xop_strategy_write: error %d loff=%016jx\n",
			error, bp->b_loffset);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_lwinprog_drop(ip->pmp);
	hammer2_trans_done(ip->pmp, 0);
}
/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}
/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);

	/*
	 * The lookup code should not return a DELETED chain to us, unless
	 * its a short-file embedded in the inode.  Then it is possible for
	 * the lookup to return a deleted inode.
	 */
	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
		kprintf("assign physical deleted chain @ "
			"%016jx (%016jx.%02x) ip %016jx\n",
			lbase, chain->bref.data_off, chain->bref.type,
			(intmax_t)ip->meta.inum);
	}

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp |= hammer2_chain_create(parentp, &chain,
				       ip->pmp,
				       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
				       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
				       lbase, HAMMER2_PBUFRADIX,
				       HAMMER2_BREF_TYPE_DATA,
				       pblksize, mtid,
				       dedup_off, 0);
		/*ip->delta_dcount += pblksize;*/
	} else if (chain->error == 0) {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * the inode to be modified via
			 * hammer2_chain_modify_ip().
			 */
			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				*errorp |= hammer2_chain_resize(chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
						        HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	} else {
		*errorp = chain->error;
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	return (chain);
}
/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the low-level blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *bdata;

	*errorp = 0;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (bdata == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, data);
		} else {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(data, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(data, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}
/*
 * Generic function that will perform the compression in compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
			   hammer2_chain_t **parentp,
			   hammer2_key_t lbase, int ioflag, int pblksize,
			   hammer2_tid_t mtid, int *errorp, int comp_algo,
			   int check_algo)
{
	hammer2_chain_t *chain;
	hammer2_io_t *dio;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *bdata;

	/*
	 * An all-zeros write creates a hole unless the check code
	 * is disabled.  When the check code is disabled all writes
	 * are done in-place, including any all-zeros writes.
	 *
	 * NOTE: A snapshot will still force a copy-on-write
	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
	 */
	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		zero_write(data, ip, parentp, lbase, mtid, errorp);
		return;
	}

	/*
	 * Compression requested.  Try to compress the block.  We store
	 * the data normally if we cannot sufficiently compress it.
	 *
	 * We have a heuristic to detect files which are mostly
	 * uncompressable and avoid the compression attempt in that
	 * case.  If the compression heuristic is turned off, we always
	 * try to compress.
	 */
	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
	    hammer2_always_compress) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 *
			 * NOTE: The LZ4 code seems to assume at least an
			 *	 8-byte buffer size granularity and may
			 *	 overrun the buffer if given a 4-byte
			 *	 aligned size.
			 */
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int64_t));
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;

			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded (block size bucketing is
		 * illustrated in the sketch after this function)
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, bdata will be set to NULL if a live-dedup
	 * was successful.
	 */
	bdata = comp_size ? comp_buffer : data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &bdata, errorp);

	if (*errorp) {
		/* skip modifications */
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
		if (*errorp == 0) {
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		}
	} else if (bdata == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on-media.
		 */
		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_write_bp: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.type,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 */
			hammer2_chain_setcheck(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
			hammer2_dedup_record(chain, dio, bdata);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}

	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}
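
/*
 * Illustrative sketch (not compiled): the size bucketing in
 * hammer2_compress_and_write() above rounds the compressed length up to the
 * next power-of-two block size, with a 1KB floor and a 32KB ceiling (half of
 * the 64KB physical buffer).  The helper below is hypothetical and only
 * restates that policy in a compact form; in the driver a comp_size above
 * 32768 is a panic condition rather than a clamp.
 */
#if 0
static int
round_comp_block_size(int comp_size)
{
	int bsize = 1024;			/* minimum media block */

	while (bsize < comp_size && bsize < 32768)
		bsize <<= 1;			/* next power of two */
	return (bsize);
}
#endif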
/*
 * Function that performs zero-checking and writing without compression,
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
			     hammer2_chain_t **parentp,
			     hammer2_key_t lbase, int ioflag, int pblksize,
			     hammer2_tid_t mtid, int *errorp,
			     int check_algo)
{
	hammer2_chain_t *chain;
	char *bdata;

	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		/*
		 * An all-zeros write creates a hole unless the check code
		 * is disabled.  When the check code is disabled all writes
		 * are done in-place, including any all-zeros writes.
		 *
		 * NOTE: A snapshot will still force a copy-on-write
		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
		 */
		zero_write(data, ip, parentp, lbase, mtid, errorp);
	} else {
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (bdata) {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} else {
			/* dedup occurred */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
			hammer2_chain_setcheck(chain, data);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}
/*
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}
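
/*
 * Illustrative sketch (not compiled): the word-at-a-time scan above relies
 * on 'bytes' being a multiple of sizeof(long), which holds for the
 * power-of-two physical block sizes used by this module.  A hypothetical
 * byte-granular variant for arbitrary lengths would look like this.
 */
#if 0
static int
test_block_zeros_bytewise(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; ++i) {
		if (buf[i] != 0)
			return (0);	/* found a non-zero byte */
	}
	return (1);			/* block is all zeros */
}
#endif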
/*
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			*errorp = hammer2_chain_modify_ip(ip, chain,
							  mtid, 0);
			if (*errorp == 0) {
				wipdata = &chain->data->ipdata;
				KKASSERT(wipdata->meta.op_flags &
					 HAMMER2_OPFLAG_DIRECTDATA);
				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
				++hammer2_iod_file_wembed;
			}
		} else {
			/* chain->error ok for deletion */
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}
/*
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the path without compression and
 * in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.type,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 */
		hammer2_chain_setcheck(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		hammer2_dedup_record(chain, dio, bdata);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		break;
	}
	*errorp = error;
}
/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *	    hammer2_chain_modify() only checks for the dedup case on data
 *	    chains.  Also, dedup data can only be recorded for committed
 *	    chains (so NOT strategy writes which can undergo further
 *	    modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
		     const char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	uint64_t crc;
	uint64_t mask;
	int best = 0;
	int i;
	int dticks;

	/*
	 * We can only record a dedup if we have media data to test against.
	 * If dedup is not enabled, return early, which allows a chain to
	 * remain marked MODIFIED (which might have benefits in special
	 * situations, though typically it does not).
	 */
	if (hammer2_dedup_enable == 0)
		return;
	if (dio == NULL) {
		dio = chain->dio;
		if (dio == NULL)
			return;
	}

	hmp = chain->hmp;

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_ISCSI32:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#else
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
#endif
		break;
	case HAMMER2_CHECK_XXHASH64:
		crc = chain->bref.check.xxhash64.value;
		break;
	case HAMMER2_CHECK_SHA192:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[2];
#else
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
#endif
		break;
	default:
		/*
		 * Cannot dedup without a check code
		 *
		 * NOTE: In particular, CHECK_NONE allows a sector to be
		 *	 overwritten without copy-on-write, recording
		 *	 a dedup block for a CHECK_NONE object would be
		 *	 a disaster!
		 */
		return;
	}

	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);

	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %016jx %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;

	/*
	 * Set the valid bits for the dedup only after we know the data
	 * buffer has been updated.  The alloc bits were set (and the valid
	 * bits cleared) when the media was allocated.
	 *
	 * This is done in two stages because the bulkfree code can race
	 * the gap between allocation and data population.  Both masks must
	 * be set before a bcmp/dedup operation is able to use the block.
	 */
	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
	atomic_set_64(&dio->dedup_valid, mask);

#if 0
	/*
	 * XXX removed. MODIFIED is an integral part of the flush code,
	 * lets not just clear it
	 */
	/*
	 * Once we record the dedup the chain must be marked clean to
	 * prevent reuse of the underlying block.   Remember that this
	 * write occurs when the buffer cache is flushed (i.e. on sync(),
	 * fsync(), filesystem periodic sync, or when the kernel needs to
	 * flush a buffer), and not whenever the user write()s.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);
	}
#endif
}
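
/*
 * Illustrative sketch (not compiled): the heuristic table above behaves like
 * a 4-way set-associative cache.  The low bits of the 64-bit key select a
 * 4-entry bucket; an exact key match is refreshed in place, otherwise the
 * stalest entry (by tick count) is replaced.  The structure and helper below
 * are hypothetical stand-ins for hammer2_dedup_t and the record path.
 */
#if 0
struct dedup_ent {
	uint64_t	key;		/* folded check code */
	uint64_t	data_off;	/* media offset of candidate */
	int		ticks;		/* last time this entry was touched */
};

static void
dedup_bucket_record(struct dedup_ent *table, int table_mask,
		    uint64_t key, uint64_t data_off, int now)
{
	struct dedup_ent *bucket = &table[key & (table_mask & ~3)];
	int best = 0;
	int i;

	for (i = 0; i < 4; ++i) {
		if (bucket[i].key == key) {
			best = i;		/* exact match, refresh it */
			break;
		}
		if (bucket[i].ticks - bucket[best].ticks < 0)
			best = i;		/* remember stalest slot */
	}
	bucket[best].key = key;
	bucket[best].data_off = data_off;
	bucket[best].ticks = now;
}
#endif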
static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint64_t crc;
	uint64_t mask;
	char *data;
	char *dtmp;
	int i;

	if (hammer2_dedup_enable == 0)
		return 0;
	data = *datap;
	if (data == NULL)
		return 0;

	/*
	 * XXX use the built-in crc (the dedup lookup sequencing
	 * needs to be fixed so the check code is already present
	 * when dedup_lookup is called)
	 */
	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio) {
			dtmp = hammer2_io_data(dio, off);
			mask = hammer2_dedup_mask(dio, off, pblksize);
			if ((dio->dedup_alloc & mask) == mask &&
			    (dio->dedup_valid & mask) == mask &&
			    bcmp(data, dtmp, pblksize) == 0) {
				if (hammer2_debug & 0x40000) {
					kprintf("DEDUP SUCCESS %016jx\n",
						(intmax_t)off);
				}
				hammer2_io_putblk(&dio);
				*datap = NULL;
				dedup[i].ticks = ticks;	/* update use */
				atomic_add_long(&hammer2_iod_file_wdedup,
						pblksize);
				return off;		/* RETURN */
			}
			hammer2_io_putblk(&dio);
		}
	}
	return 0;
}
/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 *     for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}