sys/vfs/hammer2: Fix double count of hammer2_iod_file_{read,write}
/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block detection, check codes, and buffer cache
 * operations for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/objcache.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
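
/*
 * NOTE: These object caches supply the temporary buffers used by the
 *	 [de]compression paths below via objcache_get()/objcache_put().
 *	 Each buffer must be able to hold a full physical block.  The
 *	 caches themselves are created outside this file (during VFS/module
 *	 initialization).
 */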

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via a XOP.
 *
 * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
 *	    to the current cpu.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
 *     calls but it has in the past when multiple flushes are queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
				const char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
			char **datap, int pblksize);

int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/

/*
 * Callback used in read path in case that a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	       crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	compressed_size = *(const int *)data;
	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression. "
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}
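
/*
 * NOTE: The on-media layout consumed by the LZ4 read path above (and
 *	 produced by hammer2_compress_and_write() below) is a native int
 *	 length prefix followed by the LZ4 payload, zero-padded out to
 *	 the physical block size:
 *
 *		[int compressed_size][LZ4 payload][zero fill]
 *
 *	 The prefix is required because LZ4 itself does not record the
 *	 compressed length.
 */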

/*
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct bio *bio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	ip = VTOI(ap->a_vp);

	lbase = bio->bio_offset;
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
	/* asynchronous completion */

	return(0);
}

/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
void
hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_chain_t *focus;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	const char *data;
	int error;

	/*
	 * Note that we can race completion of the bio supplied by
	 * the front-end so we cannot access it until we determine
	 * that we are the ones finishing it up.
	 */
	lbase = xop->lbase;

	/*
	 * This is difficult to optimize.  The logical buffer might be
	 * partially dirty (contain dummy zero-fill pages), which would
	 * mess up our crc calculation if we were to try a direct read.
	 * So for now we always double-buffer through the underlying
	 * storage.
	 *
	 * If not for the above problem we could conditionalize on
	 * (1) 64KB buffer, (2) one chain (not multi-master) and
	 * (3) !hammer2_double_buffer, and issue a direct read into the
	 * logical buffer.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &error,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		if (chain)
			error = chain->error;
	} else {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend.  First-to-complete.  bio is only
	 * valid if we are determined to be the ones able to complete
	 * the operation.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}
	bio = xop->bio;
	bp = bio->bio_buf;
	bkvasync(bp);

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 *
	 * Also note that even for compressed data we would rather the
	 * kernel cache/swapcache device buffers more and (decompressed)
	 * logical buffers less, since that will significantly improve
	 * the amount of end-user data that can be cached.
	 *
	 * NOTE: The chain->data for xop->head.cluster.focus will be
	 *	 synchronized to the current cpu by xop_collect(),
	 *	 but other chains in the cluster might not be.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		focus = xop->head.cluster.focus;
		data = hammer2_xop_gdata(&xop->head)->buf;
		hammer2_strategy_read_completion(focus, data, xop->bio);
		hammer2_xop_pdata(&xop->head);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_ENOENT:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		kprintf("xop_strategy_read: error %08x loff=%016jx\n",
			error, (intmax_t)bp->b_loffset);
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}
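
/*
 * NOTE: Completion protocol used above: each per-node thread feeds its
 *	 result and then races to collect on behalf of the frontend.
 *	 xop->finished plus xop->lock arbitrate the race; the first thread
 *	 whose non-blocking collect returns a definitive result (0, ENOENT,
 *	 or a hard error) marks the XOP finished, completes the bio, and
 *	 retires the XOP.  A thread seeing EINPROGRESS simply drops the
 *	 lock and lets a later thread finish the job.
 */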

static
void
hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Copy from in-memory inode structure.
		 */
		bcopy(((const hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.  Release the
		 * chain (try to free it) when done.  The data is still
		 * cached by both the buffer cache in front and the
		 * block device behind us.  This leaves more room in the
		 * LRU chain cache for meta-data chains which we really
		 * want to retain.
		 *
		 * NOTE: Deduplication cannot be safely recorded for
		 *	 records without a check code.
		 */
		hammer2_dedup_record(focus, NULL, data);
		atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, focus->bytes,
							bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, focus->bytes,
							 bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(focus->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, focus->bytes);
			if (focus->bytes < bp->b_bcount) {
				bzero(bp->b_data + focus->bytes,
				      bp->b_bcount - focus->bytes);
			}
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read_completion: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read_completion: unknown bref type");
	}
}

/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

	return(0);
}
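
/*
 * NOTE: hammer2_lwinprog_ref()/hammer2_lwinprog_wait() above act as a
 *	 logical-write-in-progress throttle: the frontend blocks in
 *	 lwinprog_wait() once hammer2_flush_pipe strategy writes are in
 *	 flight, bounding how far the buffer cache can get ahead of the
 *	 backend XOP threads.
 */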

/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
 */
void
hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;
	char *bio_data;

	/*
	 * We can only access the bp/bio if the frontend has not yet
	 * completed.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_sh(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	lbase = xop->lbase;
	bio = xop->bio;			/* ephemeral */
	bp = bio->bio_buf;		/* ephemeral */
	ip = xop->head.ip1;		/* retained by ref */
	bio_data = scratch;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);
	bkvasync(bp);
	KKASSERT(lblksize <= MAXPHYS);
	bcopy(bp->b_data, bio_data, lblksize);

	hammer2_mtx_unlock(&xop->lock);
	bp = NULL;	/* safety, illegal to access after unlock */
	bio = NULL;	/* safety, illegal to access after unlock */

	/*
	 * Actual operation
	 */
	parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bio_data, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	hammer2_xop_feed(&xop->head, NULL, clindex, error);

	/*
	 * Try to complete the operation on behalf of the front-end.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	if (error == HAMMER2_ERROR_EINPROGRESS) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has completed.
	 */
	xop->finished = 1;
	hammer2_mtx_unlock(&xop->lock);

	bio = xop->bio;		/* now owned by us */
	bp = bio->bio_buf;	/* now owned by us */

	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		kprintf("xop_strategy_write: error %d loff=%016jx\n",
			error, (intmax_t)bp->b_loffset);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_lwinprog_drop(ip->pmp);
	hammer2_trans_done(ip->pmp, HAMMER2_TRANS_BUFCACHE);
}

/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}

/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);

	/*
	 * The lookup code should not return a DELETED chain to us, unless
	 * it's a short-file embedded in the inode.  Then it is possible for
	 * the lookup to return a deleted inode.
	 */
	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
		kprintf("assign physical deleted chain @ "
			"%016jx (%016jx.%02x) ip %016jx\n",
			lbase, chain->bref.data_off, chain->bref.type,
			ip->meta.inum);
		Debugger("bleh");
	}

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp |= hammer2_chain_create(parentp, &chain, NULL, ip->pmp,
				       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
				       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
					     lbase, HAMMER2_PBUFRADIX,
					     HAMMER2_BREF_TYPE_DATA,
					     pblksize, mtid,
					     dedup_off, 0);
		if (chain == NULL)
			goto failed;
		/*ip->delta_dcount += pblksize;*/
	} else if (chain->error == 0) {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit more finesse.
			 */
			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				*errorp |= hammer2_chain_resize(chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
				if (*errorp)
					break;
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
							HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	} else {
		*errorp = chain->error;
	}
	atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
failed:
	return (chain);
}
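
/*
 * NOTE: Caller contract for the dedup path above: callers pass the address
 *	 of their data pointer and must re-test it afterwards, e.g.:
 *
 *		bdata = data;
 *		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
 *						mtid, &bdata, errorp);
 *		if (bdata == NULL) {
 *			(dedup hit, data already on-media; only the
 *			 blockref methods and check code need updating)
 *		}
 */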

/*
 * hammer2_write_file_core()
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *bdata;

	*errorp = 0;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (bdata == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, data);
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		} else {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(data, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(data, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}

/*
 * Helper
 *
 * Generic function that will perform the compression in compression
 * write path. The compression algorithm is determined by the settings
 * obtained from inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *bdata;

	/*
	 * An all-zeros write creates a hole unless the check code
	 * is disabled.  When the check code is disabled all writes
	 * are done in-place, including any all-zeros writes.
	 *
	 * NOTE: A snapshot will still force a copy-on-write
	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
	 */
	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		zero_write(data, ip, parentp, lbase, mtid, errorp);
		return;
	}

	/*
	 * Compression requested.  Try to compress the block.  We store
	 * the data normally if we cannot sufficiently compress it.
	 *
	 * We have a heuristic to detect files which are mostly
	 * incompressible and avoid the compression attempt in that
	 * case.  If the compression heuristic is turned off, we always
	 * try to compress.
	 */
	comp_size = 0;
	comp_buffer = NULL;

	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
	    hammer2_always_compress) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 *
			 * NOTE: The LZ4 code seems to assume at least an
			 *	 8-byte buffer size granularity and may
			 *	 overrun the buffer if given a 4-byte
			 *	 granularity.
			 */
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int64_t));
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, bdata will be set to NULL if a live-dedup
	 * was successful.
	 */
	bdata = comp_size ? comp_buffer : data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &bdata, errorp);

	if (*errorp) {
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
		if (*errorp == 0) {
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		}
	} else if (bdata == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on the media.
		 */
		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_compress_and_write: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.type,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_block_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 */
			hammer2_chain_setcheck(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
			hammer2_dedup_record(chain, dio, bdata);

			/* Now write the related bdp. */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_compress_and_write: bad chain type %d\n",
				chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}
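
/*
 * NOTE: comp_heuristic ratchet used above: values 0-7 mean compression
 *	 was recently successful and is always attempted.  Every write that
 *	 does not produce a compressed block bumps the counter (wrapping
 *	 from >128 back to 8); once it is >= 8, compression is only
 *	 re-attempted when the counter is a multiple of 8, so mostly
 *	 incompressible files are probed at a low rate instead of on every
 *	 write.  A successful compression resets the counter to 0.
 */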

/*
 * Helper
 *
 * Function that performs zero-checking and writing without compression,
 * it corresponds to default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *bdata;

	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		/*
		 * An all-zeros write creates a hole unless the check code
		 * is disabled.  When the check code is disabled all writes
		 * are done in-place, including any all-zeros writes.
		 *
		 * NOTE: A snapshot will still force a copy-on-write
		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
		 */
		zero_write(data, ip, parentp, lbase, mtid, errorp);
	} else {
		/*
		 * Normal write (bdata set to NULL if de-duplicated)
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* do nothing */
		} else if (bdata) {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} else {
			/* dedup occurred */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
			hammer2_chain_setcheck(chain, data);
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}

/*
 * Helper
 *
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}
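
/*
 * NOTE: test_block_zeros() scans in sizeof(long) strides and assumes the
 *	 buffer is long-aligned and a multiple of sizeof(long) in size.
 *	 This holds here because physical blocks are power-of-2 sized
 *	 (>= HAMMER2_ALLOC_MIN) and the scratch buffers are suitably
 *	 aligned.
 */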

/*
 * Helper
 *
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			if (*errorp == 0) {
				*errorp = hammer2_chain_modify_ip(ip, chain,
								  mtid, 0);
			}
			if (*errorp == 0) {
				wipdata = &chain->data->ipdata;
				KKASSERT(wipdata->meta.op_flags &
					 HAMMER2_OPFLAG_DIRECTDATA);
				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
				++hammer2_iod_file_wembed;
			}
		} else {
			/* chain->error ok for deletion */
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}

/*
 * Helper
 *
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the path without compression
 * and in the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.type,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 */
		hammer2_chain_setcheck(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		hammer2_dedup_record(chain, dio, bdata);

		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		error = 0;
		break;
	}
	*errorp = error;
}

/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *	    hammer2_chain_modify() only checks for the dedup case on data
 *	    chains.  Also, dedup data can only be recorded for committed
 *	    chains (so NOT strategy writes which can undergo further
 *	    modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
		     const char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	uint64_t crc;
	uint64_t mask;
	int best = 0;
	int i;
	int dticks;

	/*
	 * We can only record a dedup if we have media data to test against.
	 * If dedup is not enabled, return early, which allows a chain to
	 * remain marked MODIFIED (which might have benefits in special
	 * situations, though typically it does not).
	 */
	if (hammer2_dedup_enable == 0)
		return;
	if (dio == NULL) {
		dio = chain->dio;
		if (dio == NULL)
			return;
	}

	hmp = chain->hmp;

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_ISCSI32:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	case HAMMER2_CHECK_XXHASH64:
		crc = chain->bref.check.xxhash64.value;
		break;
	case HAMMER2_CHECK_SHA192:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	default:
		/*
		 * Cannot dedup without a check code
		 *
		 * NOTE: In particular, CHECK_NONE allows a sector to be
		 *	 overwritten without copy-on-write, recording
		 *	 a dedup block for a CHECK_NONE object would be
		 *	 a disaster!
		 */
		return;
	}

	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);

	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %016jx %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;

	/*
	 * Set the valid bits for the dedup only after we know the data
	 * buffer has been updated.  The alloc bits were set (and the valid
	 * bits cleared) when the media was allocated.
	 *
	 * This is done in two stages because the bulkfree code can race
	 * the gap between allocation and data population.  Both masks must
	 * be set before a bcmp/dedup operation is able to use the block.
	 */
	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
	atomic_set_64(&dio->dedup_valid, mask);

#if 0
	/*
	 * XXX removed. MODIFIED is an integral part of the flush code,
	 * let's not just clear it
	 */
	/*
	 * Once we record the dedup the chain must be marked clean to
	 * prevent reuse of the underlying block.  Remember that this
	 * write occurs when the buffer cache is flushed (i.e. on sync(),
	 * fsync(), filesystem periodic sync, or when the kernel needs to
	 * flush a buffer), and not whenever the user write()s.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp, -1);
	}
#endif
}
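
/*
 * NOTE: The dedup heuristic table above is effectively 4-way set
 *	 associative: the crc selects a bucket of four entries (the low
 *	 two index bits are masked off), and the entry with a matching
 *	 crc, or failing that the stalest entry (by ticks, with anything
 *	 older than 30 minutes considered stale), is replaced.
 */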

static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint64_t crc;
	uint64_t mask;
	char *data;
	char *dtmp;
	int i;

	if (hammer2_dedup_enable == 0)
		return 0;
	data = *datap;
	if (data == NULL)
		return 0;

	/*
	 * XXX use the built-in crc (the dedup lookup sequencing
	 * needs to be fixed so the check code is already present
	 * when dedup_lookup is called)
	 */
	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio) {
			dtmp = hammer2_io_data(dio, off);
			mask = hammer2_dedup_mask(dio, off, pblksize);
			if ((dio->dedup_alloc & mask) == mask &&
			    (dio->dedup_valid & mask) == mask &&
			    bcmp(data, dtmp, pblksize) == 0) {
				if (hammer2_debug & 0x40000) {
					kprintf("DEDUP SUCCESS %016jx\n",
						(intmax_t)off);
				}
				hammer2_io_putblk(&dio);
				*datap = NULL;
				dedup[i].ticks = ticks;	/* update use */
				atomic_add_long(&hammer2_iod_file_wdedup,
						pblksize);

				return off;		/* RETURN */
			}
			hammer2_io_putblk(&dio);
		}
	}
	return 0;
}
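
/*
 * NOTE: Three independent tests gate a dedup hit above: the recorded crc
 *	 must match, the size radix encoded in the low bits of data_off
 *	 must match pblksize, and both the dedup_alloc and dedup_valid
 *	 masks in the DIO must cover the block before the final bcmp() is
 *	 trusted.  This is what keeps the heuristic safe against stale or
 *	 recycled table entries.
 */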

/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 *     for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}