hammer2 - Add kernel-thread-based async bulk free
[dragonfly.git] / sys/vfs/hammer2/hammer2_strategy.c
blob f950fc69a2a49732c3dc3c7f5577401e031ed66a
1 /*
2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
37 * This module handles low level logical file I/O (strategy) which backs
38 * the logical buffer cache.
40 * [De]compression, zero-block, check codes, and buffer cache operations
41 * for file data are handled here.
43 * Live dedup makes its home here as well.
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/fcntl.h>
50 #include <sys/buf.h>
51 #include <sys/proc.h>
52 #include <sys/namei.h>
53 #include <sys/mount.h>
54 #include <sys/vnode.h>
55 #include <sys/mountctl.h>
56 #include <sys/dirent.h>
57 #include <sys/uio.h>
58 #include <sys/objcache.h>
59 #include <sys/event.h>
60 #include <sys/file.h>
61 #include <vfs/fifofs/fifo.h>
63 #include "hammer2.h"
64 #include "hammer2_lz4.h"
66 #include "zlib/hammer2_zlib.h"
68 struct objcache *cache_buffer_read;
69 struct objcache *cache_buffer_write;
72 * Strategy code (async logical file buffer I/O from system)
74 * Except for the transaction init (which should normally not block),
75 * we essentially run the strategy operation asynchronously via an XOP.
77 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
78 * calls but it has in the past when multiple flushes are queued.
80 * XXX We currently terminate the transaction once we get a quorum, otherwise
81 * the frontend can stall, but this can leave the remaining nodes with
82 * a potential flush conflict. We need to delay flushes on those nodes
83 * until running transactions complete separately from the normal
84 * transaction sequencing. FIXME TODO.
86 static void hammer2_strategy_xop_read(hammer2_thread_t *thr,
87 hammer2_xop_t *arg);
88 static void hammer2_strategy_xop_write(hammer2_thread_t *thr,
89 hammer2_xop_t *arg);
90 static int hammer2_strategy_read(struct vop_strategy_args *ap);
91 static int hammer2_strategy_write(struct vop_strategy_args *ap);
92 static void hammer2_strategy_read_completion(hammer2_chain_t *chain,
93 char *data, struct bio *bio);
95 static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
96 char **datap, int pblksize);
98 int h2timer[32];
99 int h2last;
100 int h2lid;
102 #define TIMER(which) do { \
103 if (h2last) \
104 h2timer[h2lid] += (int)(ticks - h2last);\
105 h2last = ticks; \
106 h2lid = which; \
107 } while(0)
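/*
 * NOTE: TIMER() is a crude profiling aid; h2timer[], h2last and h2lid are
 *	 plain globals, so the counts are not SMP-exact.  Each call charges
 *	 the ticks elapsed since the previous call to the bucket armed by
 *	 that previous call, then arms bucket 'which'.  A minimal usage
 *	 sketch (do_lookup() is just a placeholder):
 *
 *		TIMER(0);		// arm bucket 0
 *		do_lookup();
 *		TIMER(1);		// h2timer[0] += ticks spent above
 */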
110 hammer2_vop_strategy(struct vop_strategy_args *ap)
112 struct bio *biop;
113 struct buf *bp;
114 int error;
116 biop = ap->a_bio;
117 bp = biop->bio_buf;
119 switch(bp->b_cmd) {
120 case BUF_CMD_READ:
121 error = hammer2_strategy_read(ap);
122 ++hammer2_iod_file_read;
123 break;
124 case BUF_CMD_WRITE:
125 error = hammer2_strategy_write(ap);
126 ++hammer2_iod_file_write;
127 break;
128 default:
129 bp->b_error = error = EINVAL;
130 bp->b_flags |= B_ERROR;
131 biodone(biop);
132 break;
134 return (error);
138 * Return the largest contiguous physical disk range for the logical
139 * request, in bytes.
141 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
143 * Basically disabled, the logical buffer write thread has to deal with
144 * buffers one-at-a-time. Note that this should not prevent cluster_read()
145 * from reading-ahead, it simply prevents it from trying to form a single
146 * cluster buffer for the logical request. H2 already uses 64KB buffers!
149 hammer2_vop_bmap(struct vop_bmap_args *ap)
151 *ap->a_doffsetp = NOOFFSET;
152 if (ap->a_runp)
153 *ap->a_runp = 0;
154 if (ap->a_runb)
155 *ap->a_runb = 0;
156 return (EOPNOTSUPP);
159 /****************************************************************************
160 * READ SUPPORT *
161 ****************************************************************************/
163 * Callback used in read path in case that a block is compressed with LZ4.
165 static
166 void
167 hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
169 struct buf *bp;
170 char *compressed_buffer;
171 int compressed_size;
172 int result;
174 bp = bio->bio_buf;
176 #if 0
177 if bio->bio_caller_info2.index &&
178 bio->bio_caller_info1.uvalue32 !=
179 crc32(bp->b_data, bp->b_bufsize) --- return error
180 #endif
182 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
183 compressed_size = *(const int *)data;
184 KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));
186 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
187 result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
188 compressed_buffer,
189 compressed_size,
190 bp->b_bufsize);
191 if (result < 0) {
192 kprintf("READ PATH: Error during decompression."
193 "bio %016jx/%d\n",
194 (intmax_t)bio->bio_offset, bytes);
195 /* make sure it isn't random garbage */
196 bzero(compressed_buffer, bp->b_bufsize);
198 KKASSERT(result <= bp->b_bufsize);
199 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
200 if (result < bp->b_bufsize)
201 bzero(bp->b_data + result, bp->b_bufsize - result);
202 objcache_put(cache_buffer_read, compressed_buffer);
203 bp->b_resid = 0;
204 bp->b_flags |= B_AGE;
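/*
 * NOTE: the LZ4 path stores each block as a native-endian 32-bit
 *	 compressed-size header followed by the raw LZ4 stream (the write
 *	 path below adds the prefix because LZ4 itself does not).  A
 *	 minimal sketch of the layout consumed by the callback above:
 *
 *		int csize = *(const int *)data;		// payload bytes
 *		const char *payload = data + sizeof(int);
 *		// decompresses to at most bp->b_bufsize bytes via
 *		// LZ4_decompress_safe(payload, out, csize, bp->b_bufsize)
 */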
208 * Callback used in read path in case that a block is compressed with ZLIB.
209 * It is almost identical to the LZ4 callback, so in theory they could be unified,
210 * but we didn't want to make changes to the bio structure for that.
212 static
213 void
214 hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
216 struct buf *bp;
217 char *compressed_buffer;
218 z_stream strm_decompress;
219 int result;
220 int ret;
222 bp = bio->bio_buf;
224 KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
225 strm_decompress.avail_in = 0;
226 strm_decompress.next_in = Z_NULL;
228 ret = inflateInit(&strm_decompress);
230 if (ret != Z_OK)
231 kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");
233 compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
234 strm_decompress.next_in = __DECONST(char *, data);
236 /* XXX supply proper size, subset of device bp */
237 strm_decompress.avail_in = bytes;
238 strm_decompress.next_out = compressed_buffer;
239 strm_decompress.avail_out = bp->b_bufsize;
241 ret = inflate(&strm_decompress, Z_FINISH);
242 if (ret != Z_STREAM_END) {
243 kprintf("HAMMER2 ZLIB: Fatar error during decompression.\n");
244 bzero(compressed_buffer, bp->b_bufsize);
246 bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
247 result = bp->b_bufsize - strm_decompress.avail_out;
248 if (result < bp->b_bufsize)
249 bzero(bp->b_data + result, strm_decompress.avail_out);
250 objcache_put(cache_buffer_read, compressed_buffer);
251 ret = inflateEnd(&strm_decompress);
253 bp->b_resid = 0;
254 bp->b_flags |= B_AGE;
258 * Logical buffer I/O, async read.
260 static
262 hammer2_strategy_read(struct vop_strategy_args *ap)
264 hammer2_xop_strategy_t *xop;
265 struct buf *bp;
266 struct bio *bio;
267 struct bio *nbio;
268 hammer2_inode_t *ip;
269 hammer2_key_t lbase;
271 bio = ap->a_bio;
272 bp = bio->bio_buf;
273 ip = VTOI(ap->a_vp);
274 nbio = push_bio(bio);
276 lbase = bio->bio_offset;
277 KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);
279 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
280 xop->finished = 0;
281 xop->bio = bio;
282 xop->lbase = lbase;
283 hammer2_mtx_init(&xop->lock, "h2bior");
284 hammer2_xop_start(&xop->head, hammer2_strategy_xop_read);
285 /* asynchronous completion */
287 return(0);
291 * Per-node XOP (threaded), do a synchronous lookup of the chain and
292 * its data. The frontend is asynchronous, so we are also responsible
293 * for racing to terminate the frontend.
295 static
296 void
297 hammer2_strategy_xop_read(hammer2_thread_t *thr, hammer2_xop_t *arg)
299 hammer2_xop_strategy_t *xop = &arg->xop_strategy;
300 hammer2_chain_t *parent;
301 hammer2_chain_t *chain;
302 hammer2_key_t key_dummy;
303 hammer2_key_t lbase;
304 struct bio *bio;
305 struct buf *bp;
306 int cache_index = -1;
307 int error;
310 * Note that we can race completion of the bio supplied by
311 * the front-end so we cannot access it until we determine
312 * that we are the ones finishing it up.
314 TIMER(0);
315 lbase = xop->lbase;
318 * This is difficult to optimize. The logical buffer might be
319 * partially dirty (contain dummy zero-fill pages), which would
320 * mess up our crc calculation if we were to try a direct read.
321 * So for now we always double-buffer through the underlying
322 * storage.
324 * If not for the above problem we could conditionalize on
325 * (1) 64KB buffer, (2) one chain (not multi-master) and
326 * (3) !hammer2_double_buffer, and issue a direct read into the
327 * logical buffer.
329 parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
330 HAMMER2_RESOLVE_ALWAYS |
331 HAMMER2_RESOLVE_SHARED);
332 TIMER(1);
333 if (parent) {
334 chain = hammer2_chain_lookup(&parent, &key_dummy,
335 lbase, lbase,
336 &cache_index,
337 HAMMER2_LOOKUP_ALWAYS |
338 HAMMER2_LOOKUP_SHARED);
339 error = chain ? chain->error : 0;
340 } else {
341 error = EIO;
342 chain = NULL;
344 TIMER(2);
345 error = hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
346 TIMER(3);
347 if (chain) {
348 hammer2_chain_unlock(chain);
349 hammer2_chain_drop(chain);
351 if (parent) {
352 hammer2_chain_unlock(parent);
353 hammer2_chain_drop(parent);
355 chain = NULL; /* safety */
356 parent = NULL; /* safety */
357 TIMER(4);
360 * Race to finish the frontend. First-to-complete. bio is only
361 * valid if we are determined to be the ones able to complete
362 * the operation.
364 if (xop->finished)
365 return;
366 hammer2_mtx_ex(&xop->lock);
367 if (xop->finished) {
368 hammer2_mtx_unlock(&xop->lock);
369 return;
371 bio = xop->bio;
372 bp = bio->bio_buf;
375 * Async operation has not completed and we now own the lock.
376 * Determine if we can complete the operation by issuing the
377 * frontend collection non-blocking.
379 * H2 double-buffers the data, setting B_NOTMETA on the logical
380 * buffer hints to the OS that the logical buffer should not be
381 * swapcached (since the device buffer can be).
383 * Also note that even for compressed data we would rather the
384 * kernel cache/swapcache device buffers more and (decompressed)
385 * logical buffers less, since that will significantly improve
386 * the amount of end-user data that can be cached.
388 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
389 TIMER(5);
391 switch(error) {
392 case 0:
393 xop->finished = 1;
394 hammer2_mtx_unlock(&xop->lock);
395 bp->b_flags |= B_NOTMETA;
396 chain = xop->head.cluster.focus;
397 hammer2_strategy_read_completion(chain, (char *)chain->data,
398 xop->bio);
399 biodone(bio);
400 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
401 break;
402 case ENOENT:
403 xop->finished = 1;
404 hammer2_mtx_unlock(&xop->lock);
405 bp->b_flags |= B_NOTMETA;
406 bp->b_resid = 0;
407 bp->b_error = 0;
408 bzero(bp->b_data, bp->b_bcount);
409 biodone(bio);
410 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
411 break;
412 case EINPROGRESS:
413 hammer2_mtx_unlock(&xop->lock);
414 break;
415 default:
416 kprintf("strategy_xop_read: error %d loff=%016jx\n",
417 error, bp->b_loffset);
418 xop->finished = 1;
419 hammer2_mtx_unlock(&xop->lock);
420 bp->b_flags |= B_ERROR;
421 bp->b_error = EIO;
422 biodone(bio);
423 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
424 break;
426 TIMER(6);
429 static
430 void
431 hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data,
432 struct bio *bio)
434 struct buf *bp = bio->bio_buf;
436 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
438 * Copy from in-memory inode structure.
440 bcopy(((hammer2_inode_data_t *)data)->u.data,
441 bp->b_data, HAMMER2_EMBEDDED_BYTES);
442 bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
443 bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
444 bp->b_resid = 0;
445 bp->b_error = 0;
446 } else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
448 * Data is on-media, record for live dedup. Release the
449 * chain (try to free it) when done. The data is still
450 * cached by both the buffer cache in front and the
451 * block device behind us. This leaves more room in the
452 * LRU chain cache for meta-data chains which we really
453 * want to retain.
455 hammer2_dedup_record(chain, data);
456 atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);
459 * Decompression and copy.
461 switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
462 case HAMMER2_COMP_LZ4:
463 hammer2_decompress_LZ4_callback(data, chain->bytes,
464 bio);
465 /* b_resid set by call */
466 break;
467 case HAMMER2_COMP_ZLIB:
468 hammer2_decompress_ZLIB_callback(data, chain->bytes,
469 bio);
470 /* b_resid set by call */
471 break;
472 case HAMMER2_COMP_NONE:
473 KKASSERT(chain->bytes <= bp->b_bcount);
474 bcopy(data, bp->b_data, chain->bytes);
475 if (chain->bytes < bp->b_bcount) {
476 bzero(bp->b_data + chain->bytes,
477 bp->b_bcount - chain->bytes);
479 bp->b_resid = 0;
480 bp->b_error = 0;
481 break;
482 default:
483 panic("hammer2_strategy_read: "
484 "unknown compression type");
486 } else {
487 panic("hammer2_strategy_read: unknown bref type");
491 /****************************************************************************
492 * WRITE SUPPORT *
493 ****************************************************************************/
496 * Functions for compression in threads,
497 * from hammer2_vnops.c
499 static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
500 hammer2_chain_t **parentp,
501 hammer2_key_t lbase, int ioflag, int pblksize,
502 hammer2_tid_t mtid, int *errorp);
503 static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
504 hammer2_chain_t **parentp,
505 hammer2_key_t lbase, int ioflag, int pblksize,
506 hammer2_tid_t mtid, int *errorp,
507 int comp_algo, int check_algo);
508 static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
509 hammer2_chain_t **parentp,
510 hammer2_key_t lbase, int ioflag, int pblksize,
511 hammer2_tid_t mtid, int *errorp,
512 int check_algo);
513 static int test_block_zeros(const char *buf, size_t bytes);
514 static void zero_write(char *data, hammer2_inode_t *ip,
515 hammer2_chain_t **parentp,
516 hammer2_key_t lbase,
517 hammer2_tid_t mtid, int *errorp);
518 static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
519 int ioflag, int pblksize,
520 hammer2_tid_t mtid, int *errorp,
521 int check_algo);
523 static
525 hammer2_strategy_write(struct vop_strategy_args *ap)
527 hammer2_xop_strategy_t *xop;
528 hammer2_pfs_t *pmp;
529 struct bio *bio;
530 struct buf *bp;
531 hammer2_inode_t *ip;
533 bio = ap->a_bio;
534 bp = bio->bio_buf;
535 ip = VTOI(ap->a_vp);
536 pmp = ip->pmp;
538 hammer2_lwinprog_ref(pmp);
539 hammer2_trans_assert_strategy(pmp);
540 hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);
542 xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
543 HAMMER2_XOP_STRATEGY);
544 xop->finished = 0;
545 xop->bio = bio;
546 xop->lbase = bio->bio_offset;
547 hammer2_mtx_init(&xop->lock, "h2biow");
548 hammer2_xop_start(&xop->head, hammer2_strategy_xop_write);
549 /* asynchronous completion */
551 hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
553 return(0);
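/*
 * NOTE: hammer2_lwinprog_ref()/_drop() count logical writes in progress.
 *	 The hammer2_lwinprog_wait(pmp, hammer2_flush_pipe) call above
 *	 appears to provide simple back-pressure, blocking the frontend once
 *	 more than hammer2_flush_pipe logical buffers are queued to the
 *	 backend, while hammer2_bioq_sync() below waits for the count to
 *	 drain to zero.
 */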
557 * Per-node XOP (threaded). Write the logical buffer to the media.
559 * This is a bit problematic because there may be multiple targets and
560 * any of them may be able to release the bp. In addition, if our
561 * particular target is offline we don't want to block the bp (and thus
562 * the frontend). To accomplish this we copy the data to the per-thread
563 * scratch buffer.
565 static
566 void
567 hammer2_strategy_xop_write(hammer2_thread_t *thr, hammer2_xop_t *arg)
569 hammer2_xop_strategy_t *xop = &arg->xop_strategy;
570 hammer2_chain_t *parent;
571 hammer2_key_t lbase;
572 hammer2_inode_t *ip;
573 struct bio *bio;
574 struct buf *bp;
575 int error;
576 int lblksize;
577 int pblksize;
578 hammer2_off_t bio_offset;
579 char *bio_data;
582 * We can only access the bp/bio if the frontend has not yet
583 * completed.
585 if (xop->finished)
586 return;
587 hammer2_mtx_sh(&xop->lock);
588 if (xop->finished) {
589 hammer2_mtx_unlock(&xop->lock);
590 return;
593 lbase = xop->lbase;
594 bio = xop->bio; /* ephemeral */
595 bp = bio->bio_buf; /* ephemeral */
596 ip = xop->head.ip1; /* retained by ref */
597 bio_offset = bio->bio_offset;
598 bio_data = thr->scratch;
600 /* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */
602 lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
603 pblksize = hammer2_calc_physical(ip, lbase);
604 bcopy(bp->b_data, bio_data, lblksize);
606 hammer2_mtx_unlock(&xop->lock);
607 bp = NULL; /* safety, illegal to access after unlock */
608 bio = NULL; /* safety, illegal to access after unlock */
611 * Actual operation
613 parent = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
614 hammer2_write_file_core(bio_data, ip, &parent,
615 lbase, IO_ASYNC, pblksize,
616 xop->head.mtid, &error);
617 if (parent) {
618 hammer2_chain_unlock(parent);
619 hammer2_chain_drop(parent);
620 parent = NULL; /* safety */
622 hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
625 * Try to complete the operation on behalf of the front-end.
627 if (xop->finished)
628 return;
629 hammer2_mtx_ex(&xop->lock);
630 if (xop->finished) {
631 hammer2_mtx_unlock(&xop->lock);
632 return;
636 * Async operation has not completed and we now own the lock.
637 * Determine if we can complete the operation by issuing the
638 * frontend collection non-blocking.
640 * H2 double-buffers the data, setting B_NOTMETA on the logical
641 * buffer hints to the OS that the logical buffer should not be
642 * swapcached (since the device buffer can be).
644 error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);
646 if (error == EINPROGRESS) {
647 hammer2_mtx_unlock(&xop->lock);
648 return;
652 * Async operation has completed.
654 xop->finished = 1;
655 hammer2_mtx_unlock(&xop->lock);
657 bio = xop->bio; /* now owned by us */
658 bp = bio->bio_buf; /* now owned by us */
660 if (error == ENOENT || error == 0) {
661 bp->b_flags |= B_NOTMETA;
662 bp->b_resid = 0;
663 bp->b_error = 0;
664 biodone(bio);
665 } else {
666 kprintf("strategy_xop_write: error %d loff=%016jx\n",
667 error, bp->b_loffset);
668 bp->b_flags |= B_ERROR;
669 bp->b_error = EIO;
670 biodone(bio);
672 hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
673 hammer2_trans_assert_strategy(ip->pmp);
674 hammer2_lwinprog_drop(ip->pmp);
675 hammer2_trans_done(ip->pmp);
679 * Wait for pending I/O to complete
681 void
682 hammer2_bioq_sync(hammer2_pfs_t *pmp)
684 hammer2_lwinprog_wait(pmp, 0);
688 * Create a new cluster at (cparent, lbase) and assign physical storage,
689 * returning a cluster suitable for I/O. The cluster will be in a modified
690 * state.
692 * cparent can wind up being anything.
694 * If datap is not NULL, *datap points to the real data we intend to write.
695 * If we can dedup the storage location we set *datap to NULL to indicate
696 * to the caller that a dedup occurred.
698 * NOTE: Special case for data embedded in inode.
700 static
701 hammer2_chain_t *
702 hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
703 hammer2_key_t lbase, int pblksize,
704 hammer2_tid_t mtid, char **datap, int *errorp)
706 hammer2_chain_t *chain;
707 hammer2_key_t key_dummy;
708 hammer2_off_t dedup_off;
709 int pradix = hammer2_getradix(pblksize);
710 int cache_index = -1;
713 * Locate the chain associated with lbase, return a locked chain.
714 * However, do not instantiate any data reference (which utilizes a
715 * device buffer) because we will be using direct IO via the
716 * logical buffer cache buffer.
718 *errorp = 0;
719 KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);
720 retry:
721 TIMER(30);
722 chain = hammer2_chain_lookup(parentp, &key_dummy,
723 lbase, lbase,
724 &cache_index,
725 HAMMER2_LOOKUP_NODATA);
728 * The lookup code should not return a DELETED chain to us, unless
729 * it's a short file embedded in the inode. Then it is possible for
730 * the lookup to return a deleted inode.
732 if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
733 chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
734 kprintf("assign physical deleted chain @ "
735 "%016jx (%016jx.%02x) ip %016jx\n",
736 lbase, chain->bref.data_off, chain->bref.type,
737 ip->meta.inum);
738 Debugger("bleh");
741 if (chain == NULL) {
743 * We found a hole, create a new chain entry.
745 * NOTE: DATA chains are created without device backing
746 * store (nor do we want any).
748 dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
749 pblksize);
750 *errorp = hammer2_chain_create(parentp, &chain,
751 ip->pmp,
752 HAMMER2_ENC_CHECK(ip->meta.check_algo) |
753 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
754 lbase, HAMMER2_PBUFRADIX,
755 HAMMER2_BREF_TYPE_DATA,
756 pblksize, mtid,
757 dedup_off, 0);
758 if (chain == NULL) {
759 panic("hammer2_chain_create: par=%p error=%d\n",
760 *parentp, *errorp);
761 goto retry;
763 /*ip->delta_dcount += pblksize;*/
764 } else {
765 switch (chain->bref.type) {
766 case HAMMER2_BREF_TYPE_INODE:
768 * The data is embedded in the inode, which requires
769 * a bit more finesse.
771 hammer2_chain_modify_ip(ip, chain, mtid, 0);
772 break;
773 case HAMMER2_BREF_TYPE_DATA:
774 dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
775 pblksize);
776 if (chain->bytes != pblksize) {
777 hammer2_chain_resize(chain,
778 mtid, dedup_off,
779 pradix,
780 HAMMER2_MODIFY_OPTDATA);
784 * DATA buffers must be marked modified whether the
785 * data is in a logical buffer or not. We also have
786 * to make this call to fixup the chain data pointers
787 * after resizing in case this is an encrypted or
788 * compressed buffer.
790 hammer2_chain_modify(chain, mtid, dedup_off,
791 HAMMER2_MODIFY_OPTDATA);
792 break;
793 default:
794 panic("hammer2_assign_physical: bad type");
795 /* NOT REACHED */
796 break;
799 TIMER(31);
800 return (chain);
804 * hammer2_write_file_core() - hammer2_write_thread() helper
806 * The core write function which determines which path to take
807 * depending on compression settings. We also have to locate the
808 * related chains so we can calculate and set the check data for
809 * the blockref.
811 static
812 void
813 hammer2_write_file_core(char *data, hammer2_inode_t *ip,
814 hammer2_chain_t **parentp,
815 hammer2_key_t lbase, int ioflag, int pblksize,
816 hammer2_tid_t mtid, int *errorp)
818 hammer2_chain_t *chain;
819 char *bdata;
821 *errorp = 0;
823 switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
824 case HAMMER2_COMP_NONE:
826 * We have to assign physical storage to the buffer
827 * we intend to dirty or write now to avoid deadlocks
828 * in the strategy code later.
830 * This can return NOOFFSET for inode-embedded data.
831 * The strategy code will take care of it in that case.
833 bdata = data;
834 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
835 mtid, &bdata, errorp);
836 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
837 hammer2_inode_data_t *wipdata;
839 wipdata = &chain->data->ipdata;
840 KKASSERT(wipdata->meta.op_flags &
841 HAMMER2_OPFLAG_DIRECTDATA);
842 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
843 ++hammer2_iod_file_wembed;
844 } else if (bdata == NULL) {
846 * Copy of data already present on-media.
848 chain->bref.methods =
849 HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
850 HAMMER2_ENC_CHECK(ip->meta.check_algo);
851 hammer2_chain_setcheck(chain, data);
852 } else {
853 hammer2_write_bp(chain, data, ioflag, pblksize,
854 mtid, errorp, ip->meta.check_algo);
856 if (chain) {
857 hammer2_chain_unlock(chain);
858 hammer2_chain_drop(chain);
860 break;
861 case HAMMER2_COMP_AUTOZERO:
863 * Check for zero-fill only
865 hammer2_zero_check_and_write(data, ip, parentp,
866 lbase, ioflag, pblksize,
867 mtid, errorp,
868 ip->meta.check_algo);
869 break;
870 case HAMMER2_COMP_LZ4:
871 case HAMMER2_COMP_ZLIB:
872 default:
874 * Check for zero-fill and attempt compression.
876 hammer2_compress_and_write(data, ip, parentp,
877 lbase, ioflag, pblksize,
878 mtid, errorp,
879 ip->meta.comp_algo,
880 ip->meta.check_algo);
881 break;
886 * Helper
888 * Generic function that performs the compression in the compressed
889 * write path. The compression algorithm is determined by the settings
890 * obtained from the inode.
892 static
893 void
894 hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
895 hammer2_chain_t **parentp,
896 hammer2_key_t lbase, int ioflag, int pblksize,
897 hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
899 hammer2_chain_t *chain;
900 int comp_size;
901 int comp_block_size;
902 char *comp_buffer;
903 char *bdata;
906 * An all-zeros write creates a hole unless the check code
907 * is disabled. When the check code is disabled all writes
908 * are done in-place, including any all-zeros writes.
910 * NOTE: A snapshot will still force a copy-on-write
911 * (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
913 if (check_algo != HAMMER2_CHECK_NONE &&
914 test_block_zeros(data, pblksize)) {
915 zero_write(data, ip, parentp, lbase, mtid, errorp);
916 return;
920 * Compression requested. Try to compress the block. We store
921 * the data normally if we cannot sufficiently compress it.
923 comp_size = 0;
924 comp_buffer = NULL;
926 KKASSERT(pblksize / 2 <= 32768);
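/*
 * NOTE: ip->comp_heuristic throttles compression attempts on files whose
 *	 data does not compress: it is reset to 0 on any successful
 *	 compression, bumped on each failure (wrapping back to 8 above 128),
 *	 and once it reaches 8 only every 8th logical block is attempted
 *	 again.
 */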
928 if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0) {
929 z_stream strm_compress;
930 int comp_level;
931 int ret;
933 switch(HAMMER2_DEC_ALGO(comp_algo)) {
934 case HAMMER2_COMP_LZ4:
935 comp_buffer = objcache_get(cache_buffer_write,
936 M_INTWAIT);
937 comp_size = LZ4_compress_limitedOutput(
938 data,
939 &comp_buffer[sizeof(int)],
940 pblksize,
941 pblksize / 2 - sizeof(int));
943 * We need to prefix with the size, LZ4
944 * doesn't do it for us. Add the related
945 * overhead.
947 *(int *)comp_buffer = comp_size;
948 if (comp_size)
949 comp_size += sizeof(int);
950 break;
951 case HAMMER2_COMP_ZLIB:
952 comp_level = HAMMER2_DEC_LEVEL(comp_algo);
953 if (comp_level == 0)
954 comp_level = 6; /* default zlib compression */
955 else if (comp_level < 6)
956 comp_level = 6;
957 else if (comp_level > 9)
958 comp_level = 9;
959 ret = deflateInit(&strm_compress, comp_level);
960 if (ret != Z_OK) {
961 kprintf("HAMMER2 ZLIB: fatal error "
962 "on deflateInit.\n");
965 comp_buffer = objcache_get(cache_buffer_write,
966 M_INTWAIT);
967 strm_compress.next_in = data;
968 strm_compress.avail_in = pblksize;
969 strm_compress.next_out = comp_buffer;
970 strm_compress.avail_out = pblksize / 2;
971 ret = deflate(&strm_compress, Z_FINISH);
972 if (ret == Z_STREAM_END) {
973 comp_size = pblksize / 2 -
974 strm_compress.avail_out;
975 } else {
976 comp_size = 0;
978 ret = deflateEnd(&strm_compress);
979 break;
980 default:
981 kprintf("Error: Unknown compression method.\n");
982 kprintf("Comp_method = %d.\n", comp_algo);
983 break;
987 if (comp_size == 0) {
989 * compression failed or turned off
991 comp_block_size = pblksize; /* safety */
992 if (++ip->comp_heuristic > 128)
993 ip->comp_heuristic = 8;
994 } else {
996 * compression succeeded
998 ip->comp_heuristic = 0;
999 if (comp_size <= 1024) {
1000 comp_block_size = 1024;
1001 } else if (comp_size <= 2048) {
1002 comp_block_size = 2048;
1003 } else if (comp_size <= 4096) {
1004 comp_block_size = 4096;
1005 } else if (comp_size <= 8192) {
1006 comp_block_size = 8192;
1007 } else if (comp_size <= 16384) {
1008 comp_block_size = 16384;
1009 } else if (comp_size <= 32768) {
1010 comp_block_size = 32768;
1011 } else {
1012 panic("hammer2: WRITE PATH: "
1013 "Weird comp_size value.");
1014 /* NOT REACHED */
1015 comp_block_size = pblksize;
1019 * Must zero the remainder or dedup (which operates on a
1020 * physical block basis) will not find matches.
1022 if (comp_size < comp_block_size) {
1023 bzero(comp_buffer + comp_size,
1024 comp_block_size - comp_size);
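/*
 * NOTE: the ladder above (success path) rounds the compressed size up to
 *	 the smallest power-of-two physical allocation able to hold it,
 *	 with a 1KB floor; sizes above 32KB cannot occur because the
 *	 compression output is capped at pblksize / 2.  Equivalent sketch,
 *	 assuming HAMMER2_ALLOC_MIN is the 1KB minimum used elsewhere in
 *	 this file:
 *
 *		comp_block_size = HAMMER2_ALLOC_MIN;
 *		while (comp_block_size < comp_size)
 *			comp_block_size <<= 1;
 */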
1029 * Assign physical storage, data will be set to NULL if a live-dedup
1030 * was successful.
1032 bdata = comp_size ? comp_buffer : data;
1033 chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
1034 mtid, &bdata, errorp);
1036 if (*errorp) {
1037 kprintf("WRITE PATH: An error occurred while "
1038 "assigning physical space.\n");
1039 KKASSERT(chain == NULL);
1040 goto done;
1043 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1044 hammer2_inode_data_t *wipdata;
1046 hammer2_chain_modify_ip(ip, chain, mtid, 0);
1047 wipdata = &chain->data->ipdata;
1048 KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
1049 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1050 ++hammer2_iod_file_wembed;
1051 } else if (bdata == NULL) {
1053 * Live deduplication, a copy of the data is already present
1054 * on the media.
1056 if (comp_size) {
1057 chain->bref.methods =
1058 HAMMER2_ENC_COMP(comp_algo) +
1059 HAMMER2_ENC_CHECK(check_algo);
1060 } else {
1061 chain->bref.methods =
1062 HAMMER2_ENC_COMP(
1063 HAMMER2_COMP_NONE) +
1064 HAMMER2_ENC_CHECK(check_algo);
1066 bdata = comp_size ? comp_buffer : data;
1067 hammer2_chain_setcheck(chain, bdata);
1068 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1069 } else {
1070 hammer2_io_t *dio;
1072 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1074 switch(chain->bref.type) {
1075 case HAMMER2_BREF_TYPE_INODE:
1076 panic("hammer2_write_bp: unexpected inode\n");
1077 break;
1078 case HAMMER2_BREF_TYPE_DATA:
1080 * Optimize out the read-before-write
1081 * if possible.
1083 *errorp = hammer2_io_newnz(chain->hmp,
1084 chain->bref.type,
1085 chain->bref.data_off,
1086 chain->bytes,
1087 &dio);
1088 if (*errorp) {
1089 hammer2_io_brelse(&dio);
1090 kprintf("hammer2: WRITE PATH: "
1091 "dbp bread error\n");
1092 break;
1094 bdata = hammer2_io_data(dio, chain->bref.data_off);
1097 * When loading the block make sure we don't
1098 * leave garbage after the compressed data.
1100 if (comp_size) {
1101 chain->bref.methods =
1102 HAMMER2_ENC_COMP(comp_algo) +
1103 HAMMER2_ENC_CHECK(check_algo);
1104 bcopy(comp_buffer, bdata, comp_size);
1105 } else {
1106 chain->bref.methods =
1107 HAMMER2_ENC_COMP(
1108 HAMMER2_COMP_NONE) +
1109 HAMMER2_ENC_CHECK(check_algo);
1110 bcopy(data, bdata, pblksize);
1114 * The flush code doesn't calculate check codes for
1115 * file data (doing so can result in excessive I/O),
1116 * so we do it here.
1118 hammer2_chain_setcheck(chain, bdata);
1119 hammer2_dedup_record(chain, bdata);
1122 * Device buffer is now valid, chain is no longer in
1123 * the initial state.
1125 * (No blockref table worries with file data)
1127 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1129 /* Now write the related bdp. */
1130 if (ioflag & IO_SYNC) {
1132 * Synchronous I/O requested.
1134 hammer2_io_bwrite(&dio);
1135 /*
1136 } else if ((ioflag & IO_DIRECT) &&
1137 loff + n == pblksize) {
1138 hammer2_io_bdwrite(&dio);
1139 */
1140 } else if (ioflag & IO_ASYNC) {
1141 hammer2_io_bawrite(&dio);
1142 } else {
1143 hammer2_io_bdwrite(&dio);
1145 break;
1146 default:
1147 panic("hammer2_write_bp: bad chain type %d\n",
1148 chain->bref.type);
1149 /* NOT REACHED */
1150 break;
1153 done:
1154 if (chain) {
1155 hammer2_chain_unlock(chain);
1156 hammer2_chain_drop(chain);
1158 if (comp_buffer)
1159 objcache_put(cache_buffer_write, comp_buffer);
1163 * Helper
1165 * Function that performs zero-checking and writing without compression;
1166 * it corresponds to the default zero-checking path.
1168 static
1169 void
1170 hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
1171 hammer2_chain_t **parentp,
1172 hammer2_key_t lbase, int ioflag, int pblksize,
1173 hammer2_tid_t mtid, int *errorp,
1174 int check_algo)
1176 hammer2_chain_t *chain;
1178 if (check_algo != HAMMER2_CHECK_NONE &&
1179 test_block_zeros(data, pblksize)) {
1181 * An all-zeros write creates a hole unless the check code
1182 * is disabled. When the check code is disabled all writes
1183 * are done in-place, including any all-zeros writes.
1185 * NOTE: A snapshot will still force a copy-on-write
1186 * (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
1188 zero_write(data, ip, parentp, lbase, mtid, errorp);
1189 } else {
1191 * Normal write
1193 chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
1194 mtid, &data, errorp);
1195 if (data) {
1196 hammer2_write_bp(chain, data, ioflag, pblksize,
1197 mtid, errorp, check_algo);
1198 } /* else dedup occurred */
1199 if (chain) {
1200 hammer2_chain_unlock(chain);
1201 hammer2_chain_drop(chain);
1207 * Helper
1209 * A function to test whether a block of data contains only zeros.
1210 * Returns TRUE (non-zero) if the block is all zeros.
1212 static
1214 test_block_zeros(const char *buf, size_t bytes)
1216 size_t i;
1218 for (i = 0; i < bytes; i += sizeof(long)) {
1219 if (*(const long *)(buf + i) != 0)
1220 return (0);
1222 return (1);
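/*
 * NOTE: the scan strides by sizeof(long) and dereferences the buffer as
 *	 longs, so it assumes the block size is a multiple of sizeof(long)
 *	 and that the buffer is suitably aligned; both hold for the
 *	 power-of-two physical block sizes (>= HAMMER2_ALLOC_MIN) passed in
 *	 by the write paths above.
 */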
1226 * Helper
1228 * Function to "write" a block that contains only zeros.
1230 static
1231 void
1232 zero_write(char *data, hammer2_inode_t *ip,
1233 hammer2_chain_t **parentp,
1234 hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
1236 hammer2_chain_t *chain;
1237 hammer2_key_t key_dummy;
1238 int cache_index = -1;
1240 *errorp = 0;
1241 chain = hammer2_chain_lookup(parentp, &key_dummy,
1242 lbase, lbase,
1243 &cache_index,
1244 HAMMER2_LOOKUP_NODATA);
1245 if (chain) {
1246 if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
1247 hammer2_inode_data_t *wipdata;
1249 hammer2_chain_modify_ip(ip, chain, mtid, 0);
1250 wipdata = &chain->data->ipdata;
1251 KKASSERT(wipdata->meta.op_flags &
1252 HAMMER2_OPFLAG_DIRECTDATA);
1253 bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1254 ++hammer2_iod_file_wembed;
1255 } else {
1256 hammer2_chain_delete(*parentp, chain,
1257 mtid, HAMMER2_DELETE_PERMANENT);
1258 ++hammer2_iod_file_wzero;
1260 hammer2_chain_unlock(chain);
1261 hammer2_chain_drop(chain);
1262 } else {
1263 ++hammer2_iod_file_wzero;
1268 * Helper
1270 * Function to write the data as-is, without performing any sort of
1271 * compression. This function is used in the no-compression path and
1272 * in the default zero-checking path.
1274 static
1275 void
1276 hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
1277 int pblksize,
1278 hammer2_tid_t mtid, int *errorp, int check_algo)
1280 hammer2_inode_data_t *wipdata;
1281 hammer2_io_t *dio;
1282 char *bdata;
1283 int error;
1285 error = 0; /* XXX TODO below */
1287 KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);
1289 switch(chain->bref.type) {
1290 case HAMMER2_BREF_TYPE_INODE:
1291 wipdata = &chain->data->ipdata;
1292 KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
1293 bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
1294 error = 0;
1295 ++hammer2_iod_file_wembed;
1296 break;
1297 case HAMMER2_BREF_TYPE_DATA:
1298 error = hammer2_io_newnz(chain->hmp,
1299 chain->bref.type,
1300 chain->bref.data_off,
1301 chain->bytes, &dio);
1302 if (error) {
1303 hammer2_io_bqrelse(&dio);
1304 kprintf("hammer2: WRITE PATH: "
1305 "dbp bread error\n");
1306 break;
1308 bdata = hammer2_io_data(dio, chain->bref.data_off);
1310 chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
1311 HAMMER2_ENC_CHECK(check_algo);
1312 bcopy(data, bdata, chain->bytes);
1315 * The flush code doesn't calculate check codes for
1316 * file data (doing so can result in excessive I/O),
1317 * so we do it here.
1319 hammer2_chain_setcheck(chain, bdata);
1320 hammer2_dedup_record(chain, bdata);
1323 * Device buffer is now valid, chain is no longer in
1324 * the initial state.
1326 * (No blockref table worries with file data)
1328 atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
1330 if (ioflag & IO_SYNC) {
1332 * Synchronous I/O requested.
1334 hammer2_io_bwrite(&dio);
1335 /*
1336 } else if ((ioflag & IO_DIRECT) &&
1337 loff + n == pblksize) {
1338 hammer2_io_bdwrite(&dio);
1339 */
1340 } else if (ioflag & IO_ASYNC) {
1341 hammer2_io_bawrite(&dio);
1342 } else {
1343 hammer2_io_bdwrite(&dio);
1345 break;
1346 default:
1347 panic("hammer2_write_bp: bad chain type %d\n",
1348 chain->bref.type);
1349 /* NOT REACHED */
1350 error = 0;
1351 break;
1353 KKASSERT(error == 0); /* XXX TODO */
1354 *errorp = error;
1358 * LIVE DEDUP HEURISTIC
1360 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
1361 * All fields must be loaded into locals and validated.
1363 * WARNING! Should only be used for file data, hammer2_chain_modify() only
1364 * checks for the dedup case on data chains. Also, dedup data can
1365 * only be recorded for committed chains (so NOT strategy writes
1366 * which can undergo further modification after the fact!).
1368 void
1369 hammer2_dedup_record(hammer2_chain_t *chain, char *data)
1371 hammer2_dev_t *hmp;
1372 hammer2_dedup_t *dedup;
1373 uint64_t crc;
1374 int best = 0;
1375 int i;
1376 int dticks;
1378 if (hammer2_dedup_enable == 0)
1379 return;
1382 * Only committed data can be recorded for de-duplication, otherwise
1383 * the contents may change out from under us. So, on read if the
1384 * chain is not modified, and on flush when the chain is committed.
1386 if ((chain->flags &
1387 (HAMMER2_CHAIN_MODIFIED | HAMMER2_CHAIN_INITIAL)) == 0) {
1388 return;
1392 hmp = chain->hmp;
1394 switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
1395 case HAMMER2_CHECK_ISCSI32:
1397 * XXX use the built-in crc (the dedup lookup sequencing
1398 * needs to be fixed so the check code is already present
1399 * when dedup_lookup is called)
1401 #if 0
1402 crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
1403 #endif
1404 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1405 break;
1406 case HAMMER2_CHECK_XXHASH64:
1407 crc = chain->bref.check.xxhash64.value;
1408 break;
1409 case HAMMER2_CHECK_SHA192:
1411 * XXX use the built-in crc (the dedup lookup sequencing
1412 * needs to be fixed so the check code is already present
1413 * when dedup_lookup is called)
1415 #if 0
1416 crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
1417 ((uint64_t *)chain->bref.check.sha192.data)[1] ^
1418 ((uint64_t *)chain->bref.check.sha192.data)[2];
1419 #endif
1420 crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
1421 break;
1422 default:
1424 * Cannot dedup without a check code
1426 * NOTE: In particular, CHECK_NONE allows a sector to be
1427 * overwritten without copy-on-write, recording
1428 * a dedup block for a CHECK_NONE object would be
1429 * a disaster!
1431 return;
1433 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
1434 for (i = 0; i < 4; ++i) {
1435 if (dedup[i].data_crc == crc) {
1436 best = i;
1437 break;
1439 dticks = (int)(dedup[i].ticks - dedup[best].ticks);
1440 if (dticks < 0 || dticks > hz * 60 * 30)
1441 best = i;
1443 dedup += best;
1444 if (hammer2_debug & 0x40000) {
1445 kprintf("REC %04x %016jx %016jx\n",
1446 (int)(dedup - hmp->heur_dedup),
1447 crc,
1448 chain->bref.data_off);
1450 dedup->ticks = ticks;
1451 dedup->data_off = chain->bref.data_off;
1452 dedup->data_crc = crc;
1453 atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUP);
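/*
 * NOTE: heur_dedup[] is used as a small 4-way set-associative cache keyed
 *	 on the 64-bit data hash.  Masking off the low two bits of the index
 *	 selects a bucket of four consecutive entries, and the stalest entry
 *	 (by ->ticks) in the bucket is recycled on a miss.  Bucket selection
 *	 sketch, shared by the record and lookup paths:
 *
 *		hammer2_dedup_t *bucket =
 *		    &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
 *		// bucket[0..3] are the candidate slots for this hash
 */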
1456 static
1457 hammer2_off_t
1458 hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
1460 hammer2_dedup_t *dedup;
1461 hammer2_io_t *dio;
1462 hammer2_off_t off;
1463 uint64_t crc;
1464 char *data;
1465 int i;
1467 if (hammer2_dedup_enable == 0)
1468 return 0;
1469 data = *datap;
1470 if (data == NULL)
1471 return 0;
1474 * XXX use the built-in crc (the dedup lookup sequencing
1475 * needs to be fixed so the check code is already present
1476 * when dedup_lookup is called)
1478 crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
1479 dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
1481 if (hammer2_debug & 0x40000) {
1482 kprintf("LOC %04x/4 %016jx\n",
1483 (int)(dedup - hmp->heur_dedup),
1484 crc);
1487 for (i = 0; i < 4; ++i) {
1488 off = dedup[i].data_off;
1489 cpu_ccfence();
1490 if (dedup[i].data_crc != crc)
1491 continue;
1492 if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
1493 continue;
1494 dio = hammer2_io_getquick(hmp, off, pblksize);
1495 if (dio &&
1496 bcmp(data, hammer2_io_data(dio, off), pblksize) == 0) {
1498 * Make sure the INVALOK flag is cleared to prevent
1499 * the possibly-dirty bp from being invalidated now
1500 * that we are using it as part of a de-dup operation.
1502 if (hammer2_debug & 0x40000) {
1503 kprintf("DEDUP SUCCESS %016jx\n",
1504 (intmax_t)off);
1506 atomic_clear_64(&dio->refs, HAMMER2_DIO_INVALOK);
1507 hammer2_io_putblk(&dio);
1508 *datap = NULL;
1509 dedup[i].ticks = ticks; /* update use */
1510 ++hammer2_iod_file_wdedup;
1512 return off; /* RETURN */
1514 if (dio)
1515 hammer2_io_putblk(&dio);
1517 return 0;
1521 * Poof. Races are ok; if someone gets in and reuses a dedup offset
1522 * before or while we are clearing it, they will also recover the freemap
1523 * entry (set it to fully allocated), so a bulkfree race can only set it
1524 * to a possibly-free state.
1526 * XXX ok, well, not really sure races are ok but going to run with it
1527 * for the moment.
1529 void
1530 hammer2_dedup_clear(hammer2_dev_t *hmp)
1532 int i;
1534 for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
1535 hmp->heur_dedup[i].data_off = 0;
1536 hmp->heur_dedup[i].ticks = ticks - 1;