/*
 * Copyright (c) 2011-2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block, check codes, and buffer cache operations
 * for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
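
/*
 * NOTE: The objcaches above supply the scratch buffers used by the
 *	 compression and decompression paths in this file.  They are
 *	 created at module initialization time, outside this file (see
 *	 the hammer2 vfsops code).
 */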

/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via a XOP.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync's
 *     vfsync() calls, but it has in the past when multiple flushes are
 *     queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */

static void hammer2_strategy_xop_read(hammer2_thread_t *thr,
				hammer2_xop_t *arg);
static void hammer2_strategy_xop_write(hammer2_thread_t *thr,
				hammer2_xop_t *arg);
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *chain,
				char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
				char **datap, int pblksize);

int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
	struct bio *biop;
	struct buf *bp;
	int error;

	biop = ap->a_bio;
	bp = biop->bio_buf;

	switch(bp->b_cmd) {
	case BUF_CMD_READ:
		error = hammer2_strategy_read(ap);
		++hammer2_iod_file_read;
		break;
	case BUF_CMD_WRITE:
		error = hammer2_strategy_write(ap);
		++hammer2_iod_file_write;
		break;
	default:
		bp->b_error = error = EINVAL;
		bp->b_flags |= B_ERROR;
		biodone(biop);
		break;
	}
	return (error);
}

/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
	*ap->a_doffsetp = NOOFFSET;
	if (ap->a_runp)
		*ap->a_runp = 0;
	if (ap->a_runb)
		*ap->a_runb = 0;
	return (EOPNOTSUPP);
}

/****************************************************************************
 *				READ SUPPORT				    *
 ****************************************************************************/

/*
 * Callback used in read path in case that a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	int compressed_size;
	int result;

	bp = bio->bio_buf;

#if 0
	if bio->bio_caller_info2.index &&
	      bio->bio_caller_info1.uvalue32 !=
	      crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
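	/*
	 * The first sizeof(int) bytes of an LZ4-compressed block hold the
	 * compressed payload size (written by the compression path below,
	 * since LZ4 does not store it for us); the payload follows.
	 */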
	compressed_size = *(const int *)data;
	KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
				     compressed_buffer,
				     compressed_size,
				     bp->b_bufsize);
	if (result < 0) {
		kprintf("READ PATH: Error during decompression. "
			"bio %016jx/%d\n",
			(intmax_t)bio->bio_offset, bytes);
		/* make sure it isn't random garbage */
		bzero(compressed_buffer, bp->b_bufsize);
	}
	KKASSERT(result <= bp->b_bufsize);
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, bp->b_bufsize - result);
	objcache_put(cache_buffer_read, compressed_buffer);
	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
	struct buf *bp;
	char *compressed_buffer;
	z_stream strm_decompress;
	int result;
	int ret;

	bp = bio->bio_buf;

	KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
	strm_decompress.avail_in = 0;
	strm_decompress.next_in = Z_NULL;

	ret = inflateInit(&strm_decompress);

	if (ret != Z_OK)
		kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

	compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
	strm_decompress.next_in = __DECONST(char *, data);

	/* XXX supply proper size, subset of device bp */
	strm_decompress.avail_in = bytes;
	strm_decompress.next_out = compressed_buffer;
	strm_decompress.avail_out = bp->b_bufsize;

	ret = inflate(&strm_decompress, Z_FINISH);
	if (ret != Z_STREAM_END) {
		kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
		bzero(compressed_buffer, bp->b_bufsize);
	}
	bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
	result = bp->b_bufsize - strm_decompress.avail_out;
	if (result < bp->b_bufsize)
		bzero(bp->b_data + result, strm_decompress.avail_out);
	objcache_put(cache_buffer_read, compressed_buffer);
	ret = inflateEnd(&strm_decompress);

	bp->b_resid = 0;
	bp->b_flags |= B_AGE;
}

/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	struct buf *bp;
	struct bio *bio;
	struct bio *nbio;
	hammer2_inode_t *ip;
	hammer2_key_t lbase;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	nbio = push_bio(bio);

	lbase = bio->bio_offset;
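	/*
	 * Logical file I/O is always issued in full-sized HAMMER2_PBUFSIZE
	 * (64KB) logical buffers, so the offset is expected to be 64KB
	 * aligned.
	 */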
	KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = lbase;
	hammer2_mtx_init(&xop->lock, "h2bior");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_read);
	/* asynchronous completion */

	return(0);
}

/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
static
void
hammer2_strategy_xop_read(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_key_t lbase;
	struct bio *bio;
	struct buf *bp;
	int error;

	/*
	 * Note that we can race completion of the bio supplied by
	 * the front-end so we cannot access it until we determine
	 * that we are the ones finishing it up.
	 */
	lbase = xop->lbase;

	/*
	 * This is difficult to optimize.  The logical buffer might be
	 * partially dirty (contain dummy zero-fill pages), which would
	 * mess up our crc calculation if we were to try a direct read.
	 * So for now we always double-buffer through the underlying
	 * storage.
	 *
	 * If not for the above problem we could conditionalize on
	 * (1) 64KB buffer, (2) one chain (not multi-master) and
	 * (3) !hammer2_double_buffer, and issue a direct read into the
	 * logical buffer.
	 */
	parent = hammer2_inode_chain(xop->head.ip1, thr->clindex,
				     HAMMER2_RESOLVE_ALWAYS |
				     HAMMER2_RESOLVE_SHARED);
	if (parent) {
		chain = hammer2_chain_lookup(&parent, &key_dummy,
					     lbase, lbase,
					     &error,
					     HAMMER2_LOOKUP_ALWAYS |
					     HAMMER2_LOOKUP_SHARED);
		if (chain)
			error = chain->error;
	} else {
		error = HAMMER2_ERROR_EIO;
		chain = NULL;
	}
	error = hammer2_xop_feed(&xop->head, chain, thr->clindex, error);
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
	}
	chain = NULL;	/* safety */
	parent = NULL;	/* safety */

	/*
	 * Race to finish the frontend.  First-to-complete.  bio is only
	 * valid if we are determined to be the ones able to complete
	 * the operation.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}
	bio = xop->bio;
	bp = bio->bio_buf;

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 *
	 * Also note that even for compressed data we would rather the
	 * kernel cache/swapcache device buffers more and (decompressed)
	 * logical buffers less, since that will significantly improve
	 * the amount of end-user data that can be cached.
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	switch(error) {
	case 0:
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		chain = xop->head.cluster.focus;
		hammer2_strategy_read_completion(chain, (char *)chain->data,
						 xop->bio);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_ENOENT:
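		/*
		 * No chain covers this logical offset, i.e. the block
		 * was never written (a file hole), so we satisfy the
		 * read with zero-filled data.
		 */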
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		bzero(bp->b_data, bp->b_bcount);
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	case HAMMER2_ERROR_EINPROGRESS:
		hammer2_mtx_unlock(&xop->lock);
		break;
	default:
		kprintf("strategy_xop_read: error %08x loff=%016jx\n",
			error, bp->b_loffset);
		xop->finished = 1;
		hammer2_mtx_unlock(&xop->lock);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		break;
	}
}

static
void
hammer2_strategy_read_completion(hammer2_chain_t *chain, char *data,
				 struct bio *bio)
{
	struct buf *bp = bio->bio_buf;

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		/*
		 * Copy from in-memory inode structure.
		 */
		bcopy(((hammer2_inode_data_t *)data)->u.data,
		      bp->b_data, HAMMER2_EMBEDDED_BYTES);
		bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
		      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
		bp->b_resid = 0;
		bp->b_error = 0;
	} else if (chain->bref.type == HAMMER2_BREF_TYPE_DATA) {
		/*
		 * Data is on-media, record for live dedup.  Release the
		 * chain (try to free it) when done.  The data is still
		 * cached by both the buffer cache in front and the
		 * block device behind us.  This leaves more room in the
		 * LRU chain cache for meta-data chains which we really
		 * want to retain.
		 *
		 * NOTE: Deduplication cannot be safely recorded for
		 *	 records without a check code.
		 */
		hammer2_dedup_record(chain, NULL, data);
		atomic_set_int(&chain->flags, HAMMER2_CHAIN_RELEASE);

		/*
		 * Decompression and copy.
		 */
		switch (HAMMER2_DEC_COMP(chain->bref.methods)) {
		case HAMMER2_COMP_LZ4:
			hammer2_decompress_LZ4_callback(data, chain->bytes,
							bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_ZLIB:
			hammer2_decompress_ZLIB_callback(data, chain->bytes,
							 bio);
			/* b_resid set by call */
			break;
		case HAMMER2_COMP_NONE:
			KKASSERT(chain->bytes <= bp->b_bcount);
			bcopy(data, bp->b_data, chain->bytes);
			if (chain->bytes < bp->b_bcount) {
				bzero(bp->b_data + chain->bytes,
				      bp->b_bcount - chain->bytes);
			}
			bp->b_resid = 0;
			bp->b_error = 0;
			break;
		default:
			panic("hammer2_strategy_read: "
			      "unknown compression type");
		}
	} else {
		panic("hammer2_strategy_read: unknown bref type");
	}
}

/****************************************************************************
 *				WRITE SUPPORT				    *
 ****************************************************************************/

/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase, int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
				hammer2_chain_t **parentp,
				hammer2_key_t lbase,
				hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
				int ioflag, int pblksize,
				hammer2_tid_t mtid, int *errorp,
				int check_algo);

static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
	hammer2_xop_strategy_t *xop;
	hammer2_pfs_t *pmp;
	struct bio *bio;
	struct buf *bp;
	hammer2_inode_t *ip;

	bio = ap->a_bio;
	bp = bio->bio_buf;
	ip = VTOI(ap->a_vp);
	pmp = ip->pmp;

	hammer2_lwinprog_ref(pmp);
	hammer2_trans_assert_strategy(pmp);
	hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
				    HAMMER2_XOP_STRATEGY);
	xop->finished = 0;
	xop->bio = bio;
	xop->lbase = bio->bio_offset;
	hammer2_mtx_init(&xop->lock, "h2biow");
	hammer2_xop_start(&xop->head, hammer2_strategy_xop_write);
	/* asynchronous completion */

	hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);
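
	/*
	 * NOTE: The wait above throttles the frontend against the backend,
	 *	 blocking while more than hammer2_flush_pipe logical writes
	 *	 are in-progress, so dirty buffers cannot queue up without
	 *	 bound.
	 */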

	return(0);
}

/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thr
 * scratch buffer.
 */
static
void
hammer2_strategy_xop_write(hammer2_thread_t *thr, hammer2_xop_t *arg)
{
	hammer2_xop_strategy_t *xop = &arg->xop_strategy;
	hammer2_chain_t *parent;
	hammer2_key_t lbase;
	hammer2_inode_t *ip;
	struct bio *bio;
	struct buf *bp;
	int error;
	int lblksize;
	int pblksize;
	hammer2_off_t bio_offset;
	char *bio_data;

	/*
	 * We can only access the bp/bio if the frontend has not yet
	 * completed.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_sh(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	lbase = xop->lbase;
	bio = xop->bio;			/* ephemeral */
	bp = bio->bio_buf;		/* ephemeral */
	ip = xop->head.ip1;		/* retained by ref */
	bio_offset = bio->bio_offset;
	bio_data = thr->scratch;

	/* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

	lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
	pblksize = hammer2_calc_physical(ip, lbase);
	bcopy(bp->b_data, bio_data, lblksize);

	hammer2_mtx_unlock(&xop->lock);
	bp = NULL;	/* safety, illegal to access after unlock */
	bio = NULL;	/* safety, illegal to access after unlock */

	/*
	 * Actual operation
	 */
	parent = hammer2_inode_chain(ip, thr->clindex, HAMMER2_RESOLVE_ALWAYS);
	hammer2_write_file_core(bio_data, ip, &parent,
				lbase, IO_ASYNC, pblksize,
				xop->head.mtid, &error);
	if (parent) {
		hammer2_chain_unlock(parent);
		hammer2_chain_drop(parent);
		parent = NULL;	/* safety */
	}
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);

	/*
	 * Try to complete the operation on behalf of the front-end.
	 */
	if (xop->finished)
		return;
	hammer2_mtx_ex(&xop->lock);
	if (xop->finished) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has not completed and we now own the lock.
	 * Determine if we can complete the operation by issuing the
	 * frontend collection non-blocking.
	 *
	 * H2 double-buffers the data, setting B_NOTMETA on the logical
	 * buffer hints to the OS that the logical buffer should not be
	 * swapcached (since the device buffer can be).
	 */
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

	if (error == HAMMER2_ERROR_EINPROGRESS) {
		hammer2_mtx_unlock(&xop->lock);
		return;
	}

	/*
	 * Async operation has completed.
	 */
	xop->finished = 1;
	hammer2_mtx_unlock(&xop->lock);

	bio = xop->bio;		/* now owned by us */
	bp = bio->bio_buf;	/* now owned by us */

	if (error == HAMMER2_ERROR_ENOENT || error == 0) {
		bp->b_flags |= B_NOTMETA;
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		kprintf("strategy_xop_write: error %d loff=%016jx\n",
			error, bp->b_loffset);
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bio);
	}
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	hammer2_trans_assert_strategy(ip->pmp);
	hammer2_lwinprog_drop(ip->pmp);
	hammer2_trans_done(ip->pmp);
}

/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
	hammer2_lwinprog_wait(pmp, 0);
}

/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
			hammer2_key_t lbase, int pblksize,
			hammer2_tid_t mtid, char **datap, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;
	hammer2_off_t dedup_off;
	int pradix = hammer2_getradix(pblksize);
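	/* pradix is the log2 radix of pblksize, used if we must resize */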

	/*
	 * Locate the chain associated with lbase, return a locked chain.
	 * However, do not instantiate any data reference (which utilizes a
	 * device buffer) because we will be using direct IO via the
	 * logical buffer cache buffer.
	 */
	KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);

	/*
	 * The lookup code should not return a DELETED chain to us, unless
	 * it's a short-file embedded in the inode.  Then it is possible for
	 * the lookup to return a deleted inode.
	 */
	if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
	    chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
		kprintf("assign physical deleted chain @ "
			"%016jx (%016jx.%02x) ip %016jx\n",
			lbase, chain->bref.data_off, chain->bref.type,
			ip->meta.inum);
		Debugger("bleh");
	}

	if (chain == NULL) {
		/*
		 * We found a hole, create a new chain entry.
		 *
		 * NOTE: DATA chains are created without device backing
		 *	 store (nor do we want any).
		 */
		dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
						 pblksize);
		*errorp |= hammer2_chain_create(parentp, &chain,
					ip->pmp,
					HAMMER2_ENC_CHECK(ip->meta.check_algo) |
					HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
					lbase, HAMMER2_PBUFRADIX,
					HAMMER2_BREF_TYPE_DATA,
					pblksize, mtid,
					dedup_off, 0);
		if (chain == NULL)
			goto failed;
		/*ip->delta_dcount += pblksize;*/
	} else if (chain->error == 0) {
		switch (chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			/*
			 * The data is embedded in the inode, which requires
			 * a bit more finesse.
			 */
			*errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
			break;
		case HAMMER2_BREF_TYPE_DATA:
			dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
							 pblksize);
			if (chain->bytes != pblksize) {
				*errorp |= hammer2_chain_resize(chain,
						     mtid, dedup_off,
						     pradix,
						     HAMMER2_MODIFY_OPTDATA);
				if (*errorp)
					break;
			}

			/*
			 * DATA buffers must be marked modified whether the
			 * data is in a logical buffer or not.  We also have
			 * to make this call to fixup the chain data pointers
			 * after resizing in case this is an encrypted or
			 * compressed buffer.
			 */
			*errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
						HAMMER2_MODIFY_OPTDATA);
			break;
		default:
			panic("hammer2_assign_physical: bad type");
			/* NOT REACHED */
			break;
		}
	} else {
		*errorp = chain->error;
	}

failed:
	return (chain);
}

/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
			hammer2_chain_t **parentp,
			hammer2_key_t lbase, int ioflag, int pblksize,
			hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	char *bdata;

	*errorp = 0;

	switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
	case HAMMER2_COMP_NONE:
		/*
		 * We have to assign physical storage to the buffer
		 * we intend to dirty or write now to avoid deadlocks
		 * in the strategy code later.
		 *
		 * This can return NOOFFSET for inode-embedded data.
		 * The strategy code will take care of it in that case.
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* skip modifications */
		} else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		} else if (bdata == NULL) {
			/*
			 * Copy of data already present on-media.
			 */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(ip->meta.check_algo);
			hammer2_chain_setcheck(chain, data);
		} else {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, ip->meta.check_algo);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		break;
	case HAMMER2_COMP_AUTOZERO:
		/*
		 * Check for zero-fill only
		 */
		hammer2_zero_check_and_write(data, ip, parentp,
					     lbase, ioflag, pblksize,
					     mtid, errorp,
					     ip->meta.check_algo);
		break;
	case HAMMER2_COMP_LZ4:
	case HAMMER2_COMP_ZLIB:
	default:
		/*
		 * Check for zero-fill and attempt compression.
		 */
		hammer2_compress_and_write(data, ip, parentp,
					   lbase, ioflag, pblksize,
					   mtid, errorp,
					   ip->meta.comp_algo,
					   ip->meta.check_algo);
		break;
	}
}

/*
 * Helper
 *
 * Generic function that will perform the compression in the compression
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
	hammer2_chain_t *chain;
	int comp_size;
	int comp_block_size;
	char *comp_buffer;
	char *bdata;

	/*
	 * An all-zeros write creates a hole unless the check code
	 * is disabled.  When the check code is disabled all writes
	 * are done in-place, including any all-zeros writes.
	 *
	 * NOTE: A snapshot will still force a copy-on-write
	 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
	 */
	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		zero_write(data, ip, parentp, lbase, mtid, errorp);
		return;
	}

	/*
	 * Compression requested.  Try to compress the block.  We store
	 * the data normally if we cannot sufficiently compress it.
	 *
	 * We have a heuristic to detect files which are mostly
	 * uncompressible and avoid the compression attempt in that
	 * case.  If the compression heuristic is turned off, we always
	 * try to compress.
	 */
	comp_size = 0;
	comp_buffer = NULL;
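
	/*
	 * Compressed output is limited to half of the physical block
	 * (note the pblksize / 2 output limits below), so with a maximum
	 * 64KB physical block the largest possible compressed result
	 * is 32KB.
	 */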
	KKASSERT(pblksize / 2 <= 32768);

	if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
	    hammer2_always_compress) {
		z_stream strm_compress;
		int comp_level;
		int ret;

		switch(HAMMER2_DEC_ALGO(comp_algo)) {
		case HAMMER2_COMP_LZ4:
			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			comp_size = LZ4_compress_limitedOutput(
					data,
					&comp_buffer[sizeof(int)],
					pblksize,
					pblksize / 2 - sizeof(int));
			/*
			 * We need to prefix with the size, LZ4
			 * doesn't do it for us.  Add the related
			 * overhead.
			 */
			*(int *)comp_buffer = comp_size;
			if (comp_size)
				comp_size += sizeof(int);
			break;
		case HAMMER2_COMP_ZLIB:
			comp_level = HAMMER2_DEC_LEVEL(comp_algo);
			if (comp_level == 0)
				comp_level = 6;	/* default zlib compression */
			else if (comp_level < 6)
				comp_level = 6;
			else if (comp_level > 9)
				comp_level = 9;
			ret = deflateInit(&strm_compress, comp_level);
			if (ret != Z_OK) {
				kprintf("HAMMER2 ZLIB: fatal error "
					"on deflateInit.\n");
			}

			comp_buffer = objcache_get(cache_buffer_write,
						   M_INTWAIT);
			strm_compress.next_in = data;
			strm_compress.avail_in = pblksize;
			strm_compress.next_out = comp_buffer;
			strm_compress.avail_out = pblksize / 2;
			ret = deflate(&strm_compress, Z_FINISH);
			if (ret == Z_STREAM_END) {
				comp_size = pblksize / 2 -
					    strm_compress.avail_out;
			} else {
				comp_size = 0;
			}
			ret = deflateEnd(&strm_compress);
			break;
		default:
			kprintf("Error: Unknown compression method.\n");
			kprintf("Comp_method = %d.\n", comp_algo);
			break;
		}
	}

	if (comp_size == 0) {
		/*
		 * compression failed or turned off
		 */
		comp_block_size = pblksize;	/* safety */
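		/*
		 * NOTE: comp_heuristic values 8-128 count recent
		 *	 compression failures; the test above only retries
		 *	 compression on every 8th block while in that
		 *	 range.  A compression success resets it to 0.
		 */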
		if (++ip->comp_heuristic > 128)
			ip->comp_heuristic = 8;
	} else {
		/*
		 * compression succeeded
		 */
		ip->comp_heuristic = 0;
		if (comp_size <= 1024) {
			comp_block_size = 1024;
		} else if (comp_size <= 2048) {
			comp_block_size = 2048;
		} else if (comp_size <= 4096) {
			comp_block_size = 4096;
		} else if (comp_size <= 8192) {
			comp_block_size = 8192;
		} else if (comp_size <= 16384) {
			comp_block_size = 16384;
		} else if (comp_size <= 32768) {
			comp_block_size = 32768;
		} else {
			panic("hammer2: WRITE PATH: "
			      "Weird comp_size value.");
			/* NOT REACHED */
			comp_block_size = pblksize;
		}

		/*
		 * Must zero the remainder or dedup (which operates on a
		 * physical block basis) will not find matches.
		 */
		if (comp_size < comp_block_size) {
			bzero(comp_buffer + comp_size,
			      comp_block_size - comp_size);
		}
	}

	/*
	 * Assign physical storage, data will be set to NULL if a live-dedup
	 * was successful.
	 */
	bdata = comp_size ? comp_buffer : data;
	chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
					mtid, &bdata, errorp);

	if (*errorp) {
		goto done;
	}

	if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
		hammer2_inode_data_t *wipdata;

		*errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
		if (*errorp == 0) {
			wipdata = &chain->data->ipdata;
			KKASSERT(wipdata->meta.op_flags &
				 HAMMER2_OPFLAG_DIRECTDATA);
			bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
			++hammer2_iod_file_wembed;
		}
	} else if (bdata == NULL) {
		/*
		 * Live deduplication, a copy of the data is already present
		 * on the media.
		 */
		if (comp_size) {
			chain->bref.methods =
				HAMMER2_ENC_COMP(comp_algo) +
				HAMMER2_ENC_CHECK(check_algo);
		} else {
			chain->bref.methods =
				HAMMER2_ENC_COMP(
					HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
		}
		bdata = comp_size ? comp_buffer : data;
		hammer2_chain_setcheck(chain, bdata);
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
	} else {
		hammer2_io_t *dio;

		KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

		switch(chain->bref.type) {
		case HAMMER2_BREF_TYPE_INODE:
			panic("hammer2_write_bp: unexpected inode\n");
			break;
		case HAMMER2_BREF_TYPE_DATA:
			/*
			 * Optimize out the read-before-write
			 * if possible.
			 */
			*errorp = hammer2_io_newnz(chain->hmp,
						   chain->bref.type,
						   chain->bref.data_off,
						   chain->bytes,
						   &dio);
			if (*errorp) {
				hammer2_io_brelse(&dio);
				kprintf("hammer2: WRITE PATH: "
					"dbp bread error\n");
				break;
			}
			bdata = hammer2_io_data(dio, chain->bref.data_off);

			/*
			 * When loading the block make sure we don't
			 * leave garbage after the compressed data.
			 */
			if (comp_size) {
				chain->bref.methods =
					HAMMER2_ENC_COMP(comp_algo) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(comp_buffer, bdata, comp_size);
			} else {
				chain->bref.methods =
					HAMMER2_ENC_COMP(
						HAMMER2_COMP_NONE) +
					HAMMER2_ENC_CHECK(check_algo);
				bcopy(data, bdata, pblksize);
			}

			/*
			 * The flush code doesn't calculate check codes for
			 * file data (doing so can result in excessive I/O),
			 * so we do it here.
			 */
			hammer2_chain_setcheck(chain, bdata);

			/*
			 * Device buffer is now valid, chain is no longer in
			 * the initial state.
			 *
			 * (No blockref table worries with file data)
			 */
			atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
			hammer2_dedup_record(chain, dio, bdata);

			/*
			 * Now write the related bdp.  The IO_DIRECT case is
			 * disabled (commented out) because loff/n are not
			 * available in this function.
			 */
			if (ioflag & IO_SYNC) {
				/*
				 * Synchronous I/O requested.
				 */
				hammer2_io_bwrite(&dio);
			/*
			} else if ((ioflag & IO_DIRECT) &&
				   loff + n == pblksize) {
				hammer2_io_bdwrite(&dio);
			*/
			} else if (ioflag & IO_ASYNC) {
				hammer2_io_bawrite(&dio);
			} else {
				hammer2_io_bdwrite(&dio);
			}
			break;
		default:
			panic("hammer2_write_bp: bad chain type %d\n",
			      chain->bref.type);
			/* NOT REACHED */
			break;
		}
	}
done:
	if (chain) {
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	}
	if (comp_buffer)
		objcache_put(cache_buffer_write, comp_buffer);
}

/*
 * Helper
 *
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
	hammer2_chain_t **parentp,
	hammer2_key_t lbase, int ioflag, int pblksize,
	hammer2_tid_t mtid, int *errorp,
	int check_algo)
{
	hammer2_chain_t *chain;
	char *bdata;

	if (check_algo != HAMMER2_CHECK_NONE &&
	    test_block_zeros(data, pblksize)) {
		/*
		 * An all-zeros write creates a hole unless the check code
		 * is disabled.  When the check code is disabled all writes
		 * are done in-place, including any all-zeros writes.
		 *
		 * NOTE: A snapshot will still force a copy-on-write
		 *	 (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
		 */
		zero_write(data, ip, parentp, lbase, mtid, errorp);
	} else {
		/*
		 * Normal write
		 */
		bdata = data;
		chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
						mtid, &bdata, errorp);
		if (*errorp) {
			/* do nothing */
		} else if (bdata) {
			hammer2_write_bp(chain, data, ioflag, pblksize,
					 mtid, errorp, check_algo);
		} else {
			/* dedup occurred */
			chain->bref.methods =
				HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				HAMMER2_ENC_CHECK(check_algo);
			hammer2_chain_setcheck(chain, data);
		}
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
}

/*
 * Helper
 *
 * A function to test whether a block of data contains only zeros,
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
	size_t i;
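
	/*
	 * Scan a long at a time.  Callers pass power-of-2 physical block
	 * sizes, so bytes is assumed to be a multiple of sizeof(long)
	 * and buf suitably aligned.
	 */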
	for (i = 0; i < bytes; i += sizeof(long)) {
		if (*(const long *)(buf + i) != 0)
			return (0);
	}
	return (1);
}

/*
 * Helper
 *
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
	   hammer2_chain_t **parentp,
	   hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
	hammer2_chain_t *chain;
	hammer2_key_t key_dummy;

	chain = hammer2_chain_lookup(parentp, &key_dummy,
				     lbase, lbase,
				     errorp,
				     HAMMER2_LOOKUP_NODATA);
	if (chain) {
		if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
			hammer2_inode_data_t *wipdata;

			if (*errorp == 0) {
				*errorp = hammer2_chain_modify_ip(ip, chain,
								  mtid, 0);
			}
			if (*errorp == 0) {
				wipdata = &chain->data->ipdata;
				KKASSERT(wipdata->meta.op_flags &
					 HAMMER2_OPFLAG_DIRECTDATA);
				bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
				++hammer2_iod_file_wembed;
			}
		} else {
			/* chain->error ok for deletion */
			hammer2_chain_delete(*parentp, chain,
					     mtid, HAMMER2_DELETE_PERMANENT);
			++hammer2_iod_file_wzero;
		}
		hammer2_chain_unlock(chain);
		hammer2_chain_drop(chain);
	} else {
		++hammer2_iod_file_wzero;
	}
}

/*
 * Helper
 *
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the no-compression path and in
 * the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
		 int pblksize,
		 hammer2_tid_t mtid, int *errorp, int check_algo)
{
	hammer2_inode_data_t *wipdata;
	hammer2_io_t *dio;
	char *bdata;
	int error;

	error = 0;	/* XXX TODO below */

	KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

	switch(chain->bref.type) {
	case HAMMER2_BREF_TYPE_INODE:
		wipdata = &chain->data->ipdata;
		KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
		bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
		error = 0;
		++hammer2_iod_file_wembed;
		break;
	case HAMMER2_BREF_TYPE_DATA:
		error = hammer2_io_newnz(chain->hmp,
					 chain->bref.type,
					 chain->bref.data_off,
					 chain->bytes, &dio);
		if (error) {
			hammer2_io_bqrelse(&dio);
			kprintf("hammer2: WRITE PATH: "
				"dbp bread error\n");
			break;
		}
		bdata = hammer2_io_data(dio, chain->bref.data_off);

		chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
				      HAMMER2_ENC_CHECK(check_algo);
		bcopy(data, bdata, chain->bytes);

		/*
		 * The flush code doesn't calculate check codes for
		 * file data (doing so can result in excessive I/O),
		 * so we do it here.
		 */
		hammer2_chain_setcheck(chain, bdata);

		/*
		 * Device buffer is now valid, chain is no longer in
		 * the initial state.
		 *
		 * (No blockref table worries with file data)
		 */
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
		hammer2_dedup_record(chain, dio, bdata);

		/*
		 * The IO_DIRECT case is disabled (commented out) because
		 * loff/n are not available in this function.
		 */
		if (ioflag & IO_SYNC) {
			/*
			 * Synchronous I/O requested.
			 */
			hammer2_io_bwrite(&dio);
		/*
		} else if ((ioflag & IO_DIRECT) &&
			   loff + n == pblksize) {
			hammer2_io_bdwrite(&dio);
		*/
		} else if (ioflag & IO_ASYNC) {
			hammer2_io_bawrite(&dio);
		} else {
			hammer2_io_bdwrite(&dio);
		}
		break;
	default:
		panic("hammer2_write_bp: bad chain type %d\n",
		      chain->bref.type);
		/* NOT REACHED */
		error = 0;
		break;
	}
	*errorp = error;
}

/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *	    All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *	    hammer2_chain_modify() only checks for the dedup case on data
 *	    chains.  Also, dedup data can only be recorded for committed
 *	    chains (so NOT strategy writes which can undergo further
 *	    modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio, char *data)
{
	hammer2_dev_t *hmp;
	hammer2_dedup_t *dedup;
	uint64_t crc;
	uint64_t mask;
	int best = 0;
	int i;
	int dticks;

	/*
	 * We can only record a dedup if we have media data to test against.
	 * If dedup is not enabled, return early, which allows a chain to
	 * remain marked MODIFIED (which might have benefits in special
	 * situations, though typically it does not).
	 */
	if (hammer2_dedup_enable == 0)
		return;
	if (dio == NULL) {
		dio = chain->dio;
		if (dio == NULL)
			return;
	}

	hmp = chain->hmp;

	switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
	case HAMMER2_CHECK_ISCSI32:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	case HAMMER2_CHECK_XXHASH64:
		crc = chain->bref.check.xxhash64.value;
		break;
	case HAMMER2_CHECK_SHA192:
		/*
		 * XXX use the built-in crc (the dedup lookup sequencing
		 * needs to be fixed so the check code is already present
		 * when dedup_lookup is called)
		 */
#if 0
		crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
		      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
		crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
		break;
	default:
		/*
		 * Cannot dedup without a check code
		 *
		 * NOTE: In particular, CHECK_NONE allows a sector to be
		 *	 overwritten without copy-on-write, recording
		 *	 a dedup block for a CHECK_NONE object would be
		 *	 a disaster!
		 */
		return;
	}

	atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);
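
	/*
	 * The heuristic table behaves as a 4-way set-associative cache:
	 * the crc selects a 4-slot bucket and we overwrite the matching
	 * slot, or otherwise the oldest slot (the dticks range test also
	 * appears to guard against tick wraparound).
	 */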
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
	for (i = 0; i < 4; ++i) {
		if (dedup[i].data_crc == crc) {
			best = i;
			break;
		}
		dticks = (int)(dedup[i].ticks - dedup[best].ticks);
		if (dticks < 0 || dticks > hz * 60 * 30)
			best = i;
	}
	dedup += best;
	if (hammer2_debug & 0x40000) {
		kprintf("REC %04x %016jx %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc,
			chain->bref.data_off);
	}
	dedup->ticks = ticks;
	dedup->data_off = chain->bref.data_off;
	dedup->data_crc = crc;

	/*
	 * Set the valid bits for the dedup only after we know the data
	 * buffer has been updated.  The alloc bits were set (and the valid
	 * bits cleared) when the media was allocated.
	 *
	 * This is done in two stages because the bulkfree code can race
	 * the gap between allocation and data population.  Both masks must
	 * be set before a bcmp/dedup operation is able to use the block.
	 */
	mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
	atomic_set_64(&dio->dedup_valid, mask);

#if 0
	/*
	 * XXX removed.  MODIFIED is an integral part of the flush code,
	 * lets not just clear it.
	 */

	/*
	 * Once we record the dedup the chain must be marked clean to
	 * prevent reuse of the underlying block.  Remember that this
	 * write occurs when the buffer cache is flushed (i.e. on sync(),
	 * fsync(), filesystem periodic sync, or when the kernel needs to
	 * flush a buffer), and not whenever the user write()s.
	 */
	if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
		atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
		atomic_add_long(&hammer2_count_modified_chains, -1);
		if (chain->pmp)
			hammer2_pfs_memory_wakeup(chain->pmp);
	}
#endif
}

static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
	hammer2_dedup_t *dedup;
	hammer2_io_t *dio;
	hammer2_off_t off;
	uint64_t crc;
	uint64_t mask;
	char *data;
	char *dtmp;
	int i;

	if (hammer2_dedup_enable == 0)
		return 0;
	data = *datap;
	if (data == NULL)
		return 0;

	/*
	 * XXX use the built-in crc (the dedup lookup sequencing
	 * needs to be fixed so the check code is already present
	 * when dedup_lookup is called)
	 */
	crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
	dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

	if (hammer2_debug & 0x40000) {
		kprintf("LOC %04x/4 %016jx\n",
			(int)(dedup - hmp->heur_dedup),
			crc);
	}

	for (i = 0; i < 4; ++i) {
		off = dedup[i].data_off;
		cpu_ccfence();
		if (dedup[i].data_crc != crc)
			continue;
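		/*
		 * The low bits of data_off encode the block's size radix;
		 * a dedup candidate must match the requested physical
		 * block size exactly.
		 */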
		if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
			continue;
		dio = hammer2_io_getquick(hmp, off, pblksize);
		if (dio) {
			dtmp = hammer2_io_data(dio, off);
			mask = hammer2_dedup_mask(dio, off, pblksize);
			if ((dio->dedup_alloc & mask) == mask &&
			    (dio->dedup_valid & mask) == mask &&
			    bcmp(data, dtmp, pblksize) == 0) {
				if (hammer2_debug & 0x40000) {
					kprintf("DEDUP SUCCESS %016jx\n",
						(intmax_t)off);
				}
				hammer2_io_putblk(&dio);
				*datap = NULL;
				dedup[i].ticks = ticks;	/* update use */
				atomic_add_long(&hammer2_iod_file_wdedup,
						pblksize);
				return off;		/* RETURN */
			}
			hammer2_io_putblk(&dio);
		}
	}
	return 0;
}

/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 *     for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
	int i;

	for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
		hmp->heur_dedup[i].data_off = 0;
		hmp->heur_dedup[i].ticks = ticks - 1;
	}
}