hammer2 - Remote xop implementation part 1
dragonfly.git: sys/vfs/hammer2/hammer2_strategy.c
/*
 * Copyright (c) 2011-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Daniel Flores (GSOC 2013 - mentored by Matthew Dillon, compression)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module handles low level logical file I/O (strategy) which backs
 * the logical buffer cache.
 *
 * [De]compression, zero-block, check codes, and buffer cache operations
 * for file data are handled here.
 *
 * Live dedup makes its home here as well.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/mountctl.h>
#include <sys/dirent.h>
#include <sys/uio.h>
#include <sys/objcache.h>
#include <sys/event.h>
#include <sys/file.h>
#include <vfs/fifofs/fifo.h>

#include "hammer2.h"
#include "hammer2_lz4.h"

#include "zlib/hammer2_zlib.h"

struct objcache *cache_buffer_read;
struct objcache *cache_buffer_write;
/*
 * Strategy code (async logical file buffer I/O from system)
 *
 * Except for the transaction init (which should normally not block),
 * we essentially run the strategy operation asynchronously via a XOP.
 *
 * WARNING! The XOP deals with buffer synchronization.  It is not synchronized
 *          to the current cpu.
 *
 * XXX This isn't supposed to be able to deadlock against vfs_sync vfsync()
 *     calls but it has in the past when multiple flushes are queued.
 *
 * XXX We currently terminate the transaction once we get a quorum, otherwise
 *     the frontend can stall, but this can leave the remaining nodes with
 *     a potential flush conflict.  We need to delay flushes on those nodes
 *     until running transactions complete separately from the normal
 *     transaction sequencing.  FIXME TODO.
 */
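/*
 * Editor's note, summarizing the flow implemented below:
 * hammer2_vop_strategy() allocates a hammer2_xop_strategy_t, stashes the
 * bio in it, starts the XOP, and returns immediately.  Each cluster node
 * then runs hammer2_xop_strategy_read()/_write() in its own backend
 * thread, feeds its result into the XOP, and races to be the first to
 * collect a definitive result and biodone() the frontend's bio.
 */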
static int hammer2_strategy_read(struct vop_strategy_args *ap);
static int hammer2_strategy_write(struct vop_strategy_args *ap);
static void hammer2_strategy_read_completion(hammer2_chain_t *focus,
                                const char *data, struct bio *bio);

static hammer2_off_t hammer2_dedup_lookup(hammer2_dev_t *hmp,
                                char **datap, int pblksize);
int
hammer2_vop_strategy(struct vop_strategy_args *ap)
{
        struct bio *biop;
        struct buf *bp;
        int error;

        biop = ap->a_bio;
        bp = biop->bio_buf;

        switch(bp->b_cmd) {
        case BUF_CMD_READ:
                error = hammer2_strategy_read(ap);
                ++hammer2_iod_file_read;
                break;
        case BUF_CMD_WRITE:
                error = hammer2_strategy_write(ap);
                ++hammer2_iod_file_write;
                break;
        default:
                bp->b_error = error = EINVAL;
                bp->b_flags |= B_ERROR;
                biodone(biop);
                break;
        }
        return (error);
}
/*
 * Return the largest contiguous physical disk range for the logical
 * request, in bytes.
 *
 * (struct vnode *vp, off_t loffset, off_t *doffsetp, int *runp, int *runb)
 *
 * Basically disabled, the logical buffer write thread has to deal with
 * buffers one-at-a-time.  Note that this should not prevent cluster_read()
 * from reading-ahead, it simply prevents it from trying to form a single
 * cluster buffer for the logical request.  H2 already uses 64KB buffers!
 */
int
hammer2_vop_bmap(struct vop_bmap_args *ap)
{
        *ap->a_doffsetp = NOOFFSET;
        if (ap->a_runp)
                *ap->a_runp = 0;
        if (ap->a_runb)
                *ap->a_runb = 0;
        return (EOPNOTSUPP);
}
/****************************************************************************
 *                              READ SUPPORT                                *
 ****************************************************************************/
/*
 * Callback used in read path in case that a block is compressed with LZ4.
 */
static
void
hammer2_decompress_LZ4_callback(const char *data, u_int bytes, struct bio *bio)
{
        struct buf *bp;
        char *compressed_buffer;
        int compressed_size;
        int result;

        bp = bio->bio_buf;

#if 0
        if bio->bio_caller_info2.index &&
              bio->bio_caller_info1.uvalue32 !=
              crc32(bp->b_data, bp->b_bufsize) --- return error
#endif

        KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
        compressed_size = *(const int *)data;
        KKASSERT((uint32_t)compressed_size <= bytes - sizeof(int));

        compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
        result = LZ4_decompress_safe(__DECONST(char *, &data[sizeof(int)]),
                                     compressed_buffer,
                                     compressed_size,
                                     bp->b_bufsize);
        if (result < 0) {
                kprintf("READ PATH: Error during decompression. "
                        "bio %016jx/%d\n",
                        (intmax_t)bio->bio_offset, bytes);
                /* make sure it isn't random garbage */
                bzero(compressed_buffer, bp->b_bufsize);
        }
        KKASSERT(result <= bp->b_bufsize);
        bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
        if (result < bp->b_bufsize)
                bzero(bp->b_data + result, bp->b_bufsize - result);
        objcache_put(cache_buffer_read, compressed_buffer);
        bp->b_resid = 0;
        bp->b_flags |= B_AGE;
}
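/*
 * Editor's note on the on-media LZ4 layout implied by the compress and
 * decompress paths: the block begins with a native int holding the
 * compressed payload length, followed by the payload itself, with the
 * remainder of the physical block zeroed (see the LZ4 case in
 * hammer2_compress_and_write() below).
 */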
/*
 * Callback used in read path in case that a block is compressed with ZLIB.
 * It is almost identical to LZ4 callback, so in theory they can be unified,
 * but we didn't want to make changes in bio structure for that.
 */
static
void
hammer2_decompress_ZLIB_callback(const char *data, u_int bytes, struct bio *bio)
{
        struct buf *bp;
        char *compressed_buffer;
        z_stream strm_decompress;
        int result;
        int ret;

        bp = bio->bio_buf;

        KKASSERT(bp->b_bufsize <= HAMMER2_PBUFSIZE);
        strm_decompress.avail_in = 0;
        strm_decompress.next_in = Z_NULL;

        ret = inflateInit(&strm_decompress);

        if (ret != Z_OK)
                kprintf("HAMMER2 ZLIB: Fatal error in inflateInit.\n");

        compressed_buffer = objcache_get(cache_buffer_read, M_INTWAIT);
        strm_decompress.next_in = __DECONST(char *, data);

        /* XXX supply proper size, subset of device bp */
        strm_decompress.avail_in = bytes;
        strm_decompress.next_out = compressed_buffer;
        strm_decompress.avail_out = bp->b_bufsize;

        ret = inflate(&strm_decompress, Z_FINISH);
        if (ret != Z_STREAM_END) {
                kprintf("HAMMER2 ZLIB: Fatal error during decompression.\n");
                bzero(compressed_buffer, bp->b_bufsize);
        }
        bcopy(compressed_buffer, bp->b_data, bp->b_bufsize);
        result = bp->b_bufsize - strm_decompress.avail_out;
        if (result < bp->b_bufsize)
                bzero(bp->b_data + result, strm_decompress.avail_out);
        objcache_put(cache_buffer_read, compressed_buffer);
        ret = inflateEnd(&strm_decompress);

        bp->b_resid = 0;
        bp->b_flags |= B_AGE;
}
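/*
 * Editor's note: both decompression callbacks run through an objcache
 * bounce buffer rather than inflating directly into bp->b_data, so a
 * failed or short decompression leaves the logical buffer zero-filled
 * (via the bzero-on-error above) instead of containing garbage.
 */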
/*
 * Logical buffer I/O, async read.
 */
static
int
hammer2_strategy_read(struct vop_strategy_args *ap)
{
        hammer2_xop_strategy_t *xop;
        struct buf *bp;
        struct bio *bio;
        struct bio *nbio;
        hammer2_inode_t *ip;
        hammer2_key_t lbase;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = VTOI(ap->a_vp);
        nbio = push_bio(bio);

        lbase = bio->bio_offset;
        KKASSERT(((int)lbase & HAMMER2_PBUFMASK) == 0);

        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_STRATEGY);
        xop->finished = 0;
        xop->bio = bio;
        xop->lbase = lbase;
        hammer2_mtx_init(&xop->lock, "h2bior");
        hammer2_xop_start(&xop->head, &hammer2_strategy_read_desc);
        /* asynchronous completion */

        return(0);
}
/*
 * Per-node XOP (threaded), do a synchronous lookup of the chain and
 * its data.  The frontend is asynchronous, so we are also responsible
 * for racing to terminate the frontend.
 */
void
hammer2_xop_strategy_read(hammer2_xop_t *arg, void *scratch, int clindex)
{
        hammer2_xop_strategy_t *xop = &arg->xop_strategy;
        hammer2_chain_t *parent;
        hammer2_chain_t *chain;
        hammer2_chain_t *focus;
        hammer2_key_t key_dummy;
        hammer2_key_t lbase;
        struct bio *bio;
        struct buf *bp;
        const char *data;
        int error;

        /*
         * Note that we can race completion of the bio supplied by
         * the front-end so we cannot access it until we determine
         * that we are the ones finishing it up.
         */
        lbase = xop->lbase;

        /*
         * This is difficult to optimize.  The logical buffer might be
         * partially dirty (contain dummy zero-fill pages), which would
         * mess up our crc calculation if we were to try a direct read.
         * So for now we always double-buffer through the underlying
         * storage.
         *
         * If not for the above problem we could conditionalize on
         * (1) 64KB buffer, (2) one chain (not multi-master) and
         * (3) !hammer2_double_buffer, and issue a direct read into the
         * logical buffer.
         */
        parent = hammer2_inode_chain(xop->head.ip1, clindex,
                                     HAMMER2_RESOLVE_ALWAYS |
                                     HAMMER2_RESOLVE_SHARED);
        if (parent) {
                chain = hammer2_chain_lookup(&parent, &key_dummy,
                                             lbase, lbase,
                                             &error,
                                             HAMMER2_LOOKUP_ALWAYS |
                                             HAMMER2_LOOKUP_SHARED);
                if (chain)
                        error = chain->error;
        } else {
                error = HAMMER2_ERROR_EIO;
                chain = NULL;
        }
        error = hammer2_xop_feed(&xop->head, chain, clindex, error);
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
        }
        chain = NULL;   /* safety */
        parent = NULL;  /* safety */

        /*
         * Race to finish the frontend.  First-to-complete.  bio is only
         * valid if we are determined to be the ones able to complete
         * the operation.
         */
        if (xop->finished)
                return;
        hammer2_mtx_ex(&xop->lock);
        if (xop->finished) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }
        bio = xop->bio;
        bp = bio->bio_buf;
        bkvasync(bp);

        /*
         * Async operation has not completed and we now own the lock.
         * Determine if we can complete the operation by issuing the
         * frontend collection non-blocking.
         *
         * H2 double-buffers the data, setting B_NOTMETA on the logical
         * buffer hints to the OS that the logical buffer should not be
         * swapcached (since the device buffer can be).
         *
         * Also note that even for compressed data we would rather the
         * kernel cache/swapcache device buffers more and (decompressed)
         * logical buffers less, since that will significantly improve
         * the amount of end-user data that can be cached.
         *
         * NOTE: The chain->data for xop->head.cluster.focus will be
         *       synchronized to the current cpu by xop_collect(),
         *       but other chains in the cluster might not be.
         */
        error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

        switch(error) {
        case 0:
                xop->finished = 1;
                hammer2_mtx_unlock(&xop->lock);
                bp->b_flags |= B_NOTMETA;
                focus = xop->head.cluster.focus;
                data = hammer2_xop_gdata(&xop->head)->buf;
                hammer2_strategy_read_completion(focus, data, xop->bio);
                hammer2_xop_pdata(&xop->head);
                biodone(bio);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                break;
        case HAMMER2_ERROR_ENOENT:
                xop->finished = 1;
                hammer2_mtx_unlock(&xop->lock);
                bp->b_flags |= B_NOTMETA;
                bp->b_resid = 0;
                bp->b_error = 0;
                bzero(bp->b_data, bp->b_bcount);
                biodone(bio);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                break;
        case HAMMER2_ERROR_EINPROGRESS:
                hammer2_mtx_unlock(&xop->lock);
                break;
        default:
                kprintf("xop_strategy_read: error %08x loff=%016jx\n",
                        error, bp->b_loffset);
                xop->finished = 1;
                hammer2_mtx_unlock(&xop->lock);
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
                biodone(bio);
                hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
                break;
        }
}
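/*
 * Editor's note on the collection outcomes above: 0 - this thread won the
 * race and has good data; HAMMER2_ERROR_ENOENT - no block exists, the
 * request covers a hole and is zero-filled; HAMMER2_ERROR_EINPROGRESS -
 * not enough nodes have replied yet, a later backend thread will finish
 * the bio; anything else is reported to the buffer cache as EIO.
 */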
static
void
hammer2_strategy_read_completion(hammer2_chain_t *focus, const char *data,
                                 struct bio *bio)
{
        struct buf *bp = bio->bio_buf;

        if (focus->bref.type == HAMMER2_BREF_TYPE_INODE) {
                /*
                 * Copy from in-memory inode structure.
                 */
                bcopy(((const hammer2_inode_data_t *)data)->u.data,
                      bp->b_data, HAMMER2_EMBEDDED_BYTES);
                bzero(bp->b_data + HAMMER2_EMBEDDED_BYTES,
                      bp->b_bcount - HAMMER2_EMBEDDED_BYTES);
                bp->b_resid = 0;
                bp->b_error = 0;
        } else if (focus->bref.type == HAMMER2_BREF_TYPE_DATA) {
                /*
                 * Data is on-media, record for live dedup.  Release the
                 * chain (try to free it) when done.  The data is still
                 * cached by both the buffer cache in front and the
                 * block device behind us.  This leaves more room in the
                 * LRU chain cache for meta-data chains which we really
                 * want to retain.
                 *
                 * NOTE: Deduplication cannot be safely recorded for
                 *       records without a check code.
                 */
                hammer2_dedup_record(focus, NULL, data);
                atomic_set_int(&focus->flags, HAMMER2_CHAIN_RELEASE);

                /*
                 * Decompression and copy.
                 */
                switch (HAMMER2_DEC_COMP(focus->bref.methods)) {
                case HAMMER2_COMP_LZ4:
                        hammer2_decompress_LZ4_callback(data, focus->bytes,
                                                        bio);
                        /* b_resid set by call */
                        break;
                case HAMMER2_COMP_ZLIB:
                        hammer2_decompress_ZLIB_callback(data, focus->bytes,
                                                         bio);
                        /* b_resid set by call */
                        break;
                case HAMMER2_COMP_NONE:
                        KKASSERT(focus->bytes <= bp->b_bcount);
                        bcopy(data, bp->b_data, focus->bytes);
                        if (focus->bytes < bp->b_bcount) {
                                bzero(bp->b_data + focus->bytes,
                                      bp->b_bcount - focus->bytes);
                        }
                        bp->b_resid = 0;
                        bp->b_error = 0;
                        break;
                default:
                        panic("hammer2_strategy_read: "
                              "unknown compression type");
                }
        } else {
                panic("hammer2_strategy_read: unknown bref type");
        }
}
/****************************************************************************
 *                              WRITE SUPPORT                               *
 ****************************************************************************/
/*
 * Functions for compression in threads,
 * from hammer2_vnops.c
 */
static void hammer2_write_file_core(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase, int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp);
static void hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase, int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp,
                                int comp_algo, int check_algo);
static void hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase, int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp,
                                int check_algo);
static int test_block_zeros(const char *buf, size_t bytes);
static void zero_write(char *data, hammer2_inode_t *ip,
                                hammer2_chain_t **parentp,
                                hammer2_key_t lbase,
                                hammer2_tid_t mtid, int *errorp);
static void hammer2_write_bp(hammer2_chain_t *chain, char *data,
                                int ioflag, int pblksize,
                                hammer2_tid_t mtid, int *errorp,
                                int check_algo);
static
int
hammer2_strategy_write(struct vop_strategy_args *ap)
{
        hammer2_xop_strategy_t *xop;
        hammer2_pfs_t *pmp;
        struct bio *bio;
        struct buf *bp;
        hammer2_inode_t *ip;

        bio = ap->a_bio;
        bp = bio->bio_buf;
        ip = VTOI(ap->a_vp);
        pmp = ip->pmp;

        atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
        hammer2_lwinprog_ref(pmp);
        hammer2_trans_assert_strategy(pmp);
        hammer2_trans_init(pmp, HAMMER2_TRANS_BUFCACHE);

        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING |
                                    HAMMER2_XOP_STRATEGY);
        xop->finished = 0;
        xop->bio = bio;
        xop->lbase = bio->bio_offset;
        hammer2_mtx_init(&xop->lock, "h2biow");
        hammer2_xop_start(&xop->head, &hammer2_strategy_write_desc);
        /* asynchronous completion */

        hammer2_lwinprog_wait(pmp, hammer2_flush_pipe);

        return(0);
}
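/*
 * Editor's note on the throttle pairing: hammer2_lwinprog_ref() above
 * counts a logical write in progress, hammer2_lwinprog_drop() in the xop
 * backend releases it, and the hammer2_lwinprog_wait() call caps the
 * number of queued strategy writes at hammer2_flush_pipe so the frontend
 * cannot run arbitrarily far ahead of the backends.
 */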
/*
 * Per-node XOP (threaded).  Write the logical buffer to the media.
 *
 * This is a bit problematic because there may be multiple targets and
 * any of them may be able to release the bp.  In addition, if our
 * particular target is offline we don't want to block the bp (and thus
 * the frontend).  To accomplish this we copy the data to the per-thread
 * scratch buffer.
 */
void
hammer2_xop_strategy_write(hammer2_xop_t *arg, void *scratch, int clindex)
{
        hammer2_xop_strategy_t *xop = &arg->xop_strategy;
        hammer2_chain_t *parent;
        hammer2_key_t lbase;
        hammer2_inode_t *ip;
        struct bio *bio;
        struct buf *bp;
        int error;
        int lblksize;
        int pblksize;
        hammer2_off_t bio_offset;
        char *bio_data;

        /*
         * We can only access the bp/bio if the frontend has not yet
         * completed.
         */
        if (xop->finished)
                return;
        hammer2_mtx_sh(&xop->lock);
        if (xop->finished) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }

        lbase = xop->lbase;
        bio = xop->bio;                 /* ephemeral */
        bp = bio->bio_buf;              /* ephemeral */
        ip = xop->head.ip1;             /* retained by ref */
        bio_offset = bio->bio_offset;
        bio_data = scratch;

        /* hammer2_trans_init(parent->hmp->spmp, HAMMER2_TRANS_BUFCACHE); */

        lblksize = hammer2_calc_logical(ip, bio->bio_offset, &lbase, NULL);
        pblksize = hammer2_calc_physical(ip, lbase);
        bkvasync(bp);
        KKASSERT(lblksize <= MAXPHYS);
        bcopy(bp->b_data, bio_data, lblksize);

        hammer2_mtx_unlock(&xop->lock);
        bp = NULL;      /* safety, illegal to access after unlock */
        bio = NULL;     /* safety, illegal to access after unlock */

        /*
         * Actual operation
         */
        parent = hammer2_inode_chain(ip, clindex, HAMMER2_RESOLVE_ALWAYS);
        hammer2_write_file_core(bio_data, ip, &parent,
                                lbase, IO_ASYNC, pblksize,
                                xop->head.mtid, &error);
        if (parent) {
                hammer2_chain_unlock(parent);
                hammer2_chain_drop(parent);
                parent = NULL;  /* safety */
        }
        hammer2_xop_feed(&xop->head, NULL, clindex, error);

        /*
         * Try to complete the operation on behalf of the front-end.
         */
        if (xop->finished)
                return;
        hammer2_mtx_ex(&xop->lock);
        if (xop->finished) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }

        /*
         * Async operation has not completed and we now own the lock.
         * Determine if we can complete the operation by issuing the
         * frontend collection non-blocking.
         *
         * H2 double-buffers the data, setting B_NOTMETA on the logical
         * buffer hints to the OS that the logical buffer should not be
         * swapcached (since the device buffer can be).
         */
        error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_NOWAIT);

        if (error == HAMMER2_ERROR_EINPROGRESS) {
                hammer2_mtx_unlock(&xop->lock);
                return;
        }

        /*
         * Async operation has completed.
         */
        xop->finished = 1;
        hammer2_mtx_unlock(&xop->lock);

        bio = xop->bio;         /* now owned by us */
        bp = bio->bio_buf;      /* now owned by us */

        if (error == HAMMER2_ERROR_ENOENT || error == 0) {
                bp->b_flags |= B_NOTMETA;
                bp->b_resid = 0;
                bp->b_error = 0;
                biodone(bio);
        } else {
                kprintf("xop_strategy_write: error %d loff=%016jx\n",
                        error, bp->b_loffset);
                bp->b_flags |= B_ERROR;
                bp->b_error = EIO;
                biodone(bio);
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        hammer2_trans_assert_strategy(ip->pmp);
        hammer2_lwinprog_drop(ip->pmp);
        hammer2_trans_done(ip->pmp, 0);
}
/*
 * Wait for pending I/O to complete
 */
void
hammer2_bioq_sync(hammer2_pfs_t *pmp)
{
        hammer2_lwinprog_wait(pmp, 0);
}
/*
 * Assign physical storage at (cparent, lbase), returning a suitable chain
 * and setting *errorp appropriately.
 *
 * If no error occurs, the returned chain will be in a modified state.
 *
 * If an error occurs, the returned chain may or may not be NULL.  If
 * not-null any chain->error (if not 0) will also be rolled up into *errorp.
 * So the caller only needs to test *errorp.
 *
 * cparent can wind up being anything.
 *
 * If datap is not NULL, *datap points to the real data we intend to write.
 * If we can dedup the storage location we set *datap to NULL to indicate
 * to the caller that a dedup occurred.
 *
 * NOTE: Special case for data embedded in inode.
 */
static
hammer2_chain_t *
hammer2_assign_physical(hammer2_inode_t *ip, hammer2_chain_t **parentp,
                        hammer2_key_t lbase, int pblksize,
                        hammer2_tid_t mtid, char **datap, int *errorp)
{
        hammer2_chain_t *chain;
        hammer2_key_t key_dummy;
        hammer2_off_t dedup_off;
        int pradix = hammer2_getradix(pblksize);

        /*
         * Locate the chain associated with lbase, return a locked chain.
         * However, do not instantiate any data reference (which utilizes a
         * device buffer) because we will be using direct IO via the
         * logical buffer cache buffer.
         */
        KKASSERT(pblksize >= HAMMER2_ALLOC_MIN);

        chain = hammer2_chain_lookup(parentp, &key_dummy,
                                     lbase, lbase,
                                     errorp,
                                     HAMMER2_LOOKUP_NODATA);

        /*
         * The lookup code should not return a DELETED chain to us, unless
         * it's a short-file embedded in the inode.  Then it is possible for
         * the lookup to return a deleted inode.
         */
        if (chain && (chain->flags & HAMMER2_CHAIN_DELETED) &&
            chain->bref.type != HAMMER2_BREF_TYPE_INODE) {
                kprintf("assign physical deleted chain @ "
                        "%016jx (%016jx.%02x) ip %016jx\n",
                        lbase, chain->bref.data_off, chain->bref.type,
                        ip->meta.inum);
                Debugger("bleh");
        }

        if (chain == NULL) {
                /*
                 * We found a hole, create a new chain entry.
                 *
                 * NOTE: DATA chains are created without device backing
                 *       store (nor do we want any).
                 */
                dedup_off = hammer2_dedup_lookup((*parentp)->hmp, datap,
                                                 pblksize);
                *errorp |= hammer2_chain_create(parentp, &chain,
                                       ip->pmp,
                                       HAMMER2_ENC_CHECK(ip->meta.check_algo) |
                                       HAMMER2_ENC_COMP(HAMMER2_COMP_NONE),
                                       lbase, HAMMER2_PBUFRADIX,
                                       HAMMER2_BREF_TYPE_DATA,
                                       pblksize, mtid,
                                       dedup_off, 0);
                if (chain == NULL)
                        goto failed;
                /*ip->delta_dcount += pblksize;*/
        } else if (chain->error == 0) {
                switch (chain->bref.type) {
                case HAMMER2_BREF_TYPE_INODE:
                        /*
                         * The data is embedded in the inode, which requires
                         * a bit more finesse.
                         */
                        *errorp |= hammer2_chain_modify_ip(ip, chain, mtid, 0);
                        break;
                case HAMMER2_BREF_TYPE_DATA:
                        dedup_off = hammer2_dedup_lookup(chain->hmp, datap,
                                                         pblksize);
                        if (chain->bytes != pblksize) {
                                *errorp |= hammer2_chain_resize(chain,
                                                     mtid, dedup_off,
                                                     pradix,
                                                     HAMMER2_MODIFY_OPTDATA);
                                if (*errorp)
                                        break;
                        }

                        /*
                         * DATA buffers must be marked modified whether the
                         * data is in a logical buffer or not.  We also have
                         * to make this call to fixup the chain data pointers
                         * after resizing in case this is an encrypted or
                         * compressed buffer.
                         */
                        *errorp |= hammer2_chain_modify(chain, mtid, dedup_off,
                                                        HAMMER2_MODIFY_OPTDATA);
                        break;
                default:
                        panic("hammer2_assign_physical: bad type");
                        /* NOT REACHED */
                        break;
                }
        } else {
                *errorp = chain->error;
        }
        atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
failed:
        return (chain);
}
/*
 * hammer2_write_file_core() - hammer2_write_thread() helper
 *
 * The core write function which determines which path to take
 * depending on compression settings.  We also have to locate the
 * related chains so we can calculate and set the check data for
 * the blockref.
 */
static
void
hammer2_write_file_core(char *data, hammer2_inode_t *ip,
                        hammer2_chain_t **parentp,
                        hammer2_key_t lbase, int ioflag, int pblksize,
                        hammer2_tid_t mtid, int *errorp)
{
        hammer2_chain_t *chain;
        char *bdata;

        *errorp = 0;

        switch(HAMMER2_DEC_ALGO(ip->meta.comp_algo)) {
        case HAMMER2_COMP_NONE:
                /*
                 * We have to assign physical storage to the buffer
                 * we intend to dirty or write now to avoid deadlocks
                 * in the strategy code later.
                 *
                 * This can return NOOFFSET for inode-embedded data.
                 * The strategy code will take care of it in that case.
                 */
                bdata = data;
                chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
                                                mtid, &bdata, errorp);
                if (*errorp) {
                        /* skip modifications */
                } else if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
                        hammer2_inode_data_t *wipdata;

                        wipdata = &chain->data->ipdata;
                        KKASSERT(wipdata->meta.op_flags &
                                 HAMMER2_OPFLAG_DIRECTDATA);
                        bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                        ++hammer2_iod_file_wembed;
                } else if (bdata == NULL) {
                        /*
                         * Copy of data already present on-media.
                         */
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
                                HAMMER2_ENC_CHECK(ip->meta.check_algo);
                        hammer2_chain_setcheck(chain, data);
                } else {
                        hammer2_write_bp(chain, data, ioflag, pblksize,
                                         mtid, errorp, ip->meta.check_algo);
                }
                if (chain) {
                        hammer2_chain_unlock(chain);
                        hammer2_chain_drop(chain);
                }
                break;
        case HAMMER2_COMP_AUTOZERO:
                /*
                 * Check for zero-fill only
                 */
                hammer2_zero_check_and_write(data, ip, parentp,
                                             lbase, ioflag, pblksize,
                                             mtid, errorp,
                                             ip->meta.check_algo);
                break;
        case HAMMER2_COMP_LZ4:
        case HAMMER2_COMP_ZLIB:
        default:
                /*
                 * Check for zero-fill and attempt compression.
                 */
                hammer2_compress_and_write(data, ip, parentp,
                                           lbase, ioflag, pblksize,
                                           mtid, errorp,
                                           ip->meta.comp_algo,
                                           ip->meta.check_algo);
                break;
        }
}
/*
 * Helper
 *
 * Generic function that performs the compression in the compressed
 * write path.  The compression algorithm is determined by the settings
 * obtained from the inode.
 */
static
void
hammer2_compress_and_write(char *data, hammer2_inode_t *ip,
        hammer2_chain_t **parentp,
        hammer2_key_t lbase, int ioflag, int pblksize,
        hammer2_tid_t mtid, int *errorp, int comp_algo, int check_algo)
{
        hammer2_chain_t *chain;
        int comp_size;
        int comp_block_size;
        char *comp_buffer;
        char *bdata;

        /*
         * An all-zeros write creates a hole unless the check code
         * is disabled.  When the check code is disabled all writes
         * are done in-place, including any all-zeros writes.
         *
         * NOTE: A snapshot will still force a copy-on-write
         *       (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
         */
        if (check_algo != HAMMER2_CHECK_NONE &&
            test_block_zeros(data, pblksize)) {
                zero_write(data, ip, parentp, lbase, mtid, errorp);
                return;
        }

        /*
         * Compression requested.  Try to compress the block.  We store
         * the data normally if we cannot sufficiently compress it.
         *
         * We have a heuristic to detect files which are mostly
         * incompressible and avoid the compression attempt in that
         * case.  If the compression heuristic is turned off, we always
         * try to compress.
         */
        comp_size = 0;
        comp_buffer = NULL;

        KKASSERT(pblksize / 2 <= 32768);

        if (ip->comp_heuristic < 8 || (ip->comp_heuristic & 7) == 0 ||
            hammer2_always_compress) {
                z_stream strm_compress;
                int comp_level;
                int ret;

                switch(HAMMER2_DEC_ALGO(comp_algo)) {
                case HAMMER2_COMP_LZ4:
                        /*
                         * We need to prefix with the size, LZ4
                         * doesn't do it for us.  Add the related
                         * overhead.
                         *
                         * NOTE: The LZ4 code seems to assume at least an
                         *       8-byte buffer size granularity and may
                         *       overrun the buffer if given a 4-byte
                         *       granularity.
                         */
                        comp_buffer = objcache_get(cache_buffer_write,
                                                   M_INTWAIT);
                        comp_size = LZ4_compress_limitedOutput(
                                        data,
                                        &comp_buffer[sizeof(int)],
                                        pblksize,
                                        pblksize / 2 - sizeof(int64_t));
                        *(int *)comp_buffer = comp_size;
                        if (comp_size)
                                comp_size += sizeof(int);
                        break;
                case HAMMER2_COMP_ZLIB:
                        comp_level = HAMMER2_DEC_LEVEL(comp_algo);
                        if (comp_level == 0)
                                comp_level = 6; /* default zlib compression */
                        else if (comp_level < 6)
                                comp_level = 6;
                        else if (comp_level > 9)
                                comp_level = 9;
                        ret = deflateInit(&strm_compress, comp_level);
                        if (ret != Z_OK) {
                                kprintf("HAMMER2 ZLIB: fatal error "
                                        "on deflateInit.\n");
                        }

                        comp_buffer = objcache_get(cache_buffer_write,
                                                   M_INTWAIT);
                        strm_compress.next_in = data;
                        strm_compress.avail_in = pblksize;
                        strm_compress.next_out = comp_buffer;
                        strm_compress.avail_out = pblksize / 2;
                        ret = deflate(&strm_compress, Z_FINISH);
                        if (ret == Z_STREAM_END) {
                                comp_size = pblksize / 2 -
                                            strm_compress.avail_out;
                        } else {
                                comp_size = 0;
                        }
                        ret = deflateEnd(&strm_compress);
                        break;
                default:
                        kprintf("Error: Unknown compression method.\n");
                        kprintf("Comp_method = %d.\n", comp_algo);
                        break;
                }
        }

        if (comp_size == 0) {
                /*
                 * compression failed or turned off
                 */
                comp_block_size = pblksize;     /* safety */
                if (++ip->comp_heuristic > 128)
                        ip->comp_heuristic = 8;
        } else {
                /*
                 * compression succeeded
                 */
                ip->comp_heuristic = 0;
                if (comp_size <= 1024) {
                        comp_block_size = 1024;
                } else if (comp_size <= 2048) {
                        comp_block_size = 2048;
                } else if (comp_size <= 4096) {
                        comp_block_size = 4096;
                } else if (comp_size <= 8192) {
                        comp_block_size = 8192;
                } else if (comp_size <= 16384) {
                        comp_block_size = 16384;
                } else if (comp_size <= 32768) {
                        comp_block_size = 32768;
                } else {
                        panic("hammer2: WRITE PATH: "
                              "Weird comp_size value.");
                        /* NOT REACHED */
                        comp_block_size = pblksize;
                }

                /*
                 * Must zero the remainder or dedup (which operates on a
                 * physical block basis) will not find matches.
                 */
                if (comp_size < comp_block_size) {
                        bzero(comp_buffer + comp_size,
                              comp_block_size - comp_size);
                }
        }
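
        /*
         * Editor's note: the ladder above simply rounds comp_size up to the
         * next power-of-two physical block size between 1KB and 32KB (the
         * compressed output is capped at pblksize / 2, i.e. 32KB, by the
         * KKASSERT earlier).  The zero padding matters because dedup
         * compares whole physical blocks, so trailing garbage would defeat
         * it.  The comp_heuristic updates implement the retry policy
         * described in the comment above: success resets the counter to 0,
         * failures push it up (wrapping 128 back to 8), and once it reaches
         * 8 only every 8th block attempts compression again.
         */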
        /*
         * Assign physical storage, data will be set to NULL if a live-dedup
         * was successful.
         */
        bdata = comp_size ? comp_buffer : data;
        chain = hammer2_assign_physical(ip, parentp, lbase, comp_block_size,
                                        mtid, &bdata, errorp);

        if (*errorp) {
                goto done;
        }

        if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
                hammer2_inode_data_t *wipdata;

                *errorp = hammer2_chain_modify_ip(ip, chain, mtid, 0);
                if (*errorp == 0) {
                        wipdata = &chain->data->ipdata;
                        KKASSERT(wipdata->meta.op_flags &
                                 HAMMER2_OPFLAG_DIRECTDATA);
                        bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                        ++hammer2_iod_file_wembed;
                }
        } else if (bdata == NULL) {
                /*
                 * Live deduplication, a copy of the data is already present
                 * on the media.
                 */
                if (comp_size) {
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(comp_algo) +
                                HAMMER2_ENC_CHECK(check_algo);
                } else {
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(
                                        HAMMER2_COMP_NONE) +
                                HAMMER2_ENC_CHECK(check_algo);
                }
                bdata = comp_size ? comp_buffer : data;
                hammer2_chain_setcheck(chain, bdata);
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
        } else {
                hammer2_io_t *dio;

                KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

                switch(chain->bref.type) {
                case HAMMER2_BREF_TYPE_INODE:
                        panic("hammer2_compress_and_write: "
                              "unexpected inode\n");
                        break;
                case HAMMER2_BREF_TYPE_DATA:
                        /*
                         * Optimize out the read-before-write
                         * if possible.
                         */
                        *errorp = hammer2_io_newnz(chain->hmp,
                                                   chain->bref.type,
                                                   chain->bref.data_off,
                                                   chain->bytes,
                                                   &dio);
                        if (*errorp) {
                                hammer2_io_brelse(&dio);
                                kprintf("hammer2: WRITE PATH: "
                                        "dbp bread error\n");
                                break;
                        }
                        bdata = hammer2_io_data(dio, chain->bref.data_off);

                        /*
                         * When loading the block make sure we don't
                         * leave garbage after the compressed data.
                         */
                        if (comp_size) {
                                chain->bref.methods =
                                        HAMMER2_ENC_COMP(comp_algo) +
                                        HAMMER2_ENC_CHECK(check_algo);
                                bcopy(comp_buffer, bdata, comp_size);
                        } else {
                                chain->bref.methods =
                                        HAMMER2_ENC_COMP(
                                                HAMMER2_COMP_NONE) +
                                        HAMMER2_ENC_CHECK(check_algo);
                                bcopy(data, bdata, pblksize);
                        }

                        /*
                         * The flush code doesn't calculate check codes for
                         * file data (doing so can result in excessive I/O),
                         * so we do it here.
                         */
                        hammer2_chain_setcheck(chain, bdata);

                        /*
                         * Device buffer is now valid, chain is no longer in
                         * the initial state.
                         *
                         * (No blockref table worries with file data)
                         */
                        atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
                        hammer2_dedup_record(chain, dio, bdata);

                        /* Now write the related bdp. */
                        if (ioflag & IO_SYNC) {
                                /*
                                 * Synchronous I/O requested.
                                 */
                                hammer2_io_bwrite(&dio);
                        /*
                        } else if ((ioflag & IO_DIRECT) &&
                                   loff + n == pblksize) {
                                hammer2_io_bdwrite(&dio);
                        */
                        } else if (ioflag & IO_ASYNC) {
                                hammer2_io_bawrite(&dio);
                        } else {
                                hammer2_io_bdwrite(&dio);
                        }
                        break;
                default:
                        panic("hammer2_compress_and_write: "
                              "bad chain type %d\n",
                              chain->bref.type);
                        /* NOT REACHED */
                        break;
                }
        }
done:
        if (chain) {
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        }
        if (comp_buffer)
                objcache_put(cache_buffer_write, comp_buffer);
}
/*
 * Helper
 *
 * Function that performs zero-checking and writing without compression;
 * it corresponds to the default zero-checking path.
 */
static
void
hammer2_zero_check_and_write(char *data, hammer2_inode_t *ip,
        hammer2_chain_t **parentp,
        hammer2_key_t lbase, int ioflag, int pblksize,
        hammer2_tid_t mtid, int *errorp,
        int check_algo)
{
        hammer2_chain_t *chain;
        char *bdata;

        if (check_algo != HAMMER2_CHECK_NONE &&
            test_block_zeros(data, pblksize)) {
                /*
                 * An all-zeros write creates a hole unless the check code
                 * is disabled.  When the check code is disabled all writes
                 * are done in-place, including any all-zeros writes.
                 *
                 * NOTE: A snapshot will still force a copy-on-write
                 *       (see the HAMMER2_CHECK_NONE in hammer2_chain.c).
                 */
                zero_write(data, ip, parentp, lbase, mtid, errorp);
        } else {
                /*
                 * Normal write
                 */
                bdata = data;
                chain = hammer2_assign_physical(ip, parentp, lbase, pblksize,
                                                mtid, &bdata, errorp);
                if (*errorp) {
                        /* do nothing */
                } else if (bdata) {
                        hammer2_write_bp(chain, data, ioflag, pblksize,
                                         mtid, errorp, check_algo);
                } else {
                        /* dedup occurred */
                        chain->bref.methods =
                                HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
                                HAMMER2_ENC_CHECK(check_algo);
                        hammer2_chain_setcheck(chain, data);
                }
                if (chain) {
                        hammer2_chain_unlock(chain);
                        hammer2_chain_drop(chain);
                }
        }
}
/*
 * Helper
 *
 * A function to test whether a block of data contains only zeros;
 * returns TRUE (non-zero) if the block is all zeros.
 */
static
int
test_block_zeros(const char *buf, size_t bytes)
{
        size_t i;

        for (i = 0; i < bytes; i += sizeof(long)) {
                if (*(const long *)(buf + i) != 0)
                        return (0);
        }
        return (1);
}
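/*
 * Editor's note: the scan above tests one long at a time.  This is safe
 * here because the sizes passed in are physical block sizes (powers of
 * two of at least HAMMER2_ALLOC_MIN), which are always a multiple of
 * sizeof(long).
 */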
/*
 * Helper
 *
 * Function to "write" a block that contains only zeros.
 */
static
void
zero_write(char *data, hammer2_inode_t *ip,
           hammer2_chain_t **parentp,
           hammer2_key_t lbase, hammer2_tid_t mtid, int *errorp)
{
        hammer2_chain_t *chain;
        hammer2_key_t key_dummy;

        chain = hammer2_chain_lookup(parentp, &key_dummy,
                                     lbase, lbase,
                                     errorp,
                                     HAMMER2_LOOKUP_NODATA);
        if (chain) {
                if (chain->bref.type == HAMMER2_BREF_TYPE_INODE) {
                        hammer2_inode_data_t *wipdata;

                        if (*errorp == 0) {
                                *errorp = hammer2_chain_modify_ip(ip, chain,
                                                                  mtid, 0);
                        }
                        if (*errorp == 0) {
                                wipdata = &chain->data->ipdata;
                                KKASSERT(wipdata->meta.op_flags &
                                         HAMMER2_OPFLAG_DIRECTDATA);
                                bzero(wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                                ++hammer2_iod_file_wembed;
                        }
                } else {
                        /* chain->error ok for deletion */
                        hammer2_chain_delete(*parentp, chain,
                                             mtid, HAMMER2_DELETE_PERMANENT);
                        ++hammer2_iod_file_wzero;
                }
                atomic_set_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
                hammer2_chain_unlock(chain);
                hammer2_chain_drop(chain);
        } else {
                ++hammer2_iod_file_wzero;
        }
}
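/*
 * Editor's note: a logical block of zeros is normally represented by the
 * absence of a chain (a hole), which is why zero_write() deletes any
 * existing block rather than writing zeros.  Inode-embedded data is the
 * exception and is simply bzero()d in place.
 */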
/*
 * Helper
 *
 * Function to write the data as it is, without performing any sort of
 * compression.  This function is used in the no-compression path and in
 * the default zero-checking path.
 */
static
void
hammer2_write_bp(hammer2_chain_t *chain, char *data, int ioflag,
                 int pblksize,
                 hammer2_tid_t mtid, int *errorp, int check_algo)
{
        hammer2_inode_data_t *wipdata;
        hammer2_io_t *dio;
        char *bdata;
        int error;

        error = 0;      /* XXX TODO below */

        KKASSERT(chain->flags & HAMMER2_CHAIN_MODIFIED);

        switch(chain->bref.type) {
        case HAMMER2_BREF_TYPE_INODE:
                wipdata = &chain->data->ipdata;
                KKASSERT(wipdata->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA);
                bcopy(data, wipdata->u.data, HAMMER2_EMBEDDED_BYTES);
                error = 0;
                ++hammer2_iod_file_wembed;
                break;
        case HAMMER2_BREF_TYPE_DATA:
                error = hammer2_io_newnz(chain->hmp,
                                         chain->bref.type,
                                         chain->bref.data_off,
                                         chain->bytes, &dio);
                if (error) {
                        hammer2_io_bqrelse(&dio);
                        kprintf("hammer2: WRITE PATH: "
                                "dbp bread error\n");
                        break;
                }
                bdata = hammer2_io_data(dio, chain->bref.data_off);

                chain->bref.methods = HAMMER2_ENC_COMP(HAMMER2_COMP_NONE) +
                                      HAMMER2_ENC_CHECK(check_algo);
                bcopy(data, bdata, chain->bytes);

                /*
                 * The flush code doesn't calculate check codes for
                 * file data (doing so can result in excessive I/O),
                 * so we do it here.
                 */
                hammer2_chain_setcheck(chain, bdata);

                /*
                 * Device buffer is now valid, chain is no longer in
                 * the initial state.
                 *
                 * (No blockref table worries with file data)
                 */
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_INITIAL);
                hammer2_dedup_record(chain, dio, bdata);

                if (ioflag & IO_SYNC) {
                        /*
                         * Synchronous I/O requested.
                         */
                        hammer2_io_bwrite(&dio);
                /*
                } else if ((ioflag & IO_DIRECT) &&
                           loff + n == pblksize) {
                        hammer2_io_bdwrite(&dio);
                */
                } else if (ioflag & IO_ASYNC) {
                        hammer2_io_bawrite(&dio);
                } else {
                        hammer2_io_bdwrite(&dio);
                }
                break;
        default:
                panic("hammer2_write_bp: bad chain type %d\n",
                      chain->bref.type);
                /* NOT REACHED */
                error = 0;
                break;
        }
        *errorp = error;
}
/*
 * LIVE DEDUP HEURISTICS
 *
 * Record media and crc information for possible dedup operation.  Note
 * that the dedup mask bits must also be set in the related DIO for a dedup
 * to be fully validated (which is handled in the freemap allocation code).
 *
 * WARNING! This code is SMP safe but the heuristic allows SMP collisions.
 *          All fields must be loaded into locals and validated.
 *
 * WARNING! Should only be used for file data and directory entries,
 *          hammer2_chain_modify() only checks for the dedup case on data
 *          chains.  Also, dedup data can only be recorded for committed
 *          chains (so NOT strategy writes which can undergo further
 *          modification after the fact!).
 */
void
hammer2_dedup_record(hammer2_chain_t *chain, hammer2_io_t *dio,
                     const char *data)
{
        hammer2_dev_t *hmp;
        hammer2_dedup_t *dedup;
        uint64_t crc;
        uint64_t mask;
        int best = 0;
        int i;
        int dticks;

        /*
         * We can only record a dedup if we have media data to test against.
         * If dedup is not enabled, return early, which allows a chain to
         * remain marked MODIFIED (which might have benefits in special
         * situations, though typically it does not).
         */
        if (hammer2_dedup_enable == 0)
                return;
        if (dio == NULL) {
                dio = chain->dio;
                if (dio == NULL)
                        return;
        }

        hmp = chain->hmp;

        switch(HAMMER2_DEC_CHECK(chain->bref.methods)) {
        case HAMMER2_CHECK_ISCSI32:
                /*
                 * XXX use the built-in crc (the dedup lookup sequencing
                 * needs to be fixed so the check code is already present
                 * when dedup_lookup is called)
                 */
#if 0
                crc = (uint64_t)(uint32_t)chain->bref.check.iscsi32.value;
#endif
                crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
                break;
        case HAMMER2_CHECK_XXHASH64:
                crc = chain->bref.check.xxhash64.value;
                break;
        case HAMMER2_CHECK_SHA192:
                /*
                 * XXX use the built-in crc (the dedup lookup sequencing
                 * needs to be fixed so the check code is already present
                 * when dedup_lookup is called)
                 */
#if 0
                crc = ((uint64_t *)chain->bref.check.sha192.data)[0] ^
                      ((uint64_t *)chain->bref.check.sha192.data)[1] ^
                      ((uint64_t *)chain->bref.check.sha192.data)[2];
#endif
                crc = XXH64(data, chain->bytes, XXH_HAMMER2_SEED);
                break;
        default:
                /*
                 * Cannot dedup without a check code
                 *
                 * NOTE: In particular, CHECK_NONE allows a sector to be
                 *       overwritten without copy-on-write, recording
                 *       a dedup block for a CHECK_NONE object would be
                 *       a disaster!
                 */
                return;
        }

        atomic_set_int(&chain->flags, HAMMER2_CHAIN_DEDUPABLE);

        dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];
        for (i = 0; i < 4; ++i) {
                if (dedup[i].data_crc == crc) {
                        best = i;
                        break;
                }
                dticks = (int)(dedup[i].ticks - dedup[best].ticks);
                if (dticks < 0 || dticks > hz * 60 * 30)
                        best = i;
        }
        dedup += best;
        if (hammer2_debug & 0x40000) {
                kprintf("REC %04x %016jx %016jx\n",
                        (int)(dedup - hmp->heur_dedup),
                        crc,
                        chain->bref.data_off);
        }
        dedup->ticks = ticks;
        dedup->data_off = chain->bref.data_off;
        dedup->data_crc = crc;

        /*
         * Set the valid bits for the dedup only after we know the data
         * buffer has been updated.  The alloc bits were set (and the valid
         * bits cleared) when the media was allocated.
         *
         * This is done in two stages because the bulkfree code can race
         * the gap between allocation and data population.  Both masks must
         * be set before a bcmp/dedup operation is able to use the block.
         */
        mask = hammer2_dedup_mask(dio, chain->bref.data_off, chain->bytes);
        atomic_set_64(&dio->dedup_valid, mask);

#if 0
        /*
         * XXX removed.  MODIFIED is an integral part of the flush code,
         * lets not just clear it
         */
        /*
         * Once we record the dedup the chain must be marked clean to
         * prevent reuse of the underlying block.  Remember that this
         * write occurs when the buffer cache is flushed (i.e. on sync(),
         * fsync(), filesystem periodic sync, or when the kernel needs to
         * flush a buffer), and not whenever the user write()s.
         */
        if (chain->flags & HAMMER2_CHAIN_MODIFIED) {
                atomic_clear_int(&chain->flags, HAMMER2_CHAIN_MODIFIED);
                atomic_add_long(&hammer2_count_modified_chains, -1);
                if (chain->pmp)
                        hammer2_pfs_memory_wakeup(chain->pmp);
        }
#endif
}
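/*
 * Editor's note on the heuristic geometry above: the low bits of the crc
 * select a 4-entry bucket (crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)); within
 * the bucket an exact crc match is reused, otherwise the least-recently
 * used entry (by ticks, with a 30-minute horizon via hz * 60 * 30) is
 * replaced.
 */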
static
hammer2_off_t
hammer2_dedup_lookup(hammer2_dev_t *hmp, char **datap, int pblksize)
{
        hammer2_dedup_t *dedup;
        hammer2_io_t *dio;
        hammer2_off_t off;
        uint64_t crc;
        uint64_t mask;
        char *data;
        char *dtmp;
        int i;

        if (hammer2_dedup_enable == 0)
                return 0;
        data = *datap;
        if (data == NULL)
                return 0;

        /*
         * XXX use the built-in crc (the dedup lookup sequencing
         * needs to be fixed so the check code is already present
         * when dedup_lookup is called)
         */
        crc = XXH64(data, pblksize, XXH_HAMMER2_SEED);
        dedup = &hmp->heur_dedup[crc & (HAMMER2_DEDUP_HEUR_MASK & ~3)];

        if (hammer2_debug & 0x40000) {
                kprintf("LOC %04x/4 %016jx\n",
                        (int)(dedup - hmp->heur_dedup),
                        crc);
        }

        for (i = 0; i < 4; ++i) {
                off = dedup[i].data_off;
                cpu_ccfence();
                if (dedup[i].data_crc != crc)
                        continue;
                if ((1 << (int)(off & HAMMER2_OFF_MASK_RADIX)) != pblksize)
                        continue;
                dio = hammer2_io_getquick(hmp, off, pblksize);
                if (dio) {
                        dtmp = hammer2_io_data(dio, off);
                        mask = hammer2_dedup_mask(dio, off, pblksize);
                        if ((dio->dedup_alloc & mask) == mask &&
                            (dio->dedup_valid & mask) == mask &&
                            bcmp(data, dtmp, pblksize) == 0) {
                                if (hammer2_debug & 0x40000) {
                                        kprintf("DEDUP SUCCESS %016jx\n",
                                                (intmax_t)off);
                                }
                                hammer2_io_putblk(&dio);
                                *datap = NULL;
                                dedup[i].ticks = ticks; /* update use */
                                atomic_add_long(&hammer2_iod_file_wdedup,
                                                pblksize);
                                return off;             /* RETURN */
                        }
                        hammer2_io_putblk(&dio);
                }
        }
        return 0;
}
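/*
 * Editor's note: a candidate entry above must pass three gates before its
 * offset is reused: the radix encoded in the low bits of data_off must
 * match pblksize, the DIO's dedup_alloc and dedup_valid masks must both
 * cover the range, and a full bcmp() of the data must succeed.
 */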
/*
 * Poof.  Races are ok, if someone gets in and reuses a dedup offset
 * before or while we are clearing it they will also recover the freemap
 * entry (set it to fully allocated), so a bulkfree race can only set it
 * to a possibly-free state.
 *
 * XXX ok, well, not really sure races are ok but going to run with it
 *     for the moment.
 */
void
hammer2_dedup_clear(hammer2_dev_t *hmp)
{
        int i;

        for (i = 0; i < HAMMER2_DEDUP_HEUR_SIZE; ++i) {
                hmp->heur_dedup[i].data_off = 0;
                hmp->heur_dedup[i].ticks = ticks - 1;
        }
}