/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_subs.c,v 1.35 2008/10/15 22:38:37 dillon Exp $
 */
/*
 * HAMMER structural locking
 */

#include "hammer.h"
#include <sys/dirent.h>
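
/*
 * Note on the lock encoding (an illustrative summary derived from the
 * functions below): lock->lockval packs a hold count into its low bits
 * together with two flag bits, HAMMER_LOCKF_EXCLUSIVE (set while the
 * lock is held exclusively) and HAMMER_LOCKF_WANTED (set when a blocked
 * thread is waiting for a wakeup).  lock->owner identifies the owning
 * thread and is only meaningful while the lock is held exclusively.
 */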

void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs > 0);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->owner = td;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->owner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else {
			if (hammer_debug_locks) {
				kprintf("hammer_lock_ex: held by %p\n",
					lock->owner);
			}
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(lock, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(lock, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					kprintf("hammer_lock_ex: try again\n");
			}
		}
	}
}

/*
 * Try to obtain an exclusive lock
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	int error;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs > 0);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->owner = td;
				error = 0;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->owner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}
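
/*
 * Typical try-lock pattern (an illustrative sketch, not part of the
 * original file):
 *
 *	if (hammer_lock_ex_try(lock) == 0) {
 *		...exclusive work...
 *		hammer_unlock(lock);
 *	} else {
 *		...back off, the error is EAGAIN...
 *	}
 */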

/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs > 0);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (lock->owner == td) {
			/*
			 * Disallowed case; drop into the kernel debugger
			 * for now.  Continuing ("cont") proceeds with an
			 * exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				Debugger("hammer_lock_sh: already hold ex");
				break;
			}
		} else {
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(lock, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(lock, PINTERLOCKED, "hmrlck", 0);
			}
		}
	}
}

int
hammer_lock_sh_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	int error;

	KKASSERT(lock->refs > 0);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else if (lock->owner == td) {
			/*
			 * Disallowed case; drop into the kernel debugger
			 * for now.  Continuing ("cont") proceeds with an
			 * exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				Debugger("hammer_lock_sh: already hold ex");
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}

/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	int error;

	for (;;) {
		lv = lock->lockval;

		if ((lv & ~HAMMER_LOCKF_WANTED) == 1) {
			nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->owner = td;
				error = 0;
				break;
			}
		} else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
			if (lock->owner != curthread)
				panic("hammer_lock_upgrade: illegal state");
			error = 0;
			break;
		} else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
			panic("hammer_lock_upgrade: lock is not held");
			/* NOT REACHED */
			error = EDEADLK;
			break;
		} else {
			error = EDEADLK;
			break;
		}
	}
	return (error);
}

/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
		 (HAMMER_LOCKF_EXCLUSIVE | 1));
	KKASSERT(lock->owner == td);

	/*
	 * NOTE: Must clear owner before releasing exclusivity
	 */
	lock->owner = NULL;

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
			if (lv & HAMMER_LOCKF_WANTED)
				wakeup(lock);
			break;
		}
	}
}
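
/*
 * Upgrade/downgrade pattern (an illustrative sketch, not part of the
 * original file): a caller holding the sole shared reference can attempt
 * a temporary upgrade and restore the shared state afterwards:
 *
 *	if (hammer_lock_upgrade(lock) == 0) {
 *		...exclusive work...
 *		hammer_lock_downgrade(lock);
 *	}
 *
 * If another shared holder is present the upgrade fails with EDEADLK
 * and the caller must fall back to some other strategy.
 */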

void
hammer_unlock(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	lv = lock->lockval;
	KKASSERT(lv != 0);
	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		KKASSERT(lock->owner == td);

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (nlv > 1) {
			nlv = lv - 1;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (nlv == 1) {
			nlv = 0;
			if (lv & HAMMER_LOCKF_EXCLUSIVE)
				lock->owner = NULL;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (lv & HAMMER_LOCKF_WANTED)
					wakeup(lock);
				break;
			}
		} else {
			panic("hammer_unlock: lock %p is not held", lock);
		}
	}
}

/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if the lock is held shared, and > 0 if it is held
 * exclusively.
 */
int
hammer_lock_status(struct hammer_lock *lock)
{
	u_int lv = lock->lockval;

	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		return(1);
	else if (lv)
		return(-1);
	panic("hammer_lock_status: lock must be held: %p", lock);
}

void
hammer_ref(struct hammer_lock *lock)
{
	KKASSERT(lock->refs >= 0);
	atomic_add_int(&lock->refs, 1);
}

void
hammer_unref(struct hammer_lock *lock)
{
	KKASSERT(lock->refs > 0);
	atomic_subtract_int(&lock->refs, 1);
}

/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data
 * buffers (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold
 * it shared.  All modifying operations made while holding the lock are
 * atomic in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement deadlock recovery code CANNOT release
 * the sync lock, nor can we give pending exclusive sync locks priority
 * over a shared sync lock as this could lead to a 3-way deadlock.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
	int error;

	++trans->sync_lock_refs;
	if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
		--trans->sync_lock_refs;
	return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
	--trans->sync_lock_refs;
	hammer_unlock(&trans->hmp->sync_lock);
}
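
/*
 * Usage sketch (illustrative, not part of the original file): a
 * consumer brackets its meta-data modifications with the shared sync
 * lock so they all land in the same flush group:
 *
 *	hammer_sync_lock_sh(trans);
 *	...modify meta-data buffers...
 *	hammer_sync_unlock(trans);
 *
 * The flusher takes hammer_sync_lock_ex() around its flush to exclude
 * all such consumers at once.
 */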

/*
 * Misc
 */
u_int32_t
hammer_to_unix_xid(uuid_t *uuid)
{
	return(*(u_int32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid)
{
	bzero(uuid, sizeof(*uuid));
	*(u_int32_t *)&uuid->node[2] = guid;
}

void
hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts)
{
	ts->tv_sec = (unsigned long)(xtime / 1000000);
	ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

u_int64_t
hammer_timespec_to_time(struct timespec *ts)
{
	u_int64_t xtime;

	xtime = (unsigned)(ts->tv_nsec / 1000) +
		(unsigned long)ts->tv_sec * 1000000ULL;
	return(xtime);
}
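
/*
 * Worked example (illustrative): HAMMER stores timestamps in
 * microseconds, so ts = { .tv_sec = 1, .tv_nsec = 500000 } converts to
 * xtime = 1 * 1000000 + 500000 / 1000 = 1000500, and converting back
 * yields exactly the same timespec; sub-microsecond precision is
 * discarded.
 */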

/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(VDIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(VREG);
	case HAMMER_OBJTYPE_DBFILE:
		return(VDATABASE);
	case HAMMER_OBJTYPE_FIFO:
		return(VFIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(VSOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(VCHR);
	case HAMMER_OBJTYPE_BDEV:
		return(VBLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(VLNK);
	default:
		return(VBAD);
	}
	/* not reached */
}

/*
 * Convert a HAMMER filesystem object type to a dirent d_type
 */
int
hammer_get_dtype(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(DT_DIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(DT_REG);
	case HAMMER_OBJTYPE_DBFILE:
		return(DT_DBF);
	case HAMMER_OBJTYPE_FIFO:
		return(DT_FIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(DT_SOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(DT_CHR);
	case HAMMER_OBJTYPE_BDEV:
		return(DT_BLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(DT_LNK);
	default:
		return(DT_UNKNOWN);
	}
	/* not reached */
}

u_int8_t
hammer_get_obj_type(enum vtype vtype)
{
	switch(vtype) {
	case VDIR:
		return(HAMMER_OBJTYPE_DIRECTORY);
	case VREG:
		return(HAMMER_OBJTYPE_REGFILE);
	case VDATABASE:
		return(HAMMER_OBJTYPE_DBFILE);
	case VFIFO:
		return(HAMMER_OBJTYPE_FIFO);
	case VSOCK:
		return(HAMMER_OBJTYPE_SOCKET);
	case VCHR:
		return(HAMMER_OBJTYPE_CDEV);
	case VBLK:
		return(HAMMER_OBJTYPE_BDEV);
	case VLNK:
		return(HAMMER_OBJTYPE_SOFTLINK);
	default:
		return(HAMMER_OBJTYPE_UNKNOWN);
	}
	/* not reached */
}

/*
 * Return flags for hammer_delete_at_cursor()
 */
int
hammer_nohistory(hammer_inode_t ip)
{
	if (ip->hmp->hflags & HMNT_NOHISTORY)
		return(HAMMER_DELETE_DESTROY);
	if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
		return(HAMMER_DELETE_DESTROY);
	return(0);
}

/*
 * ALGORITHM VERSION 1:
 *	Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 *	crc in the MSB and 0 in the LSB.  The caller will use the low 32 bits
 *	to generate a unique key and will scan all entries with the same upper
 *	32 bits when issuing a lookup.
 *
 *	0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 2:
 *	The 64 bit hash key is generated from the following components.  The
 *	first three characters are encoded as 5-bit quantities, the middle
 *	N characters are hashed into a 6 bit quantity, and the last two
 *	characters are encoded as 5-bit quantities.  A 32 bit hash of the
 *	entire filename is encoded in the low 32 bits.  Bit 0 is set to
 *	0 to guarantee us a 2^24 iteration space.
 *
 *	0aaaaabbbbbccccc mmmmmmyyyyyzzzzz hhhhhhhhhhhhhhhh hhhhhhhhhhhhhhh0
 *
 *	This gives us a domain sort for the first three characters, the last
 *	two characters, and breaks the middle space into 64 random domains.
 *	The domain sort folds upper case, lower case, digits, and punctuation
 *	spaces together, the idea being that filenames tend not to be a mix
 *	of those domains.
 *
 *	The 64 random domains act as a sub-sort for the middle characters
 *	but may cause a random seek.  If the filesystem is being accessed
 *	in sorted order we should tend to get very good linearity for most
 *	filenames and devolve into more random seeks otherwise.
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
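
/*
 * Worked layout example for algorithm 1 (an illustrative reading of the
 * code below): for a name of length >= 6, bits 58-62 hold name[0] & 0x1F,
 * bits 53-57 name[1] & 0x1F, bits 48-52 name[2] & 0x1F, bits 42-47 a
 * folded crc32 of the middle characters name[3..len-3], bits 37-41
 * name[len-2] & 0x1F, bits 32-36 name[len-1] & 0x1F, and bits 1-31 a
 * crc32 of the entire name with bit 0 forced to 0.
 */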

int64_t
hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			 u_int32_t *max_iterationsp)
{
	int64_t key;
	int32_t crcx;
	const char *aname = name;

	switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
	case HAMMER_INODE_CAP_DIRHASH_ALG0:
		key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
		if (key == 0)
			key |= 0x100000000LL;
		*max_iterationsp = 0xFFFFFFFFU;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG1:
		key = (u_int32_t)crc32(aname, len) & 0xFFFFFFFEU;

		switch(len) {
		default:
			crcx = crc32(aname + 3, len - 5);
			crcx = crcx ^ (crcx >> 6) ^ (crcx >> 12);
			key |= (int64_t)(crcx & 0x3F) << 42;
			/* fall through */
		case 5:
		case 4:
			/* fall through */
		case 3:
			key |= ((int64_t)(aname[2] & 0x1F) << 48);
			/* fall through */
		case 2:
			key |= ((int64_t)(aname[1] & 0x1F) << 53) |
			       ((int64_t)(aname[len-2] & 0x1F) << 37);
			/* fall through */
		case 1:
			key |= ((int64_t)(aname[0] & 0x1F) << 58) |
			       ((int64_t)(aname[len-1] & 0x1F) << 32);
			/* fall through */
		case 0:
			break;
		}
		if ((key & 0xFFFFFFFF00000000LL) == 0)
			key |= 0x100000000LL;
		if (hammer_debug_general & 0x0400) {
			kprintf("namekey2: 0x%016llx %*.*s\n",
				(long long)key, len, len, aname);
		}
		*max_iterationsp = 0x00FFFFFF;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG2:
	case HAMMER_INODE_CAP_DIRHASH_ALG3:
	default:
		key = 0;			/* compiler warning */
		*max_iterationsp = 1;		/* sanity */
		panic("hammer_directory_namekey: bad algorithm %p\n", dip);
		break;
	}
	return(key);
}

/*
 * Convert the string after @@ (@@ not included) to a TID.  Returns 0 on
 * success, EINVAL on failure.
 *
 * If this function fails *ispfsp, *tidp, and *localizationp will not
 * be modified.
 */
int
hammer_str_to_tid(const char *str, int *ispfsp,
		  hammer_tid_t *tidp, u_int32_t *localizationp)
{
	hammer_tid_t tid;
	u_int32_t localization;
	char *ptr;
	int ispfs;
	int n;

	/*
	 * Forms allowed for TID:  "0x%016llx"
	 *			   "-1"
	 */
	tid = strtouq(str, &ptr, 0);
	n = ptr - str;
	if (n == 2 && str[0] == '-' && str[1] == '1') {
		/* ok */
	} else if (n == 18 && str[0] == '0' && (str[1] | 0x20) == 'x') {
		/* ok */
	} else {
		return(EINVAL);
	}

	/*
	 * Forms allowed for PFS:  ":%05d"  (i.e. "...:0" would be illegal).
	 */
	str = ptr;
	if (*str == ':') {
		localization = strtoul(str + 1, &ptr, 10) << 16;
		if (ptr - str != 6)
			return(EINVAL);
		str = ptr;
		ispfs = 1;
	} else {
		localization = *localizationp;
		ispfs = 0;
	}

	/*
	 * Any trailing junk invalidates special extension handling.
	 */
	if (*str)
		return(EINVAL);
	*tidp = tid;
	*localizationp = localization;
	*ispfsp = ispfs;
	return(0);
}
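
/*
 * Examples (illustrative): "0x0000000100000000" parses as that TID with
 * the caller's pre-loaded localization and *ispfsp = 0, while
 * "-1:00001" parses as TID 0xFFFFFFFFFFFFFFFF in PFS #1 (localization
 * 1 << 16, *ispfsp = 1).  "0x1" fails with EINVAL because the TID must
 * be exactly "0x" plus 16 hex digits, or "-1"; a PFS suffix must be a
 * colon plus exactly 5 decimal digits.
 */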

void
hammer_crc_set_blockmap(hammer_blockmap_t blockmap)
{
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

void
hammer_crc_set_volume(hammer_volume_ondisk_t ondisk)
{
	ondisk->vol_crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
			  crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
}

int
hammer_crc_test_blockmap(hammer_blockmap_t blockmap)
{
	hammer_crc_t crc;

	crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	return (blockmap->entry_crc == crc);
}

int
hammer_crc_test_volume(hammer_volume_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
	      crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
	return (ondisk->vol_crc == crc);
}

int
hammer_crc_test_btree(hammer_node_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
	return (ondisk->crc == crc);
}

/*
 * Test or set the leaf->data_crc field.  Deal with any special cases given
 * a generic B-Tree leaf element and its data.
 *
 * NOTE: Inode-data: the atime and mtime fields are not CRCd, allowing them
 *	 to be updated in-place.
 */
int
hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	hammer_crc_t crc;

	if (leaf->data_len == 0) {
		crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			if (leaf->data_len != sizeof(struct hammer_inode_data))
				return(0);
			crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			crc = crc32(data, leaf->data_len);
			break;
		}
	}
	return (leaf->data_crc == crc);
}

void
hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	if (leaf->data_len == 0) {
		leaf->data_crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			KKASSERT(leaf->data_len ==
				 sizeof(struct hammer_inode_data));
			leaf->data_crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			leaf->data_crc = crc32(data, leaf->data_len);
			break;
		}
	}
}

void
hkprintf(const char *ctl, ...)
{
	__va_list va;

	if (hammer_debug_debug) {
		__va_start(va, ctl);
		kvprintf(ctl, va);
		__va_end(va);
	}
}

/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return(HAMMER_BUFSIZE);
	else
		return(HAMMER_XBUFSIZE);
}

/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
	if (file_offset1 < HAMMER_XDEMARC) {
		if (file_offset2 <= HAMMER_XDEMARC)
			return(file_offset2);
		return(HAMMER_XDEMARC);
	}
	panic("hammer_blockdemarc: illegal range %lld %lld\n",
	      (long long)file_offset1, (long long)file_offset2);
}
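
/*
 * Example (illustrative): for an I/O spanning [file_offset1,
 * file_offset2) with file_offset1 < HAMMER_XDEMARC <= file_offset2 the
 * function returns HAMMER_XDEMARC, clipping the range so a single
 * operation never mixes the two block sizes.
 */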

udev_t
hammer_fsid_to_udev(uuid_t *uuid)
{
	u_int32_t crc;

	crc = crc32(uuid, sizeof(*uuid));
	return((udev_t)crc);
}