/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_subs.c,v 1.26 2008/06/20 21:24:53 dillon Exp $
 */
/*
 * HAMMER structural locking
 */

#include "hammer.h"
#include <sys/dirent.h>
void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;

	KKASSERT(lock->refs > 0);
	crit_enter();
	if (lock->locktd != td) {
		while (lock->locktd != NULL || lock->lockcount) {
			++lock->exwanted;
			lock->wanted = 1;
			if (hammer_debug_locks) {
				kprintf("hammer_lock_ex: held by %p\n",
					lock->locktd);
			}
			++hammer_contention_count;
			tsleep(lock, 0, ident, 0);
			if (hammer_debug_locks)
				kprintf("hammer_lock_ex: try again\n");
			--lock->exwanted;
		}
		lock->locktd = td;
	}
	KKASSERT(lock->lockcount >= 0);
	++lock->lockcount;
	crit_exit();
}
/*
 * Try to obtain an exclusive lock
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
	thread_t td = curthread;

	KKASSERT(lock->refs > 0);
	crit_enter();
	if (lock->locktd != td) {
		if (lock->locktd != NULL || lock->lockcount) {
			crit_exit();
			return(EAGAIN);
		}
		lock->locktd = td;
	}
	KKASSERT(lock->lockcount >= 0);
	++lock->lockcount;
	crit_exit();
	return(0);
}
/*
 * Obtain a shared lock
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
	KKASSERT(lock->refs > 0);
	crit_enter();
	while (lock->locktd != NULL) {
		if (lock->locktd == curthread) {
			Debugger("hammer_lock_sh: lock_sh on exclusive");
			++lock->lockcount;
			crit_exit();
			return;
		}
		lock->wanted = 1;
		tsleep(lock, 0, "hmrlck", 0);
	}
	KKASSERT(lock->lockcount <= 0);
	--lock->lockcount;
	crit_exit();
}
/*
 * Obtain a shared lock at a lower priority than a thread waiting for an
 * exclusive lock.  To avoid a deadlock this may only be done if no other
 * shared locks are being held by the caller.
 */
void
hammer_lock_sh_lowpri(struct hammer_lock *lock)
{
	KKASSERT(lock->refs > 0);
	crit_enter();
	while (lock->locktd != NULL || lock->exwanted) {
		if (lock->locktd == curthread) {
			Debugger("hammer_lock_sh: lock_sh on exclusive");
			++lock->lockcount;
			crit_exit();
			return;
		}
		lock->wanted = 1;
		tsleep(lock, 0, "hmrlck", 0);
	}
	KKASSERT(lock->lockcount <= 0);
	--lock->lockcount;
	crit_exit();
}
/*
 * Try to obtain a shared lock without blocking.
 */
int
hammer_lock_sh_try(struct hammer_lock *lock)
{
	KKASSERT(lock->refs > 0);
	crit_enter();
	if (lock->locktd) {
		crit_exit();
		return(EAGAIN);
	}
	KKASSERT(lock->lockcount <= 0);
	--lock->lockcount;
	crit_exit();
	return(0);
}
/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock)
{
	int error;

	crit_enter();
	if (lock->lockcount > 0) {
		if (lock->locktd != curthread)
			panic("hammer_lock_upgrade: illegal lock state");
		error = 0;
	} else if (lock->lockcount == -1) {
		lock->lockcount = 1;
		lock->locktd = curthread;
		error = 0;
	} else if (lock->lockcount != 0) {
		error = EDEADLK;
	} else {
		panic("hammer_lock_upgrade: lock is not held");
		/* NOT REACHED */
		error = 0;
	}
	crit_exit();
	return(error);
}
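/*
 * Sketch of the expected upgrade pattern (the caller shown here is
 * hypothetical).  A failed upgrade leaves the shared lock intact, so the
 * caller typically releases it and re-acquires exclusively:
 *
 *	if (hammer_lock_upgrade(&node->lock) != 0) {
 *		hammer_unlock(&node->lock);
 *		hammer_lock_ex(&node->lock);
 *		(state may have changed while unlocked; revalidate)
 *	}
 */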
/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock)
{
	KKASSERT(lock->lockcount == 1 && lock->locktd == curthread);
	crit_enter();
	lock->lockcount = -1;
	lock->locktd = NULL;
	if (lock->wanted) {
		lock->wanted = 0;
		wakeup(lock);
	}
	crit_exit();
	/* XXX memory barrier */
}
void
hammer_unlock(struct hammer_lock *lock)
{
	crit_enter();
	KKASSERT(lock->lockcount != 0);
	if (lock->lockcount < 0) {
		if (++lock->lockcount == 0 && lock->wanted) {
			lock->wanted = 0;
			wakeup(lock);
		}
	} else {
		KKASSERT(lock->locktd == curthread);
		if (--lock->lockcount == 0) {
			lock->locktd = NULL;
			if (lock->wanted) {
				lock->wanted = 0;
				wakeup(lock);
			}
		}
	}
	crit_exit();
}
void
hammer_ref(struct hammer_lock *lock)
{
	KKASSERT(lock->refs >= 0);
	crit_enter();
	++lock->refs;
	crit_exit();
}

void
hammer_unref(struct hammer_lock *lock)
{
	KKASSERT(lock->refs > 0);
	crit_enter();
	--lock->refs;
	crit_exit();
}
/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  The flusher holds the lock exclusively while the reblocker
 * and pruner use a shared lock.
 *
 * Modifying operations can run in parallel until the flusher needs to
 * sync the disk media.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
	int error;

	++trans->sync_lock_refs;
	if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
		--trans->sync_lock_refs;
	return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
	--trans->sync_lock_refs;
	hammer_unlock(&trans->hmp->sync_lock);
}
/*
 * Misc conversion routines.
 */
u_int32_t
hammer_to_unix_xid(uuid_t *uuid)
{
	return(*(u_int32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid)
{
	bzero(uuid, sizeof(*uuid));
	*(u_int32_t *)&uuid->node[2] = guid;
}

void
hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts)
{
	ts->tv_sec = (unsigned long)(xtime / 1000000);
	ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

u_int64_t
hammer_timespec_to_time(struct timespec *ts)
{
	u_int64_t xtime;

	xtime = (unsigned)(ts->tv_nsec / 1000) +
		(unsigned long)ts->tv_sec * 1000000ULL;
	return(xtime);
}
/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(VDIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(VREG);
	case HAMMER_OBJTYPE_DBFILE:
		return(VDATABASE);
	case HAMMER_OBJTYPE_FIFO:
		return(VFIFO);
	case HAMMER_OBJTYPE_CDEV:
		return(VCHR);
	case HAMMER_OBJTYPE_BDEV:
		return(VBLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(VLNK);
	default:
		return(VBAD);
	}
	/* not reached */
}

/*
 * Convert a HAMMER filesystem object type to a directory entry type (DT_*)
 */
int
hammer_get_dtype(u_int8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(DT_DIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(DT_REG);
	case HAMMER_OBJTYPE_DBFILE:
		return(DT_DBF);
	case HAMMER_OBJTYPE_FIFO:
		return(DT_FIFO);
	case HAMMER_OBJTYPE_CDEV:
		return(DT_CHR);
	case HAMMER_OBJTYPE_BDEV:
		return(DT_BLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(DT_LNK);
	default:
		return(DT_UNKNOWN);
	}
	/* not reached */
}

/*
 * Convert a vnode type to a HAMMER filesystem object type
 */
u_int8_t
hammer_get_obj_type(enum vtype vtype)
{
	switch(vtype) {
	case VDIR:
		return(HAMMER_OBJTYPE_DIRECTORY);
	case VREG:
		return(HAMMER_OBJTYPE_REGFILE);
	case VDATABASE:
		return(HAMMER_OBJTYPE_DBFILE);
	case VFIFO:
		return(HAMMER_OBJTYPE_FIFO);
	case VCHR:
		return(HAMMER_OBJTYPE_CDEV);
	case VBLK:
		return(HAMMER_OBJTYPE_BDEV);
	case VLNK:
		return(HAMMER_OBJTYPE_SOFTLINK);
	default:
		return(HAMMER_OBJTYPE_UNKNOWN);
	}
	/* not reached */
}
/*
 * Return non-zero if history should not be retained for the specified inode.
 */
int
hammer_nohistory(hammer_inode_t ip)
{
	if (ip->hmp->hflags & HMNT_NOHISTORY)
		return(1);
	if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
		return(1);
	return(0);
}
/*
 * Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 * crc in the MSB and 0 in the LSB.  The caller will use the low bits to
 * generate a unique key and will scan all entries with the same upper
 * 32 bits when issuing a lookup.
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_directory_namekey(void *name, int len)
{
	int64_t key;

	key = (int64_t)(crc32(name, len) & 0x7FFFFFFF) << 32;
	if (key == 0)
		key |= 0x100000000LL;
	return(key);
}
/*
 * Return a TID based on the current system time, in nanoseconds.
 */
hammer_tid_t
hammer_now_tid(void)
{
	struct timespec ts;
	hammer_tid_t tid;

	getnanotime(&ts);
	tid = ts.tv_sec * 1000000000LL + ts.tv_nsec;
	return(tid);
}
/*
 * Convert a string to a TID.  Short strings are interpreted as a time_t
 * (seconds), longer strings as a full 64 bit TID.
 */
hammer_tid_t
hammer_str_to_tid(const char *str)
{
	hammer_tid_t tid;
	int len = strlen(str);

	if (len > 10)
		tid = strtouq(str, NULL, 0);			/* full TID */
	else
		tid = strtouq(str, NULL, 0) * 1000000000LL;	/* time_t */
	return(tid);
}
/*
 * Set or test the CRC fields in various on-disk structures.
 */
void
hammer_crc_set_blockmap(hammer_blockmap_t blockmap)
{
	blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

void
hammer_crc_set_volume(hammer_volume_ondisk_t ondisk)
{
	ondisk->vol_crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
			  crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
}

int
hammer_crc_test_blockmap(hammer_blockmap_t blockmap)
{
	hammer_crc_t crc;

	crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
	return (blockmap->entry_crc == crc);
}

int
hammer_crc_test_volume(hammer_volume_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
	      crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
	return (ondisk->vol_crc == crc);
}

int
hammer_crc_test_btree(hammer_node_ondisk_t ondisk)
{
	hammer_crc_t crc;

	crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
	return (ondisk->crc == crc);
}
/*
 * Test or set the leaf->data_crc field.  Deal with any special cases given
 * a generic B-Tree leaf element and its data.
 *
 * NOTE: Inode-data: the atime and mtime fields are not CRCd, allowing them
 *	 to be updated in-place.
 */
int
hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	hammer_crc_t crc;

	if (leaf->data_len == 0) {
		crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			if (leaf->data_len != sizeof(struct hammer_inode_data))
				return(0);
			crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			crc = crc32(data, leaf->data_len);
			break;
		}
	}
	return (leaf->data_crc == crc);
}

void
hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
	if (leaf->data_len == 0) {
		leaf->data_crc = 0;
	} else {
		switch(leaf->base.rec_type) {
		case HAMMER_RECTYPE_INODE:
			KKASSERT(leaf->data_len ==
				 sizeof(struct hammer_inode_data));
			leaf->data_crc = crc32(data, HAMMER_INODE_CRCSIZE);
			break;
		default:
			leaf->data_crc = crc32(data, leaf->data_len);
			break;
		}
	}
}
/*
 * Debugging aid: kprintf gated by hammer_debug_debug.
 */
void
hkprintf(const char *ctl, ...)
{
	__va_list va;

	if (hammer_debug_debug) {
		__va_start(va, ctl);
		kvprintf(ctl, va);
		__va_end(va);
	}
}
/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return(HAMMER_BUFSIZE);
	else
		return(HAMMER_XBUFSIZE);
}
/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
	if (file_offset1 < HAMMER_XDEMARC) {
		if (file_offset2 <= HAMMER_XDEMARC)
			return(file_offset2);
		return(HAMMER_XDEMARC);
	}
	panic("hammer_blockdemarc: illegal range %lld %lld\n",
	      file_offset1, file_offset2);
}