/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_subs.c,v 1.34 2008/07/11 01:22:29 dillon Exp $
 */
/*
 * HAMMER structural locking
 */

#include "hammer.h"
#include <sys/dirent.h>

void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
        thread_t td = curthread;

        KKASSERT(lock->refs > 0);
        crit_enter();
        if (lock->locktd != td) {
                while (lock->locktd != NULL || lock->lockcount) {
                        ++lock->exwanted;
                        lock->wanted = 1;
                        if (hammer_debug_locks) {
                                kprintf("hammer_lock_ex: held by %p\n",
                                        lock->locktd);
                        }
                        ++hammer_contention_count;
                        tsleep(lock, 0, ident, 0);
                        if (hammer_debug_locks)
                                kprintf("hammer_lock_ex: try again\n");
                        --lock->exwanted;
                }
                lock->locktd = td;
        }
        KKASSERT(lock->lockcount >= 0);
        ++lock->lockcount;
        crit_exit();
}

/*
 * Try to obtain an exclusive lock
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
        thread_t td = curthread;

        KKASSERT(lock->refs > 0);
        crit_enter();
        if (lock->locktd != td) {
                if (lock->locktd != NULL || lock->lockcount) {
                        crit_exit();
                        return(EAGAIN);
                }
                lock->locktd = td;
        }
        KKASSERT(lock->lockcount >= 0);
        ++lock->lockcount;
        crit_exit();
        return(0);
}

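/*
 * Illustrative sketch of a hypothetical caller (not in the original
 * source): try the non-blocking path first, then fall back to blocking.
 * Assumes "lock" is a referenced struct hammer_lock and that
 * hammer_lock_ex() is the blocking wrapper around hammer_lock_ex_ident()
 * declared in hammer.h.
 */
#if 0
        if (hammer_lock_ex_try(lock) == EAGAIN)
                hammer_lock_ex(lock);   /* sleep until we own it */
        /* ... modify the locked structure ... */
        hammer_unlock(lock);
#endif
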
/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
        KKASSERT(lock->refs > 0);
        crit_enter();
        while (lock->locktd != NULL) {
                if (lock->locktd == curthread) {
                        Debugger("hammer_lock_sh: lock_sh on exclusive");
                        ++lock->lockcount;
                        crit_exit();
                        return;
                }
                lock->wanted = 1;
                tsleep(lock, 0, "hmrlck", 0);
        }
        KKASSERT(lock->lockcount <= 0);
        --lock->lockcount;
        crit_exit();
}

int
hammer_lock_sh_try(struct hammer_lock *lock)
{
        KKASSERT(lock->refs > 0);
        crit_enter();
        if (lock->locktd) {
                crit_exit();
                return(EAGAIN);
        }
        KKASSERT(lock->lockcount <= 0);
        --lock->lockcount;
        crit_exit();
        return(0);
}

/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock)
{
        int error;

        crit_enter();
        if (lock->lockcount > 0) {
                if (lock->locktd != curthread)
                        panic("hammer_lock_upgrade: illegal lock state");
                error = 0;
        } else if (lock->lockcount == -1) {
                lock->lockcount = 1;
                lock->locktd = curthread;
                error = 0;
        } else if (lock->lockcount != 0) {
                error = EDEADLK;
        } else {
                panic("hammer_lock_upgrade: lock is not held");
                /* NOT REACHED */
                error = 0;
        }
        crit_exit();
        return(error);
}

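/*
 * Illustrative sketch of a hypothetical caller (not in the original
 * source): a shared holder that wants exclusive access.  On EDEADLK
 * (more than one shared holder) it must fully release and reacquire,
 * and should then revalidate any state examined under the shared hold.
 */
#if 0
        if (hammer_lock_upgrade(lock) == EDEADLK) {
                hammer_unlock(lock);    /* drop our shared count */
                hammer_lock_ex(lock);   /* blocking exclusive acquire */
                /* ... revalidate cached state here ... */
        }
#endif
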
/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock)
{
        KKASSERT(lock->lockcount == 1 && lock->locktd == curthread);
        crit_enter();
        lock->lockcount = -1;
        lock->locktd = NULL;
        if (lock->wanted) {
                lock->wanted = 0;
                wakeup(lock);
        }
        crit_exit();
        /* XXX memory barrier */
}

void
hammer_unlock(struct hammer_lock *lock)
{
        crit_enter();
        KKASSERT(lock->lockcount != 0);
        if (lock->lockcount < 0) {
                if (++lock->lockcount == 0 && lock->wanted) {
                        lock->wanted = 0;
                        wakeup(lock);
                }
        } else {
                KKASSERT(lock->locktd == curthread);
                if (--lock->lockcount == 0) {
                        lock->locktd = NULL;
                        if (lock->wanted) {
                                lock->wanted = 0;
                                wakeup(lock);
                        }
                }
        }
        crit_exit();
}

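/*
 * Note, inferred from the code above rather than stated in the original
 * comments: lockcount > 0 counts recursive exclusive holds by locktd,
 * lockcount < 0 counts shared holders, and 0 means unlocked, so
 * hammer_unlock() steps the count toward zero from either side and
 * wakes waiters when it gets there.  Illustrative pairing for a
 * hypothetical caller:
 */
#if 0
        hammer_ref(lock);               /* locks require an active ref */
        hammer_lock_sh(lock);           /* lockcount becomes -1 */
        /* ... read-only access ... */
        hammer_unlock(lock);            /* lockcount returns to 0 */
        hammer_unref(lock);
#endif
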
/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if lock is held shared, and > 0 if held exclusively.
 */
int
hammer_lock_status(struct hammer_lock *lock)
{
        if (lock->lockcount < 0)
                return(-1);
        if (lock->lockcount > 0)
                return(1);
        panic("hammer_lock_status: lock must be held: %p", lock);
}

void
hammer_ref(struct hammer_lock *lock)
{
        KKASSERT(lock->refs >= 0);
        crit_enter();
        ++lock->refs;
        crit_exit();
}

void
hammer_unref(struct hammer_lock *lock)
{
        KKASSERT(lock->refs > 0);
        crit_enter();
        --lock->refs;
        crit_exit();
}

/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data
 * buffers (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold it
 * shared.  All modifying operations made while holding the lock are atomic
 * in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement deadlock recovery code CANNOT release
 * the sync lock, nor can we give pending exclusive sync locks priority over
 * a shared sync lock as this could lead to a 3-way deadlock.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
        ++trans->sync_lock_refs;
        hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
        ++trans->sync_lock_refs;
        hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
        int error;

        ++trans->sync_lock_refs;
        if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
                --trans->sync_lock_refs;
        return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
        --trans->sync_lock_refs;
        hammer_unlock(&trans->hmp->sync_lock);
}

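/*
 * Illustrative sketch of a hypothetical consumer (not in the original
 * source): bracketing a meta-data modification with the shared sync
 * lock so the changes land in a single flush group.  "trans" stands for
 * an active hammer_transaction_t.
 */
#if 0
        hammer_sync_lock_sh(trans);
        /* ... meta-data modifications, atomic w/ respect to the flush ... */
        hammer_sync_unlock(trans);
#endif
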
/*
 * Misc
 */
u_int32_t
hammer_to_unix_xid(uuid_t *uuid)
{
        return(*(u_int32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(uuid_t *uuid, u_int32_t guid)
{
        bzero(uuid, sizeof(*uuid));
        *(u_int32_t *)&uuid->node[2] = guid;
}

void
hammer_time_to_timespec(u_int64_t xtime, struct timespec *ts)
{
        ts->tv_sec = (unsigned long)(xtime / 1000000);
        ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

u_int64_t
hammer_timespec_to_time(struct timespec *ts)
{
        u_int64_t xtime;

        xtime = (unsigned)(ts->tv_nsec / 1000) +
                (unsigned long)ts->tv_sec * 1000000ULL;
        return(xtime);
}

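/*
 * Worked example (added for illustration): HAMMER stores time as a
 * 64 bit microsecond count, so a timespec of { tv_sec = 1,
 * tv_nsec = 500000000 } maps to 1 * 1000000 + 500000000 / 1000 =
 * 1500000 via hammer_timespec_to_time(), and
 * hammer_time_to_timespec(1500000, ...) recovers { 1, 500000000 }.
 * Sub-microsecond precision is dropped in the round trip.
 */
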
/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(u_int8_t obj_type)
{
        switch(obj_type) {
        case HAMMER_OBJTYPE_DIRECTORY:
                return(VDIR);
        case HAMMER_OBJTYPE_REGFILE:
                return(VREG);
        case HAMMER_OBJTYPE_DBFILE:
                return(VDATABASE);
        case HAMMER_OBJTYPE_FIFO:
                return(VFIFO);
        case HAMMER_OBJTYPE_SOCKET:
                return(VSOCK);
        case HAMMER_OBJTYPE_CDEV:
                return(VCHR);
        case HAMMER_OBJTYPE_BDEV:
                return(VBLK);
        case HAMMER_OBJTYPE_SOFTLINK:
                return(VLNK);
        default:
                return(VBAD);
        }
        /* not reached */
}

int
hammer_get_dtype(u_int8_t obj_type)
{
        switch(obj_type) {
        case HAMMER_OBJTYPE_DIRECTORY:
                return(DT_DIR);
        case HAMMER_OBJTYPE_REGFILE:
                return(DT_REG);
        case HAMMER_OBJTYPE_DBFILE:
                return(DT_DBF);
        case HAMMER_OBJTYPE_FIFO:
                return(DT_FIFO);
        case HAMMER_OBJTYPE_SOCKET:
                return(DT_SOCK);
        case HAMMER_OBJTYPE_CDEV:
                return(DT_CHR);
        case HAMMER_OBJTYPE_BDEV:
                return(DT_BLK);
        case HAMMER_OBJTYPE_SOFTLINK:
                return(DT_LNK);
        default:
                return(DT_UNKNOWN);
        }
        /* not reached */
}

u_int8_t
hammer_get_obj_type(enum vtype vtype)
{
        switch(vtype) {
        case VDIR:
                return(HAMMER_OBJTYPE_DIRECTORY);
        case VREG:
                return(HAMMER_OBJTYPE_REGFILE);
        case VDATABASE:
                return(HAMMER_OBJTYPE_DBFILE);
        case VFIFO:
                return(HAMMER_OBJTYPE_FIFO);
        case VSOCK:
                return(HAMMER_OBJTYPE_SOCKET);
        case VCHR:
                return(HAMMER_OBJTYPE_CDEV);
        case VBLK:
                return(HAMMER_OBJTYPE_BDEV);
        case VLNK:
                return(HAMMER_OBJTYPE_SOFTLINK);
        default:
                return(HAMMER_OBJTYPE_UNKNOWN);
        }
        /* not reached */
}

/*
 * Return flags for hammer_delete_at_cursor()
 */
int
hammer_nohistory(hammer_inode_t ip)
{
        if (ip->hmp->hflags & HMNT_NOHISTORY)
                return(HAMMER_DELETE_DESTROY);
        if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
                return(HAMMER_DELETE_DESTROY);
        return(0);
}

/*
 * Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 * crc in the MSB and 0 in the LSB.  The caller will use the low bits to
 * generate a unique key and will scan all entries with the same upper
 * 32 bits when issuing a lookup.
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_directory_namekey(const void *name, int len)
{
        int64_t key;

        key = (int64_t)(crc32(name, len) & 0x7FFFFFFF) << 32;
        if (key == 0)
                key |= 0x100000000LL;
        return(key);
}

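/*
 * Worked example (added for illustration, with an arbitrary crc value):
 * if crc32(name, len) were 0x9AE16A3B, masking with 0x7FFFFFFF gives
 * 0x1AE16A3B and the returned key is 0x1AE16A3B00000000, leaving the
 * low 32 bits zero for the caller's collision-resolution iterator.
 */
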
hammer_tid_t
hammer_str_to_tid(const char *str, int *ispfs, u_int32_t *localizationp)
{
        hammer_tid_t tid;
        char *ptr;

        tid = strtouq(str, &ptr, 0);
        if (*ptr == ':') {
                *ispfs = 1;
                *localizationp = strtoul(ptr + 1, NULL, 10) << 16;
        } else {
                *ispfs = 0;
        }
        return(tid);
}

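/*
 * Worked example (added for illustration): the string "0x10a:5" yields
 * tid 0x10a with *ispfs = 1 and *localizationp = 5 << 16, while a bare
 * "0x10a" sets *ispfs = 0 and leaves the localization untouched.
 */
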
void
hammer_crc_set_blockmap(hammer_blockmap_t blockmap)
{
        blockmap->entry_crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
}

void
hammer_crc_set_volume(hammer_volume_ondisk_t ondisk)
{
        ondisk->vol_crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
                          crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
}

int
hammer_crc_test_blockmap(hammer_blockmap_t blockmap)
{
        hammer_crc_t crc;

        crc = crc32(blockmap, HAMMER_BLOCKMAP_CRCSIZE);
        return (blockmap->entry_crc == crc);
}

int
hammer_crc_test_volume(hammer_volume_ondisk_t ondisk)
{
        hammer_crc_t crc;

        crc = crc32(ondisk, HAMMER_VOL_CRCSIZE1) ^
              crc32(&ondisk->vol_crc + 1, HAMMER_VOL_CRCSIZE2);
        return (ondisk->vol_crc == crc);
}

int
hammer_crc_test_btree(hammer_node_ondisk_t ondisk)
{
        hammer_crc_t crc;

        crc = crc32(&ondisk->crc + 1, HAMMER_BTREE_CRCSIZE);
        return (ondisk->crc == crc);
}

/*
 * Test or set the leaf->data_crc field.  Deal with any special cases given
 * a generic B-Tree leaf element and its data.
 *
 * NOTE: Inode-data: the atime and mtime fields are not CRCd, allowing them
 *       to be updated in-place.
 */
int
hammer_crc_test_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
        hammer_crc_t crc;

        if (leaf->data_len == 0) {
                crc = 0;
        } else {
                switch(leaf->base.rec_type) {
                case HAMMER_RECTYPE_INODE:
                        if (leaf->data_len != sizeof(struct hammer_inode_data))
                                return(0);
                        crc = crc32(data, HAMMER_INODE_CRCSIZE);
                        break;
                default:
                        crc = crc32(data, leaf->data_len);
                        break;
                }
        }
        return (leaf->data_crc == crc);
}

void
hammer_crc_set_leaf(void *data, hammer_btree_leaf_elm_t leaf)
{
        if (leaf->data_len == 0) {
                leaf->data_crc = 0;
        } else {
                switch(leaf->base.rec_type) {
                case HAMMER_RECTYPE_INODE:
                        KKASSERT(leaf->data_len ==
                                 sizeof(struct hammer_inode_data));
                        leaf->data_crc = crc32(data, HAMMER_INODE_CRCSIZE);
                        break;
                default:
                        leaf->data_crc = crc32(data, leaf->data_len);
                        break;
                }
        }
}

void
hkprintf(const char *ctl, ...)
{
        __va_list va;

        if (hammer_debug_debug) {
                __va_start(va, ctl);
                kvprintf(ctl, va);
                __va_end(va);
        }
}

/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
        if (file_offset < HAMMER_XDEMARC)
                return(HAMMER_BUFSIZE);
        else
                return(HAMMER_XBUFSIZE);
}

/*
 * Return the demarcation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
        if (file_offset1 < HAMMER_XDEMARC) {
                if (file_offset2 <= HAMMER_XDEMARC)
                        return(file_offset2);
                return(HAMMER_XDEMARC);
        }
        panic("hammer_blockdemarc: illegal range %lld %lld\n",
              file_offset1, file_offset2);
}

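/*
 * Illustrative note (added, and assuming HAMMER_XDEMARC marks the
 * boundary between the small HAMMER_BUFSIZE and large HAMMER_XBUFSIZE
 * buffer regimes): an I/O range straddling the boundary is split at
 * hammer_blockdemarc(off1, off2) == HAMMER_XDEMARC, so the low fragment
 * uses small buffers and the high fragment large ones.
 */
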
udev_t
hammer_fsid_to_udev(uuid_t *uuid)
{
        u_int32_t crc;

        crc = crc32(uuid, sizeof(*uuid));
        return((udev_t)crc);
}