sys/vfs/hammer/hammer_cursor.c (dragonfly.git)
Commit: HAMMER 56A/Many: Performance tuning - MEDIA STRUCTURES CHANGED!
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_cursor.c,v 1.30 2008/06/17 04:02:38 dillon Exp $
 */

/*
 * HAMMER B-Tree index - cursor support routines
 */
#include "hammer.h"

static int hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive);

/*
 * Initialize a fresh cursor using the B-Tree node cache.  If the cache
 * is not available initialize a fresh cursor at the root of the filesystem.
 */
int
hammer_init_cursor(hammer_transaction_t trans, hammer_cursor_t cursor,
		   struct hammer_node **cache, hammer_inode_t ip)
{
	hammer_volume_t volume;
	hammer_node_t node;
	int error;

	bzero(cursor, sizeof(*cursor));

	cursor->trans = trans;

	/*
	 * If the cursor operation is on behalf of an inode, lock
	 * the inode.
	 */
	if ((cursor->ip = ip) != NULL) {
		++ip->cursor_ip_refs;
		if (trans->type == HAMMER_TRANS_FLS)
			hammer_lock_ex(&ip->lock);
		else
			hammer_lock_sh(&ip->lock);
	}

	/*
	 * Step 1 - acquire a locked node from the cache if possible
	 */
	if (cache && *cache) {
		node = hammer_ref_node_safe(trans->hmp, cache, &error);
		if (error == 0) {
			hammer_lock_sh(&node->lock);
			if (node->flags & HAMMER_NODE_DELETED) {
				hammer_unlock(&node->lock);
				hammer_rel_node(node);
				node = NULL;
			}
		}
	} else {
		node = NULL;
	}

	/*
	 * Step 2 - If we couldn't get a node from the cache, get
	 * the one from the root of the filesystem.
	 */
	while (node == NULL) {
		volume = hammer_get_root_volume(trans->hmp, &error);
		if (error)
			break;
		node = hammer_get_node(trans->hmp,
				       volume->ondisk->vol0_btree_root,
				       0, &error);
		hammer_rel_volume(volume, 0);
		if (error)
			break;
		hammer_lock_sh(&node->lock);

		/*
		 * If someone got in before we could lock the node, retry.
		 */
		if (node->flags & HAMMER_NODE_DELETED) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
			node = NULL;
			continue;
		}
		if (volume->ondisk->vol0_btree_root != node->node_offset) {
			hammer_unlock(&node->lock);
			hammer_rel_node(node);
			node = NULL;
			continue;
		}
	}

	/*
	 * Step 3 - finish initializing the cursor by acquiring the parent
	 */
	cursor->node = node;
	if (error == 0)
		error = hammer_load_cursor_parent(cursor, 0);
	KKASSERT(error == 0);
	/* if (error) hammer_done_cursor(cursor); */
	return(error);
}
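
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * callers typically pair hammer_init_cursor() with hammer_done_cursor()
 * and retry the whole operation when a cursor routine returns EDEADLK.
 * hammer_btree_lookup() stands in for whatever cursor operation the
 * caller performs; the key setup shown is an assumption, not something
 * taken from this file.
 */
#if 0
static int
example_lookup(hammer_transaction_t trans, hammer_inode_t ip,
	       hammer_base_elm_t key)
{
	struct hammer_cursor cursor;
	int error;

retry:
	error = hammer_init_cursor(trans, &cursor,
				   ip ? &ip->cache[0] : NULL, ip);
	if (error)
		return(error);
	cursor.key_beg = *key;			/* assumed key setup */
	error = hammer_btree_lookup(&cursor);	/* any cursor operation */
	hammer_done_cursor(&cursor);		/* waits out deadlk_node */
	if (error == EDEADLK)
		goto retry;
	return(error);
}
#endif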

#if 0
int
hammer_reinit_cursor(hammer_cursor_t cursor)
{
	hammer_transaction_t trans;
	hammer_inode_t ip;
	struct hammer_node **cache;
	int error;

	trans = cursor->trans;
	ip = cursor->ip;
	hammer_done_cursor(cursor);
	cache = ip ? &ip->cache[0] : NULL;
	error = hammer_init_cursor(trans, cursor, cache, ip);
	return (error);
}
#endif

/*
 * Normalize a cursor.  Sometimes cursors can be left in a state
 * where node is NULL.  If the cursor is in this state, cursor up.
 */
void
hammer_normalize_cursor(hammer_cursor_t cursor)
{
	if (cursor->node == NULL) {
		KKASSERT(cursor->parent != NULL);
		hammer_cursor_up(cursor);
	}
}

/*
 * We are finished with a cursor.  We NULL out various fields as a sanity
 * check, in case the structure is inappropriately used afterwards.
 */
void
hammer_done_cursor(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
		cursor->parent = NULL;
	}
	if (cursor->node) {
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		cursor->node = NULL;
	}
	if (cursor->data_buffer) {
		hammer_rel_buffer(cursor->data_buffer, 0);
		cursor->data_buffer = NULL;
	}
	if (cursor->record_buffer) {
		hammer_rel_buffer(cursor->record_buffer, 0);
		cursor->record_buffer = NULL;
	}
	if ((ip = cursor->ip) != NULL) {
		hammer_mem_done(cursor);
		KKASSERT(ip->cursor_ip_refs > 0);
		--ip->cursor_ip_refs;
		hammer_unlock(&ip->lock);
		cursor->ip = NULL;
	}

	/*
	 * If we deadlocked this node will be referenced.  Do a quick
	 * lock/unlock to wait for the deadlock condition to clear.
	 */
	if (cursor->deadlk_node) {
		hammer_lock_ex_ident(&cursor->deadlk_node->lock, "hmrdlk");
		hammer_unlock(&cursor->deadlk_node->lock);
		hammer_rel_node(cursor->deadlk_node);
		cursor->deadlk_node = NULL;
	}
	if (cursor->deadlk_rec) {
		hammer_wait_mem_record_ident(cursor->deadlk_rec, "hmmdlr");
		hammer_rel_mem_record(cursor->deadlk_rec);
		cursor->deadlk_rec = NULL;
	}

	cursor->data = NULL;
	cursor->leaf = NULL;
	cursor->left_bound = NULL;
	cursor->right_bound = NULL;
	cursor->trans = NULL;
}

/*
 * Upgrade cursor->node and cursor->parent to exclusive locks.  This
 * function can return EDEADLK.
 *
 * The lock must already be either held shared or held exclusively
 * by us.
 *
 * If we fail to upgrade the lock and cursor->deadlk_node is NULL,
 * we add another reference to the node that failed and set
 * cursor->deadlk_node so hammer_done_cursor() can block on it.
 */
int
hammer_cursor_upgrade(hammer_cursor_t cursor)
{
	int error;

	error = hammer_lock_upgrade(&cursor->node->lock);
	if (error && cursor->deadlk_node == NULL) {
		cursor->deadlk_node = cursor->node;
		hammer_ref_node(cursor->deadlk_node);
	} else if (error == 0 && cursor->parent) {
		error = hammer_lock_upgrade(&cursor->parent->lock);
		if (error && cursor->deadlk_node == NULL) {
			cursor->deadlk_node = cursor->parent;
			hammer_ref_node(cursor->deadlk_node);
		}
	}
	return(error);
}
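
/*
 * Illustrative sketch (editor's addition): how an EDEADLK from
 * hammer_cursor_upgrade() is normally consumed.  On failure the cursor
 * records the node it could not upgrade in cursor->deadlk_node; the
 * caller is expected to back out to hammer_done_cursor(), which does a
 * lock/unlock cycle on that node, and then restart the operation (see
 * the retry sketch after hammer_init_cursor() above).
 */
#if 0
static int
example_upgrade_and_modify(hammer_cursor_t cursor)
{
	int error;

	error = hammer_cursor_upgrade(cursor);
	if (error == 0) {
		/* ... modify the B-Tree node under the exclusive lock ... */
		hammer_cursor_downgrade(cursor);
	}
	return(error);		/* EDEADLK propagates to the retry loop */
}
#endif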

int
hammer_cursor_upgrade_node(hammer_cursor_t cursor)
{
	int error;

	error = hammer_lock_upgrade(&cursor->node->lock);
	if (error && cursor->deadlk_node == NULL) {
		cursor->deadlk_node = cursor->node;
		hammer_ref_node(cursor->deadlk_node);
	}
	return(error);
}

/*
 * Downgrade cursor->node and cursor->parent to shared locks.
 */
void
hammer_cursor_downgrade(hammer_cursor_t cursor)
{
	if (hammer_lock_excl_owned(&cursor->node->lock, curthread))
		hammer_lock_downgrade(&cursor->node->lock);
	if (cursor->parent &&
	    hammer_lock_excl_owned(&cursor->parent->lock, curthread)) {
		hammer_lock_downgrade(&cursor->parent->lock);
	}
}

/*
 * Seek the cursor to the specified node and index.
 *
 * The caller must ref the node prior to calling this routine and release
 * it after it returns.  If the seek succeeds the cursor will gain its own
 * ref on the node.
 */
int
hammer_cursor_seek(hammer_cursor_t cursor, hammer_node_t node, int index)
{
	int error;

	hammer_cursor_downgrade(cursor);
	error = 0;

	if (cursor->node != node) {
		hammer_unlock(&cursor->node->lock);
		hammer_rel_node(cursor->node);
		cursor->node = node;
		hammer_ref_node(node);
		hammer_lock_sh(&node->lock);
		KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);

		if (cursor->parent) {
			hammer_unlock(&cursor->parent->lock);
			hammer_rel_node(cursor->parent);
			cursor->parent = NULL;
			cursor->parent_index = 0;
		}
		error = hammer_load_cursor_parent(cursor, 0);
	}
	cursor->index = index;
	return (error);
}

/*
 * Load the parent of cursor->node into cursor->parent.
 */
static
int
hammer_load_cursor_parent(hammer_cursor_t cursor, int try_exclusive)
{
	hammer_mount_t hmp;
	hammer_node_t parent;
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;
	int i;

	hmp = cursor->trans->hmp;

	if (cursor->node->ondisk->parent) {
		node = cursor->node;
		parent = hammer_get_node(hmp, node->ondisk->parent, 0, &error);
		if (error)
			return(error);
		if (try_exclusive) {
			if (hammer_lock_ex_try(&parent->lock)) {
				hammer_rel_node(parent);
				return(EDEADLK);
			}
		} else {
			hammer_lock_sh(&parent->lock);
		}
		KKASSERT((parent->flags & HAMMER_NODE_DELETED) == 0);
		elm = NULL;
		for (i = 0; i < parent->ondisk->count; ++i) {
			elm = &parent->ondisk->elms[i];
			if (parent->ondisk->elms[i].internal.subtree_offset ==
			    node->node_offset) {
				break;
			}
		}
		if (i == parent->ondisk->count) {
			hammer_unlock(&parent->lock);
			panic("Bad B-Tree link: parent %p node %p\n", parent, node);
		}
		KKASSERT(i != parent->ondisk->count);
		cursor->parent = parent;
		cursor->parent_index = i;
		cursor->left_bound = &elm[0].internal.base;
		cursor->right_bound = &elm[1].internal.base;
		return(error);
	} else {
		cursor->parent = NULL;
		cursor->parent_index = 0;
		cursor->left_bound = &hmp->root_btree_beg;
		cursor->right_bound = &hmp->root_btree_end;
		error = 0;
	}
	return(error);
}
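
/*
 * Editor's note (an assumption about the on-disk layout, not stated in
 * this file): for an internal node, subtree i is bounded on the left by
 * elms[i].base and on the right by elms[i+1].base, which is why the code
 * above publishes &elm[0].internal.base and &elm[1].internal.base as the
 * cursor's left and right bounds.  Internal nodes carry a trailing
 * boundary element past the last subtree pointer, so the elm[1] access
 * is expected to remain valid even when i is the last subtree index.
 */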

/*
 * Cursor up to our parent node.  Return ENOENT if we are at the root of
 * the filesystem.
 */
int
hammer_cursor_up(hammer_cursor_t cursor)
{
	int error;

	hammer_cursor_downgrade(cursor);

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return (ENOENT);

	/*
	 * Set the node to its parent.
	 */
	hammer_unlock(&cursor->node->lock);
	hammer_rel_node(cursor->node);
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	error = hammer_load_cursor_parent(cursor, 0);
	return(error);
}

/*
 * Special cursor up given a locked cursor.  The original node is not
 * unlocked or released and the cursor is not downgraded.  If we are
 * unable to acquire and lock the parent, EDEADLK is returned.
 */
int
hammer_cursor_up_locked(hammer_cursor_t cursor)
{
	hammer_node_t save;
	int error;

	/*
	 * If the parent is NULL we are at the root of the B-Tree and
	 * return ENOENT.
	 */
	if (cursor->parent == NULL)
		return (ENOENT);

	save = cursor->node;

	/*
	 * Set the node to its parent.
	 */
	cursor->node = cursor->parent;
	cursor->index = cursor->parent_index;
	cursor->parent = NULL;
	cursor->parent_index = 0;

	/*
	 * Load the new parent and attempt to exclusively lock it.  Note that
	 * we are still holding the old parent (now cursor->node) exclusively
	 * locked.  This can return EDEADLK.
	 */
	error = hammer_load_cursor_parent(cursor, 1);
	if (error) {
		cursor->parent = cursor->node;
		cursor->parent_index = cursor->index;
		cursor->node = save;
		cursor->index = 0;
	}
	return(error);
}

/*
 * Cursor down through the current node, which must be an internal node.
 *
 * This routine adjusts the cursor and sets index to 0.
 */
int
hammer_cursor_down(hammer_cursor_t cursor)
{
	hammer_node_t node;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * The current node becomes the current parent
	 */
	hammer_cursor_downgrade(cursor);
	node = cursor->node;
	KKASSERT(cursor->index >= 0 && cursor->index < node->ondisk->count);
	if (cursor->parent) {
		hammer_unlock(&cursor->parent->lock);
		hammer_rel_node(cursor->parent);
	}
	cursor->parent = node;
	cursor->parent_index = cursor->index;
	cursor->node = NULL;
	cursor->index = 0;

	/*
	 * Extract element to push into at (node,index), set bounds.
	 */
	elm = &node->ondisk->elms[cursor->parent_index];

	/*
	 * Ok, push down into elm.  If elm specifies an internal or leaf
	 * node the current node must be an internal node.  If elm specifies
	 * a spike then the current node must be a leaf node.
	 */
	switch(elm->base.btype) {
	case HAMMER_BTREE_TYPE_INTERNAL:
	case HAMMER_BTREE_TYPE_LEAF:
		KKASSERT(node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL);
		KKASSERT(elm->internal.subtree_offset != 0);
		cursor->left_bound = &elm[0].internal.base;
		cursor->right_bound = &elm[1].internal.base;
		node = hammer_get_node(cursor->trans->hmp,
				       elm->internal.subtree_offset,
				       0, &error);
		if (error == 0) {
			KASSERT(elm->base.btype == node->ondisk->type,
				("BTYPE MISMATCH %c %c NODE %p\n",
				 elm->base.btype, node->ondisk->type, node));
			if (node->ondisk->parent != cursor->parent->node_offset) {
				panic("node %p %016llx vs %016llx\n",
				      node, node->ondisk->parent,
				      cursor->parent->node_offset);
			}
			KKASSERT(node->ondisk->parent == cursor->parent->node_offset);
		}
		break;
	default:
		panic("hammer_cursor_down: illegal btype %02x (%c)\n",
		      elm->base.btype,
		      (elm->base.btype ? elm->base.btype : '?'));
		break;
	}
	if (error == 0) {
		hammer_lock_sh(&node->lock);
		KKASSERT((node->flags & HAMMER_NODE_DELETED) == 0);
		cursor->node = node;
		cursor->index = 0;
	}
	return(error);
}
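
/*
 * Illustrative sketch (editor's addition): a simplified root-to-leaf
 * descent driven by hammer_cursor_down().  Selecting the child index is
 * the job of the real B-Tree search code; pick_child_index() is a
 * hypothetical stand-in, not a HAMMER function.
 */
#if 0
static int
example_descend_to_leaf(hammer_cursor_t cursor)
{
	int error = 0;

	while (cursor->node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
		cursor->index = pick_child_index(cursor);	/* hypothetical */
		error = hammer_cursor_down(cursor);
		if (error)
			break;	/* cursor->node is not reacquired on error */
	}
	return(error);
}
#endif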