/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.61 2008/06/07 07:41:51 dillon Exp $
 */

#include "hammer.h"

static int hammer_mem_add(hammer_record_t record);
static int hammer_mem_lookup(hammer_cursor_t cursor);
static int hammer_mem_first(hammer_cursor_t cursor);

struct rec_trunc_info {
	u_int16_t	rec_type;
	int64_t		trunc_off;
};

/*
 * Red-black tree support.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
	if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
		return(-1);
	if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
		return(1);

	if (rec1->leaf.base.key < rec2->leaf.base.key)
		return(-1);
	if (rec1->leaf.base.key > rec2->leaf.base.key)
		return(1);

	if (rec1->leaf.base.create_tid == 0) {
		if (rec2->leaf.base.create_tid == 0)
			return(0);
		return(1);
	}
	if (rec2->leaf.base.create_tid == 0)
		return(-1);

	if (rec1->leaf.base.create_tid < rec2->leaf.base.create_tid)
		return(-1);
	if (rec1->leaf.base.create_tid > rec2->leaf.base.create_tid)
		return(1);

	/*
	 * Never match against an item deleted by the front-end.
	 */
	if (rec1->flags & HAMMER_RECF_DELETED_FE)
		return(1);
	if (rec2->flags & HAMMER_RECF_DELETED_FE)
		return(-1);

	return(0);
}

static int
hammer_rec_compare(hammer_base_elm_t info, hammer_record_t rec)
{
	if (info->rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (info->rec_type > rec->leaf.base.rec_type)
		return(3);

	if (info->key < rec->leaf.base.key)
		return(-2);
	if (info->key > rec->leaf.base.key)
		return(2);

	if (info->create_tid == 0) {
		if (rec->leaf.base.create_tid == 0)
			return(0);
		return(1);
	}
	if (rec->leaf.base.create_tid == 0)
		return(-1);
	if (info->create_tid < rec->leaf.base.create_tid)
		return(-1);
	if (info->create_tid > rec->leaf.base.create_tid)
		return(1);
	return(0);
}

/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * The creation timestamp can cause hammer_rec_compare() to return -1 or +1.
 * These do not stop the scan.
 *
 * Localized deletions are not cached in-memory.
 */
static
int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_compare(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	r = hammer_rec_compare(&cursor->key_end, rec);
	if (r < -1)
		return(1);
	return(0);
}

/*
 * This compare function is used when simply looking up key_beg.
 */
static
int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_compare(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	if (r < -1)
		return(1);
	return(0);
}

/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static
int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
	struct rec_trunc_info *info = data;

	if (rec->leaf.base.rec_type < info->rec_type)
		return(-1);
	if (rec->leaf.base.rec_type > info->rec_type)
		return(1);

	switch(rec->leaf.base.rec_type) {
	case HAMMER_RECTYPE_DB:
		/*
		 * DB record key is not beyond the truncation point, retain.
		 */
		if (rec->leaf.base.key < info->trunc_off)
			return(-1);
		break;
	case HAMMER_RECTYPE_DATA:
		/*
		 * DATA record offset start is not beyond the truncation point,
		 * retain.
		 */
		if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
			return(-1);
		break;
	default:
		panic("hammer_rec_trunc_cmp: unexpected record type");
	}

	/*
	 * The record start is >= the truncation point, return match,
	 * the record should be destroyed.
	 */
	return(0);
}

RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
		    hammer_rec_compare, hammer_base_elm_t);
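
/*
 * The macros above generate hammer_rec_rb_tree_RB_SCAN(),
 * hammer_rec_rb_tree_RB_LOOKUP_INFO() and related functions used
 * throughout this file.  A minimal sketch of the scan pattern, assuming
 * a cursor whose key range has already been set up:
 *
 *	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
 *				   hammer_rec_scan_callback, cursor);
 *
 * The cmp function prunes the scan to the key range and the callback
 * acquires the first visible record into cursor->iprec.
 */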

/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
	hammer_record_t record;

	++hammer_count_records;
	record = kmalloc(sizeof(*record), M_HAMMER, M_WAITOK | M_ZERO);
	record->flush_state = HAMMER_FST_IDLE;
	record->ip = ip;
	record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	record->leaf.data_len = data_len;
	hammer_ref(&record->lock);

	if (data_len) {
		record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
		record->flags |= HAMMER_RECF_ALLOCDATA;
		++hammer_count_record_datas;
	}

	return (record);
}
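
/*
 * Wait for a record to leave the HAMMER_FST_FLUSH state, i.e. until the
 * backend has finished flushing it.  The WANTED flag asks
 * hammer_flush_record_done() to issue a wakeup.
 */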
void
hammer_wait_mem_record(hammer_record_t record)
{
	while (record->flush_state == HAMMER_FST_FLUSH) {
		record->flags |= HAMMER_RECF_WANTED;
		tsleep(record, 0, "hmrrc2", 0);
	}
}

/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
	hammer_inode_t target_ip;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

	if (error) {
		/*
		 * An error occurred, the backend was unable to sync the
		 * record to its media.  Leave the record intact.
		 */
		Debugger("flush_record_done error");
	}

	if (record->flags & HAMMER_RECF_DELETED_BE) {
		if ((target_ip = record->target_ip) != NULL) {
			TAILQ_REMOVE(&target_ip->target_list, record,
				     target_entry);
			record->target_ip = NULL;
			hammer_test_inode(target_ip);
		}
		record->flush_state = HAMMER_FST_IDLE;
	} else {
		if (record->target_ip) {
			record->flush_state = HAMMER_FST_SETUP;
			hammer_test_inode(record->ip);
			hammer_test_inode(record->target_ip);
		} else {
			record->flush_state = HAMMER_FST_IDLE;
		}
	}
	record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
	if (record->flags & HAMMER_RECF_WANTED) {
		record->flags &= ~HAMMER_RECF_WANTED;
		wakeup(record);
	}
	hammer_rel_mem_record(record);
}

/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
	hammer_inode_t ip, target_ip;

	hammer_unref(&record->lock);

	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->lock.refs == 0) {
			KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

			ip = record->ip;
			if ((target_ip = record->target_ip) != NULL) {
				TAILQ_REMOVE(&target_ip->target_list,
					     record, target_entry);
				record->target_ip = NULL;
				hammer_test_inode(target_ip);
			}

			if (record->flags & HAMMER_RECF_ONRBTREE) {
				RB_REMOVE(hammer_rec_rb_tree,
					  &record->ip->rec_tree,
					  record);
				KKASSERT(ip->rsv_recs > 0);
				--ip->hmp->rsv_recs;
				--ip->rsv_recs;
				ip->hmp->rsv_databytes -= record->leaf.data_len;
				record->flags &= ~HAMMER_RECF_ONRBTREE;
				if (RB_EMPTY(&record->ip->rec_tree)) {
					record->ip->flags &= ~HAMMER_INODE_XDIRTY;
					hammer_test_inode(record->ip);
				}
			}
			if (record->flags & HAMMER_RECF_ALLOCDATA) {
				--hammer_count_record_datas;
				kfree(record->data, M_HAMMER);
				record->flags &= ~HAMMER_RECF_ALLOCDATA;
			}
			record->data = NULL;
			--hammer_count_records;
			if (record->type == HAMMER_MEM_RECORD_DATA)
				hammer_cleanup_write_io(record->ip);
			kfree(record, M_HAMMER);
			return;
		}
	}
}
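
/*
 * Reference/release discipline as used in this file: routines that hand
 * a record to a caller (hammer_alloc_mem_record(), the RB_SCAN callback,
 * hammer_ip_get_bulk()) return it with a reference held, and the caller
 * eventually drops that reference with hammer_rel_mem_record().
 */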

/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.
 */
static __inline
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
	if (cursor->flags & HAMMER_CURSOR_BACKEND) {
		if (record->flags & HAMMER_RECF_DELETED_BE)
			return(0);
	} else {
		if (record->flags & HAMMER_RECF_DELETED_FE)
			return(0);
	}
	return(1);
}

/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static
int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;

	/*
	 * We terminate on success, so this should be NULL on entry.
	 */
	KKASSERT(cursor->iprec == NULL);

	/*
	 * Skip if the record was marked deleted.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
		return(0);

	/*
	 * Skip if not visible due to our as-of TID
	 */
	if (cursor->flags & HAMMER_CURSOR_ASOF) {
		if (cursor->asof < rec->leaf.base.create_tid)
			return(0);
		if (rec->leaf.base.delete_tid &&
		    cursor->asof >= rec->leaf.base.delete_tid) {
			return(0);
		}
	}

	/*
	 * If the record is queued to the flusher we have to block until
	 * it isn't.  Otherwise we may see duplication between our memory
	 * cache and the media.
	 */
	hammer_ref(&rec->lock);

#warning "This deadlocks"
#if 0
	if (rec->flush_state == HAMMER_FST_FLUSH)
		hammer_wait_mem_record(rec);
#endif

	/*
	 * The record may have been deleted while we were blocked.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
		hammer_rel_mem_record(rec);
		return(0);
	}

	/*
	 * Set the matching record and stop the scan.
	 */
	cursor->iprec = rec;
	return(-1);
}

/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
	int error;

	KKASSERT(cursor->ip);
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
	hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
				   hammer_rec_scan_callback, cursor);

	if (cursor->iprec == NULL)
		error = ENOENT;
	else
		error = 0;
	return(error);
}

/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 */
static
int
hammer_mem_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	ip = cursor->ip;
	KKASSERT(ip != NULL);

	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
				   hammer_rec_scan_callback, cursor);

	/*
	 * Adjust scan.node and keep it linked into the RB-tree so we can
	 * hold the cursor through third party modifications of the RB-tree.
	 */
	if (cursor->iprec)
		return(0);
	return(ENOENT);
}
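
/*
 * Release any in-memory record still held by the cursor when a scan of
 * the in-memory side is finished.
 */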
void
hammer_mem_done(hammer_cursor_t cursor)
{
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
}

/************************************************************************
 *		     HAMMER IN-MEMORY RECORD FUNCTIONS			*
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */

/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
			struct hammer_inode *dip, struct namecache *ncp,
			struct hammer_inode *ip)
{
	hammer_record_t record;
	int error;
	int bytes;

	bytes = ncp->nc_nlen;	/* NOTE: terminating \0 is NOT included */
	record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
	if (++trans->hmp->namekey_iterator == 0)
		++trans->hmp->namekey_iterator;

	record->type = HAMMER_MEM_RECORD_ADD;
	record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
	record->leaf.base.obj_id = dip->obj_id;
	record->leaf.base.key = hammer_directory_namekey(ncp->nc_name, bytes);
	record->leaf.base.key += trans->hmp->namekey_iterator;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->data->entry.obj_id = ip->obj_id;
	bcopy(ncp->nc_name, record->data->entry.name, bytes);

	++ip->ino_data.nlinks;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

	/*
	 * The target inode and the directory entry are bound together.
	 */
	record->target_ip = ip;
	record->flush_state = HAMMER_FST_SETUP;
	TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

	/*
	 * The inode now has a dependency and must be taken out of the idle
	 * state.  An inode not in an idle state is given an extra reference.
	 */
	if (ip->flush_state == HAMMER_FST_IDLE) {
		hammer_ref(&ip->lock);
		ip->flush_state = HAMMER_FST_SETUP;
	}
	error = hammer_mem_add(record);
	return(error);
}
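
/*
 * Directory keys are thus composed of a name-derived value in the high
 * bits plus the per-mount namekey_iterator in the low 32 bits:
 *
 *	key  = hammer_directory_namekey(name, len);
 *	key += hmp->namekey_iterator;
 *
 * The low 32 bits may be regenerated by hammer_mem_add() or by the sync
 * code if a collision occurs.
 */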

/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be positioned at the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
			hammer_cursor_t cursor, struct hammer_inode *dip,
			struct hammer_inode *ip)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * In-memory (unsynchronized) records can simply be freed.
		 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
		 * by the backend, we must still avoid races against the
		 * backend potentially syncing the record to the media.
		 *
		 * We cannot call hammer_ip_delete_record(), that routine may
		 * only be called from the backend.
		 */
		record = cursor->iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor->deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor->deadlk_rec = record;
			error = EDEADLK;
		} else {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	} else {
		/*
		 * If the record is on-disk we have to queue the deletion by
		 * the record's key.  This also causes lookups to skip the
		 * record.
		 */
		KKASSERT(dip->flags &
			 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
		record = hammer_alloc_mem_record(dip, 0);
		record->type = HAMMER_MEM_RECORD_DEL;
		record->leaf.base = cursor->leaf->base;

		record->target_ip = ip;
		record->flush_state = HAMMER_FST_SETUP;
		TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

		/*
		 * The inode now has a dependency and must be taken out of
		 * the idle state.  An inode not in an idle state is given
		 * an extra reference.
		 */
		if (ip->flush_state == HAMMER_FST_IDLE) {
			hammer_ref(&ip->lock);
			ip->flush_state = HAMMER_FST_SETUP;
		}

		error = hammer_mem_add(record);
	}

	/*
	 * One less link.  The file may still be open in the OS even after
	 * all links have gone away.
	 *
	 * We have to terminate the cursor before syncing the inode to
	 * avoid deadlocking against ourselves.  XXX this may no longer
	 * be true.
	 *
	 * If nlinks drops to zero and the vnode is inactive (or there is
	 * no vnode), call hammer_inode_unloadable_check() to zonk the
	 * inode.  If we don't do this here the inode will not be destroyed
	 * on-media until we unmount.
	 */
	if (error == 0) {
		--ip->ino_data.nlinks;
		hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		if (ip->ino_data.nlinks == 0 &&
		    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
			hammer_done_cursor(cursor);
			hammer_inode_unloadable_check(ip, 1);
			hammer_flush_inode(ip, 0);
		}
	}

	return(error);
}
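
/*
 * A sketch of the EDEADLK retry protocol mentioned above, as a caller
 * might structure it (illustrative only, not a verbatim caller):
 *
 *	error = hammer_ip_del_directory(trans, cursor, dip, ip);
 *	if (error == EDEADLK) {
 *		hammer_done_cursor(cursor);
 *		(release locks, wait on the recorded record, re-init
 *		 the cursor, and retry)
 *	}
 */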

/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * record->rec.entry.base.base.key
 * record->rec.entry.base.base.rec_type
 * record->rec.entry.base.base.data_len
 * record->data		(a copy will be kmalloc'd if it cannot be embedded)
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int error;

	KKASSERT(record->leaf.base.localization != 0);
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	error = hammer_mem_add(record);
	return(error);
}

/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
	hammer_record_t record;
	struct hammer_base_elm elm;

	bzero(&elm, sizeof(elm));
	elm.obj_id = ip->obj_id;
	elm.key = file_offset + bytes;
	elm.create_tid = 0;
	elm.delete_tid = 0;
	elm.rec_type = HAMMER_RECTYPE_DATA;
	elm.obj_type = 0;				/* unused */
	elm.btype = HAMMER_BTREE_TYPE_RECORD;		/* unused */
	elm.localization = HAMMER_LOCALIZE_MISC;

	record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &elm);
	if (record)
		hammer_ref(&record->lock);
	return(record);
}
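
/*
 * Note that the lookup key is (file_offset + bytes), matching the DATA
 * record key convention used throughout this file: a record's key marks
 * the end of its data, not the beginning.  E.g. a 16K buffer at file
 * offset 0 is found under key 16384.
 */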

/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the front-end in order to be able to directly
 * flush a buffer cache buffer.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset,
		   void *data, int bytes, int *force_altp)
{
	hammer_record_t record;
	hammer_record_t conflict;
	int error;

	/*
	 * If the record already exists just return it.  If it exists but
	 * is being flushed we can't reuse the conflict record and we can't
	 * create a new one (unlike directories, data records have no iterator
	 * so we would be creating a duplicate).  In that case return NULL
	 * to force the front-end to queue the buffer.
	 *
	 * This is kinda messy.  We can't have an in-memory record AND its
	 * buffer cache buffer queued to the same flush cycle at the same
	 * time as that would result in a [delete-]create-delete-create
	 * sequence with the same transaction id.  Set *force_altp to 1
	 * to deal with the situation.
	 */
	*force_altp = 0;
	conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
	if (conflict) {
		/*
		 * We can't reuse the record if it is owned by the backend
		 * or has been deleted.
		 */
		if (conflict->flush_state == HAMMER_FST_FLUSH) {
			hammer_rel_mem_record(conflict);
			*force_altp = 1;
			kprintf("a");
			return(NULL);
		}
		if (conflict->flags & HAMMER_RECF_DELETED_FE) {
			hammer_rel_mem_record(conflict);
			*force_altp = 1;
			kprintf("b");
			return(NULL);
		}
		KKASSERT(conflict->leaf.data_len == bytes);
		conflict->leaf.data_crc = crc32(data, bytes);

		/* reusing conflict, remove extra rsv stats */
		hammer_cleanup_write_io(ip);
		return(conflict);
	}

	/*
	 * Otherwise create it.  This is called with the related BIO locked
	 * so there should be no possible conflict.
	 */
	record = hammer_alloc_mem_record(ip, 0);
	record->leaf.data_offset = hammer_blockmap_reserve(ip->hmp,
					HAMMER_ZONE_LARGE_DATA_INDEX,
					bytes, &error);
	if (record->leaf.data_offset == 0) {
		hammer_rel_mem_record(record);
		return(NULL);
	}
	record->type = HAMMER_MEM_RECORD_DATA;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.key = file_offset + bytes;
	record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
	record->leaf.data_len = bytes;
	record->leaf.data_crc = crc32(data, bytes);

	hammer_ref(&record->lock);	/* mem_add eats a reference */
	error = hammer_mem_add(record);
	KKASSERT(error == 0);
	return (record);
}
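
/*
 * Illustrative front-end usage (a sketch, not a verbatim caller): try
 * the bulk path first and fall back to queueing the buffer through the
 * flusher when NULL is returned with *force_altp set:
 *
 *	record = hammer_ip_add_bulk(ip, file_offset, data, bytes, &force_alt);
 *	if (record == NULL)
 *		(queue the buffer cache buffer to the flusher instead)
 *	else
 *		(flush the record directly and release it)
 */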

/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
static int
hammer_rec_trunc_callback(hammer_record_t record, void *data __unused)
{
	if (record->flags & HAMMER_RECF_DELETED_FE)
		return(0);
	if (record->flush_state == HAMMER_FST_FLUSH)
		return(0);
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	hammer_ref(&record->lock);
	record->flags |= HAMMER_RECF_DELETED_FE;
	hammer_rel_mem_record(record);
	return(0);
}

int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
	struct rec_trunc_info info;

	switch(ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_REGFILE:
		info.rec_type = HAMMER_RECTYPE_DATA;
		break;
	case HAMMER_OBJTYPE_DBFILE:
		info.rec_type = HAMMER_RECTYPE_DB;
		break;
	default:
		return(EINVAL);
	}
	info.trunc_off = file_size;
	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
				   hammer_rec_trunc_callback, &info);
	return(0);
}

/*
 * Backend code
 *
 * Sync data from a buffer cache buffer (typically) to the filesystem.  This
 * is called via the strategy code from a cached data source.  This code
 * is responsible for actually writing a data record out to the disk.
 *
 * This can only occur non-historically (i.e. 'current' data only).
 *
 * The file offset must be HAMMER_BUFSIZE aligned but the data length
 * can be truncated.  The record (currently) always represents a BUFSIZE
 * swath of space whether the data is truncated or not.
 */
int
hammer_ip_sync_data(hammer_cursor_t cursor, hammer_inode_t ip,
		    int64_t offset, void *data, int bytes)
{
	hammer_transaction_t trans = cursor->trans;
	struct hammer_btree_leaf_elm elm;
	hammer_off_t data_offset;
	void *bdata;
	int error;
	int aligned_bytes;

	KKASSERT((offset & HAMMER_BUFMASK) == 0);
	KKASSERT(trans->type == HAMMER_TRANS_FLS);
	KKASSERT(bytes != 0);

	/*
	 * We don't have to do this but it's probably a good idea to
	 * align data allocations to 64-byte boundaries for future
	 * expansion.
	 */
	aligned_bytes = (bytes + 63) & ~63;
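	/* e.g. a 200 byte write is padded to a 256 byte allocation */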
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = offset + aligned_bytes;
	cursor->key_beg.create_tid = trans->tid;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
	cursor->asof = trans->tid;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_INSERT;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * Issue a lookup to position the cursor.
	 */
	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		kprintf("hammer_ip_sync_data: duplicate data at "
			"(%lld,%d) tid %016llx\n",
			offset, aligned_bytes, trans->tid);
		hammer_print_btree_elm(&cursor->node->ondisk->
					elms[cursor->index],
				       HAMMER_BTREE_TYPE_LEAF, cursor->index);
		panic("Duplicate data");
		error = EIO;
	}
	if (error != ENOENT)
		goto done;

	/*
	 * Allocate our data.  The data buffer is not marked modified (yet)
	 */
	bdata = hammer_alloc_data(trans, aligned_bytes, &data_offset,
				  &cursor->data_buffer, &error);

	if (bdata == NULL)
		goto done;

	/*
	 * Fill everything in and insert our B-Tree node.
	 *
	 * NOTE: hammer_alloc_data() has already marked the data buffer
	 * as modified.  If we do it again we will generate unnecessary
	 * undo elements.
	 */
	elm.base.btype = HAMMER_BTREE_TYPE_RECORD;
	elm.base.localization = HAMMER_LOCALIZE_MISC;
	elm.base.obj_id = ip->obj_id;
	elm.base.key = offset + aligned_bytes;
	elm.base.create_tid = trans->tid;
	elm.base.delete_tid = 0;
	elm.base.rec_type = HAMMER_RECTYPE_DATA;
	elm.atime = 0;
	elm.data_offset = data_offset;
	elm.data_len = aligned_bytes;
	elm.data_crc = crc32(data, aligned_bytes);

	/*
	 * Copy the data to the allocated buffer.  Since we are aligning
	 * the record size as specified in elm.data_len, make sure to zero
	 * out any extraneous bytes.
	 */
	hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
	bcopy(data, bdata, bytes);
	if (aligned_bytes > bytes)
		bzero((char *)bdata + bytes, aligned_bytes - bytes);
	hammer_modify_buffer_done(cursor->data_buffer);

	/*
	 * Data records can wind up on-disk before the inode itself is
	 * on-disk.  One must assume data records may be on-disk if either
	 * HAMMER_INODE_DONDISK or HAMMER_INODE_ONDISK is set.
	 */
	ip->flags |= HAMMER_INODE_DONDISK;

	error = hammer_btree_insert(cursor, &elm);
	if (error == 0)
		goto done;

	hammer_blockmap_free(trans, data_offset, aligned_bytes);
done:
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}

/*
 * Backend code which actually performs the write to the media.  This
 * routine is typically called from the flusher.  The bio will be disposed
 * of (biodone'd) by this routine.
 *
 * Iterate the related records and mark for deletion.  If existing edge
 * records (left and right side) overlap our write they have to be marked
 * deleted and new records created, usually referencing a portion of the
 * original data.  Then add a record to represent the buffer.
 */
int
hammer_dowrite(hammer_cursor_t cursor, hammer_inode_t ip,
	       off_t file_offset, void *data, int bytes)
{
	int error;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	/*
	 * If the inode is going or gone, just throw away any frontend
	 * buffers.
	 */
	if (ip->flags & HAMMER_INODE_DELETED)
		return(0);

	/*
	 * Delete any records overlapping our range.  This function will
	 * (eventually) properly truncate partial overlaps.
	 */
	if (ip->sync_ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		error = hammer_ip_delete_range(cursor, ip, file_offset,
					       file_offset, 0);
	} else {
		error = hammer_ip_delete_range(cursor, ip, file_offset,
					       file_offset + bytes - 1, 0);
	}

	/*
	 * Add a single record to cover the write.  We can write a record
	 * with only the actual file data - for example, a small 200 byte
	 * file does not have to write out a 16K record.
	 *
	 * While the data size does not have to be aligned, we still do it
	 * to reduce fragmentation in a future allocation model.
	 */
	if (error == 0) {
		int limit_size;

		if (ip->sync_ino_data.size - file_offset > bytes) {
			limit_size = bytes;
		} else {
			limit_size = (int)(ip->sync_ino_data.size -
					   file_offset);
			KKASSERT(limit_size >= 0);
		}
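		/*
		 * e.g. a 16K buffer backing a 200 byte file at offset 0
		 * syncs only 200 bytes of data (illustration).
		 */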
		if (limit_size) {
			error = hammer_ip_sync_data(cursor, ip, file_offset,
						    data, limit_size);
		}
	}
	if (error)
		Debugger("hammer_dowrite: error");
	return(error);
}

/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
	hammer_transaction_t trans = cursor->trans;
	void *bdata;
	int error;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
	KKASSERT(record->leaf.base.localization != 0);

	/*
	 * If this is a bulk-data record placemarker there may be an existing
	 * record on-disk, indicating a data overwrite.  If there is, the
	 * on-disk record must be deleted before we can insert our new record.
	 *
	 * We've synthesized this record and do not know what the create_tid
	 * on-disk is, nor how much data it represents.
	 *
	 * Keep in mind that (key) for data records is (base_offset + len),
	 * not (base_offset).  Also, we only want to get rid of on-disk
	 * records since we are trying to sync our in-memory record, so call
	 * hammer_ip_delete_range() with truncating set to 1 to make sure
	 * it skips in-memory records.
	 *
	 * It is ok for the lookup to return ENOENT.
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA) {
		error = hammer_ip_delete_range(
				cursor, record->ip,
				record->leaf.base.key - record->leaf.data_len,
				record->leaf.base.key - 1, 1);
		if (error && error != ENOENT)
			goto done;
	}

	/*
	 * Setup the cursor.
	 */
	hammer_normalize_cursor(cursor);
	cursor->key_beg = record->leaf.base;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Records can wind up on-media before the inode itself is on-media.
	 * Flag the case.
	 */
	record->ip->flags |= HAMMER_INODE_DONDISK;

	/*
	 * If we are deleting a directory entry an exact match must be
	 * found on-disk.
	 */
	if (record->type == HAMMER_MEM_RECORD_DEL) {
		error = hammer_btree_lookup(cursor);
		if (error == 0) {
			error = hammer_ip_delete_record(cursor, record->ip,
							trans->tid);
			if (error == 0) {
				record->flags |= HAMMER_RECF_DELETED_FE;
				record->flags |= HAMMER_RECF_DELETED_BE;
			}
		}
		goto done;
	}

	/*
	 * We are inserting.
	 *
	 * Issue a lookup to position the cursor and locate the cluster.  The
	 * target key should not exist.  If we are creating a directory entry
	 * we may have to iterate the low 32 bits of the key to find an unused
	 * key.
	 */
	cursor->flags |= HAMMER_CURSOR_INSERT;

	for (;;) {
		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("DOINSERT LOOKUP %d\n", error);
		if (error)
			break;
		if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
			kprintf("hammer_ip_sync_record: duplicate rec "
				"at (%016llx)\n", record->leaf.base.key);
			Debugger("duplicate record1");
			error = EIO;
			break;
		}
		if (++trans->hmp->namekey_iterator == 0)
			++trans->hmp->namekey_iterator;
		record->leaf.base.key &= ~(0xFFFFFFFFLL);
		record->leaf.base.key |= trans->hmp->namekey_iterator;
		cursor->key_beg.key = record->leaf.base.key;
	}
#if 0
	if (record->type == HAMMER_MEM_RECORD_DATA)
		kprintf("sync_record %016llx ---------------- %016llx %d\n",
			record->leaf.base.key - record->leaf.data_len,
			record->leaf.data_offset, error);
#endif

	if (error != ENOENT)
		goto done;

	/*
	 * Allocate the record and data.  The result buffers will be
	 * marked as being modified and further calls to
	 * hammer_modify_buffer() will result in unneeded UNDO records.
	 *
	 * Support zero-fill records (data == NULL and data_len != 0).
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA) {
		/*
		 * The data portion of a bulk-data record has already been
		 * committed to disk, we need only adjust the layer2
		 * statistics in the same transaction as our B-Tree insert.
		 */
		KKASSERT(record->leaf.data_offset != 0);
		hammer_blockmap_free(trans, record->leaf.data_offset,
				     -record->leaf.data_len);
		error = 0;
	} else if (record->data && record->leaf.data_len) {
		/*
		 * Wholly cached record, with data.  Allocate the data.
		 */
		bdata = hammer_alloc_data(trans, record->leaf.data_len,
					  &record->leaf.data_offset,
					  &cursor->data_buffer, &error);
		if (bdata == NULL)
			goto done;
		record->leaf.data_crc = crc32(record->data,
					      record->leaf.data_len);
		hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
		bcopy(record->data, bdata, record->leaf.data_len);
		hammer_modify_buffer_done(cursor->data_buffer);
	} else {
		/*
		 * Wholly cached record, without data.
		 */
		record->leaf.data_offset = 0;
		record->leaf.data_crc = 0;
	}

	error = hammer_btree_insert(cursor, &record->leaf);
	if (hammer_debug_inode && error)
		kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
			error, cursor->node->node_offset, cursor->index,
			record->leaf.base.key);

	/*
	 * Our record is on-disk, normally mark the in-memory version as
	 * deleted.  If the record represented a directory deletion but
	 * we had to sync a valid directory entry to disk we must convert
	 * the record to a covering delete so the frontend does not have
	 * visibility on the synced entry.
	 */
	if (error == 0) {
		if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags &= ~HAMMER_RECF_DELETED_FE;
			record->type = HAMMER_MEM_RECORD_DEL;
			KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
			record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
			/* hammer_flush_record_done takes care of the rest */
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
		}
	} else {
		if (record->leaf.data_offset) {
			hammer_blockmap_free(trans, record->leaf.data_offset,
					     record->leaf.data_len);
		}
	}

done:
	return(error);
}

/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key is used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
static
int
hammer_mem_add(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	/*
	 * Make a private copy of record->data
	 */
	if (record->data)
		KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

	/*
	 * Insert into the RB tree, find an unused iterator if this is
	 * a directory entry.
	 */
	while (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
		if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
			record->flags |= HAMMER_RECF_DELETED_FE;
			hammer_rel_mem_record(record);
			return (EEXIST);
		}
		if (++hmp->namekey_iterator == 0)
			++hmp->namekey_iterator;
		record->leaf.base.key &= ~(0xFFFFFFFFLL);
		record->leaf.base.key |= hmp->namekey_iterator;
	}
	++hmp->rsv_recs;
	++record->ip->rsv_recs;
	record->ip->hmp->rsv_databytes += record->leaf.data_len;
	record->flags |= HAMMER_RECF_ONRBTREE;
	hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
	hammer_rel_mem_record(record);
	return(0);
}

/************************************************************************
 *		   HAMMER INODE MERGED-RECORD FUNCTIONS		*
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */

/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
	int error;

	/*
	 * If the element is in-memory return it without searching the
	 * on-disk B-Tree
	 */
	KKASSERT(cursor->ip);
	error = hammer_mem_lookup(cursor);
	if (error == 0) {
		cursor->leaf = &cursor->iprec->leaf;
		return(error);
	}
	if (error != ENOENT)
		return(error);

	/*
	 * If the inode has on-disk components search the on-disk B-Tree.
	 */
	if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
		return(error);
	error = hammer_btree_lookup(cursor);
	if (error == 0)
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
	return(error);
}

/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip = cursor->ip;
	int error;

	KKASSERT(ip != NULL);

	/*
	 * Clean up fields and setup for merged scan
	 */
	cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
	cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
	cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	/*
	 * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
	 * exact lookup so if we get ENOENT we have to call the iterate
	 * function to validate the first record after the begin key.
	 *
	 * The ATEDISK flag is used by hammer_btree_iterate to determine
	 * whether it must index forwards or not.  It is also used here
	 * to select the next record from in-memory or on-disk.
	 *
	 * EDEADLK can only occur if the lookup hit an empty internal
	 * element and couldn't delete it.  Since this could only occur
	 * in-range, we can just iterate from the failure point.
	 */
	if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
		error = hammer_btree_lookup(cursor);
		if (error == ENOENT || error == EDEADLK) {
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x2000)
				kprintf("error %d node %p %016llx index %d\n",
					error, cursor->node,
					cursor->node->node_offset,
					cursor->index);
			error = hammer_btree_iterate(cursor);
		}
		if (error && error != ENOENT)
			return(error);
		if (error == 0) {
			cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		} else {
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
		}
	}

	/*
	 * Search the in-memory record list (Red-Black tree).  Unlike the
	 * B-Tree search, mem_first checks for records in the range.
	 */
	error = hammer_mem_first(cursor);
	if (error && error != ENOENT)
		return(error);
	if (error == 0) {
		cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
		cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
	}

	/*
	 * This will return the first matching record.
	 */
	return(hammer_ip_next(cursor));
}
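
/*
 * Typical merged-iteration pattern, as used by the deletion routines
 * later in this file (a sketch; real callers also handle EDEADLK by
 * terminating and re-initializing the cursor):
 *
 *	error = hammer_ip_first(cursor);
 *	while (error == 0) {
 *		(examine cursor->leaf, hammer_ip_resolve_data() if needed)
 *		error = hammer_ip_next(cursor);
 *	}
 *	if (error == ENOENT)
 *		error = 0;
 */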

/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor.  This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code is returned.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
	hammer_btree_elm_t elm;
	hammer_record_t rec, save;
	int error;
	int r;

next_btree:
	/*
	 * Load the current on-disk and in-memory record.  If we ate any
	 * records we have to get the next one.
	 *
	 * If we deleted the last on-disk record we had scanned ATEDISK will
	 * be clear and DELBTREE will be set, forcing a call to iterate.  The
	 * fact that ATEDISK is clear causes iterate to re-test the 'current'
	 * element.  If ATEDISK is set, iterate will skip the 'current'
	 * element.
	 *
	 * Get the next on-disk record
	 */
	if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			error = hammer_btree_iterate(cursor);
			cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
			if (error == 0)
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			else
				cursor->flags |= HAMMER_CURSOR_DISKEOF |
						 HAMMER_CURSOR_ATEDISK;
		}
	}

next_memory:
	/*
	 * Get the next in-memory record.  The record can be ripped out
	 * of the RB tree so we maintain a scan_info structure to track
	 * the next node.
	 *
	 * hammer_rec_scan_cmp:  Is the record still in our general range,
	 *			 (non-inclusive of snapshot exclusions)?
	 * hammer_rec_scan_callback: Is the record in our snapshot?
	 */
	if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
		if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
			save = cursor->iprec;
			cursor->iprec = NULL;
			rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
			while (rec) {
				if (hammer_rec_scan_cmp(rec, cursor) != 0)
					break;
				if (hammer_rec_scan_callback(rec, cursor) != 0)
					break;
				rec = hammer_rec_rb_tree_RB_NEXT(rec);
			}
			if (save)
				hammer_rel_mem_record(save);
			if (cursor->iprec) {
				KKASSERT(cursor->iprec == rec);
				cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
			} else {
				cursor->flags |= HAMMER_CURSOR_MEMEOF;
			}
		}
	}

	/*
	 * The memory record may have become stale while being held in
	 * cursor->iprec.  We are interlocked against the backend with
	 * regards to B-Tree entries.
	 */
	if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
			goto next_memory;
		}
	}

	/*
	 * Extract either the disk or memory record depending on their
	 * relative position.
	 */
	error = 0;
	switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
	case 0:
		/*
		 * Both entries valid.  Return the btree entry if it is
		 * in front of the memory entry.
		 */
		elm = &cursor->node->ondisk->elms[cursor->index];
		r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);

		/*
		 * Special case.  If the entries only differ by their
		 * create_tid, assume they are equal and fall through.
		 *
		 * This case can occur for memory-data records because
		 * their initial create_tid is 0 (infinity).
		 */
		if (r == -1)
			r = 0;
		if (r < 0) {
			error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_LEAF);
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
			break;
		}

		/*
		 * If the entries match exactly the memory entry is either
		 * an on-disk directory entry deletion or a bulk data
		 * overwrite.  If it is a directory entry deletion we eat
		 * both entries.
		 *
		 * For the bulk-data overwrite case it is possible to have
		 * visibility into both, which simply means the syncer
		 * hasn't gotten around to doing the delete+insert sequence
		 * on the B-Tree.  Use the memory entry and throw away the
		 * on-disk entry.
		 *
		 * If the in-memory record is not either of these we
		 * probably caught the syncer while it was syncing it to
		 * the media.  Since we hold a shared lock on the cursor,
		 * the in-memory record had better be marked deleted at
		 * this point.
		 */
		if (r == 0) {
			if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
					cursor->flags |= HAMMER_CURSOR_ATEMEM;
					goto next_btree;
				}
			} else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
				}
				/* fall through to memory entry */
			} else {
				panic("hammer_ip_next: duplicate mem/b-tree entry");
				cursor->flags |= HAMMER_CURSOR_ATEMEM;
				goto next_memory;
			}
		}
		/* fall through to the memory entry */
	case HAMMER_CURSOR_ATEDISK:
		/*
		 * Only the memory entry is valid.
		 */
		cursor->leaf = &cursor->iprec->leaf;
		cursor->flags |= HAMMER_CURSOR_ATEMEM;

		/*
		 * If the memory entry is an on-disk deletion we should have
		 * also found a B-Tree record.  If the backend beat us to it,
		 * it would have interlocked the cursor and we should have
		 * seen the in-memory record marked DELETED_FE.
		 */
		if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
		    (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
			panic("hammer_ip_next: del-on-disk with no b-tree entry");
		}
		break;
	case HAMMER_CURSOR_ATEMEM:
		/*
		 * Only the disk entry is valid
		 */
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		break;
	default:
		/*
		 * Neither entry is valid
		 *
		 * XXX error not set properly
		 */
		cursor->leaf = NULL;
		error = ENOENT;
		break;
	}
	return(error);
}

/*
 * Resolve the cursor->data pointer for the current cursor position in
 * a merged iteration.
 */
int
hammer_ip_resolve_data(hammer_cursor_t cursor)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * The data associated with an in-memory record is usually
		 * kmalloced, but reserve-ahead data records will have an
		 * on-disk reference.
		 *
		 * NOTE: Reserve-ahead data records must be handled in the
		 * context of the related high level buffer cache buffer
		 * to interlock against async writes.
		 */
		record = cursor->iprec;
		cursor->data = record->data;
		error = 0;
		if (cursor->data == NULL) {
			KKASSERT(record->leaf.base.rec_type ==
				 HAMMER_RECTYPE_DATA);
			cursor->data = hammer_bread(cursor->trans->hmp,
						    record->leaf.data_offset,
						    &error,
						    &cursor->data_buffer);
		}
	} else {
		cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
	}
	return(error);
}

/*
 * Backend truncation / record replacement - delete records in range.
 *
 * Delete all records within the specified range for inode ip.  In-memory
 * records still associated with the frontend are ignored.
 *
 * NOTE: An unaligned range will cause new records to be added to cover
 * the edge cases. (XXX not implemented yet).
 *
 * NOTE: Replacement via reservations (see hammer_ip_sync_record_cursor())
 * also does not deal with unaligned ranges.
 *
 * NOTE: ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
 *
 * NOTE: Record keys for regular file data have to be special-cased since
 * they indicate the end of the range (key = base + bytes).
 */
int
hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
		       int64_t ran_beg, int64_t ran_end, int truncating)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;
	int64_t off;

#if 0
	kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
#endif

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	cursor->key_end = cursor->key_beg;
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor->key_beg.key = ran_beg;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor->key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor->key_end.key = ran_end;
	} else {
		/*
		 * The key in the B-Tree is (base+bytes), so the first possible
		 * matching key is ran_beg + 1.
		 */
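		/*
		 * (Illustration: with ran_beg 4096, a DATA record covering
		 * file offsets 0-4095 carries key 4096 and is not matched.)
		 */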
		int64_t tmp64;

		cursor->key_beg.key = ran_beg + 1;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor->key_end.rec_type = HAMMER_RECTYPE_DATA;

		tmp64 = ran_end + MAXPHYS + 1;	/* work around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor->key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);

		/*
		 * There may be overlap cases for regular file data.  Also
		 * remember the key for a regular file record is (base + len),
		 * NOT (base).
		 */
		if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
			off = leaf->base.key - leaf->data_len;
			/*
			 * Check the left edge case.  We currently do not
			 * split existing records.
			 */
			if (off < ran_beg) {
				panic("hammer left edge case %016llx %d\n",
					leaf->base.key, leaf->data_len);
			}

			/*
			 * Check the right edge case.  Note that the
			 * record can be completely out of bounds, which
			 * terminates the search.
			 *
			 * base->key is exclusive of the right edge while
			 * ran_end is inclusive of the right edge.  The
			 * (key - data_len) left boundary is inclusive.
			 *
			 * XXX theory-check this test at some point, are
			 * we missing a + 1 somewhere?  Note that ran_end
			 * could overflow.
			 */
			if (leaf->base.key - 1 > ran_end) {
				if (leaf->base.key - leaf->data_len > ran_end)
					break;
				panic("hammer right edge case\n");
			}
		}

		/*
		 * Delete the record.  When truncating we do not delete
		 * in-memory (data) records because they represent data
		 * written after the truncation.
		 *
		 * This will also physically destroy the B-Tree entry and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
		 * uses to perform a fixup.
		 */
		if (truncating == 0 || hammer_cursor_ondisk(cursor))
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
		if (error)
			break;
		error = hammer_ip_next(cursor);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}

/*
 * Backend truncation - delete all records.
 *
 * Delete all user records associated with an inode except the inode record
 * itself.  Directory entries are not deleted (they must be properly disposed
 * of or nlinks would get upset).
 */
int
hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
			   int *countp)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
	cursor->key_beg.key = HAMMER_MIN_KEY;

	cursor->key_end = cursor->key_beg;
	cursor->key_end.rec_type = 0xFFFF;
	cursor->key_end.key = HAMMER_MAX_KEY;

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);

		/*
		 * Mark the record and B-Tree entry as deleted.  This will
		 * also physically delete the B-Tree entry, record, and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
		 * uses to perform a fixup.
		 *
		 * Directory entries (and delete-on-disk directory entries)
		 * must be synced and cannot be deleted.
		 */
		if (leaf->base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
			error = hammer_ip_delete_record(cursor, ip,
							trans->tid);
			++*countp;
		}
		if (error)
			break;
		error = hammer_ip_next(cursor);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}

/*
 * Delete the record at the current cursor.  On success the cursor will
 * be positioned appropriately for an iteration but may no longer be at
 * a leaf node.
 *
 * This routine is only called from the backend.
 *
 * NOTE: This can return EDEADLK, requiring the caller to terminate the
 * cursor and retry.
 */
int
hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid)
{
	hammer_btree_elm_t elm;
	hammer_mount_t hmp;
	int error;
	int dodelete;

	KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);

	/*
	 * In-memory (unsynchronized) records can simply be freed.  This
	 * only occurs in range iterations since all other records are
	 * individually synchronized.  Thus there should be no confusion with
	 * the interlock.
	 */
	if (hammer_cursor_inmem(cursor)) {
		KKASSERT((cursor->iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
		cursor->iprec->flags |= HAMMER_RECF_DELETED_FE;
		cursor->iprec->flags |= HAMMER_RECF_DELETED_BE;
		return(0);
	}

	/*
	 * On-disk records are marked as deleted by updating their delete_tid.
	 * This does not affect their position in the B-Tree (which is based
	 * on their create_tid).
	 */
	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
	elm = NULL;
	hmp = cursor->node->hmp;

	/*
	 * If we were mounted with the nohistory option, we physically
	 * delete the record.
	 */
	dodelete = hammer_nohistory(ip);

	if (error == 0) {
		error = hammer_cursor_upgrade(cursor);
		if (error == 0) {
			elm = &cursor->node->ondisk->elms[cursor->index];
			hammer_modify_node(cursor->trans, cursor->node,
					   &elm->leaf.base.delete_tid,
					   sizeof(elm->leaf.base.delete_tid));
			elm->leaf.base.delete_tid = tid;
			hammer_modify_node_done(cursor->node);

			/*
			 * An on-disk record cannot have the same delete_tid
			 * as its create_tid.  In a chain of record updates
			 * this could result in a duplicate record.
			 */
			KKASSERT(elm->leaf.base.delete_tid !=
				 elm->leaf.base.create_tid);
		}
	}

	if (error == 0 && dodelete) {
		error = hammer_delete_at_cursor(cursor, NULL);
		if (error) {
			panic("hammer_ip_delete_record: unable to physically delete the record!\n");
			error = 0;
		}
	}
	return(error);
}
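
/*
 * Physically delete the B-Tree element at the cursor and free any data
 * blockmap space it references.  The only caller in this file passes
 * NULL for stat_bytes.
 */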
int
hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
{
	hammer_btree_elm_t elm;
	hammer_off_t data_offset;
	int32_t data_len;
	u_int16_t rec_type;
	int error;

	elm = &cursor->node->ondisk->elms[cursor->index];
	KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);

	data_offset = elm->leaf.data_offset;
	data_len = elm->leaf.data_len;
	rec_type = elm->leaf.base.rec_type;

	error = hammer_btree_delete(cursor);
	if (error == 0) {
		/*
		 * This forces a fixup for the iteration because
		 * the cursor is now either sitting at the 'next'
		 * element or sitting at the end of a leaf.
		 */
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			cursor->flags |= HAMMER_CURSOR_DELBTREE;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		}
	}
	if (error == 0) {
		switch(data_offset & HAMMER_OFF_ZONE_MASK) {
		case HAMMER_ZONE_LARGE_DATA:
		case HAMMER_ZONE_SMALL_DATA:
			hammer_blockmap_free(cursor->trans,
					     data_offset, data_len);
			break;
		default:
			break;
		}
	}
	return (error);
}

/*
 * Determine whether we can remove a directory.  This routine checks whether
 * a directory is empty or not and enforces flush connectivity.
 *
 * Flush connectivity requires that we block if the target directory is
 * currently flushing, otherwise it may not end up in the same flush group.
 *
 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
 */
int
hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Check directory empty
	 */
	hammer_init_cursor(trans, &cursor, &ip->cache[0], ip);

	cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
	cursor.key_beg.key = HAMMER_MIN_KEY;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.rec_type = 0xFFFF;
	cursor.key_end.key = HAMMER_MAX_KEY;

	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);
	if (error == ENOENT)
		error = 0;
	else if (error == 0)
		error = ENOTEMPTY;
	hammer_done_cursor(&cursor);
	return(error);
}