 * hed - Hexadecimal editor
 * Copyright (C) 2004 Petr Baudis <pasky@ucw.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * There hammer on the anvil smote,
 * There chisel clove, and graver wrote;
 * There forged was blade, and bound was hilt;
 * The delver mined, the mason built.
 * There beryl, pearl, and opal pale,
 * And metal wrought like fishes' mail,
 * Buckler and corslet, axe and sword,
 * And shining spears were laid in hoard.
 */
/* Feature macros needed for:
 */

#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKGETSIZE and BLKGETSIZE64 */

/* memrchr() might not be available */
#endif /* HAVE_MEMRCHR */

/*
 * `Piles of jewels?' said Gandalf. `No. The Orcs have often plundered Moria;
 * there is nothing left in the upper halls. And since the dwarves fled, no one
 * dares to seek the shafts and treasuries down in the deep places: they are
 * drowned in water--or in a shadow of fear.'
 */

/* TODO: Currently the file blocks allocation is not very sophisticated and
 * when the weather is bad it could probably have rather horrible results. */
#define BDEBUG(x...)	fprintf(stderr, x)

/* Number of blocks in cache */
#define CACHE_LENGTH	64

/* Blocks for readahead */
#define FILE_READAHEAD	(CACHE_LENGTH/2)

#define file_block	hed_block

#define first_block(f)	next_block(last_block(f))
#define prev_block(b)	(tree_entry(prev_in_tree(&(b)->t),struct file_block,t))
#define next_block(b)	(tree_entry(next_in_tree(&(b)->t),struct file_block,t))
#define last_block(f)	(&(f)->eof_block)

#define block_offset(b)	tree_block_offset(&(b)->t)

#define recalc_block_recursive(b)	recalc_node_recursive(&(b)->t)

#define chain_block_before(tree,b,p)	insert_into_tree((tree), &(b)->t, &(p)->t)
#define recalc_chain_block_before(tree,b,p)	do {	\
		chain_block_before((tree), (b), (p));	\
		recalc_block_recursive((b));		\
	} while (0)

#define unchain_block(tree,b)	del_from_tree((tree), &(b)->t)
#define recalc_unchain_block(tree,b)	recalc_del_from_tree((tree), &(b)->t)

#define init_block_list(tree,b)	init_tree(tree, &(b)->t)
#define init_block_link(b)	init_node(&(b)->t)

#define find_block(tree,o)	tree_entry(find_in_tree((tree),(o)),struct file_block,t)
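
/* Taken together, the macros above wrap what appears to be a size-augmented
 * tree of blocks: find_block(tree, o) maps a logical file offset to the block
 * covering it, block_offset() recovers where that block starts, and the
 * recalc_*() variants keep the cached subtree sizes consistent whenever a
 * block is chained or unchained. */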
#ifdef HED_CONFIG_SWAP

/* Return the swp file object */
static inline struct swp_file *
file_swp(struct hed_file *file)

/* Provide a stub for the non-swap case */
file_swp(struct hed_file *file)

#ifdef HED_CONFIG_READAHEAD

#define file_ra_none(f)		((f)->readahead == HED_RA_NONE)
#define file_ra_forward(f)	((f)->readahead == HED_RA_FORWARD)
#define file_ra_backward(f)	((f)->readahead == HED_RA_BACKWARD)

#define file_ra_none(f)		(1)
#define file_ra_forward(f)	(0)
#define file_ra_backward(f)	(0)

#endif /* HED_CONFIG_READAHEAD */
/* Get the physical offset of the byte immediately following @block. */
static inline hed_uoff_t
phys_end(const struct hed_block *block)
	return hed_block_is_inserted(block)
		? block->phys_pos
		: block->phys_pos + hed_block_size(block);

static struct hed_block *
next_nonzero_block(struct hed_block *block)
	while (!hed_block_is_eof(block)) {
		block = next_block(block);
		if (hed_block_size(block))

static struct hed_block *
prev_nonzero_block(struct hed_block *block)
		block = prev_block(block);
		if (hed_block_is_eof(block))
	} while (!hed_block_size(block));
hed_block_is_after_erase(struct hed_block *block)
	struct hed_block *prev = prev_nonzero_block(block);
	return prev
		? block->phys_pos > phys_end(prev)

hed_block_is_after_insert(struct hed_block *block)
	struct hed_block *prev = prev_nonzero_block(block);
	return prev && hed_block_is_inserted(prev);
193 # define dump_blocks(file) {}
197 block_phys_size(struct hed_file
*file
, struct file_block
*block
)
199 struct file_block
*next
;
201 if (hed_block_is_eof(block
))
203 next
= next_block(block
);
204 return next
->phys_pos
- block
->phys_pos
;
208 dump_block(int level
, struct hed_file
*file
, struct hed_tree_node
*node
,
209 hed_uoff_t
*cur_offset
, hed_uoff_t
*cur_poffset
)
211 struct hed_block
*block
= tree_entry(node
, struct hed_block
, t
);
212 bool virtual = hed_block_is_virtual(block
);
218 dump_block(level
+ 1, file
, node
->left
, cur_offset
, cur_poffset
);
219 p
= hed_block_data(block
);
220 if (level
< 20) t
[level
] = '>'; else t
[19] = '.';
221 fprintf(stderr
, "%s [%06llx] [%06llx] %c%c%c %05llx %05llx"
222 " {%02x%02x%02x%02x} -- %p ^%p [%06llx]\n",
224 (unsigned long long) *cur_offset
,
225 (unsigned long long) *cur_poffset
,
227 hed_block_is_inserted(block
) ? 'i' : ' ',
228 hed_block_is_dirty(block
) ? '*' : ' ',
229 (unsigned long long) node
->size
,
230 (unsigned long long) block_phys_size(file
, block
),
231 p
&& block
->t
.size
> 0 ? p
[0] : 0,
232 p
&& block
->t
.size
> 1 ? p
[1] : 0,
233 p
&& block
->t
.size
> 2 ? p
[2] : 0,
234 p
&& block
->t
.size
> 3 ? p
[3] : 0,
236 (unsigned long long) node
->cover_size
238 list_for_each_entry (cur
, &block
->refs
, list
) {
239 fprintf(stderr
, " <%p>: %llx->%p:%llx\n",
240 cur
, (long long)cur
->pos
,
241 cur
->block
, (unsigned long long)cur
->off
);
243 assert(*cur_poffset
== block
->phys_pos
);
244 *cur_offset
+= node
->size
;
245 *cur_poffset
+= block_phys_size(file
, block
);
247 dump_block(level
+ 1, file
, node
->right
, cur_offset
, cur_poffset
);
248 assert(node
->cover_size
== (node
->left
? node
->left
->cover_size
: 0)
249 + (node
->right
? node
->right
->cover_size
: 0)
253 /* Walk the tree manually here, because foreach_block() does not provide
254 * the tree structure.
255 * TODO: Change this if you plan to debug any other block containers.
258 dump_blocks(struct hed_file
*file
)
260 struct file_block
*first
= first_block(file
);
261 hed_uoff_t cur_offset
, cur_poffset
;
263 fprintf(stderr
, "-- blocks dump --\n");
265 cur_poffset
= first
->phys_pos
;
266 dump_block(0, file
, hed_file_blocks(file
)->root
,
267 &cur_offset
, &cur_poffset
);
268 fprintf(stderr
, "-- blocks dump end --\n");
get_cursor(struct hed_file *file, hed_uoff_t offset, hed_cursor_t *curs)
	struct file_block *block;

	block = find_block(hed_file_blocks(file), offset);
	assert(block != NULL);
	curs->off = offset - block_offset(block);
	list_add(&curs->list, &block->refs);
	BDEBUG("Mapped %llx to %llx+%llx/%llx\n",
	       offset, offset - curs->off, curs->off, block->t.size);
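
/* A cursor therefore carries three things: its logical position (curs->pos),
 * the covering block plus an offset into it (curs->block, curs->off), and a
 * list link that enrolls it on the block's ->refs list, which is how block
 * splits, merges and moves find every cursor they must update. */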
289 hed_get_cursor(struct hed_file
*file
, hed_uoff_t offset
, hed_cursor_t
*curs
)
291 get_cursor(file
, offset
, curs
);
295 put_cursor(hed_cursor_t
*curs
)
297 list_del(&curs
->list
);
301 hed_put_cursor(hed_cursor_t
*curs
)
307 hed_update_cursor(struct hed_file
*file
, hed_uoff_t offset
, hed_cursor_t
*curs
)
310 get_cursor(file
, offset
, curs
);
314 hed_dup_cursor(const hed_cursor_t
*src
, hed_cursor_t
*dst
)
317 dst
->block
= src
->block
;
319 list_add_tail(&dst
->list
, &src
->block
->refs
);
323 hed_dup2_cursor(const hed_cursor_t
*src
, hed_cursor_t
*dst
)
325 if (hed_is_a_cursor(dst
))
327 hed_dup_cursor(src
, dst
);
/* Move cursors from @old to @new, adding @off to their block
 * offsets to keep them at the same position. */
333 update_cursors(const struct hed_block
*old
, struct hed_block
*new,
338 BDEBUG("Updating cursors from <%p> to <%p>%c%llx\n",
339 old
, new, off
>= 0 ? '+' : '-', off
>= 0 ? off
: -off
);
341 list_for_each_entry(curs
, &old
->refs
, list
) {
/* Move cursors in the range <@start;@end> from @old to @new,
 * adding @off to their block offset, plus moving the reference list. */
350 move_cursors(const struct hed_block
*old
, struct hed_block
*new,
351 hed_uoff_t start
, hed_uoff_t end
, hed_off_t off
)
353 hed_cursor_t
*curs
, *nextcurs
;
355 BDEBUG("Moving cursors from <%p>:%llx:%llx to <%p>%c%llx\n",
356 old
, start
, end
, new,
357 off
>= 0 ? '+' : '-', off
>= 0 ? off
: -off
);
359 list_for_each_entry_safe(curs
, nextcurs
, &old
->refs
, list
)
360 if (curs
->off
>= start
&& curs
->off
<= end
) {
363 list_move(&curs
->list
, &new->refs
);
367 /* Move cursors in the range @block:<@start;@end> to @newpos */
369 move_cursors_abs(const struct hed_block
*block
,
370 hed_uoff_t start
, hed_uoff_t end
,
371 const hed_cursor_t
*newpos
)
373 hed_cursor_t
*curs
, *nextcurs
;
375 BDEBUG("Moving cursors from <%p>:%llx:%llx to <%p>:%llx\n",
376 block
, start
, end
, newpos
->block
, newpos
->off
);
378 list_for_each_entry_safe(curs
, nextcurs
, &block
->refs
, list
)
379 if (curs
->off
>= start
&& curs
->off
<= end
) {
380 curs
->pos
= newpos
->pos
;
381 curs
->block
= newpos
->block
;
382 curs
->off
= newpos
->off
;
383 list_move(&curs
->list
, &newpos
->block
->refs
);
387 /* Update the positions of cursors at and after @start for all
388 * blocks starting at @block */
390 slide_cursors(struct hed_file
*file
, const struct hed_block
*block
,
391 hed_uoff_t start
, hed_off_t off
)
394 const struct hed_block
*nblock
;
396 BDEBUG("Sliding cursors >= %llx by %c%llx, starting at <%p>\n",
397 start
, off
>= 0 ? '+' : '-', off
>= 0 ? off
: -off
, block
);
401 list_for_each_entry(curs
, &block
->refs
, list
)
402 if (curs
->pos
>= start
)
404 nblock
= next_block(block
);
405 } while (!hed_block_is_eof(block
));
static struct hed_block *
new_block(struct hed_file *file, long flags)
	struct file_block *new;

	if (! (new = swp_zalloc(file_swp(file), sizeof(struct file_block))) )

	init_block_link(new);
	INIT_LIST_HEAD(&new->refs);
	if (flags & HED_BLOCK_EXCACHE)
		INIT_LIST_HEAD(&new->lru);
	else
		list_add_tail(&new->lru, &file->lru);

static struct hed_block *
new_virt_block(struct hed_file *file, hed_uoff_t pos, hed_uoff_t size,
	struct hed_block *new =
		new_block(file, (HED_BLOCK_EXCACHE |

	BDEBUG("Spawned new virtual block [%llx] at %llx\n", size, pos);

static struct hed_block *
new_data_block(struct hed_file *file, hed_uoff_t pos, hed_uoff_t size,
	       struct hed_block_data *dataobj)
	struct hed_block *new =

	new->dataobj = dataobj;
	new->dataoff = FILE_BLOCK_OFF(pos);
	BDEBUG("Spawned new data block [%llx] at %llx\n", size, pos);
file_free_block(struct hed_file *file, struct file_block *block)
	cache_put(file->cache, block->dataobj);
	list_del(&block->lru);

	swp_free(file_swp(file), block);

kill_block_if_empty(struct hed_file *file, struct file_block *block)
	if (!hed_block_is_eof(block) && block->t.size == 0 &&
	    list_empty(&block->refs)) {
		/* No recalculation needed, zero size. */
		unchain_block(hed_file_blocks(file), block);
		file_free_block(file, block);

/* This may kill the previous block as well, if it can be merged
 * with the next one. It will never kill anything which _follows_. */
488 file_kill_block(struct hed_file
*file
, struct file_block
*block
)
490 hed_uoff_t phys_pos
= block
->phys_pos
;
491 struct file_block
*prev
= prev_block(block
);
492 struct file_block
*next
= next_block(block
);
493 struct file_block
*merger
;
494 bool killprev
= false;
496 /* We should never kill a dirty block! */
497 assert(!hed_block_is_dirty(block
));
	/* We should never be handed an empty block here (that might
	 * need special consideration with virtualization). */
500 assert(block
->t
.size
> 0);
502 if (!hed_block_is_eof(block
) &&
503 hed_block_is_inner_virtual(next
) &&
504 phys_pos
+ block
->t
.size
== next
->phys_pos
) {
505 if (!hed_block_is_eof(prev
) &&
506 hed_block_is_inner_virtual(prev
) &&
507 prev
->phys_pos
+ prev
->t
.size
== phys_pos
)
510 merger
->phys_pos
-= block
->t
.size
;
511 update_cursors(merger
, merger
, block
->t
.size
);
512 update_cursors(block
, merger
, 0);
513 } else if (!hed_block_is_eof(prev
) &&
514 hed_block_is_inner_virtual(prev
) &&
515 prev
->phys_pos
+ prev
->t
.size
== phys_pos
) {
517 update_cursors(block
, merger
, merger
->t
.size
);
518 } else if (!hed_block_is_virtual(block
)) {
519 /* Convert physical to virtual */
520 assert(block
->dataobj
);
521 cache_put(file
->cache
, block
->dataobj
);
522 block
->dataobj
= NULL
;
524 list_del_init(&block
->lru
); /* unlink the block from LRU */
525 hed_block_set_excache(block
); /* say it's unlinked */
526 hed_block_set_virtual(block
);
529 /* Already virtual and cannot merge */
532 list_splice(&block
->refs
, &merger
->refs
);
534 /* Messing with block sizes and unchaining is a bit tricky
535 * since unchain_block() can splay(). So we really need
536 * to recalc_block_recursive() right after we update the size.
537 * If this place turns out to be a hot-spot, we can optimize
538 * the tree operations here. */
539 merger
->t
.size
+= block
->t
.size
;
540 recalc_block_recursive(merger
);
542 /* Destroy the block */
543 recalc_unchain_block(hed_file_blocks(file
), block
);
544 file_free_block(file
, block
);
547 file_kill_block(file
, prev
);
static struct hed_block *
split_block(struct hed_file *file, struct hed_block *block,
	    hed_uoff_t splitpoint)
	struct hed_block *head;

	head = new_block(file, block->flags & ~HED_BLOCK_EOF);

	if ( (head->dataobj = block->dataobj) ) {
		cache_get(head->dataobj);
		head->dataoff = block->dataoff;
		block->dataoff += splitpoint;
	} else
		assert(hed_block_is_virtual(block));

	head->t.size = splitpoint;
	head->phys_pos = block->phys_pos;

	block->t.size -= splitpoint;
	if (!hed_block_is_inserted(block))
		block->phys_pos += splitpoint;
	recalc_block_recursive(block);
	recalc_chain_block_before(hed_file_blocks(file), head, block);

	move_cursors(block, head, 0, splitpoint - 1, 0);
	update_cursors(block, block, -splitpoint);
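
/* Cursor bookkeeping after the split above: cursors that fall below
 * @splitpoint migrate to the new head block with their offsets unchanged
 * (move_cursors), while cursors that stay on the shortened tail simply have
 * @splitpoint subtracted from their block offset (update_cursors). */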
/* Replace a chunk in @block with @newblock */
replace_chunk(struct hed_file *file, struct hed_block *block,
	      hed_uoff_t offset, struct hed_block *newblock)
	size_t len = newblock->t.size;
	hed_uoff_t leadlen = offset + len;

	assert(offset < block->t.size);
	assert(len <= block->t.size - offset);

	/* Re-create the head block if necessary */
		struct file_block *head;

		head = new_block(file, block->flags & ~HED_BLOCK_EOF);
		head->t.size = offset;
		head->dataobj = block->dataobj;
		head->dataoff = block->dataoff;
		head->phys_pos = block->phys_pos;
		recalc_chain_block_before(hed_file_blocks(file),

		/* Move cursors to the head */
		move_cursors(block, head, 0, offset - 1, 0);

	/* Move pointers to the new block */
	move_cursors(block, newblock, offset, leadlen - 1, -offset);

	/* Shorten the tail block */
	block->t.size -= leadlen;
	block->dataoff += leadlen;
	assert(!hed_block_is_inserted(block));
	block->phys_pos += leadlen;
	recalc_block_recursive(block);
	update_cursors(block, block, -leadlen);

	/* Insert the new block */
	recalc_chain_block_before(hed_file_blocks(file), newblock, block);

	/* Kill the leading block if possible */
	kill_block_if_empty(file, block);
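
/* In effect, replace_chunk() carves one block into up to three pieces: an
 * optional head covering the first @offset bytes, @newblock covering the next
 * @len bytes, and the original block shortened to the remaining tail, with
 * every affected cursor re-homed onto whichever piece now covers it. */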
#ifdef HED_CONFIG_SWAP

swp_filename(const char *filename)
	size_t fnlen = strlen(filename);

	if (!(swp = malloc(fnlen + 9)) )

	strcpy(swp, filename);

	file = strrchr(swp, '/');
	file = file ? file + 1 : swp;
	strcpy(stpcpy(file + 1, filename + (file-swp)), ".hedswp");
654 newswp_filename(char *swpname
)
659 while (!access(ret
, F_OK
)) {
660 if (ret
== swpname
) {
661 if (! (ret
= strdup(swpname
)) )
663 p
= ret
+ strlen(ret
) - 1;
676 hed_file_remove_swap(struct hed_file
*file
)
678 if (remove(file
->swpname
))
680 if (rename(file
->newswpname
, file
->swpname
))
683 free(file
->newswpname
);
684 file
->newswpname
= file
->swpname
;
688 static inline struct hed_file
*
689 file_swp_init(const char *name
)
691 char *swpname
, *newswpname
;
692 struct swp_file
*swp
;
693 struct hed_file
*file
;
695 swpname
= swp_filename(name
);
698 newswpname
= newswp_filename(swpname
);
701 swp
= swp_init_write(newswpname
);
703 goto fail_free_newname
;
705 assert(sizeof(struct swp_header
) + sizeof(struct hed_file
)
707 file
= swp_private(swp
);
708 memset(file
, 0, sizeof *file
);
711 file
->swpname
= swpname
;
712 file
->newswpname
= newswpname
;
719 if (swpname
!= newswpname
)
726 file_swp_done(struct hed_file
*file
)
728 remove(file
->newswpname
);
729 if (file
->newswpname
!= file
->swpname
)
730 free(file
->newswpname
);
732 swp_done(file_swp(file
));
733 /* file was de-allocated automatically with file->swp */
736 #else /* HED_CONFIG_SWAP */
738 static inline struct hed_file
*
739 file_swp_init(const char *name
)
741 return calloc(1, sizeof(struct hed_file
));
745 file_swp_done(struct hed_file
*file
)
750 #endif /* HED_CONFIG_SWAP */
static inline struct stat *
file_stat(struct hed_file *file)

hed_file_update_size(struct hed_file *file)
	hed_uoff_t oldsize = file->phys_size;

	if (fstat(file->fd, file_stat(file)) < 0)

	if (S_ISBLK(file_stat(file)->st_mode)) {
		if (ioctl(file->fd, BLKGETSIZE64, &file->phys_size)) {
			unsigned long size_in_blocks;
			if (ioctl(file->fd, BLKGETSIZE, &size_in_blocks))
			file->phys_size = (hed_uoff_t)size_in_blocks << 9;
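			/* BLKGETSIZE returns the size in 512-byte sectors,
			 * hence the shift by 9; BLKGETSIZE64 above already
			 * reports the size in bytes. */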
	} else if (S_ISREG(file_stat(file)->st_mode)) {
		file->phys_size = file_stat(file)->st_size;
	} else if (S_ISCHR(file_stat(file)->st_mode)) {
		if (lseek(file->fd, 0, SEEK_SET) < 0)
		file->phys_size = (hed_uoff_t)OFF_MAX + 1;

	file->size += file->phys_size - oldsize;
789 do_file_open(struct hed_file
*file
)
791 file
->fd
= open(file
->name
, O_RDONLY
);
795 fprintf(stderr
, "Warning: File %s does not exist\n",
797 memset(file_stat(file
), 0, sizeof(struct stat
));
799 } else if (hed_file_update_size(file
)) {
808 file_setup_blocks(struct hed_file
*file
)
810 hed_uoff_t phys_size
= file
->phys_size
;
811 struct file_block
*block
;
813 block
= &file
->eof_block
;
814 block
->flags
= HED_BLOCK_EXCACHE
| HED_BLOCK_VIRTUAL
| HED_BLOCK_EOF
;
815 INIT_LIST_HEAD(&block
->lru
);
816 INIT_LIST_HEAD(&block
->refs
);
817 block
->t
.size
= OFF_MAX
- phys_size
+ 1;
818 block
->phys_pos
= phys_size
;
820 init_block_list(hed_file_blocks(file
), block
);
823 block
= new_virt_block(file
, 0, phys_size
, 0);
826 recalc_chain_block_before(hed_file_blocks(file
), block
,
836 return file_access_init();
840 hed_open(const char *name
)
842 struct hed_file
*file
;
844 if (! (file
= file_swp_init(name
)) )
849 file
->cache
= cache_init(CACHE_LENGTH
, file_swp(file
));
852 INIT_LIST_HEAD(&file
->lru
);
854 if (do_file_open(file
))
857 if (file_setup_blocks(file
))
860 fixup_register(file
);
871 hed_close(struct hed_file
*file
)
	/* Do not check for errors:
	 * 1. This FD is read-only => no data loss possible
	 * 2. We're about to exit anyway => no resource leak
	 */

	fixup_deregister(file);

	/* No need to free file blocks here, because all data is
	 * allocated either from the cache or from the swap file
	 * and both are going to be destroyed now.
	 */

	cache_done(file->cache);
895 /* Adjust a cursor after off gets outside its block */
897 fixup_cursor_slow(hed_cursor_t
*curs
)
899 struct file_block
*block
= curs
->block
;
900 hed_uoff_t off
= curs
->off
;
903 if ((hed_off_t
)off
< 0) {
904 block
= prev_block(block
);
905 off
+= block
->t
.size
;
907 off
-= block
->t
.size
;
908 block
= next_block(block
);
910 } while (off
>= block
->t
.size
);
914 list_move(&curs
->list
, &block
->refs
);
917 /* Adjust a cursor if off gets outside its block.
918 * This is separate from fixup_cursor_slow, because it is supposed
919 * to be small enough to be inlined (which is a win, because most of
920 * the time no fixup has to be done, so the fast inlined path is used).
923 fixup_cursor(hed_cursor_t
*curs
)
925 if (curs
->off
>= curs
->block
->t
.size
)
926 fixup_cursor_slow(curs
);
930 hed_move_relative(hed_cursor_t
*curs
, hed_off_t num
)
932 hed_off_t newpos
= curs
->pos
+ num
;
934 newpos
= num
< 0 ? 0 : OFF_MAX
;
935 num
= newpos
- curs
->pos
;
943 /* Relative move with no checking (and only by a small amount) */
945 move_rel_fast(hed_cursor_t
*curs
, ssize_t num
)
953 alloc_caches(struct hed_file
*file
, struct hed_block_data
**caches
, int n
)
955 struct remap_control rc
;
958 BDEBUG("Allocate %d caches (%d free slots available)\n",
959 n
, file
->cache
->nfree
);
961 assert(n
<= CACHE_LENGTH
);
962 while (file
->cache
->nfree
< n
) {
963 struct file_block
*block
;
965 assert(!list_empty(&file
->lru
));
966 block
= list_entry(file
->lru
.next
, struct file_block
, lru
);
967 BDEBUG("Killing block at physical %llx\n", block
->phys_pos
);
968 file_kill_block(file
, block
);
971 for (i
= 0; i
< n
; ++i
) {
972 caches
[i
] = cache_alloc(file
->cache
);
977 remap_compact(&rc
, file
->cache
, caches
, n
);
978 for (i
= 0; i
< n
; ++i
)
979 remap_add(&rc
, caches
[i
]->data
);
984 free_caches(struct hed_file
*file
, struct hed_block_data
**preload
, int n
)
988 for (i
= 0; i
< n
; ++i
)
990 cache_put(file
->cache
, preload
[i
]);
994 file_load_data(struct hed_file
*file
,
995 struct hed_block_data
**caches
, int n
,
998 struct hed_block_data
*dataobj
= caches
[0];
999 void *data
= dataobj
->data
;
1000 ssize_t rsize
, total
, segsize
;
1002 segsize
= n
<< FILE_BLOCK_SHIFT
;
1003 for (total
= 0; total
< segsize
; total
+= rsize
) {
1004 rsize
= pread(file
->fd
, data
+ total
,
1005 segsize
- total
, offset
+ total
);
1007 memset(data
+ total
, 0, segsize
- total
);
1011 dataobj
= caches
[total
>> FILE_BLOCK_SHIFT
];
1012 caches
[total
>> FILE_BLOCK_SHIFT
] = NULL
;
1013 data
= dataobj
->data
;
1014 cache_put(file
->cache
, dataobj
);
1015 total
= FILE_BLOCK_ROUND(total
);
1016 rsize
= FILE_BLOCK_SIZE
;
1017 BDEBUG("Error reading block at phys %llx: %s\n",
1018 offset
+ total
, strerror(errno
));
1022 BDEBUG("Loaded data at phys %llx up to %llx\n",
1023 offset
, offset
+ segsize
);
1027 #ifdef HED_CONFIG_MMAP
1030 file_load_data_mmap(struct hed_file
*file
,
1031 struct hed_block_data
**caches
, int n
,
1038 segsize
= n
<< FILE_BLOCK_SHIFT
;
1039 data
= mmap(NULL
, segsize
,
1040 PROT_READ
| PROT_WRITE
,
1041 MAP_PRIVATE
| (file
->fd
< 0 ? MAP_ANONYMOUS
: 0),
1044 if (data
== MAP_FAILED
) {
1045 BDEBUG("mmap failed at %llx: fail over to traditional read\n",
1048 data
= mmap(NULL
, segsize
,
1049 PROT_READ
| PROT_WRITE
,
1050 MAP_PRIVATE
| MAP_ANONYMOUS
,
1052 if (data
== MAP_FAILED
)
1055 for (i
= 0; i
< n
; ++i
)
1056 caches
[i
]->data
= data
+ (i
<< FILE_BLOCK_SHIFT
);
1057 return file_load_data(file
, caches
, n
, offset
);
1060 for (i
= 0; i
< n
; ++i
)
1061 caches
[i
]->data
= data
+ (i
<< FILE_BLOCK_SHIFT
);
1063 BDEBUG("Loaded data at phys %llx up to %llx\n",
1064 offset
, offset
+ segsize
);
1067 # define file_load_data file_load_data_mmap
1071 /* Find the block with the lowest physical position that intersects
1072 * the loaded segment. The search starts at @block.
1074 static struct hed_block
*
1075 first_load_block(struct hed_tree
*tree
, struct hed_block
*block
,
1076 hed_uoff_t segstart
)
1078 struct hed_block
*prev
= block
;
1081 prev
= prev_block(prev
);
1082 } while (!hed_block_is_eof(prev
) && phys_end(prev
) > segstart
);
1087 load_blocks(struct hed_file
*file
, const hed_cursor_t
*from
)
1089 hed_uoff_t physpos
, segstart
;
1090 struct hed_block_data
*preload
[FILE_READAHEAD
];
1091 size_t ra_bkw
, ra_fwd
, ra_off
;
1095 segstart
= hed_cursor_phys_pos(from
);
1096 ra_bkw
= FILE_BLOCK_OFF(segstart
);
1097 ra_fwd
= FILE_BLOCK_SIZE
- ra_bkw
;
1099 if (file_ra_forward(file
))
1100 ra_fwd
+= (FILE_READAHEAD
- 1) << FILE_BLOCK_SHIFT
;
1101 else if (file_ra_backward(file
))
1102 ra_bkw
+= (FILE_READAHEAD
- 1) << FILE_BLOCK_SHIFT
;
1104 if (ra_bkw
> segstart
)
1106 if (ra_fwd
> file
->phys_size
- segstart
)
1107 ra_fwd
= file
->phys_size
- segstart
;
1111 pos
.block
= first_load_block(hed_file_blocks(file
),
1112 from
->block
, segstart
);
1113 pos
.off
= segstart
>= pos
.block
->phys_pos
1114 ? segstart
- pos
.block
->phys_pos
1117 list_add(&pos
.list
, &pos
.block
->refs
);
1118 nblocks
= ((ra_fwd
- 1) >> FILE_BLOCK_SHIFT
) + 1;
1119 alloc_caches(file
, preload
, nblocks
);
1122 if (file_load_data(file
, preload
, nblocks
, segstart
)) {
1123 free_caches(file
, preload
, nblocks
);
1127 while (physpos
= hed_cursor_phys_pos(&pos
),
1128 ra_off
= physpos
- segstart
,
1130 struct hed_block_data
*dataobj
;
1131 struct hed_block
*newblock
;
1134 if (!hed_block_is_virtual(pos
.block
)) {
1135 pos
.block
= next_block(pos
.block
);
1140 datalen
= FILE_BLOCK_SIZE
- FILE_BLOCK_OFF(physpos
);
1141 if (datalen
> hed_block_size(pos
.block
) - pos
.off
)
1142 datalen
= hed_block_size(pos
.block
) - pos
.off
;
1144 dataobj
= preload
[ra_off
>> FILE_BLOCK_SHIFT
];
1146 ? new_data_block(file
, physpos
, datalen
, dataobj
)
1147 : new_virt_block(file
, physpos
, datalen
,
1150 /* Punch the new block */
1151 BDEBUG("Add %s block at %llx, length %llx\n",
1152 hed_block_is_virtual(newblock
) ? "error" : "physical",
1153 newblock
->phys_pos
, newblock
->t
.size
);
1154 if (replace_chunk(file
, pos
.block
, pos
.off
, newblock
)) {
1155 file_free_block(file
, newblock
);
1156 free_caches(file
, preload
, nblocks
);
1160 pos
.block
= next_block(newblock
);
1164 /* All cache objects now have an extra reference from the
1165 * allocation. Drop it. */
1166 free_caches(file
, preload
, nblocks
);
/* Shorten a block at its beginning and enlarge the preceding block.
 *
 * Re-allocate at most @len bytes from the beginning of @block to the
 * end of the preceding block.
 * If @block is virtual, this will effectively devirtualize the range.
 * If @block is not virtual, this will change the backing store of
 * the bytes in the range.
 * Returns: the number of bytes actually moved.
 */
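/* Illustrative example (not in the original source): if the preceding block's
 * cache page has only 5 unused bytes left after its current end and @len is
 * 16, only those 5 bytes are re-assigned and 5 is returned. */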
1182 shrink_at_begin(struct hed_file
*file
, struct file_block
*block
,
1183 size_t len
, long state
)
1185 struct file_block
*prev
= prev_block(block
);
1188 /* Basic assumptions */
1189 assert(!(state
& HED_BLOCK_VIRTUAL
));
1191 /* The previous block must exist: */
1192 if (hed_block_is_eof(prev
))
1195 /* The block flags must match the requested @state: */
1196 if ((prev
->flags
& HED_BLOCK_STATEMASK
) != state
)
1199 /* No deletions at end, or similar: */
1200 if (prev
->phys_pos
+ prev
->t
.size
!= block
->phys_pos
)
	/* Append fewer bytes than requested if not all are available */
1204 assert(prev
->t
.size
<= prev
->dataobj
->size
- prev
->dataoff
);
1205 maxgrow
= prev
->dataobj
->size
- prev
->dataoff
- prev
->t
.size
;
1211 BDEBUG("Appending 0:%lx to the previous block\n", len
);
1213 /* Move cursors away from the to-be-chopped beginning */
1214 move_cursors(block
, prev
, 0, len
- 1, prev
->t
.size
);
1216 /* Enlarge the previous block */
1217 prev
->t
.size
+= len
;
1218 recalc_block_recursive(prev
);
1220 /* Shorten the original block */
1221 block
->t
.size
-= len
;
1222 block
->dataoff
+= len
;
1223 block
->phys_pos
+= len
;
1224 recalc_block_recursive(block
);
/* Shorten a block at its end and enlarge the following block.
 *
 * Re-allocate at most @len bytes from the end of @block to the
 * beginning of the following block.
 * If @block is virtual, this will effectively devirtualize the range.
 * If @block is not virtual, this will change the backing store of
 * the bytes in the range.
 * Returns: the number of bytes actually moved.
 */
1238 shrink_at_end(struct hed_file
*file
, struct file_block
*block
,
1239 size_t len
, long state
)
1241 struct file_block
*next
= next_block(block
);
1244 /* Basic assumptions */
1245 assert(!(state
& HED_BLOCK_VIRTUAL
));
1247 /* The next block must exist: */
1248 if (hed_block_is_eof(block
))
1251 /* The block flags must match the requested @state: */
1252 if ((next
->flags
& HED_BLOCK_STATEMASK
) != state
)
1255 /* No deletions at end, or similar: */
1256 if (block
->phys_pos
+ block
->t
.size
!= next
->phys_pos
)
	/* Prepend fewer bytes than requested if not all are available */
1260 if (len
> next
->dataoff
)
1261 len
= next
->dataoff
;
1264 off
= block
->t
.size
- len
;
1266 BDEBUG("Prepending %llx:%lx to the next block\n", off
, len
);
1268 /* Shift cursors in the new physical block */
1269 update_cursors(next
, next
, len
);
1271 /* Move cursors away from the to-be-chopped end */
1272 move_cursors(block
, next
, off
, UOFF_MAX
, -off
);
1274 /* Enlarge the next block */
1275 next
->dataoff
-= len
;
1276 next
->phys_pos
-= len
;
1277 next
->t
.size
+= len
;
1278 recalc_block_recursive(next
);
1280 /* Shorten the original block */
1281 block
->t
.size
-= len
;
1282 recalc_block_recursive(block
);
1286 /* Search for an existing data object within the same physical block
1287 * as @curs, and having the given @state flags.
1289 static struct hed_block_data
*
1290 search_data(struct hed_file
*file
, const hed_cursor_t
*curs
, long state
)
1292 struct file_block
*block
;
1295 physpos
= FILE_BLOCK_ROUND(curs
->block
->phys_pos
+ curs
->off
);
1296 BDEBUG("Search for already loaded data at %llx starting at %llx...",
1297 physpos
, curs
->block
->phys_pos
);
1299 /* Search backwards */
1300 block
= curs
->block
;
1301 while (!hed_block_is_eof(block
= prev_block(block
))) {
1302 if (block
->phys_pos
< physpos
)
1304 if ((block
->flags
& HED_BLOCK_STATEMASK
) == state
) {
1305 BDEBUG(" found at %llx\n", block
->phys_pos
);
1306 assert(block
->dataobj
);
1307 return block
->dataobj
;
1311 /* Search forwards */
1312 block
= curs
->block
;
1313 while (!hed_block_is_eof(block
)) {
1314 block
= next_block(block
);
1315 if (block
->phys_pos
>= physpos
+ FILE_BLOCK_SIZE
)
1317 if ((block
->flags
& HED_BLOCK_STATEMASK
) == state
) {
1318 BDEBUG(" found at %llx\n", block
->phys_pos
);
1319 assert(block
->dataobj
);
1320 return block
->dataobj
;
1324 BDEBUG(" not found\n");
1329 reuse_loaded_data(struct hed_file
*file
, const hed_cursor_t
*curs
,
1330 struct hed_block_data
*data
)
1332 struct file_block
*physblock
;
1333 struct file_block
*block
= curs
->block
;
1334 hed_uoff_t block_offset
= curs
->off
;
1335 hed_uoff_t physpos
= block
->phys_pos
+ block_offset
;
1336 size_t part
= FILE_BLOCK_OFF(physpos
);
1338 FILE_BLOCK_SIZE
- part
<= block
->t
.size
- block_offset
1339 ? FILE_BLOCK_SIZE
- part
1340 : block
->t
.size
- block_offset
;
1342 if (part
> block_offset
)
1343 part
= block_offset
;
1346 block_offset
-= part
;
1348 if (! (physblock
= new_data_block(file
, physpos
, len
, data
)) )
1351 BDEBUG("Add physical block at %llx, length %llx\n",
1352 physblock
->phys_pos
, physblock
->t
.size
);
1353 if (replace_chunk(file
, block
, block_offset
, physblock
)) {
1354 file_free_block(file
, physblock
);
/* Replace a part of a virtual block with content loaded
 * from disk. The amount of data loaded from the disk depends
 * on various factors, with the goal of choosing the most efficient
 * ratio. The only guarantee is that the byte at @curs will
 * be in a non-virtual block when this function returns 0.
 */
1369 devirtualize_clean(struct hed_file
*file
, const hed_cursor_t
*curs
)
1371 struct file_block
*block
= curs
->block
;
1372 hed_uoff_t block_offset
= curs
->off
;
1373 hed_uoff_t remain
= block
->t
.size
- block_offset
;
1374 struct hed_block_data
*data
;
1376 BDEBUG("punch a clean hole at %llx into %llx:%llx\n", block_offset
,
1377 block_offset(block
), block
->t
.size
);
1378 assert(hed_block_is_virtual(block
));
1380 /* Check if we can combine with a neighbouring block */
1381 if (shrink_at_begin(file
, block
,
1382 hed_block_size(block
), 0) > block_offset
1383 || shrink_at_end(file
, block
,
1384 hed_block_size(block
), 0) >= remain
) {
1385 kill_block_if_empty(file
, block
);
1390 /* Check if the block is already loaded elsewhere */
1391 data
= search_data(file
, curs
, 0);
1393 ? reuse_loaded_data(file
, curs
, data
)
1394 : load_blocks(file
, curs
);
/* Unless the block at @curs is already dirty, replace at most
 * @len bytes at @curs with a newly allocated out-of-cache block.
 * The block is marked dirty and its data is left uninitialized.
 * Note that this function may devirtualize fewer than @len bytes.
 * In the worst case only 1 byte at @curs will be available.
 */
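/* Callers are expected to loop over it: hed_file_set_block() below, for
 * instance, repeatedly calls prepare_modify(), copies hed_cursor_chunk_len()
 * bytes and advances the cursor until the whole request has been written. */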
1404 prepare_modify(struct hed_file
*file
, hed_cursor_t
*curs
, size_t len
)
1406 struct file_block
*block
= curs
->block
;
1407 hed_uoff_t block_offset
= curs
->off
;
1408 hed_uoff_t remain
= block
->t
.size
- block_offset
;
1409 struct file_block
*newblock
;
1411 if (hed_block_is_dirty(block
))
1417 BDEBUG("punch a dirty hole at %llx:%lx into %llx:%llx\n",
1419 block_offset(block
), block
->t
.size
);
1421 /* Check if we can combine with a neighbouring block */
1422 if ((block_offset
== 0 &&
1423 shrink_at_begin(file
, block
, len
, HED_BLOCK_DIRTY
)) ||
1425 shrink_at_end(file
, block
, len
, HED_BLOCK_DIRTY
) >= len
)) {
1426 kill_block_if_empty(file
, block
);
1431 /* Initialize a new block */
1432 newblock
= new_block(file
, HED_BLOCK_EXCACHE
| HED_BLOCK_DIRTY
);
1437 if ( (newblock
->dataobj
= search_data(file
, curs
, HED_BLOCK_DIRTY
)) )
1438 cache_get(newblock
->dataobj
);
1439 else if (! (newblock
->dataobj
= block_data_new(file
->cache
,
1443 newblock
->phys_pos
= block
->phys_pos
+ block_offset
;
1444 newblock
->dataoff
= FILE_BLOCK_OFF(newblock
->phys_pos
);
1445 if (len
> FILE_BLOCK_SIZE
- newblock
->dataoff
)
1446 len
= FILE_BLOCK_SIZE
- newblock
->dataoff
;
1447 newblock
->t
.size
= len
;
1449 if (replace_chunk(file
, block
, block_offset
, newblock
))
1456 file_free_block(file
, newblock
);
1461 /* Ensure that @curs points to an up-to-date non-virtual block.
1462 * Load the data from disk if necessary, return zero on failure. */
1464 hed_prepare_read(struct hed_file
*file
, const hed_cursor_t
*curs
, size_t len
)
1466 struct file_block
*block
= curs
->block
;
1467 if (hed_block_is_inner_virtual(block
) &&
1468 devirtualize_clean(file
, curs
) < 0)
1471 return hed_cursor_chunk_len(curs
, len
);
1474 /* Move the block pointer to the next block */
1475 static struct hed_block
*
1476 cursor_next_block(hed_cursor_t
*curs
)
1478 struct hed_block
*block
= next_nonzero_block(curs
->block
);
1481 curs
->block
= block
;
1483 list_move(&curs
->list
, &block
->refs
);
1488 /* This is an optimized way of doing:
1490 * hed_move_relative(curs, curs->block->t.size);
1492 * for the case when curs->off == 0.
1494 static struct hed_block
*
1495 move_to_next(hed_cursor_t
*curs
)
1497 curs
->pos
+= hed_block_size(curs
->block
);
1498 return cursor_next_block(curs
);
/* Copy in @count bytes from @pos.
 * Returns the number of bytes that were not read (i.e. zero on success).
 * The @pos cursor is moved by the amount of data read.
 * CAUTION: If you read up to MAX_OFF, then @pos points one byte
 * beyond the EOF block upon return.
 */
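/* Callers that must not disturb their own cursor duplicate it first;
 * hed_file_cpin() below does exactly that: hed_dup_cursor() onto a private
 * copy, copy_in() on the copy, and the copy is dropped afterwards. */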
1508 copy_in(struct hed_file
*file
, void *buf
, size_t count
, hed_cursor_t
*pos
)
1513 while (count
&& (cpylen
= hed_prepare_read(file
, pos
, count
))) {
1514 if (hed_block_is_virtual(pos
->block
))
1515 memset(buf
, 0, cpylen
);
1517 memcpy(buf
, hed_cursor_data(pos
), cpylen
);
1521 if ( (pos
->off
+= cpylen
) >= hed_block_size(pos
->block
) )
1522 if (!cursor_next_block(pos
))
1530 hed_file_cpin(struct hed_file
*file
, void *buf
, size_t count
,
1531 const hed_cursor_t
*pos
)
1536 hed_dup_cursor(pos
, &mypos
);
1537 ret
= copy_in(file
, buf
, count
, &mypos
);
1542 /* Set the modified flag */
1544 set_modified(struct hed_file
*file
)
1546 file
->modified
= true;
1549 /* Clear the modified flag */
1551 clear_modified(struct hed_file
*file
)
1553 file
->modified
= false;
1557 hed_file_set_byte(struct hed_file
*file
, hed_cursor_t
*curs
,
1560 hed_uoff_t offset
= curs
->pos
;
1562 if (prepare_modify(file
, curs
, 1))
1566 if (offset
>= file
->size
)
1567 file
->size
= offset
+ 1;
1569 hed_block_data(curs
->block
)[curs
->off
] = byte
;
1574 hed_file_set_block(struct hed_file
*file
, hed_cursor_t
*curs
,
1575 unsigned char *buf
, size_t len
)
1580 if (prepare_modify(file
, curs
, len
))
1584 span
= hed_cursor_chunk_len(curs
, len
);
1585 memcpy(hed_cursor_data(curs
), buf
, span
);
1588 move_rel_fast(curs
, span
);
1590 if (curs
->pos
> file
->size
)
1591 file
->size
= curs
->pos
;
1597 hed_file_set_bytes(struct hed_file
*file
, hed_cursor_t
*curs
,
1598 unsigned char byte
, hed_uoff_t rep
)
1603 if (prepare_modify(file
, curs
, rep
))
1607 span
= hed_cursor_chunk_len(curs
, rep
);
1608 memset(hed_cursor_data(curs
), byte
, span
);
1610 move_rel_fast(curs
, span
);
1612 if (curs
->pos
> file
->size
)
1613 file
->size
= curs
->pos
;
1619 file_erase_continuous(struct hed_file
*file
, hed_cursor_t
*curs
, size_t len
)
1621 struct file_block
*block
= curs
->block
;
1622 hed_uoff_t block_offset
= curs
->off
;
1624 /* Find the new position */
1625 hed_move_relative(curs
, len
);
1627 /* Move all other cursors in the erased range to the new position */
1629 move_cursors_abs(block
, block_offset
,
1630 block_offset
+ len
- 1, curs
);
1632 if (!block_offset
) {
1633 block
->dataoff
+= len
;
1634 if (!hed_block_is_inserted(block
))
1635 block
->phys_pos
+= len
;
1636 } else if (block_offset
+ len
< block
->t
.size
) {
1637 block
= split_block(file
, block
, block_offset
+ len
);
1642 move_cursors(block
, block
, block_offset
, UOFF_MAX
, -(hed_uoff_t
)len
);
1644 block
->t
.size
-= len
;
1645 recalc_block_recursive(block
);
1647 kill_block_if_empty(file
, block
);
1652 hed_file_erase_block(struct hed_file
*file
, hed_cursor_t
*curs
, hed_uoff_t len
)
1658 if (offset
> hed_file_size(file
))
1660 else if (len
> hed_file_size(file
) - offset
)
1661 len
= hed_file_size(file
) - offset
;
1665 size_t span
= hed_cursor_chunk_len(curs
, todo
);
1667 if (file_erase_continuous(file
, curs
, span
))
1677 file
->eof_block
.t
.size
+= len
;
1678 recalc_block_recursive(&file
->eof_block
);
1680 struct hed_block
*slideblock
= prev_block(curs
->block
);
1681 if (hed_block_is_eof(slideblock
))
1682 slideblock
= curs
->block
;
1683 slide_cursors(file
, slideblock
, curs
->pos
, -len
);
1689 hed_file_insert_begin(struct hed_file
*file
, const hed_cursor_t
*curs
,
1690 hed_cursor_t
*curs_ins
)
1692 struct file_block
*newblock
;
1694 BDEBUG("Starting insert at %llx\n", curs
->pos
);
1696 newblock
= new_block(file
,
1697 HED_BLOCK_EXCACHE
| HED_BLOCK_INSERTED
);
1701 newblock
->phys_pos
= hed_cursor_phys_pos(curs
);
1702 newblock
->dataobj
= block_data_new(file
->cache
, FILE_BLOCK_SIZE
);
1703 if (!newblock
->dataobj
) {
1704 file_free_block(file
, newblock
);
1708 if (curs
->off
&& !split_block(file
, curs
->block
, curs
->off
)) {
1709 file_free_block(file
, newblock
);
1713 chain_block_before(hed_file_blocks(file
), newblock
, curs
->block
);
1715 curs_ins
->pos
= curs
->pos
;
1716 curs_ins
->off
= newblock
->t
.size
;
1717 curs_ins
->block
= newblock
;
1718 list_add(&curs_ins
->list
, &newblock
->refs
);
1725 hed_file_insert_end(struct hed_file
*file
, hed_cursor_t
*curs_ins
)
1727 struct file_block
*block
= curs_ins
->block
;
1729 BDEBUG("End insert at %llx\n", curs_ins
->pos
);
1731 curs_ins
->block
= NULL
;
1732 list_del(&curs_ins
->list
);
1733 if (!kill_block_if_empty(file
, block
))
1734 block_data_shrink(file
->cache
, block
->dataobj
,
1735 block
->dataoff
+ block
->t
.size
);
1741 insert_block(struct hed_file
*file
, hed_cursor_t
*curs
,
1742 unsigned char *buf
, size_t len
)
1744 struct file_block
*block
= curs
->block
;
1745 hed_uoff_t offset
= curs
->pos
;
1747 assert(file
&& offset
>= 0);
1749 assert(hed_block_is_excache(block
));
1750 hed_block_set_dirty(block
);
1753 memcpy(hed_block_data(block
) + curs
->off
, buf
, len
);
1754 block
->t
.size
+= len
;
1755 recalc_block_recursive(block
);
1759 if (curs
->pos
> file
->size
)
1760 file
->size
= curs
->pos
;
1764 slide_cursors(file
, next_block(block
), offset
, len
);
1768 hed_file_insert_block(struct hed_file
*file
, hed_cursor_t
*curs
,
1769 unsigned char *buf
, size_t len
)
1772 struct file_block
*block
= curs
->block
;
1773 size_t remain
= block
->dataobj
->size
- curs
->off
;
1776 list_del(&curs
->list
);
1777 curs
->block
= next_block(block
);
1780 if (!hed_file_insert_begin(file
, curs
, curs
))
1783 curs
->block
= block
;
1784 curs
->off
= block
->t
.size
;
1785 list_add(&curs
->list
, &block
->refs
);
1792 BDEBUG("Append %ld bytes to the insert block\n",
1794 insert_block(file
, curs
, buf
, remain
);
1802 hed_file_insert_byte(struct hed_file
*file
, hed_cursor_t
*curs
,
1805 return hed_file_insert_block(file
, curs
, &byte
, 1);
1809 hed_file_insert_once(struct hed_file
*file
, hed_cursor_t
*curs
,
1810 unsigned char *buf
, size_t len
)
1812 hed_cursor_t insert
;
1814 if (!hed_file_insert_begin(file
, curs
, &insert
)) {
1815 len
= hed_file_insert_block(file
, &insert
, buf
, len
);
1816 hed_file_insert_end(file
, &insert
);
1821 struct commit_control
{
1822 struct hed_file
*file
;
1823 int wfd
; /* file descriptor for writing */
1824 int needwrite
; /* non-zero if write is needed */
1825 hed_cursor_t begoff
, endoff
;
1827 void *partbuf
; /* allocated 3*FILE_BLOCK_SIZE */
1828 void *partial
; /* pointer into partbuf */
/* Get the logical<->physical shift value after the specified block.
 * It is essential to get the value _AFTER_ the block, because the
 * shift value is used to decide how the current block will be handled.
 */
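/* The sign of this shift drives the commit direction in hed_file_commit():
 * while data moves towards lower physical offsets (shift <= 0) the range is
 * flushed front-to-back via commit_forwards(), and once insertions push data
 * towards higher offsets (shift > 0) it is flushed back-to-front via
 * commit_backwards(), apparently so that source bytes are not overwritten
 * before they have been read. */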
1836 get_shift(const hed_cursor_t
*curs
)
1838 struct file_block
*block
= curs
->block
;
1839 size_t curshift
= hed_block_is_inserted(block
) ? block
->t
.size
: 0;
1840 return curshift
+ curs
->pos
- curs
->off
- block
->phys_pos
;
1844 switch_partial(struct commit_control
*cc
)
1846 cc
->partial
+= FILE_BLOCK_SIZE
;
1847 if (cc
->partial
>= cc
->partbuf
+ 3*FILE_BLOCK_SIZE
)
1848 cc
->partial
= cc
->partbuf
;
/* Write @len bytes from the partial buffer at @cc->begoff. */
1853 commit_block(struct commit_control
*cc
, size_t len
)
1858 BDEBUG(" -> write %lx bytes at %llx\n",
1859 (unsigned long)len
, cc
->begoff
.pos
- len
);
1860 written
= pwrite(cc
->wfd
, cc
->partial
, len
, cc
->begoff
.pos
- len
);
1862 /* TODO: keep data in a new list of dirty blocks */
1868 commit_partial(struct commit_control
*cc
)
1870 size_t partoff
, remain
, left
;
1873 partoff
= FILE_BLOCK_OFF(cc
->begoff
.pos
);
1874 remain
= FILE_BLOCK_SIZE
- partoff
;
1875 if (remain
> cc
->endoff
.pos
- cc
->begoff
.pos
)
1876 remain
= cc
->endoff
.pos
- cc
->begoff
.pos
;
1877 if ((writelen
= partoff
+ remain
) == 0)
1880 BDEBUG("Fill partial %llx-%llx\n",
1881 cc
->begoff
.pos
, cc
->begoff
.pos
+ remain
);
1883 left
= copy_in(cc
->file
, cc
->partial
+ partoff
, remain
, &cc
->begoff
);
1885 hed_move_relative(&cc
->begoff
, left
);
1889 if (FILE_BLOCK_OFF(cc
->begoff
.pos
) &&
1890 !hed_block_is_eof(cc
->begoff
.block
))
1893 return commit_block(cc
, writelen
);
1897 * Beware, cc->begoff is undefined upon return!
1900 commit_forwards(struct commit_control
*cc
)
1902 hed_uoff_t endpos
= cc
->endoff
.pos
;
1905 BDEBUG("Writing forwards %llx-%llx\n",
1906 cc
->begoff
.pos
, cc
->endoff
.pos
);
1911 while (cc
->begoff
.pos
< endpos
)
1912 ret
|= commit_partial(cc
);
1918 * Beware, cc->begoff is undefined upon return!
1921 commit_backwards(struct commit_control
*cc
)
1923 void *retpartial
= cc
->partial
;
1924 hed_uoff_t begpos
= cc
->begoff
.pos
;
1925 hed_uoff_t blkpos
; /* start of current partial block */
1928 BDEBUG("Writing backwards %llx-%llx\n",
1929 cc
->begoff
.pos
, cc
->endoff
.pos
);
1934 blkpos
= FILE_BLOCK_ROUND(cc
->endoff
.pos
);
1935 if (blkpos
<= begpos
)
1938 /* Handle the trailing partial block */
1939 hed_update_cursor(cc
->file
, blkpos
, &cc
->begoff
);
1941 ret
|= commit_partial(cc
);
1942 retpartial
= cc
->partial
;
1944 /* Handle the middle part */
1946 while ( (blkpos
-= FILE_BLOCK_SIZE
) > begpos
) {
1947 hed_update_cursor(cc
->file
, blkpos
, &cc
->begoff
);
1948 ret
|= commit_partial(cc
);
1950 switch_partial(cc
); /* wrap around */
	/* Handle the first block (partial or not) */
1954 hed_update_cursor(cc
->file
, begpos
, &cc
->begoff
);
1955 ret
|= commit_partial(cc
);
1957 cc
->partial
= retpartial
;
1961 /* Handle the partial block before a skipped one. */
1963 begin_skip(struct commit_control
*cc
)
1965 size_t minsize
= FILE_BLOCK_SIZE
- FILE_BLOCK_OFF(cc
->endoff
.pos
);
1969 /* Check if at least one complete physical block can be skipped */
1970 if (cc
->endoff
.block
->t
.size
< minsize
)
1973 /* Write out the partially dirty block */
1974 remain
= FILE_BLOCK_OFF(minsize
);
1975 hed_move_relative(&cc
->endoff
, remain
);
1977 ret
|= commit_forwards(cc
);
1979 ret
|= commit_backwards(cc
);
1980 hed_move_relative(&cc
->endoff
, -(hed_off_t
)remain
);
1981 hed_dup2_cursor(&cc
->endoff
, &cc
->begoff
);
1987 /* Handle the last partially skipped physical block. */
1989 end_skip(struct commit_control
*cc
)
1994 /* Find the beginning of the physical block */
1995 hed_dup2_cursor(&cc
->endoff
, &cc
->begoff
);
1996 partlen
= FILE_BLOCK_OFF(cc
->begoff
.pos
);
1997 hed_move_relative(&cc
->begoff
, -(hed_off_t
)partlen
);
1999 /* Read the partial data before this block */
2000 if (hed_file_cpin(cc
->file
, cc
->partial
, partlen
, &cc
->begoff
))
2008 undirty_blocks(struct hed_file
*file
)
2010 struct file_block
*block
, *next
;
2013 BDEBUG("Undirtying blocks:\n");
2016 next
= first_block(file
);
2017 while (!hed_block_is_eof(next
)) {
2019 next
= next_block(block
);
2021 if (kill_block_if_empty(file
, block
))
2024 if (!hed_block_is_virtual(block
)) {
2025 cache_put(file
->cache
, block
->dataobj
);
2026 block
->dataobj
= NULL
;
2027 list_del_init(&block
->lru
);
2028 block
->flags
= HED_BLOCK_EXCACHE
| HED_BLOCK_VIRTUAL
;
2031 block
->phys_pos
= pos
;
2032 pos
+= block
->t
.size
;
2035 block
= first_block(file
);
2036 while (!hed_block_is_eof(block
)) {
2037 next
= next_block(block
);
2038 file_kill_block(file
, block
);
2042 BDEBUG("After undirtying\n");
2047 commit_init(struct commit_control
*cc
, struct hed_file
*file
)
2051 cc
->partbuf
= malloc(3*FILE_BLOCK_SIZE
);
2055 cc
->wfd
= open(file
->name
,
2056 O_RDWR
| (file
->fd
< 0 ? O_CREAT
: 0), 0666);
2061 (file
->fd
= open(file
->name
, O_RDONLY
)) < 0)
2075 hed_file_commit(struct hed_file
*file
)
2077 struct commit_control cc
;
2080 if (commit_init(&cc
, file
))
2085 cc
.partial
= cc
.partbuf
;
2086 get_cursor(file
, 0,&cc
.begoff
);
2087 hed_dup_cursor(&cc
.begoff
, &cc
.endoff
);
2088 cc
.shift
= -cc
.begoff
.block
->phys_pos
;
2090 while(!hed_block_is_eof(cc
.endoff
.block
)) {
2091 hed_off_t newshift
= cc
.endoff
.pos
< file
->phys_size
2092 ? get_shift(&cc
.endoff
)
2095 if (cc
.shift
<= 0 && newshift
> 0) {
2096 ret
|= commit_forwards(&cc
);
2097 hed_dup2_cursor(&cc
.endoff
, &cc
.begoff
);
2098 } else if (cc
.shift
> 0 && newshift
<= 0) {
2099 ret
|= commit_backwards(&cc
);
2100 hed_dup2_cursor(&cc
.endoff
, &cc
.begoff
);
2102 cc
.shift
= newshift
;
2104 if (!newshift
&& !hed_block_is_dirty(cc
.endoff
.block
)) {
2106 ret
|= begin_skip(&cc
);
2107 } else if (!cc
.needwrite
)
2108 ret
|= end_skip(&cc
);
2110 if (!move_to_next(&cc
.endoff
))
2113 assert(cc
.endoff
.pos
== hed_file_size(file
));
2115 if (cc
.begoff
.pos
< hed_file_size(file
)) {
2117 ret
|= commit_forwards(&cc
);
2119 ret
|= commit_backwards(&cc
);
2122 put_cursor(&cc
.begoff
);
2123 put_cursor(&cc
.endoff
);
2125 ftruncate(cc
.wfd
, hed_file_size(file
));
2126 file
->phys_size
= hed_file_size(file
);
2128 ret
|= close(cc
.wfd
);
2131 undirty_blocks(file
);
2134 clear_modified(file
);
2139 #ifdef HED_CONFIG_SWAP
2141 hed_file_write_swap(struct hed_file
*file
)
2143 return swp_write(file_swp(file
));
2147 do_read_swap(struct hed_file
*file
, struct swp_file
*swp
, hed_cursor_t
*pos
)
2149 struct hed_file
*swpfile
= swp_private(swp
);
2150 struct file_block
*cur
, block
;
2151 hed_uoff_t phys_pos
;
2153 if (file_stat(swpfile
)->st_size
!= file_stat(file
)->st_size
||
2154 file_stat(swpfile
)->st_mtime
!= file_stat(file
)->st_mtime
) {
2155 fprintf(stderr
, "stat info mismatch (you modified the file since hed ran on it; refusing to touch it)\n");
2159 BDEBUG("Swap header match\n");
2162 cur
= first_block(swpfile
);
2164 struct hed_block_data dataobj
;
2165 size_t (*mergefn
)(struct hed_file
*, hed_cursor_t
*,
2166 unsigned char*, size_t);
2170 if (swp_cpin(swp
, &block
, cur
, sizeof(struct file_block
))) {
2171 perror("Cannot read block descriptor");
2174 BDEBUG("BLOCK %p: flags %02lx phys 0x%02llx size 0x%llx\n",
2175 cur
, block
.flags
, (long long)block
.phys_pos
,
2176 (long long)hed_block_size(&block
));
2178 if (block
.phys_pos
- phys_pos
) {
2179 if (hed_file_erase_block(file
, pos
,
2180 block
.phys_pos
- phys_pos
)) {
2181 perror("Cannot erase");
2184 phys_pos
= block
.phys_pos
;
2187 if (!hed_block_is_inserted(&block
))
2188 phys_pos
+= hed_block_size(&block
);
2190 if (!hed_block_is_dirty(&block
)) {
2191 hed_move_relative(pos
, hed_block_size(&block
));
2195 if (swp_cpin(swp
, &dataobj
, block
.dataobj
,
2196 sizeof(struct hed_block_data
))) {
2197 perror("Cannot read data descriptor");
2200 BDEBUG("DATA %p: size 0x%lx\n",
2201 block
.dataobj
, (long)dataobj
.size
);
2203 if (! (data
= malloc(hed_block_size(&block
))) ) {
2204 perror("Cannot allocate data");
2208 if (swp_cpin(swp
, data
, dataobj
.data
+ block
.dataoff
,
2209 hed_block_size(&block
))) {
2210 perror("Cannot read data");
2214 mergefn
= hed_block_is_inserted(&block
)
2215 ? hed_file_insert_once
2216 : hed_file_set_block
;
2217 res
= mergefn(file
, pos
, data
, hed_block_size(&block
));
2220 perror("Cannot merge data");
2223 } while (cur
= next_block(&block
), !hed_block_is_eof(&block
));
2230 hed_file_read_swap(struct hed_file
*file
)
2232 struct swp_file
*swp
;
2236 if (! (swp
= swp_init_read(file
->swpname
)) )
2239 get_cursor(file
, 0, &pos
);
2240 ret
= do_read_swap(file
, swp
, &pos
);
2247 #endif /* HED_CONFIG_SWAP */
2249 struct ffb_hookdata
{
2250 struct hed_file
*file
;
2252 hed_expr_reg_cb base_ecb
;
2253 void *base_ecb_data
;
2257 eval_reg_cb(void *hookdata
, char reg
, hed_off_t ofs
,
2258 unsigned char *scramble
, size_t len
)
2260 struct ffb_hookdata
*data
= hookdata
;
2263 long ret
= HED_AEF_DYNAMIC
;
2265 hed_dup_cursor(data
->pos
, &pos
);
2266 hed_move_relative(&pos
, ofs
);
2267 if (copy_in(data
->file
, scramble
, len
, &pos
))
2268 ret
= HED_AEF_ERROR
;
2273 return data
->base_ecb(data
->base_ecb_data
, reg
, ofs
, scramble
, len
);
2277 reverse(unsigned char *p
, size_t len
)
2279 unsigned char *q
= p
+ len
;
2281 unsigned char x
= *p
;
2288 compute_badchar(ssize_t
*badchar
, const unsigned char *s
, ssize_t len
)
2292 badchar
[*s
++] = i
++;
2296 compute_sfx(ssize_t
*sfx
, const unsigned char *s
, ssize_t len
)
2302 for (i
= len
- 2; i
>= 0; --i
) {
2303 if (i
> g
&& sfx
[i
+ len
- 1 - f
] < i
- g
)
2304 sfx
[i
] = sfx
[i
+ len
- 1 - f
];
2309 while (g
>= 0 && s
[g
] == s
[g
+ len
- 1 - f
])
2317 compute_goodsfx(ssize_t
*goodsfx
, const unsigned char *s
, ssize_t len
)
2319 ssize_t i
, j
, *sfx
= goodsfx
+ len
;
2321 compute_sfx(sfx
, s
, len
);
2323 for (i
= 0; i
< len
; ++i
)
2326 for (i
= len
- 1; i
>= 0; --i
)
2327 if (sfx
[i
] == i
+ 1)
2328 for (; j
< len
- 1 - i
; ++j
)
2329 if (goodsfx
[j
] == len
)
2330 goodsfx
[j
] = len
- 1 - i
;
2331 for (i
= 0; i
<= len
- 2; ++i
)
2332 goodsfx
[len
- 1 - sfx
[i
]] = len
- 1 - i
;
/* Search for a constant byte string using the Boyer-Moore algorithm. */
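/* bm_find() combines the two classic Boyer-Moore shift rules prepared above:
 * badchar[] records the last index of each byte value in the needle
 * (bad-character rule) and goodsfx[] holds the good-suffix shifts from
 * compute_goodsfx(); the search advances by the larger of the two. */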
2336 static inline unsigned char*
2337 bm_find(unsigned char *buf
, size_t buflen
, unsigned char *needle
,
2338 size_t maxidx
, ssize_t
*badchar
, ssize_t
*goodsfx
)
2340 while (buflen
> maxidx
) {
2345 for (p
= buf
+ maxidx
, i
= maxidx
; p
>= buf
; --p
, --i
)
2346 if (needle
[i
] != *p
)
2351 shift
= i
+ 1 - badchar
[*p
];
2352 if (shift
< goodsfx
[i
])
2361 /* Search for a constant byte string backwards. */
2362 static inline unsigned char*
2363 bm_find_rev(unsigned char *buf
, size_t buflen
, unsigned char *needle
,
2364 size_t maxidx
, ssize_t
*badchar
, ssize_t
*goodsfx
)
2367 while (buflen
> maxidx
) {
2372 for (p
= buf
, i
= maxidx
; p
<= buf
+ maxidx
; ++p
, --i
)
2373 if (needle
[i
] != *p
)
2375 if (p
> buf
+ maxidx
)
2378 shift
= i
+ 1 - badchar
[*p
];
2379 if (shift
< goodsfx
[i
])
2388 /* Search for a constant byte string in @buf.
2389 * If @buflen is negative, search backwards, otherwise search forwards.
2391 static inline unsigned char*
2392 find_bytestr_buf(unsigned char *buf
, ssize_t buflen
,
2393 unsigned char *needle
, ssize_t maxidx
,
2394 ssize_t
*badchar
, ssize_t
*goodsfx
)
2399 return memrchr(buf
- buflen
+ 1, *needle
, buflen
);
2400 return bm_find_rev(buf
, buflen
, needle
, maxidx
,
2404 return memchr(buf
, *needle
, buflen
);
2405 return bm_find(buf
, buflen
, needle
, maxidx
,
2410 /* Search for a constant byte string using the Boyer-Moore algorithm. */
2412 find_bytestr(struct hed_file
*file
, hed_cursor_t
*from
, int dir
,
2413 unsigned char *needle
, ssize_t len
)
2416 ssize_t
*badchar
, *goodsfx
;
2417 unsigned char *readbuf
;
2422 dynalloc
= calloc(sizeof(ssize_t
) * (256 + 2*len
)
2425 return HED_FINDOFF_ERROR
;
2427 goodsfx
= badchar
+ 256;
2428 readbuf
= dynalloc
+ sizeof(ssize_t
) * (256 + 2*len
);
2431 reverse(needle
, len
);
2432 compute_badchar(badchar
, needle
, len
);
2433 compute_goodsfx(goodsfx
, needle
, len
);
2436 badchar
= goodsfx
= NULL
;
2440 --len
; /* simplify offset computing */
2448 ret
= HED_FINDOFF_NO_MATCH
;
2449 while (from
->pos
>= 0) {
2453 if (hed_block_is_eof(from
->block
))
2456 remain
= hed_prepare_read(file
, from
, SSIZE_MAX
);
2458 ret
= HED_FINDOFF_ERROR
;
2462 remain
= -(from
->off
+ 1);
2464 if (!hed_block_is_bad(from
->block
)) {
2465 unsigned char *p
, *q
;
2467 if ((dir
>= 0 && remain
> slen
) ||
2468 (dir
< 0 && remain
< slen
)) {
2469 assert(!hed_block_is_virtual(from
->block
));
2470 assert(from
->block
->dataobj
);
2471 p
= from
->block
->dataobj
->data
+
2472 from
->block
->dataoff
+ from
->off
;
2473 from
->off
+= remain
;
2474 from
->pos
+= remain
;
2475 } else if (dir
>= 0) {
2477 if (copy_in(file
, readbuf
, remain
, from
)) {
2478 ret
= HED_FINDOFF_ERROR
;
2484 from
->off
+= remain
+ 1;
2485 from
->pos
+= remain
+ 1;
2488 fixup_cursor_slow(from
);
2489 if (copy_in(file
, readbuf
, -remain
, from
)) {
2490 ret
= HED_FINDOFF_ERROR
;
2493 from
->off
-= -remain
+ 1;
2494 from
->pos
-= -remain
+ 1;
2495 p
= readbuf
+ (-remain
- 1);
2498 q
= find_bytestr_buf(p
, remain
, needle
, len
,
2501 move_rel_fast(from
, q
- p
- remain
);
2508 /* bad blocks cannot match anything */
2509 from
->off
+= remain
;
2510 from
->pos
+= remain
;
2520 find_expr(struct hed_file
*file
, hed_cursor_t
*from
, int dir
,
2521 struct hed_expr
*expr
, struct ffb_hookdata
*data
)
2523 int len
= hed_expr_len(expr
);
2528 if (len
> hed_file_size(file
))
2529 return HED_FINDOFF_NO_MATCH
;
2530 if ((hed_off_t
)hed_file_size(file
) - from
->pos
- len
< 0)
2531 hed_move_relative(from
, ((hed_off_t
)hed_file_size(file
) -
2540 buf
= hed_expr_eval(expr
, eval_reg_cb
, NULL
, data
);
2542 return HED_FINDOFF_ERROR
;
2544 hed_dup_cursor(from
, &match
);
2546 for (pos
= 0; pos
< len
; pos
++) {
2548 remain
= hed_prepare_read(file
, &match
,
2551 hed_block_is_bad(match
.block
))
2553 p
= hed_cursor_data(&match
);
2554 cursor_next_block(&match
);
2556 if (*p
++ != buf
[pos
])
2565 return HED_FINDOFF_ERROR
;
2569 if (0 > from
->pos
|| from
->pos
> hed_file_size(file
) - len
)
2573 if (! (hed_expr_flags(expr
) & HED_AEF_DYNAMIC
) )
2574 return find_bytestr(file
, from
, dir
, buf
, len
);
2577 return HED_FINDOFF_NO_MATCH
;
2581 hed_file_find_expr(struct hed_file
*file
, hed_cursor_t
*pos
, int dir
,
2582 struct hed_expr
*expr
,
2583 hed_expr_reg_cb expr_cb
, void *expr_cb_data
)
2585 struct ffb_hookdata data
;
2588 assert(dir
== 1 || dir
== -1);
2592 data
.base_ecb
= expr_cb
;
2593 data
.base_ecb_data
= expr_cb_data
;
2595 hed_file_set_readahead(file
,
2596 dir
> 0 ? HED_RA_FORWARD
: HED_RA_BACKWARD
);
2597 res
= find_expr(file
, pos
, dir
, expr
, &data
);
2598 hed_file_set_readahead(file
, HED_RA_NONE
);