/*
 * hed - Hexadecimal editor
 * Copyright (C) 2004  Petr Baudis <pasky@ucw.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * There hammer on the anvil smote,
 * There chisel clove, and graver wrote;
 * There forged was blade, and bound was hilt;
 * The delver mined, the mason built.
 * There beryl, pearl, and opal pale,
 * And metal wrought like fishes' mail,
 * Buckler and corslet, axe and sword,
 * And shining spears were laid in hoard.
 */
/* Feature macros needed for:
 * ...
 */

#include <sys/ioctl.h>
#include <linux/fs.h>	/* BLKGETSIZE and BLKGETSIZE64 */

#include "file_priv.h"
/* memrchr() might not be available */
#ifndef HAVE_MEMRCHR
#endif /* HAVE_MEMRCHR */
/*
 * `Piles of jewels?' said Gandalf. `No. The Orcs have often plundered Moria;
 * there is nothing left in the upper halls. And since the dwarves fled, no one
 * dares to seek the shafts and treasuries down in the deep places: they are
 * drowned in water--or in a shadow of fear.'
 */

/* TODO: Currently the file blocks allocation is not very sophisticated and
 * when the weather is bad it could probably have rather horrible results. */
#define BDEBUG(x...)	fprintf(stderr, x)

/* Number of blocks in cache */
#define CACHE_LENGTH	64

/* Blocks for readahead */
#define FILE_READAHEAD	(CACHE_LENGTH/2)

/* Searches for data object don't care about the EOF flag */
#define STATEMASK_SANS_EOF	(HED_BLOCK_STATEMASK & ~HED_BLOCK_EOF)
#define first_block(f)	next_block(last_block(f))
#define prev_block(b)	(tree_entry(prev_in_tree(&(b)->t),struct hed_block,t))
#define next_block(b)	(tree_entry(next_in_tree(&(b)->t),struct hed_block,t))
#define last_block(f)	(&(f)->terminal_block)

#define block_offset(b)	tree_block_offset(&(b)->t)

#define recalc_block_recursive(b)	recalc_node_recursive(&(b)->t)

#define chain_block_before(tree,b,p)	insert_into_tree((tree), &(b)->t, &(p)->t)
#define recalc_chain_block_before(tree,b,p)	do {	\
		chain_block_before((tree), (b), (p));	\
		recalc_block_recursive((b));		\
	} while (0)

#define unchain_block(tree,b)	del_from_tree((tree), &(b)->t)
#define recalc_unchain_block(tree,b)	recalc_del_from_tree((tree), &(b)->t)

#define init_block_list(tree,b)		init_tree(tree, &(b)->t)
#define init_block_link(b)		init_node(&(b)->t)

#define find_block(tree,o)	tree_entry(find_in_tree((tree),(o)),struct hed_block,t)

#define block_is_loadable(b) \
	(((b)->flags & (HED_BLOCK_VIRTUAL | HED_BLOCK_EOF | HED_BLOCK_BAD)) \
	 == HED_BLOCK_VIRTUAL)
#ifdef HED_CONFIG_SWAP

/* Return the swp file object */
static inline struct swp_file *
file_swp(struct file_priv *file)
{
	return file->swp;
}

#else

/* Provide a stub for the non-swap case */
static inline struct swp_file *
file_swp(struct file_priv *file)
{
	return NULL;
}

#endif
#ifdef HED_CONFIG_READAHEAD

#define file_ra_none(f)		((f)->readahead == HED_RA_NONE)
#define file_ra_forward(f)	((f)->readahead == HED_RA_FORWARD)
#define file_ra_backward(f)	((f)->readahead == HED_RA_BACKWARD)

static inline void
set_readahead(struct file_priv *file, int val)
{
	file->readahead = val;
}

#else

#define file_ra_none(f)		(1)
#define file_ra_forward(f)	(0)
#define file_ra_backward(f)	(0)
#define set_readahead(file,t)	do {} while(0)

#endif /* HED_CONFIG_READAHEAD */
void
hed_file_set_readahead(struct hed_file *file, enum hed_readahead val)
{
	set_readahead(file_private(file), val);
}
/* Get the physical offset of the byte immediately following @block. */
static inline hed_uoff_t
phys_end(const struct hed_block *block)
{
	return hed_block_is_inserted(block)
		? block->phys_pos
		: block->phys_pos + hed_block_size(block);
}
static struct hed_block *
next_nonzero_block(struct hed_block *block)
{
	while (!hed_block_is_terminal(block)) {
		block = next_block(block);
		if (hed_block_size(block))
			return block;
	}
	return NULL;
}
static struct hed_block *
prev_nonzero_block(struct hed_block *block)
{
	do {
		block = prev_block(block);
		if (hed_block_is_terminal(block))
			return NULL;
	} while (!hed_block_size(block));
	return block;
}
bool
hed_block_is_after_erase(struct hed_block *block)
{
	struct hed_block *prev = prev_nonzero_block(block);
	return prev
		? block->phys_pos > phys_end(prev)
		: !!block->phys_pos;
}
bool
hed_block_is_after_insert(struct hed_block *block)
{
	struct hed_block *prev = prev_nonzero_block(block);
	return prev && hed_block_is_inserted(prev);
}
/* Get the blocks tree */
static inline struct hed_tree *
hed_file_blocks(struct file_priv *file)
{
	return &file->blocks;
}

#ifndef HED_DEBUG
# define dump_blocks(file)	{}
#else
static hed_uoff_t
block_phys_size(struct hed_block *block)
{
	struct hed_block *next;

	if (hed_block_is_terminal(block))
		return 0;
	next = next_block(block);
	return next->phys_pos - block->phys_pos;
}
static void
dump_block(int level, struct file_priv *file, struct hed_tree_node *node,
	   hed_uoff_t *cur_offset, hed_uoff_t *cur_poffset)
{
	struct hed_block *block = tree_entry(node, struct hed_block, t);
	unsigned char *p;
	hed_cursor_t *cur;
	char t[21] = "                    ";

	if (node->left)
		dump_block(level + 1, file, node->left, cur_offset, cur_poffset);
	p = hed_block_data(block);
	if (level < 20) t[level] = '>'; else t[19] = '.';
	fprintf(stderr, "%s [%06llx] [%06llx] %c%c%c%c%c %05llx %05llx"
		" {%02x%02x%02x%02x} -- %p ^%p [%06llx]\n",
		t,
		(unsigned long long) *cur_offset,
		(unsigned long long) *cur_poffset,
		hed_block_is_bad(block) ? 'b' : ' ',
		hed_block_is_eof(block) ? 'e' : ' ',
		hed_block_is_virtual(block) ? 'v' : ' ',
		hed_block_is_inserted(block) ? 'i' : ' ',
		hed_block_is_dirty(block) ? '*' : ' ',
		(unsigned long long) node->size,
		(unsigned long long) block_phys_size(block),
		p && block->t.size > 0 ? p[0] : 0,
		p && block->t.size > 1 ? p[1] : 0,
		p && block->t.size > 2 ? p[2] : 0,
		p && block->t.size > 3 ? p[3] : 0,
		node, node->up,
		(unsigned long long) node->cover_size);
	list_for_each_entry (cur, &block->refs, list) {
		fprintf(stderr, " <%p>: %llx->%p:%llx\n",
			cur, (long long)cur->pos,
			cur->block, (unsigned long long)cur->off);
	}
	assert(*cur_poffset == block->phys_pos);
	*cur_offset += node->size;
	*cur_poffset += block_phys_size(block);
	if (node->right)
		dump_block(level + 1, file, node->right, cur_offset, cur_poffset);
	assert(node->cover_size == (node->left ? node->left->cover_size : 0)
			+ (node->right ? node->right->cover_size : 0)
			+ node->size);
}
/* Walk the tree manually here, because foreach_block() does not provide
 * the tree structure.
 * TODO: Change this if you plan to debug any other block containers.
 */
static void
dump_blocks(struct file_priv *file)
{
	struct hed_block *first = first_block(file);
	hed_uoff_t cur_offset, cur_poffset;

	fprintf(stderr, "-- blocks dump --\n");
	cur_offset = 0;
	cur_poffset = first->phys_pos;
	dump_block(0, file, hed_file_blocks(file)->root,
		   &cur_offset, &cur_poffset);
	fprintf(stderr, "-- blocks dump end --\n");
}
#endif /* HED_DEBUG */
static void
get_cursor(struct file_priv *file, hed_uoff_t offset, hed_cursor_t *curs)
{
	struct hed_block *block;

	block = find_block(hed_file_blocks(file), offset);
	assert(block != NULL);
	curs->pos = offset;
	curs->block = block;
	curs->off = offset - block_offset(block);
	list_add(&curs->list, &block->refs);

	BDEBUG("Mapped %llx to %llx+%llx/%llx\n",
	       offset, offset - curs->off, curs->off, block->t.size);
}

void
hed_get_cursor(struct hed_file *file, hed_uoff_t offset, hed_cursor_t *curs)
{
	get_cursor(file_private(file), offset, curs);
}
static inline void
put_cursor(hed_cursor_t *curs)
{
	list_del(&curs->list);
}

void
hed_put_cursor(hed_cursor_t *curs)
{
	put_cursor(curs);
}

void
hed_update_cursor(struct hed_file *file, hed_uoff_t offset, hed_cursor_t *curs)
{
	put_cursor(curs);
	get_cursor(file_private(file), offset, curs);
}
void
hed_dup_cursor(const hed_cursor_t *src, hed_cursor_t *dst)
{
	dst->pos = src->pos;
	dst->block = src->block;
	dst->off = src->off;
	list_add_tail(&dst->list, &src->block->refs);
}

void
hed_dup2_cursor(const hed_cursor_t *src, hed_cursor_t *dst)
{
	if (hed_is_a_cursor(dst))
		put_cursor(dst);
	hed_dup_cursor(src, dst);
}
/* Move cursors from @old to @new, adding @off to their block
 * offsets to keep them at the same position. */
static void
update_cursors(const struct hed_block *old, struct hed_block *new,
	       hed_off_t off)
{
	hed_cursor_t *curs;

	BDEBUG("Updating cursors from <%p> to <%p>%c%llx\n",
	       old, new, off >= 0 ? '+' : '-', off >= 0 ? off : -off);

	list_for_each_entry(curs, &old->refs, list) {
		curs->block = new;
		curs->off += off;
	}
}
/* Move cursors in the range <@start;@end> from @old to @new,
 * adding @off to their block offset, plus moving the reference list. */
static void
move_cursors(const struct hed_block *old, struct hed_block *new,
	     hed_uoff_t start, hed_uoff_t end, hed_off_t off)
{
	hed_cursor_t *curs, *nextcurs;

	BDEBUG("Moving cursors from <%p>:%llx:%llx to <%p>%c%llx\n",
	       old, start, end, new,
	       off >= 0 ? '+' : '-', off >= 0 ? off : -off);

	list_for_each_entry_safe(curs, nextcurs, &old->refs, list)
		if (curs->off >= start && curs->off <= end) {
			curs->block = new;
			curs->off += off;
			list_move(&curs->list, &new->refs);
		}
}
/* Move cursors in the range @block:<@start;@end> to @newpos */
static void
move_cursors_abs(const struct hed_block *block,
		 hed_uoff_t start, hed_uoff_t end,
		 const hed_cursor_t *newpos)
{
	hed_cursor_t *curs, *nextcurs;

	BDEBUG("Moving cursors from <%p>:%llx:%llx to <%p>:%llx\n",
	       block, start, end, newpos->block, newpos->off);

	list_for_each_entry_safe(curs, nextcurs, &block->refs, list)
		if (curs->off >= start && curs->off <= end) {
			curs->pos = newpos->pos;
			curs->block = newpos->block;
			curs->off = newpos->off;
			list_move(&curs->list, &newpos->block->refs);
		}
}
/* Update the positions of cursors at and after @start for all
 * blocks starting at @block */
static void
slide_cursors(const struct hed_block *block, hed_uoff_t start, hed_off_t off)
{
	hed_cursor_t *curs;
	const struct hed_block *nblock;

	BDEBUG("Sliding cursors >= %llx by %c%llx, starting at <%p>\n",
	       start, off >= 0 ? '+' : '-', off >= 0 ? off : -off, block);

	nblock = block;
	do {
		block = nblock;
		list_for_each_entry(curs, &block->refs, list)
			if (curs->pos >= start)
				curs->pos += off;
		nblock = next_block(block);
	} while (!hed_block_is_terminal(block));
}
static struct hed_block *
new_block(struct file_priv *file, long flags)
{
	struct hed_block *new;

	if (! (new = swp_zalloc(file_swp(file), sizeof(struct hed_block))) )
		return NULL;

	new->flags = flags;
	init_block_link(new);
	INIT_LIST_HEAD(&new->refs);
	if (flags & HED_BLOCK_EXCACHE)
		INIT_LIST_HEAD(&new->lru);
	else
		list_add_tail(&new->lru, &file->lru);

	return new;
}
static struct hed_block *
new_virt_block(struct file_priv *file, hed_uoff_t pos, hed_uoff_t size,
	       long extraflags)
{
	struct hed_block *new = new_block(file, (HED_BLOCK_EXCACHE |
						 HED_BLOCK_VIRTUAL |
						 extraflags));
	if (!new)
		return NULL;

	new->t.size = size;
	new->phys_pos = pos;
	BDEBUG("Spawned new virtual block [%llx] at %llx\n", size, pos);
	return new;
}
static struct hed_block *
new_data_block(struct file_priv *file, hed_uoff_t pos, hed_uoff_t size,
	       struct hed_block_data *dataobj)
{
	struct hed_block *new = new_block(file, 0);
	if (!new)
		return NULL;

	cache_get(dataobj);
	new->dataobj = dataobj;
	new->t.size = size;
	new->phys_pos = pos;
	new->dataoff = FILE_BLOCK_OFF(pos);
	BDEBUG("Spawned new data block [%llx] at %llx\n", size, pos);
	return new;
}
static void
file_free_block(struct file_priv *file, struct hed_block *block)
{
	if (block->dataobj)
		cache_put(file->cache, block->dataobj);
	list_del(&block->lru);

	swp_free(file_swp(file), block);
}
static void
merge_and_free(struct file_priv *file, struct hed_block *block,
	       struct hed_block *merger, hed_off_t off)
{
	/* A bit more efficient than move_cursors() */
	update_cursors(block, merger, off);
	list_splice(&block->refs, &merger->refs);

	/* Messing with block sizes and unchaining is a bit tricky
	 * since unchain_block() can splay(). So we really need
	 * to recalc_block_recursive() right after we update the size.
	 * If this place turns out to be a hot-spot, we can optimize
	 * the tree operations here. */
	merger->t.size += hed_block_size(block);
	recalc_block_recursive(merger);

	/* Destroy the block */
	recalc_unchain_block(hed_file_blocks(file), block);
	file_free_block(file, block);
}
/* This may kill the previous block as well, if it can be merged
 * with the next one. It will never kill anything which _follows_. */
static void
kill_block(struct file_priv *file, struct hed_block *block)
{
	struct hed_block *prev, *next;

	assert(!hed_block_is_dirty(block) || !hed_block_size(block));

	if (!hed_block_is_virtual(block)) {
		/* Convert physical to virtual */
		assert(block->dataobj);
		cache_put(file->cache, block->dataobj);
		block->dataobj = NULL;

		list_del_init(&block->lru); /* unlink the block from LRU */
		block->flags = HED_BLOCK_EXCACHE | HED_BLOCK_VIRTUAL |
			(block->flags & HED_BLOCK_EOF);
	}

	prev = prev_block(block);
	if (prev->flags == block->flags &&
	    prev->phys_pos + hed_block_size(prev) == block->phys_pos) {
		merge_and_free(file, block, prev, hed_block_size(prev));
		return;
	}

	next = next_block(block);
	if (next->flags == block->flags &&
	    block->phys_pos + hed_block_size(block) == next->phys_pos) {
		update_cursors(next, next, hed_block_size(block));
		next->phys_pos -= hed_block_size(block);
		merge_and_free(file, block, next, 0);
		return;
	}

	if (!hed_block_size(block)) {
		/* No recalculation needed, zero size. */
		unchain_block(hed_file_blocks(file), block);
		file_free_block(file, block);
	}
}
static bool
kill_block_if_empty(struct file_priv *file, struct hed_block *block)
{
	if (!hed_block_is_terminal(block) && hed_block_size(block) == 0 &&
	    list_empty(&block->refs)) {
		kill_block(file, block);
		return true;
	}
	return false;
}
static struct hed_block *
split_block(struct file_priv *file, struct hed_block *block,
	    hed_uoff_t splitpoint)
{
	struct hed_block *head;

	head = new_block(file, block->flags & ~HED_BLOCK_TERMINAL);
	if (!head)
		return NULL;

	if ( (head->dataobj = block->dataobj) ) {
		cache_get(head->dataobj);
		head->dataoff = block->dataoff;
		block->dataoff += splitpoint;
	} else
		assert(hed_block_is_virtual(block));

	head->t.size = splitpoint;
	head->phys_pos = block->phys_pos;

	block->t.size -= splitpoint;
	if (!hed_block_is_inserted(block))
		block->phys_pos += splitpoint;
	recalc_block_recursive(block);
	recalc_chain_block_before(hed_file_blocks(file), head, block);

	move_cursors(block, head, 0, splitpoint - 1, 0);
	update_cursors(block, block, -splitpoint);

	return head;
}
/* Replace a chunk in @block with @newblock */
static int
replace_chunk(struct file_priv *file, struct hed_block *block,
	      hed_uoff_t offset, struct hed_block *newblock)
{
	size_t len;

	assert(offset < block->t.size);
	assert(newblock->t.size <= block->t.size - offset);

	/* Re-create the head block if necessary */
	if (offset && !split_block(file, block, offset))
		return -1;

	/* Move pointers to the new block */
	len = hed_block_size(newblock);
	move_cursors(block, newblock, 0, len - 1, 0);

	/* Shorten the tail block */
	len = hed_block_size(newblock);
	block->t.size -= len;
	block->dataoff += len;
	assert(!hed_block_is_inserted(block));
	block->phys_pos += len;
	recalc_block_recursive(block);
	update_cursors(block, block, -len);

	/* Insert the new block */
	recalc_chain_block_before(hed_file_blocks(file), newblock, block);

	/* Kill the leading block if possible */
	kill_block_if_empty(file, block);

	return 0;
}
#ifdef HED_CONFIG_SWAP

static char *
swp_filename(const char *filename)
{
	size_t fnlen = strlen(filename);
	char *swp;
	char *file;

	if (!(swp = malloc(fnlen + 9)) )
		return NULL;
	strcpy(swp, filename);

	file = strrchr(swp, '/');
	file = file ? file + 1 : swp;
	*file = '.';
	strcpy(stpcpy(file + 1, filename + (file -swp)), ".hedswp");
	return swp;
}
static char *
newswp_filename(char *swpname)
{
	char *ret;
	char *p = NULL;	/* bogus assignment to silence a warning */

	ret = swpname;
	while (!access(ret, F_OK)) {
		if (ret == swpname) {
			if (! (ret = strdup(swpname)) )
				return NULL;
			p = ret + strlen(ret) - 1;
		}

		if (*p == 'a') {
			free(ret);
			return NULL;
		}
		--*p;
	}
	return ret;
}
void
hed_file_remove_swap(struct hed_file *f)
{
	struct file_priv *file = file_private(f);
	if (remove(file->swpname))
		return;
	if (rename(file->newswpname, file->swpname))
		return;

	free(file->newswpname);
	file->newswpname = file->swpname;
}
static inline struct file_priv *
file_swp_init(const char *name)
{
	char *swpname, *newswpname;
	struct swp_file *swp;
	struct file_priv *file;

	swpname = swp_filename(name);
	if (!swpname)
		goto fail;
	newswpname = newswp_filename(swpname);
	if (!newswpname)
		goto fail_free_name;
	swp = swp_init_write(newswpname);
	if (!swp)
		goto fail_free_newname;

	assert(sizeof(struct swp_header) + sizeof(struct file_priv)
	       <= FILE_BLOCK_SIZE);
	file = swp_private(swp);
	memset(file, 0, sizeof *file);

	file->swp = swp;
	file->swpname = swpname;
	file->newswpname = newswpname;

	return file;

fail_free_newname:
	free(newswpname);
fail_free_name:
	if (swpname != newswpname)
		free(swpname);
fail:
	return NULL;
}
static inline void
file_swp_done(struct file_priv *file)
{
	remove(file->newswpname);
	if (file->newswpname != file->swpname)
		free(file->newswpname);
	free(file->swpname);
	swp_done(file_swp(file));
	/* file was de-allocated automatically with file->swp */
}
bool
hed_file_has_swap(struct hed_file *f)
{
	struct file_priv *file = file_private(f);
	return file->swpname != file->newswpname;
}

char *
hed_file_swap_name(struct hed_file *f)
{
	struct file_priv *file = file_private(f);
	return file->swpname;
}
#else /* HED_CONFIG_SWAP */

static inline struct file_priv *
file_swp_init(const char *name)
{
	return calloc(1, sizeof(struct file_priv));
}

static inline void
file_swp_done(struct file_priv *file)
{
	free(file);
}

bool
hed_file_has_swap(struct hed_file *file)
{
	return false;
}

void
hed_file_remove_swap(struct hed_file *file)
{
}

char *
hed_file_swap_name(struct hed_file *file)
{
	return NULL;
}

#endif /* HED_CONFIG_SWAP */
static inline struct stat *
file_stat(struct file_priv *file)
{
	return &file->s;
}
int
hed_file_update_size(struct hed_file *f)
{
	struct file_priv *file = file_private(f);
	hed_uoff_t oldsize = file->f.phys_size;

	if(fstat(file->fd, file_stat(file)) < 0)
		return -1;

	if (S_ISBLK(file_stat(file)->st_mode)) {
		if (ioctl(file->fd, BLKGETSIZE64, &file->f.phys_size)) {
			unsigned long size_in_blocks;
			if (ioctl(file->fd, BLKGETSIZE, &size_in_blocks))
				return -1;
			file->f.phys_size = (hed_uoff_t)size_in_blocks << 9;
		}
	} else if (S_ISREG(file_stat(file)->st_mode)) {
		file->f.phys_size = file_stat(file)->st_size;
	} else if (S_ISCHR(file_stat(file)->st_mode)) {
		if (lseek(file->fd, 0, SEEK_SET) < 0)
			return -1;
		file->f.phys_size = (hed_uoff_t)OFF_MAX + 1;
	} else {
		errno = EINVAL;
		return -1;
	}

	file->f.size += file->f.phys_size - oldsize;
	return 0;
}
static int
do_file_open(struct file_priv *file)
{
	file->fd = open(file->f.name, O_RDONLY);
	if (file->fd < 0) {
		if (errno != ENOENT)
			return errno;
		fprintf(stderr, "Warning: File %s does not exist\n",
			file->f.name);
		memset(file_stat(file), 0, sizeof(struct stat));
	} else if (hed_file_update_size(&file->f)) {
		int errsv = errno;
		close(file->fd);
		return errsv;
	}
	return 0;
}
static int
file_setup_blocks(struct file_priv *file)
{
	hed_uoff_t phys_size = file->f.phys_size;
	struct hed_block *block;

	block = &file->terminal_block;
	block->flags = HED_BLOCK_EXCACHE | HED_BLOCK_VIRTUAL
		| HED_BLOCK_EOF | HED_BLOCK_TERMINAL;
	INIT_LIST_HEAD(&block->lru);
	INIT_LIST_HEAD(&block->refs);
	block->t.size = OFF_MAX - phys_size + 1;
	block->phys_pos = phys_size;

	init_block_list(hed_file_blocks(file), block);

	if (phys_size) {
		block = new_virt_block(file, 0, phys_size, 0);
		if (!block)
			return -1;
		recalc_chain_block_before(hed_file_blocks(file), block,
					  &file->terminal_block);
	}

	return 0;
}
int
hed_init(void)
{
	return file_access_init();
}
struct hed_file *
hed_open(const char *name)
{
	struct file_priv *file = file_swp_init(name);

	if (!file)
		goto fail;

	file->f.name = name;
	INIT_LIST_HEAD(&file->lru);

	file->cache = cache_init(CACHE_LENGTH, file_swp(file));
	if (!file->cache)
		goto fail_file;

	if (do_file_open(file))
		goto fail_file;

	if (file_setup_blocks(file))
		goto fail_file;

	fixup_register(file);

	return &file->f;

fail_file:
	hed_close(&file->f);
fail:
	return NULL;
}
void
hed_close(struct hed_file *f)
{
	struct file_priv *file = file_private(f);
	assert(file);

	/* Do not check for errors:
	 * 1. This FD is read-only => no data loss possible
	 * 2. We're about to exit anyway => no resource leak
	 */
	if (file->fd >= 0)
		close(file->fd);

	fixup_deregister(file);

	/* No need to free file blocks here, because all data is
	 * allocated either from the cache or from the swap file
	 * and both is going to be destroyed now.
	 */

	if (file->cache)
		cache_done(file->cache);

	file_swp_done(file);
}
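
/*
 * Illustrative sketch, not part of the original source: the minimal
 * open/close life cycle suggested by hed_open() and hed_close() above.
 * The file name below is made up and error handling is elided.
 *
 *	struct hed_file *f = hed_open("example.bin");
 *	if (f) {
 *		// ... inspect or edit the file ...
 *		hed_close(f);
 *	}
 */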
/* Adjust a cursor after off gets outside its block */
static void
fixup_cursor_slow(hed_cursor_t *curs)
{
	struct hed_block *block = curs->block;
	hed_uoff_t off = curs->off;

	do {
		if ((hed_off_t)off < 0) {
			block = prev_block(block);
			off += block->t.size;
		} else {
			off -= block->t.size;
			block = next_block(block);
		}
	} while (!hed_block_is_terminal(block) && off >= block->t.size);

	curs->block = block;
	curs->off = off;
	list_move(&curs->list, &block->refs);
}
/* Adjust a cursor if off gets outside its block.
 * This is separate from fixup_cursor_slow, because it is supposed
 * to be small enough to be inlined (which is a win, because most of
 * the time no fixup has to be done, so the fast inlined path is used).
 */
static inline void
fixup_cursor(hed_cursor_t *curs)
{
	if (curs->off >= curs->block->t.size)
		fixup_cursor_slow(curs);
}
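
/*
 * Illustrative sketch, not part of the original source: how a caller is
 * expected to use fixup_cursor() after adjusting a cursor by hand. The
 * field names match the code above; the scenario itself is assumed.
 *
 *	curs->pos += delta;
 *	curs->off += delta;	// may now point past the block
 *	fixup_cursor(curs);	// cheap check; walks blocks only if needed
 */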
hed_off_t
hed_move_relative(hed_cursor_t *curs, hed_off_t num)
{
	hed_off_t newpos = curs->pos + num;
	if ((hed_uoff_t)newpos > OFF_MAX) {
		newpos = num < 0 ? 0 : OFF_MAX;
		num = newpos - curs->pos;
	}
	curs->pos = newpos;
	curs->off += num;
	fixup_cursor(curs);
	return num;
}
/* Relative move with no checking (and only by a small amount) */
static inline void
move_rel_fast(hed_cursor_t *curs, ssize_t num)
{
	curs->off += num;
	curs->pos += num;
	fixup_cursor(curs);
}
static void
alloc_caches(struct file_priv *file, struct hed_block_data **caches, int n)
{
	struct remap_control rc;
	int i;

	BDEBUG("Allocate %d caches (%d free slots available)\n",
	       n, file->cache->nfree);

	assert(n <= CACHE_LENGTH);
	while (file->cache->nfree < n) {
		struct hed_block *block;

		assert(!list_empty(&file->lru));
		block = list_entry(file->lru.next, struct hed_block, lru);
		BDEBUG("Killing block at physical %llx\n", block->phys_pos);
		kill_block(file, block);
	}

	for (i = 0; i < n; ++i) {
		caches[i] = cache_alloc(file->cache);
	}

	remap_init(&rc);
	remap_compact(&rc, file->cache, caches, n);
	for (i = 0; i < n; ++i)
		remap_add(&rc, caches[i]->data);
	remap_finish(&rc);
}
static void
free_caches(struct file_priv *file, struct hed_block_data **preload, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		if (preload[i])
			cache_put(file->cache, preload[i]);
}
static int
file_load_data(struct file_priv *file,
	       struct hed_block_data **caches, int n,
	       hed_uoff_t offset)
{
	struct hed_block_data *dataobj = caches[0];
	void *data = dataobj->data;
	ssize_t rsize, total, segsize;

	segsize = n << FILE_BLOCK_SHIFT;
	for (total = 0; total < segsize; total += rsize) {
		rsize = pread(file->fd, data + total,
			      segsize - total, offset + total);
		if (!rsize) {
			memset(data + total, 0, segsize - total);
			break;
		}
		if (rsize < 0) {
			cache_put(file->cache,
				  caches[total >> FILE_BLOCK_SHIFT]);
			caches[total >> FILE_BLOCK_SHIFT] = NULL;
			total = FILE_BLOCK_ROUND(total);
			rsize = FILE_BLOCK_SIZE;
			BDEBUG("Error reading block at phys %llx: %s\n",
			       offset + total, strerror(errno));
		}
	}

	BDEBUG("Loaded data at phys %llx up to %llx\n",
	       offset, offset + segsize);
	return 0;
}
#ifdef HED_CONFIG_MMAP

static int
file_load_data_mmap(struct file_priv *file,
		    struct hed_block_data **caches, int n,
		    hed_uoff_t offset)
{
	void *data;
	ssize_t segsize;
	int i;

	segsize = n << FILE_BLOCK_SHIFT;
	data = mmap(NULL, segsize,
		    PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | (file->fd < 0 ? MAP_ANONYMOUS : 0),
		    file->fd, offset);

	if (data == MAP_FAILED) {
		BDEBUG("mmap failed at %llx: fail over to traditional read\n",
		       offset);

		data = mmap(NULL, segsize,
			    PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS,
			    -1, 0);
		if (data == MAP_FAILED)
			return -1;

		for (i = 0; i < n; ++i)
			caches[i]->data = data + (i << FILE_BLOCK_SHIFT);
		return file_load_data(file, caches, n, offset);
	}

	for (i = 0; i < n; ++i)
		caches[i]->data = data + (i << FILE_BLOCK_SHIFT);

	BDEBUG("Loaded data at phys %llx up to %llx\n",
	       offset, offset + segsize);
	return 0;
}
# define file_load_data file_load_data_mmap

#endif /* HED_CONFIG_MMAP */
/* Find the block with the lowest physical position that intersects
 * the loaded segment. The search starts at @block.
 */
static struct hed_block *
first_load_block(struct hed_block *block, hed_uoff_t segstart)
{
	struct hed_block *prev = block;
	do {
		block = prev;
		prev = prev_block(prev);
	} while (!hed_block_is_terminal(prev) && phys_end(prev) > segstart);
	return block;
}
static int
load_blocks(struct file_priv *file, const hed_cursor_t *from)
{
	hed_uoff_t physpos, segstart;
	struct hed_block_data *preload[FILE_READAHEAD];
	size_t ra_bkw, ra_fwd, ra_off;
	hed_cursor_t pos;
	int nblocks;
	int ret;

	segstart = hed_cursor_phys_pos(from);
	ra_bkw = FILE_BLOCK_OFF(segstart);
	ra_fwd = FILE_BLOCK_SIZE - ra_bkw;

	if (file_ra_forward(file))
		ra_fwd += (FILE_READAHEAD - 1) << FILE_BLOCK_SHIFT;
	else if (file_ra_backward(file))
		ra_bkw += (FILE_READAHEAD - 1) << FILE_BLOCK_SHIFT;

	if (ra_bkw > segstart)
		ra_bkw = segstart;
	if (ra_fwd > file->f.phys_size - segstart)
		ra_fwd = file->f.phys_size - segstart;

	segstart -= ra_bkw;
	ra_fwd += ra_bkw;

	pos.block = first_load_block(from->block, segstart);
	pos.off = segstart >= pos.block->phys_pos
		? segstart - pos.block->phys_pos
		: 0;
	list_add(&pos.list, &pos.block->refs);
	nblocks = ((ra_fwd - 1) >> FILE_BLOCK_SHIFT) + 1;
	alloc_caches(file, preload, nblocks);

	ret = -1;	/* be a pessimist */
	if (file_load_data(file, preload, nblocks, segstart))
		goto out;

	while (physpos = hed_cursor_phys_pos(&pos),
	       ra_off = physpos - segstart,
	       ra_off < ra_fwd) {
		struct hed_block_data *dataobj;
		struct hed_block *newblock;
		size_t datalen;

		if (!hed_block_is_virtual(pos.block)) {
			pos.block = next_block(pos.block);
			pos.off = 0;
			continue;
		}

		datalen = FILE_BLOCK_SIZE - FILE_BLOCK_OFF(physpos);
		if (datalen > hed_block_size(pos.block) - pos.off)
			datalen = hed_block_size(pos.block) - pos.off;

		dataobj = preload[ra_off >> FILE_BLOCK_SHIFT];
		newblock = dataobj
			? new_data_block(file, physpos, datalen, dataobj)
			: new_virt_block(file, physpos, datalen,
					 HED_BLOCK_BAD);
		if (!newblock)
			goto out;

		/* Punch the new block */
		BDEBUG("Add %s block at %llx, length %llx\n",
		       hed_block_is_virtual(newblock) ? "error" : "physical",
		       newblock->phys_pos, newblock->t.size);
		if (replace_chunk(file, pos.block, pos.off, newblock)) {
			file_free_block(file, newblock);
			goto out;
		}

		pos.block = next_block(newblock);
		pos.off = 0;
	}
	ret = 0;

out:
	/* All cache objects now have an extra reference from the
	 * allocation. Drop it. */
	free_caches(file, preload, nblocks);
	put_cursor(&pos);
	return ret;
}
/* Shorten a block at beginning and enlarge the preceding block.
 *
 * Re-allocate at most @len bytes from the beginning of @block to the
 * end of the preceding block.
 * If @block is virtual, this will effectively devirtualize the range.
 * If @block is not virtual, this will change the backing store of
 * the bytes in the range.
 * Returns: the number of bytes actually moved.
 */
static size_t
shrink_at_begin(struct hed_block *block, size_t len, long state)
{
	struct hed_block *prev;
	size_t maxgrow;

	/* Basic assumptions */
	assert(!(state & HED_BLOCK_VIRTUAL));

	/* The previous block must exist: */
	prev = prev_block(block);
	if (hed_block_is_terminal(prev))
		return 0;

	/* The block flags must match the requested @state: */
	if ((prev->flags & HED_BLOCK_STATEMASK) != state)
		return 0;

	/* No deletions at end, or similar: */
	if (prev->phys_pos + prev->t.size != block->phys_pos)
		return 0;

	/* Append less bytes than requested if not all are available */
	assert(prev->t.size <= prev->dataobj->size - prev->dataoff);
	maxgrow = prev->dataobj->size - prev->dataoff - prev->t.size;
	if (len > maxgrow)
		len = maxgrow;
	if (!len)
		return 0;

	BDEBUG("Appending 0:%lx to the previous block\n", len);

	/* Move cursors away from the to-be-chopped beginning */
	move_cursors(block, prev, 0, len - 1, prev->t.size);

	/* Enlarge the previous block */
	prev->t.size += len;
	recalc_block_recursive(prev);

	/* Shorten the original block */
	block->t.size -= len;
	block->dataoff += len;
	block->phys_pos += len;
	recalc_block_recursive(block);

	return len;
}
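
/*
 * Illustrative sketch, not part of the original source: what a successful
 * shrink_at_begin() does to two adjacent physical blocks. The offsets are
 * made up for the example.
 *
 *	before:  prev [phys 0x0000, size 0x80]  block [phys 0x0080, size 0x100]
 *	call:    shrink_at_begin(block, 0x20, state);
 *	after:   prev [phys 0x0000, size 0xa0]  block [phys 0x00a0, size 0xe0]
 *
 * Cursors within the first 0x20 bytes of @block now reference @prev.
 */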
/* Shorten a block at end and enlarge the following block.
 *
 * Re-allocate at most @len bytes from the end of @block to the
 * beginning of the following block.
 * If @block is virtual, this will effectively devirtualize the range.
 * If @block is not virtual, this will change the backing store of
 * the bytes in the range.
 * Returns: the number of bytes actually moved.
 */
static size_t
shrink_at_end(struct hed_block *block, size_t len, long state)
{
	struct hed_block *next;
	hed_uoff_t off;

	/* Basic assumptions */
	assert(!(state & HED_BLOCK_VIRTUAL));

	/* The next block must exist: */
	if (hed_block_is_terminal(block))
		return 0;
	next = next_block(block);

	/* The block flags must match the requested @state: */
	if ((next->flags & HED_BLOCK_STATEMASK) != state)
		return 0;

	/* No deletions at end, or similar: */
	if (block->phys_pos + block->t.size != next->phys_pos)
		return 0;

	/* Prepend less bytes than requested if not all are available */
	if (len > next->dataoff)
		len = next->dataoff;
	if (!len)
		return 0;
	off = block->t.size - len;

	BDEBUG("Prepending %llx:%lx to the next block\n", off, len);

	/* Shift cursors in the new physical block */
	update_cursors(next, next, len);

	/* Move cursors away from the to-be-chopped end */
	move_cursors(block, next, off, UOFF_MAX, -off);

	/* Enlarge the next block */
	next->dataoff -= len;
	next->phys_pos -= len;
	next->t.size += len;
	recalc_block_recursive(next);

	/* Shorten the original block */
	block->t.size -= len;
	recalc_block_recursive(block);

	return len;
}
/* Search for an existing data object within the same physical block
 * as @curs, and having the given @state flags.
 */
static struct hed_block_data *
search_data(struct file_priv *file, const hed_cursor_t *curs, long state)
{
	struct hed_block *block;
	hed_uoff_t physpos;

	physpos = FILE_BLOCK_ROUND(curs->block->phys_pos + curs->off);
	BDEBUG("Search for already loaded data at %llx starting at %llx...",
	       physpos, curs->block->phys_pos);

	/* Search backwards */
	block = curs->block;
	while (!hed_block_is_terminal(block = prev_block(block))) {
		if (block->phys_pos < physpos)
			break;
		if ((block->flags & STATEMASK_SANS_EOF) == state) {
			BDEBUG(" found at %llx\n", block->phys_pos);
			assert(block->dataobj);
			return block->dataobj;
		}
	}

	/* Search forwards */
	block = curs->block;
	while (!hed_block_is_terminal(block)) {
		block = next_block(block);
		if (block->phys_pos >= physpos + FILE_BLOCK_SIZE)
			break;
		if ((block->flags & STATEMASK_SANS_EOF) == state) {
			BDEBUG(" found at %llx\n", block->phys_pos);
			assert(block->dataobj);
			return block->dataobj;
		}
	}

	BDEBUG(" not found\n");
	return NULL;
}
static int
reuse_loaded_data(struct file_priv *file, const hed_cursor_t *curs,
		  struct hed_block_data *data)
{
	struct hed_block *physblock;
	struct hed_block *block = curs->block;
	hed_uoff_t block_offset = curs->off;
	hed_uoff_t physpos = block->phys_pos + block_offset;
	size_t part = FILE_BLOCK_OFF(physpos);
	size_t len =
		FILE_BLOCK_SIZE - part <= block->t.size - block_offset
		? FILE_BLOCK_SIZE - part
		: block->t.size - block_offset;

	if (part > block_offset)
		part = block_offset;

	physpos -= part;
	len += part;
	block_offset -= part;

	if (! (physblock = new_data_block(file, physpos, len, data)) )
		return -1;

	BDEBUG("Add physical block at %llx, length %llx\n",
	       physblock->phys_pos, physblock->t.size);
	if (replace_chunk(file, block, block_offset, physblock)) {
		file_free_block(file, physblock);
		return -1;
	}

	return 0;
}
/* Replace a part of a virtual block with content loaded
 * from disk. The amount of data loaded from the disk depends
 * on various factors with the goal to choose the most efficient
 * ratio. The only guarantee is that the byte at @curs will
 * be in a non-virtual block when this function returns 0.
 */
static int
devirtualize_clean(struct file_priv *file, const hed_cursor_t *curs)
{
	struct hed_block *block = curs->block;
	hed_uoff_t block_offset = curs->off;
	hed_uoff_t remain = block->t.size - block_offset;
	struct hed_block_data *data;

	BDEBUG("punch a clean hole at %llx into %llx:%llx\n", block_offset,
	       block_offset(block), block->t.size);
	assert(hed_block_is_virtual(block));

	/* Check if we can combine with a neighbouring block */
	if (shrink_at_begin(block, hed_block_size(block), 0) > block_offset
	    || shrink_at_end(block, hed_block_size(block), 0) >= remain) {
		kill_block_if_empty(file, block);
		return 0;
	}

	/* Check if the block is already loaded elsewhere */
	data = search_data(file, curs, 0);
	return data
		? reuse_loaded_data(file, curs, data)
		: load_blocks(file, curs);
}
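
/*
 * Illustrative summary, not part of the original source: the three ways a
 * clean virtual byte becomes readable, in the order tried above.
 *
 *	1. merge into an adjacent loaded block   (shrink_at_begin/_end)
 *	2. reuse a data object loaded elsewhere  (search_data + reuse_loaded_data)
 *	3. read it from disk                     (load_blocks)
 */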
/* Unless the block at @curs is already dirty, replace at most
 * @len bytes at @curs with a newly allocated out-of-cache block.
 * The block is marked dirty and its data is left uninitialized.
 * Note that this function may devirtualize less than @len bytes.
 * In the worst case only 1 byte at @curs will be available.
 */
static int
prepare_modify(struct file_priv *file, hed_cursor_t *curs, size_t len)
{
	struct hed_block *block = curs->block;
	hed_uoff_t block_offset = curs->off;
	hed_uoff_t remain = block->t.size - block_offset;
	long newstate = HED_BLOCK_DIRTY | (block->flags & HED_BLOCK_EOF);
	struct hed_block *newblock;

	if (hed_block_is_dirty(block))
		return 0;

	if (len > remain)
		len = remain;

	BDEBUG("punch a dirty hole at %llx:%lx into %llx:%llx\n",
	       block_offset, len,
	       block_offset(block), block->t.size);

	/* Check if we can combine with a neighbouring block */
	if ((block_offset == 0 &&
	     shrink_at_begin(block, len, newstate)) ||
	    (remain == len &&
	     shrink_at_end(block, len, newstate) >= len)) {
		kill_block_if_empty(file, block);
		return 0;
	}

	/* Initialize a new block */
	newblock = new_block(file, HED_BLOCK_EXCACHE | newstate);
	if (!newblock)
		goto out_err;

	/* Try to re-use an existing data object */
	if ( (newblock->dataobj = search_data(file, curs, HED_BLOCK_DIRTY)) )
		cache_get(newblock->dataobj);
	else if (! (newblock->dataobj = block_data_new(file->cache,
						       FILE_BLOCK_SIZE)) )
		goto out_err_free;

	newblock->phys_pos = block->phys_pos + block_offset;
	newblock->dataoff = FILE_BLOCK_OFF(newblock->phys_pos);
	if (len > FILE_BLOCK_SIZE - newblock->dataoff)
		len = FILE_BLOCK_SIZE - newblock->dataoff;
	newblock->t.size = len;

	if (replace_chunk(file, block, block_offset, newblock))
		goto out_err_free;

	return 0;

out_err_free:
	file_free_block(file, newblock);
out_err:
	return -1;
}
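
/*
 * Illustrative sketch, not part of the original source: the write path
 * used by the setters below. prepare_modify() guarantees at least one
 * writable byte at the cursor; callers loop over chunks for longer writes.
 *
 *	if (prepare_modify(file, curs, 1))
 *		return;			// allocation failure
 *	hed_block_data(curs->block)[curs->off] = 0x90;
 */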
/* Ensure that @curs points to an up-to-date non-virtual block.
 * Load the data from disk if necessary, return zero on failure. */
size_t
hed_prepare_read(struct hed_file *f, const hed_cursor_t *curs, size_t len)
{
	struct file_priv *file = file_private(f);
	struct hed_block *block = curs->block;
	if (block_is_loadable(block) &&
	    devirtualize_clean(file, curs) < 0)
		return 0;

	return hed_cursor_chunk_len(curs, len);
}
/* Move the block pointer to the next block */
static struct hed_block *
cursor_next_block(hed_cursor_t *curs)
{
	struct hed_block *block = next_nonzero_block(curs->block);

	if (block) {
		curs->block = block;
		curs->off = 0;
		list_move(&curs->list, &block->refs);
	}
	return block;
}
/* Copy in @count bytes from @pos.
 * Returns the number of bytes that were not read (i.e. zero on success).
 * The @pos cursor is moved by the amount of data read.
 * CAUTION: If you read up to MAX_OFF, then @pos points one byte
 * beyond the EOF block upon return.
 */
static size_t
copy_in(struct file_priv *file, void *buf, size_t count, hed_cursor_t *pos)
{
	size_t cpylen;

	pos->pos += count;
	while (count && (cpylen = hed_prepare_read(&file->f, pos, count))) {
		if (hed_block_is_bad(pos->block))
			break;
		else if (hed_block_is_virtual(pos->block))
			memset(buf, 0, cpylen);
		else
			memcpy(buf, hed_cursor_data(pos), cpylen);

		buf += cpylen;
		count -= cpylen;
		if ( (pos->off += cpylen) >= hed_block_size(pos->block) )
			if (!cursor_next_block(pos))
				break;
	}
	pos->pos -= count;
	return count;
}
size_t
hed_file_cpin(struct hed_file *file, void *buf, size_t count,
	      const hed_cursor_t *pos)
{
	hed_cursor_t mypos;
	size_t ret;

	hed_dup_cursor(pos, &mypos);
	ret = copy_in(file_private(file), buf, count, &mypos);
	put_cursor(&mypos);
	return ret;
}
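
/*
 * Illustrative sketch, not part of the original source: reading 16 bytes
 * without disturbing the caller's cursor. The buffer is made up.
 *
 *	unsigned char buf[16];
 *	if (hed_file_cpin(f, buf, sizeof(buf), &curs) == 0)
 *		;	// buf now holds the bytes at curs .. curs+15
 */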
/* Set the modified flag */
static inline void
set_modified(struct file_priv *file)
{
	file->f.modified = true;
}

/* Clear the modified flag */
static inline void
clear_modified(struct file_priv *file)
{
	file->f.modified = false;
}
int
hed_file_set_byte(struct hed_file *f, hed_cursor_t *curs, unsigned char byte)
{
	struct file_priv *file = file_private(f);

	if (prepare_modify(file, curs, 1))
		return -1;
	set_modified(file);

	hed_block_data(curs->block)[curs->off] = byte;

	if (hed_block_is_terminal(next_block(curs->block)))
		file->f.size = curs->pos + 1;

	return 0;
}
size_t
hed_file_set_block(struct hed_file *f, hed_cursor_t *curs,
		   unsigned char *buf, size_t len)
{
	struct file_priv *file = file_private(f);
	while (len) {
		size_t span;

		if (prepare_modify(file, curs, len))
			break;
		set_modified(file);

		span = hed_cursor_chunk_len(curs, len);
		memcpy(hed_cursor_data(curs), buf, span);
		buf += span;
		len -= span;
		move_rel_fast(curs, span);
	}
	if (hed_block_is_terminal(curs->block))
		file->f.size = curs->pos;

	return len;
}
hed_uoff_t
hed_file_set_bytes(struct hed_file *f, hed_cursor_t *curs,
		   unsigned char byte, hed_uoff_t rep)
{
	struct file_priv *file = file_private(f);
	while (rep) {
		size_t span;

		if (prepare_modify(file, curs, rep))
			break;
		set_modified(file);

		span = hed_cursor_chunk_len(curs, rep);
		memset(hed_cursor_data(curs), byte, span);
		rep -= span;
		move_rel_fast(curs, span);
	}
	if (hed_block_is_terminal(curs->block))
		file->f.size = curs->pos;

	return rep;
}
static int
file_erase_continuous(struct file_priv *file, hed_cursor_t *curs, size_t len)
{
	struct hed_block *block = curs->block;
	hed_uoff_t block_offset = curs->off;

	/* Find the new position */
	hed_move_relative(curs, len);

	/* Move all other cursors in the erased range to the new position */
	move_cursors_abs(block, block_offset,
			 block_offset + len - 1, curs);

	if (!block_offset) {
		block->dataoff += len;
		if (!hed_block_is_inserted(block))
			block->phys_pos += len;
	} else if (block_offset + len < block->t.size) {
		block = split_block(file, block, block_offset + len);
		if (!block)
			return -1;
	}

	move_cursors(block, block, block_offset, UOFF_MAX, -(hed_uoff_t)len);

	block->t.size -= len;
	recalc_block_recursive(block);

	kill_block_if_empty(file, block);
	return 0;
}
hed_uoff_t
hed_file_erase_block(struct hed_file *f, hed_cursor_t *curs, hed_uoff_t len)
{
	struct file_priv *file = file_private(f);
	hed_uoff_t todo = len;

	while (!hed_block_is_terminal(curs->block) && todo) {
		size_t span = hed_cursor_chunk_len(curs, todo);

		if (file_erase_continuous(file, curs, span))
			break;

		todo -= span;
	}
	len -= todo;

	file->f.size -= len;
	set_modified(file);

	file->terminal_block.t.size += len;
	recalc_block_recursive(&file->terminal_block);

	{
		struct hed_block *slideblock = prev_block(curs->block);
		if (hed_block_is_terminal(slideblock))
			slideblock = curs->block;
		slide_cursors(slideblock, curs->pos, -len);
	}

	return hed_block_is_terminal(curs->block) ? 0 : todo;
}
/* Note that @curs may be detached from the block reference list
 * with put_cursor(). Only the pos, block and off fields are used.
 * This is an implementation detail currently required by
 * hed_file_insert_block(), which sets @curs and @curs_ins to the
 * same value. Other users of the library must not rely on it.
 */
int
hed_file_insert_begin(struct hed_file *f, const hed_cursor_t *curs,
		      hed_cursor_t *curs_ins)
{
	struct file_priv *file = file_private(f);
	struct hed_block *newblock;

	BDEBUG("Starting insert at %llx\n", curs->pos);

	newblock = new_block(file, (HED_BLOCK_EXCACHE |
				    HED_BLOCK_INSERTED | HED_BLOCK_DIRTY |
				    (curs->block->flags & HED_BLOCK_EOF)));
	if (!newblock)
		return -1;

	newblock->phys_pos = hed_cursor_phys_pos(curs);
	newblock->dataobj = block_data_new(file->cache, FILE_BLOCK_SIZE);
	if (!newblock->dataobj) {
		file_free_block(file, newblock);
		return -1;
	}

	if (curs->off && !split_block(file, curs->block, curs->off)) {
		file_free_block(file, newblock);
		return -1;
	}

	chain_block_before(hed_file_blocks(file), newblock, curs->block);

	curs_ins->pos = curs->pos;
	curs_ins->off = newblock->t.size;
	curs_ins->block = newblock;
	list_add(&curs_ins->list, &newblock->refs);

	return 0;
}
void
hed_file_insert_end(struct hed_file *f, hed_cursor_t *curs_ins)
{
	struct file_priv *file = file_private(f);
	struct hed_block *block = curs_ins->block;

	BDEBUG("End insert at %llx\n", curs_ins->pos);

	curs_ins->block = NULL;
	list_del(&curs_ins->list);
	if (!kill_block_if_empty(file, block))
		block_data_shrink(file->cache, block->dataobj,
				  block->dataoff + block->t.size);
}
static void
insert_block(struct file_priv *file, hed_cursor_t *curs,
	     unsigned char *buf, size_t len)
{
	struct hed_block *block = curs->block;
	hed_uoff_t offset = curs->pos;

	assert(hed_block_is_excache(block));
	assert(hed_block_is_dirty(block));

	set_modified(file);

	memcpy(hed_block_data(block) + curs->off, buf, len);
	block->t.size += len;
	recalc_block_recursive(block);
	curs->off += len;
	curs->pos += len;

	slide_cursors(next_block(block), offset, len);
}
size_t
hed_file_insert_block(struct hed_file *f, hed_cursor_t *curs,
		      unsigned char *buf, size_t len)
{
	struct file_priv *file = file_private(f);
	size_t todo = len;

	while (todo) {
		struct hed_block *block = curs->block;
		size_t remain;

		remain = block->dataobj->size - block->dataoff - curs->off;
		if (!remain) {
			list_del(&curs->list);
			curs->block = next_block(block);
			curs->off = 0;

			if (hed_file_insert_begin(&file->f, curs, curs))
				break;
			continue;
		}

		if (remain > todo)
			remain = todo;

		BDEBUG("Append %ld bytes to the insert block\n",
		       (long)remain);
		insert_block(file, curs, buf, remain);

		buf += remain;
		todo -= remain;
	}
	len -= todo;

	if (curs->pos > file->f.size)
		file->f.size = curs->pos;
	else
		file->f.size += len;

	file->terminal_block.t.size -= len;
	recalc_block_recursive(&file->terminal_block);

	return todo;
}
int
hed_file_insert_byte(struct hed_file *file, hed_cursor_t *curs,
		     unsigned char byte)
{
	return hed_file_insert_block(file, curs, &byte, 1);
}
size_t
hed_file_insert_once(struct hed_file *file, hed_cursor_t *curs,
		     unsigned char *buf, size_t len)
{
	hed_cursor_t insert;

	if (!hed_file_insert_begin(file, curs, &insert)) {
		len = hed_file_insert_block(file, &insert, buf, len);
		hed_file_insert_end(file, &insert);
	}
	return len;
}
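
/*
 * Illustrative sketch, not part of the original source: the
 * begin/block/end insert protocol used by callers that batch several
 * writes into one insert block. The data below is made up.
 *
 *	hed_cursor_t ins;
 *	if (!hed_file_insert_begin(f, &curs, &ins)) {
 *		hed_file_insert_block(f, &ins, (unsigned char *)"AB", 2);
 *		hed_file_insert_block(f, &ins, (unsigned char *)"CD", 2);
 *		hed_file_insert_end(f, &ins);
 *	}
 *
 * hed_file_insert_once() above wraps exactly one such round trip.
 */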
struct commit_control {
	struct file_priv *file;
	int wfd;		/* file descriptor for writing */
	hed_cursor_t begoff, endoff;
	void *buffer;		/* write buffer (one block) */
};

static ssize_t
commit_write(struct commit_control *cc, hed_uoff_t pos, ssize_t len)
{
	BDEBUG(" -> write %lx bytes at %llx\n",
	       (unsigned long)len, pos);
	return pwrite(cc->wfd, cc->buffer, len, pos);
}
/* Commit forwards. */
static int
commit_forwards(struct commit_control *cc)
{
	int ret = 0;

	BDEBUG("Writing forwards %llx-%llx\n",
	       cc->begoff.pos, cc->endoff.pos);

	while (cc->begoff.pos < cc->endoff.pos) {
		ssize_t left;
		ssize_t written;

		left = copy_in(cc->file, cc->buffer,
			       FILE_BLOCK_SIZE, &cc->begoff);
		if (left) {
			move_rel_fast(&cc->begoff, left);
			ret = -1;
			continue;
		}

		written = commit_write(cc, cc->begoff.pos - FILE_BLOCK_SIZE,
				       FILE_BLOCK_SIZE);
		if (written < FILE_BLOCK_SIZE)
			ret = -1;
	}

	return ret;
}
1912 commit_backwards(struct commit_control
*cc
)
1917 BDEBUG("Writing backwards %llx-%llx\n",
1918 cc
->begoff
.pos
, cc
->endoff
.pos
);
1920 hed_dup_cursor(&cc
->endoff
, &curs
);
1921 while (curs
.pos
> cc
->begoff
.pos
) {
1925 move_rel_fast(&curs
, -FILE_BLOCK_SIZE
);
1926 left
= hed_file_cpin(&cc
->file
->f
, cc
->buffer
,
1927 FILE_BLOCK_SIZE
, &curs
);
1933 written
= commit_write(cc
, curs
.pos
, FILE_BLOCK_SIZE
);
1934 if (written
< FILE_BLOCK_SIZE
)
1937 hed_put_cursor(&curs
);
/* Return the number of clean bytes following @curs.
 * Usage note: the caller must ensure that the starting position
 * is not in a dirty block.
 */
static hed_uoff_t
clean_span(hed_cursor_t *curs)
{
	hed_uoff_t next_pos;
	struct hed_block *block = curs->block;
	hed_uoff_t ret = -curs->off;

	assert(!hed_block_is_dirty(block));

	do {
		ret += hed_block_size(block);
		next_pos = block->phys_pos + hed_block_size(block);
		block = next_nonzero_block(block);
	} while (block->phys_pos == next_pos &&	/* no insertions, deletions */
		 !hed_block_is_dirty(block) &&	/* no modifications */
		 !hed_block_is_terminal(block));
	return ret;
}
static void
undirty_blocks(struct file_priv *file)
{
	struct hed_block *block, *next;
	hed_uoff_t pos = 0;

	BDEBUG("Undirtying blocks:\n");
	dump_blocks(file);

	next = first_block(file);
	while (!hed_block_is_terminal(next)) {
		block = next;
		next = next_block(block);

		if (kill_block_if_empty(file, block))
			continue;

		if (!hed_block_is_virtual(block)) {
			cache_put(file->cache, block->dataobj);
			block->dataobj = NULL;
			list_del_init(&block->lru);
			block->flags = HED_BLOCK_EXCACHE | HED_BLOCK_VIRTUAL;
		}

		block->phys_pos = pos;
		pos += block->t.size;
	}

	block = first_block(file);
	while (!hed_block_is_terminal(block)) {
		next = next_block(block);
		kill_block(file, block);
		block = next;
	}

	BDEBUG("After undirtying\n");
	dump_blocks(file);
}
static int
commit_init(struct commit_control *cc, struct file_priv *file)
{
	cc->file = file;

	cc->buffer = malloc(FILE_BLOCK_SIZE);
	if (!cc->buffer)
		goto err;

	if (file->fd < 0) {
		file->fd = open(file->f.name, O_RDONLY | O_CREAT, 0666);
		if (file->fd < 0)
			goto err_free;
	}

	cc->wfd = open(file->f.name, O_WRONLY);
	if (cc->wfd < 0)
		goto err_free;

	return 0;

err_free:
	free(cc->buffer);
err:
	return -1;
}
*f
)
2033 struct file_priv
*file
= file_private(f
);
2034 struct commit_control cc
;
2038 if (commit_init(&cc
, file
))
2043 get_cursor(file
, 0,&cc
.begoff
);
2044 hed_dup_cursor(&cc
.begoff
, &cc
.endoff
);
2045 shift
= -cc
.begoff
.block
->phys_pos
;
2046 while(!hed_block_is_terminal(cc
.endoff
.block
)) {
2050 if (!shift
&& !hed_block_is_dirty(cc
.endoff
.block
)) {
2051 skip
= FILE_BLOCK_ROUND(clean_span(&cc
.endoff
));
2053 ret
|= commit_forwards(&cc
);
2055 BDEBUG("Skipping %llx-%llx\n",
2056 cc
.endoff
.pos
, cc
.endoff
.pos
+ skip
);
2057 hed_move_relative(&cc
.endoff
, skip
);
2058 hed_dup2_cursor(&cc
.endoff
, &cc
.begoff
);
2063 skip
= FILE_BLOCK_ROUND(hed_cursor_span(&cc
.endoff
));
2064 hed_move_relative(&cc
.endoff
, skip
?: FILE_BLOCK_SIZE
);
2066 newshift
= !hed_block_is_eof(cc
.endoff
.block
)
2067 ? cc
.endoff
.pos
- hed_cursor_phys_pos(&cc
.endoff
)
2070 if (shift
<= 0 && newshift
> 0) {
2071 move_rel_fast(&cc
.endoff
, -FILE_BLOCK_SIZE
);
2072 ret
|= commit_forwards(&cc
);
2073 hed_dup2_cursor(&cc
.endoff
, &cc
.begoff
);
2074 } else if (shift
> 0 && newshift
<= 0) {
2075 ret
|= commit_backwards(&cc
);
2076 hed_dup2_cursor(&cc
.endoff
, &cc
.begoff
);
2080 assert(cc
.endoff
.pos
>= hed_file_size(&file
->f
));
2082 ret
|= commit_forwards(&cc
);
2083 put_cursor(&cc
.begoff
);
2084 put_cursor(&cc
.endoff
);
2086 ftruncate(cc
.wfd
, hed_file_size(&file
->f
));
2087 file
->f
.phys_size
= hed_file_size(&file
->f
);
2089 ret
|= close(cc
.wfd
);
2092 undirty_blocks(file
);
2095 clear_modified(file
);
#ifdef HED_CONFIG_SWAP

int
hed_file_write_swap(struct hed_file *file)
{
	return swp_write(file_swp(file_private(file)));
}
static int
do_read_swap(struct file_priv *file, struct swp_file *swp, hed_cursor_t *pos)
{
	struct file_priv *swpfile = swp_private(swp);
	struct hed_block *cur, block;
	hed_uoff_t phys_pos;

	if (file_stat(swpfile)->st_size != file_stat(file)->st_size ||
	    file_stat(swpfile)->st_mtime != file_stat(file)->st_mtime) {
		fprintf(stderr, "stat info mismatch (you modified the file since hed ran on it; refusing to touch it)\n");
		return -1;
	}

	BDEBUG("Swap header match\n");

	phys_pos = 0;
	cur = first_block(swpfile);
	do {
		struct hed_block_data dataobj;
		size_t (*mergefn)(struct hed_file *, hed_cursor_t *,
				  unsigned char*, size_t);
		void *data;
		size_t res;

		if (swp_cpin(swp, &block, cur, sizeof(struct hed_block))) {
			perror("Cannot read block descriptor");
			return -1;
		}
		BDEBUG("BLOCK %p: flags %02lx phys 0x%02llx size 0x%llx\n",
		       cur, block.flags, (long long)block.phys_pos,
		       (long long)hed_block_size(&block));

		if (block.phys_pos - phys_pos) {
			if (hed_file_erase_block(&file->f, pos,
						 block.phys_pos - phys_pos)) {
				perror("Cannot erase");
				return -1;
			}
			phys_pos = block.phys_pos;
		}

		if (!hed_block_is_inserted(&block))
			phys_pos += hed_block_size(&block);

		if (!hed_block_is_dirty(&block)) {
			hed_move_relative(pos, hed_block_size(&block));
			continue;
		}

		if (swp_cpin(swp, &dataobj, block.dataobj,
			     sizeof(struct hed_block_data))) {
			perror("Cannot read data descriptor");
			return -1;
		}
		BDEBUG("DATA %p: size 0x%lx\n",
		       block.dataobj, (long)dataobj.size);

		if (! (data = malloc(hed_block_size(&block))) ) {
			perror("Cannot allocate data");
			return -1;
		}

		if (swp_cpin(swp, data, dataobj.data + block.dataoff,
			     hed_block_size(&block))) {
			perror("Cannot read data");
			free(data);
			return -1;
		}

		mergefn = hed_block_is_inserted(&block)
			? hed_file_insert_once
			: hed_file_set_block;
		res = mergefn(&file->f, pos, data, hed_block_size(&block));
		free(data);
		if (res) {
			perror("Cannot merge data");
			return -1;
		}
	} while (cur = next_block(&block), !hed_block_is_terminal(&block));

	return 0;
}
int
hed_file_read_swap(struct hed_file *f)
{
	struct file_priv *file = file_private(f);
	struct swp_file *swp;
	hed_cursor_t pos;
	int ret;

	if (! (swp = swp_init_read(file->swpname)) )
		return -1;

	get_cursor(file, 0, &pos);
	ret = do_read_swap(file, swp, &pos);
	put_cursor(&pos);

	swp_done(swp);
	return ret;
}

#else /* HED_CONFIG_SWAP */

int
hed_file_write_swap(struct hed_file *file)
{
	return 0;
}

int
hed_file_read_swap(struct hed_file *file)
{
	return -1;
}

#endif /* HED_CONFIG_SWAP */
/* Check if the search string is all zero */
static bool
is_allzero(unsigned char *s, size_t len)
{
	while (len--)
		if (*s++)
			return false;
	return true;
}

static void
reverse(unsigned char *p, size_t len)
{
	unsigned char *q = p + len;
	while (p < --q) {
		unsigned char x = *p;
		*p++ = *q;
		*q = x;
	}
}
static void
compute_badchar(ssize_t *badchar, const unsigned char *s, ssize_t len)
{
	ssize_t i = 1;

	while (i < len)
		badchar[*s++] = i++;
}
static void
compute_sfx(ssize_t *sfx, const unsigned char *s, ssize_t len)
{
	ssize_t f, g, i;

	f = 0;	/* bogus assignment to silence a warning */
	g = len - 1;
	sfx[len - 1] = len;
	for (i = len - 2; i >= 0; --i) {
		if (i > g && sfx[i + len - 1 - f] < i - g)
			sfx[i] = sfx[i + len - 1 - f];
		else {
			if (i < g)
				g = i;
			f = i;
			while (g >= 0 && s[g] == s[g + len - 1 - f])
				--g;
			sfx[i] = f - g;
		}
	}
}
static void
compute_goodsfx(ssize_t *goodsfx, const unsigned char *s, ssize_t len)
{
	ssize_t i, j, *sfx = goodsfx + len;

	compute_sfx(sfx, s, len);

	for (i = 0; i < len; ++i)
		goodsfx[i] = len;
	j = 0;
	for (i = len - 1; i >= 0; --i)
		if (sfx[i] == i + 1)
			for (; j < len - 1 - i; ++j)
				if (goodsfx[j] == len)
					goodsfx[j] = len - 1 - i;
	for (i = 0; i <= len - 2; ++i)
		goodsfx[len - 1 - sfx[i]] = len - 1 - i;
}
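
/*
 * Illustrative summary, not part of the original source: how the two
 * tables built above drive the skip distance in the searches below.
 * This is the classic Boyer-Moore construction.
 *
 *	bad-char:  on a mismatch against text byte c, shift so that the
 *	           last earlier occurrence of c in the needle lines up
 *	           under it (badchar[c] holds that position).
 *	good-sfx:  on a mismatch after matching a suffix of the needle,
 *	           shift by the distance to the next reoccurrence of
 *	           that suffix inside the needle.
 *
 * The search takes the larger of the two shifts, so the window usually
 * advances by more than one byte per mismatch.
 */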
/* Search for a constant byte string using the Boyer-Moore algorithm. */
static inline unsigned char*
search_buf(unsigned char *buf, size_t buflen, unsigned char *needle,
	   size_t maxidx, ssize_t *badchar, ssize_t *goodsfx)
{
	if (!maxidx)
		return memchr(buf, *needle, buflen);

	while (buflen > maxidx) {
		unsigned char *p;
		size_t i;
		ssize_t shift;

		for (p = buf + maxidx, i = maxidx; p >= buf; --p, --i)
			if (needle[i] != *p)
				break;
		if (p < buf)
			return buf;

		shift = i + 1 - badchar[*p];
		if (shift < goodsfx[i])
			shift = goodsfx[i];

		buf += shift;
		buflen -= shift;
	}
	return NULL;
}
/* Search for a constant byte string backwards. */
static inline unsigned char*
search_buf_rev(unsigned char *buf, size_t buflen, unsigned char *needle,
	       size_t maxidx, ssize_t *badchar, ssize_t *goodsfx)
{
	if (!maxidx)
		return memrchr(buf, *needle, buflen);

	buf += buflen - maxidx - 1;
	while (buflen > maxidx) {
		unsigned char *p;
		size_t i;
		ssize_t shift;

		for (p = buf, i = maxidx; p <= buf + maxidx; ++p, --i)
			if (needle[i] != *p)
				break;
		if (p > buf + maxidx)
			return buf;

		shift = i + 1 - badchar[*p];
		if (shift < goodsfx[i])
			shift = goodsfx[i];

		buf -= shift;
		buflen -= shift;
	}
	return NULL;
}
/* Search for a constant byte string using the Boyer-Moore algorithm. */
static int
find_bytestr(struct file_priv *file, hed_cursor_t *from, int dir,
	     unsigned char *needle, size_t len)
{
	void *dynalloc;
	ssize_t *badchar, *goodsfx;
	unsigned char *readbuf;
	unsigned char *p, *q;
	ssize_t remain;
	bool allzero;
	int ret;

	if (len > 1) {
		dynalloc = calloc(sizeof(ssize_t) * (256 + 2*len)
				  + len, 1);
		if (!dynalloc)
			return HED_FINDOFF_ERROR;
		badchar = dynalloc;
		goodsfx = badchar + 256;
		readbuf = dynalloc + sizeof(ssize_t) * (256 + 2*len);

		if (dir < 0)
			reverse(needle, len);
		compute_badchar(badchar, needle, len);
		compute_goodsfx(goodsfx, needle, len);
	} else {
		dynalloc = NULL;
		badchar = goodsfx = NULL;
		readbuf = NULL;
	}

	assert(!hed_block_is_terminal(from->block));

	allzero = is_allzero(needle, len);
	--len;	/* simplify offset computing */

	ret = HED_FINDOFF_NO_MATCH;
	if (dir < 0) {
		/* Search backwards */
		remain = 0;
		while (move_rel_fast(from, -remain),
		       ret && from->pos >= len) {
			if (!hed_prepare_read(&file->f, from, SIZE_MAX)) {
				ret = HED_FINDOFF_ERROR;
				break;
			}

			remain = from->off;
			if (remain <= (ssize_t)len) {
				/* A match may cross the block start, so
				 * bounce it through the read buffer */
				remain = len;
				if (remain > from->pos)
					remain = from->pos;
				move_rel_fast(from, -remain);
				if (hed_file_cpin(&file->f, readbuf,
						  remain + 1, from)) {
					ret = HED_FINDOFF_ERROR;
					break;
				}
				p = readbuf;
				++remain;
			} else if (!hed_block_is_virtual(from->block)) {
				p = from->block->dataobj->data +
					from->block->dataoff;
				from->off -= remain;
				from->pos -= remain;
				++remain;
			} else if (!hed_block_is_bad(from->block) && allzero) {
				/* Virtual space reads back as zeroes,
				 * so an all-zero needle matches here */
				ret = 0;
				remain = len;
				continue;
			} else {
				/* Skip unsearchable blocks */
				remain += 1 - (ssize_t)len;
				continue;
			}

			q = search_buf_rev(p, remain, needle, len,
					   badchar, goodsfx);
			if (q) {
				ret = 0;
				remain = p - q;
			} else
				remain = 1 - (ssize_t)len;
		}
	} else {
		/* Search forwards */
		for ( ; ret && !hed_block_is_terminal(from->block);
		     move_rel_fast(from, remain)) {
			remain = hed_prepare_read(&file->f, from, SIZE_MAX);
			if (!remain) {
				ret = HED_FINDOFF_ERROR;
				break;
			}

			if (remain <= (ssize_t)len) {
				/* A match may cross the block end, so
				 * bounce it through the read buffer;
				 * copy_in() moves the cursor for us */
				remain = len + 1;
				remain = copy_in(file, readbuf, remain, from);
				if (remain) {
					ret = HED_FINDOFF_ERROR;
					break;
				}
				p = readbuf;
				remain = len + 1;
			} else if (!hed_block_is_virtual(from->block)) {
				p = from->block->dataobj->data +
					from->block->dataoff + from->off;
				from->off += remain;
				from->pos += remain;
			} else if (!hed_block_is_bad(from->block) && allzero) {
				/* Virtual space reads back as zeroes */
				ret = 0;
				remain = 0;
				continue;
			} else
				continue;

			q = search_buf(p, remain, needle, len,
				       badchar, goodsfx);
			if (q) {
				ret = 0;
				remain = q - p - remain;
			} else
				remain = -(ssize_t)len;
		}
	}

	free(dynalloc);
	return ret;
}
static int
find_expr(struct file_priv *file, hed_cursor_t *from, int dir,
	  struct hed_expr *expr)
{
	size_t len = hed_expr_len(expr);
	unsigned char *buf;

	if (!len)
		return HED_FINDOFF_NO_MATCH;

	for (;;) {
		hed_cursor_t match;
		size_t remain = 0;
		unsigned char *p;
		size_t pos;

		if (hed_expr_eval(expr) & HED_AEF_ERROR)
			return HED_FINDOFF_ERROR;
		buf = hed_expr_buf(expr);

		hed_dup_cursor(from, &match);
		p = NULL;	/* bogus assignment to silence a warning */
		for (pos = 0; pos < len; pos++) {
			if (!remain) {
				remain = hed_prepare_read(&file->f, &match,
							  SIZE_MAX);
				if (!remain ||
				    hed_block_is_bad(match.block))
					break;
				p = hed_cursor_data(&match);
				cursor_next_block(&match);
			}
			if ((p ? *p++ : 0) != buf[pos])
				break;
			remain--;
		}
		put_cursor(&match);

		if (pos == len)
			return 0;
		if (!remain)
			return HED_FINDOFF_ERROR;

		if (dir < 0 && hed_block_is_terminal(from->block)) {
			from->pos -= from->off;
			from->off = 0;
		}
		move_rel_fast(from, dir);
		if (hed_block_is_terminal(from->block))
			break;

		if (! (hed_expr_flags(expr) & HED_AEF_DYNAMIC) )
			return find_bytestr(file, from, dir, buf, len);
	}

	return HED_FINDOFF_NO_MATCH;
}
int
hed_file_find_expr(struct hed_file *f, hed_cursor_t *pos, int dir,
		   struct hed_expr *expr)
{
	struct file_priv *file = file_private(f);
	int res;

	assert(dir == 1 || dir == -1);

	set_readahead(file, dir > 0 ? HED_RA_FORWARD : HED_RA_BACKWARD);
	res = find_expr(file, pos, dir, expr);
	set_readahead(file, HED_RA_NONE
);