/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "reftable-error.h"
#include "reftable-record.h"
#include "reftable-merged.h"

static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg);
static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config);
static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name);
static void reftable_addition_close(struct reftable_addition *add);
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open);

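/* Composes the path of a table file inside the stack's reftable directory. */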
static void stack_filename(struct strbuf *dest, struct reftable_stack *st,
			   const char *name)
{
	strbuf_reset(dest);
	strbuf_addstr(dest, st->reftable_dir);
	strbuf_addstr(dest, "/");
	strbuf_addstr(dest, name);
}

static ssize_t reftable_fd_write(void *arg, const void *data, size_t sz)
{
	int *fdp = (int *)arg;
	return write_in_full(*fdp, data, sz);
}

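/* Allocates a stack rooted at "<dir>/tables.list" and loads it immediately;
   on failure the partially initialized stack is destroyed again. */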
int reftable_new_stack(struct reftable_stack **dest, const char *dir,
		       struct reftable_write_options config)
{
	struct reftable_stack *p =
		reftable_calloc(sizeof(struct reftable_stack));
	struct strbuf list_file_name = STRBUF_INIT;
	int err = 0;

	if (config.hash_id == 0) {
		config.hash_id = GIT_SHA1_FORMAT_ID;
	}

	*dest = NULL;

	strbuf_reset(&list_file_name);
	strbuf_addstr(&list_file_name, dir);
	strbuf_addstr(&list_file_name, "/tables.list");

	p->list_file = strbuf_detach(&list_file_name, NULL);
	p->reftable_dir = xstrdup(dir);
	p->config = config;

	err = reftable_stack_reload_maybe_reuse(p, 1);
	if (err < 0) {
		reftable_stack_destroy(p);
	} else {
		*dest = p;
	}
	return err;
}

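/* Reads the whole file behind fd and splits it into a NULL-terminated list of
   lines. */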
static int fd_read_lines(int fd, char ***namesp)
{
	off_t size = lseek(fd, 0, SEEK_END);
	char *buf = NULL;
	int err = 0;
	if (size < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	err = lseek(fd, 0, SEEK_SET);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	buf = reftable_malloc(size + 1);
	if (read_in_full(fd, buf, size) != size) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	buf[size] = 0;

	parse_names(buf, size, namesp);

done:
	reftable_free(buf);
	return err;
}

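/* Like fd_read_lines(), but opens the file by name; a missing file yields an
   empty list instead of an error. */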
int read_lines(const char *filename, char ***namesp)
{
	int fd = open(filename, O_RDONLY);
	int err = 0;
	if (fd < 0) {
		if (errno == ENOENT) {
			*namesp = reftable_calloc(sizeof(char *));
			return 0;
		}

		return REFTABLE_IO_ERROR;
	}
	err = fd_read_lines(fd, namesp);
	close(fd);
	return err;
}

struct reftable_merged_table *
reftable_stack_merged_table(struct reftable_stack *st)
{
	return st->merged;
}

static int has_name(char **names, const char *name)
{
	while (*names) {
		if (!strcmp(*names, name))
			return 1;
		names++;
	}
	return 0;
}

/* Close and free the stack */
void reftable_stack_destroy(struct reftable_stack *st)
{
	char **names = NULL;
	int err = 0;
	if (st->merged) {
		reftable_merged_table_free(st->merged);
		st->merged = NULL;
	}

	err = read_lines(st->list_file, &names);
	if (err < 0) {
		FREE_AND_NULL(names);
	}

	if (st->readers) {
		int i = 0;
		struct strbuf filename = STRBUF_INIT;
		for (i = 0; i < st->readers_len; i++) {
			const char *name = reader_name(st->readers[i]);
			strbuf_reset(&filename);
			if (names && !has_name(names, name)) {
				stack_filename(&filename, st, name);
			}
			reftable_reader_free(st->readers[i]);

			if (filename.len) {
				/* On Windows, can only unlink after closing. */
				unlink(filename.buf);
			}
		}
		strbuf_release(&filename);
		st->readers_len = 0;
		FREE_AND_NULL(st->readers);
	}
	stat_validity_clear(&st->list_validity);
	FREE_AND_NULL(st->list_file);
	FREE_AND_NULL(st->reftable_dir);
	reftable_free(st);
	free_names(names);
}

static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
						   int cur_len)
{
	struct reftable_reader **cur =
		reftable_calloc(sizeof(struct reftable_reader *) * cur_len);
	int i = 0;
	for (i = 0; i < cur_len; i++) {
		cur[i] = st->readers[i];
	}
	return cur;
}

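/* Rebuilds the in-memory stack from the given list of table names, reusing
   already open readers when reuse_open is set and unlinking tables that have
   dropped out of the stack. */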
static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
				      int reuse_open)
{
	int cur_len = !st->merged ? 0 : st->merged->stack_len;
	struct reftable_reader **cur = stack_copy_readers(st, cur_len);
	int err = 0;
	int names_len = names_length(names);
	struct reftable_reader **new_readers =
		reftable_calloc(sizeof(struct reftable_reader *) * names_len);
	struct reftable_table *new_tables =
		reftable_calloc(sizeof(struct reftable_table) * names_len);
	int new_readers_len = 0;
	struct reftable_merged_table *new_merged = NULL;
	struct strbuf table_path = STRBUF_INIT;
	int i;

	while (*names) {
		struct reftable_reader *rd = NULL;
		char *name = *names++;

		/* this is linear; we assume compaction keeps the number of
		   tables under control so this is not quadratic. */
		int j = 0;
		for (j = 0; reuse_open && j < cur_len; j++) {
			if (cur[j] && 0 == strcmp(cur[j]->name, name)) {
				rd = cur[j];
				cur[j] = NULL;
				break;
			}
		}

		if (!rd) {
			struct reftable_block_source src = { NULL };
			stack_filename(&table_path, st, name);

			err = reftable_block_source_from_file(&src,
							      table_path.buf);
			if (err < 0)
				goto done;

			err = reftable_new_reader(&rd, &src, name);
			if (err < 0)
				goto done;
		}

		new_readers[new_readers_len] = rd;
		reftable_table_from_reader(&new_tables[new_readers_len], rd);
		new_readers_len++;
	}

	err = reftable_new_merged_table(&new_merged, new_tables,
					new_readers_len, st->config.hash_id);
	if (err < 0)
		goto done;

	new_tables = NULL;
	st->readers_len = new_readers_len;
	if (st->merged) {
		merged_table_release(st->merged);
		reftable_merged_table_free(st->merged);
	}
	if (st->readers) {
		reftable_free(st->readers);
	}
	st->readers = new_readers;
	new_readers = NULL;
	new_readers_len = 0;

	new_merged->suppress_deletions = 1;
	st->merged = new_merged;

	for (i = 0; i < cur_len; i++) {
		if (cur[i]) {
			const char *name = reader_name(cur[i]);
			stack_filename(&table_path, st, name);

			reader_close(cur[i]);
			reftable_reader_free(cur[i]);

			/* On Windows, can only unlink after closing. */
			unlink(table_path.buf);
		}
	}

done:
	for (i = 0; i < new_readers_len; i++) {
		reader_close(new_readers[i]);
		reftable_reader_free(new_readers[i]);
	}
	reftable_free(new_readers);
	reftable_free(new_tables);
	reftable_free(cur);
	strbuf_release(&table_path);
	return err;
}

/* return negative if a before b. */
static int tv_cmp(struct timeval *a, struct timeval *b)
{
	time_t diff = a->tv_sec - b->tv_sec;
	int udiff = a->tv_usec - b->tv_usec;

	if (diff != 0)
		return diff;

	return udiff;
}

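/* Re-reads tables.list and reloads the stack, retrying with a small random
   backoff for up to roughly three seconds when a concurrent writer keeps
   changing the list underneath us. */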
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open)
{
	char **names = NULL, **names_after = NULL;
	struct timeval deadline;
	int64_t delay = 0;
	int tries = 0, err = 0;
	int fd = -1;

	err = gettimeofday(&deadline, NULL);
	if (err < 0)
		goto out;
	deadline.tv_sec += 3;

	while (1) {
		struct timeval now;

		err = gettimeofday(&now, NULL);
		if (err < 0)
			goto out;

		/*
		 * Only look at deadlines after the first few times. This
		 * simplifies debugging in GDB.
		 */
		tries++;
		if (tries > 3 && tv_cmp(&now, &deadline) >= 0)
			goto out;

		fd = open(st->list_file, O_RDONLY);
		if (fd < 0) {
			if (errno != ENOENT) {
				err = REFTABLE_IO_ERROR;
				goto out;
			}

			names = reftable_calloc(sizeof(char *));
		} else {
			err = fd_read_lines(fd, &names);
			if (err < 0)
				goto out;
		}

		err = reftable_stack_reload_once(st, names, reuse_open);
		if (!err)
			break;
		if (err != REFTABLE_NOT_EXIST_ERROR)
			goto out;

		/*
		 * REFTABLE_NOT_EXIST_ERROR can be caused by a concurrent
		 * writer. Check if there was one by checking if the name list
		 * changed.
		 */
		err = read_lines(st->list_file, &names_after);
		if (err < 0)
			goto out;
		if (names_equal(names_after, names)) {
			err = REFTABLE_NOT_EXIST_ERROR;
			goto out;
		}

		free_names(names);
		names = NULL;
		free_names(names_after);
		names_after = NULL;
		close(fd);
		fd = -1;

		delay = delay + (delay * rand()) / RAND_MAX + 1;
		sleep_millisec(delay);
	}

	stat_validity_update(&st->list_validity, fd);

out:
	if (err)
		stat_validity_clear(&st->list_validity);
	if (fd >= 0)
		close(fd);
	free_names(names);
	free_names(names_after);
	return err;
}

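/* Returns 0 if the in-memory stack still matches tables.list on disk, 1 if it
   is outdated, or a negative error code. */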
static int stack_uptodate(struct reftable_stack *st)
{
	char **names = NULL;
	int err;
	int i = 0;

	if (stat_validity_check(&st->list_validity, st->list_file))
		return 0;

	err = read_lines(st->list_file, &names);
	if (err < 0)
		return err;

	for (i = 0; i < st->readers_len; i++) {
		if (!names[i]) {
			err = 1;
			goto done;
		}

		if (strcmp(st->readers[i]->name, names[i])) {
			err = 1;
			goto done;
		}
	}

	if (names[st->merged->stack_len]) {
		err = 1;
		goto done;
	}

	err = 0;
done:
	free_names(names);
	return err;
}

int reftable_stack_reload(struct reftable_stack *st)
{
	int err = stack_uptodate(st);
	if (err > 0)
		return reftable_stack_reload_maybe_reuse(st, 1);
	return err;
}

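/* Adds a new table to the stack via the write callback. On lock contention
   the stack is reloaded before REFTABLE_LOCK_ERROR is propagated, so the
   caller can retry against fresh data. */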
int reftable_stack_add(struct reftable_stack *st,
		       int (*write)(struct reftable_writer *wr, void *arg),
		       void *arg)
{
	int err = stack_try_add(st, write, arg);
	if (err < 0) {
		if (err == REFTABLE_LOCK_ERROR) {
			/* Ignore error return, we want to propagate
			   REFTABLE_LOCK_ERROR.
			*/
			reftable_stack_reload(st);
		}
		return err;
	}

	if (!st->disable_auto_compact)
		return reftable_stack_auto_compact(st);

	return 0;
}

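/* Formats a table name of the form "0x<min>-0x<max>-<random>" for the given
   update-index range. */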
static void format_name(struct strbuf *dest, uint64_t min, uint64_t max)
{
	char buf[100];
	uint32_t rnd = (uint32_t)git_rand();
	snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
		 min, max, rnd);
	strbuf_reset(dest);
	strbuf_addstr(dest, buf);
}

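/* Transaction state for adding new tables to the stack while holding the
   tables.list lock. */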
struct reftable_addition {
	struct tempfile *lock_file;
	struct reftable_stack *stack;

	char **new_tables;
	int new_tables_len;
	uint64_t next_update_index;
};

#define REFTABLE_ADDITION_INIT {0}

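/* Takes the tables.list lock and verifies that the stack is still up to date
   before new tables may be added. */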
static int reftable_stack_init_addition(struct reftable_addition *add,
					struct reftable_stack *st)
{
	struct strbuf lock_file_name = STRBUF_INIT;
	int err = 0;
	add->stack = st;

	strbuf_addf(&lock_file_name, "%s.lock", st->list_file);

	add->lock_file = create_tempfile(lock_file_name.buf);
	if (!add->lock_file) {
		if (errno == EEXIST) {
			err = REFTABLE_LOCK_ERROR;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	if (st->config.default_permissions) {
		if (chmod(add->lock_file->filename.buf,
			  st->config.default_permissions) < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	err = stack_uptodate(st);
	if (err < 0)
		goto done;

	if (err > 0) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	add->next_update_index = reftable_stack_next_update_index(st);
done:
	if (err) {
		reftable_addition_close(add);
	}
	strbuf_release(&lock_file_name);
	return err;
}

static void reftable_addition_close(struct reftable_addition *add)
{
	int i = 0;
	struct strbuf nm = STRBUF_INIT;
	for (i = 0; i < add->new_tables_len; i++) {
		stack_filename(&nm, add->stack, add->new_tables[i]);
		unlink(nm.buf);
		reftable_free(add->new_tables[i]);
		add->new_tables[i] = NULL;
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	delete_tempfile(&add->lock_file);
	strbuf_release(&nm);
}

void reftable_addition_destroy(struct reftable_addition *add)
{
	if (!add) {
		return;
	}
	reftable_addition_close(add);
	reftable_free(add);
}

int reftable_addition_commit(struct reftable_addition *add)
{
	struct strbuf table_list = STRBUF_INIT;
	int lock_file_fd = get_tempfile_fd(add->lock_file);
	int err = 0;
	int i = 0;

	if (add->new_tables_len == 0)
		goto done;

	for (i = 0; i < add->stack->merged->stack_len; i++) {
		strbuf_addstr(&table_list, add->stack->readers[i]->name);
		strbuf_addstr(&table_list, "\n");
	}
	for (i = 0; i < add->new_tables_len; i++) {
		strbuf_addstr(&table_list, add->new_tables[i]);
		strbuf_addstr(&table_list, "\n");
	}

	err = write_in_full(lock_file_fd, table_list.buf, table_list.len);
	strbuf_release(&table_list);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = rename_tempfile(&add->lock_file, add->stack->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	/* success, no more state to clean up. */
	for (i = 0; i < add->new_tables_len; i++) {
		reftable_free(add->new_tables[i]);
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	err = reftable_stack_reload(add->stack);
	if (err)
		goto done;

	if (!add->stack->disable_auto_compact)
		err = reftable_stack_auto_compact(add->stack);

done:
	reftable_addition_close(add);
	return err;
}

int reftable_stack_new_addition(struct reftable_addition **dest,
				struct reftable_stack *st)
{
	int err = 0;
	struct reftable_addition empty = REFTABLE_ADDITION_INIT;
	*dest = reftable_calloc(sizeof(**dest));
	**dest = empty;
	err = reftable_stack_init_addition(*dest, st);
	if (err) {
		reftable_free(*dest);
		*dest = NULL;
	}
	return err;
}

static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg)
{
	struct reftable_addition add = REFTABLE_ADDITION_INIT;
	int err = reftable_stack_init_addition(&add, st);
	if (err < 0)
		goto done;
	if (err > 0) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	err = reftable_addition_add(&add, write_table, arg);
	if (err < 0)
		goto done;

	err = reftable_addition_commit(&add);
done:
	reftable_addition_close(&add);
	return err;
}

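/* Writes one new table through the callback into a mkstemp() temp file, then
   renames it into place and queues its name for the next commit. */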
int reftable_addition_add(struct reftable_addition *add,
			  int (*write_table)(struct reftable_writer *wr,
					     void *arg),
			  void *arg)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf tab_file_name = STRBUF_INIT;
	struct strbuf next_name = STRBUF_INIT;
	struct reftable_writer *wr = NULL;
	int err = 0;
	int tab_fd = 0;

	strbuf_reset(&next_name);
	format_name(&next_name, add->next_update_index, add->next_update_index);

	stack_filename(&temp_tab_file_name, add->stack, next_name.buf);
	strbuf_addstr(&temp_tab_file_name, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab_file_name.buf);
	if (tab_fd < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	if (add->stack->config.default_permissions) {
		if (chmod(temp_tab_file_name.buf,
			  add->stack->config.default_permissions)) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	wr = reftable_new_writer(reftable_fd_write, &tab_fd,
				 &add->stack->config);
	err = write_table(wr, arg);
	if (err < 0)
		goto done;

	err = reftable_writer_close(wr);
	if (err == REFTABLE_EMPTY_TABLE_ERROR) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = stack_check_addition(add->stack, temp_tab_file_name.buf);
	if (err < 0)
		goto done;

	if (wr->min_update_index < add->next_update_index) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	format_name(&next_name, wr->min_update_index, wr->max_update_index);
	strbuf_addstr(&next_name, ".ref");

	stack_filename(&tab_file_name, add->stack, next_name.buf);

	/*
	   On Windows, this relies on rand() picking a unique destination name.
	   Maybe we should do retry loop as well?
	 */
	err = rename(temp_tab_file_name.buf, tab_file_name.buf);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	add->new_tables = reftable_realloc(add->new_tables,
					   sizeof(*add->new_tables) *
						   (add->new_tables_len + 1));
	add->new_tables[add->new_tables_len] = strbuf_detach(&next_name, NULL);
	add->new_tables_len++;
done:
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (temp_tab_file_name.len > 0) {
		unlink(temp_tab_file_name.buf);
	}

	strbuf_release(&temp_tab_file_name);
	strbuf_release(&tab_file_name);
	strbuf_release(&next_name);
	reftable_writer_free(wr);
	return err;
}

uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
{
	int sz = st->merged->stack_len;
	if (sz > 0)
		return reftable_reader_max_update_index(st->readers[sz - 1]) +
		       1;
	return 1;
}

static int stack_compact_locked(struct reftable_stack *st, int first, int last,
				struct strbuf *temp_tab,
				struct reftable_log_expiry_config *config)
{
	int tab_fd = -1;
	struct strbuf next_name = STRBUF_INIT;
	int err = 0;
	struct reftable_writer *wr = NULL;

	format_name(&next_name,
		    reftable_reader_min_update_index(st->readers[first]),
		    reftable_reader_max_update_index(st->readers[last]));

	stack_filename(temp_tab, st, next_name.buf);
	strbuf_addstr(temp_tab, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab->buf);
	wr = reftable_new_writer(reftable_fd_write, &tab_fd, &st->config);

	err = stack_write_compact(st, wr, first, last, config);
	if (err < 0)
		goto done;
	err = reftable_writer_close(wr);
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;

done:
	reftable_writer_free(wr);
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (err != 0 && temp_tab->len > 0) {
		unlink(temp_tab->buf);
		strbuf_release(temp_tab);
	}
	strbuf_release(&next_name);
	return err;
}

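/* Merges the tables in [first, last] into wr. Deletion tombstones are dropped
   when compacting from the bottom of the stack, and log records are expired
   according to config. */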
static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config)
{
	int subtabs_len = last - first + 1;
	struct reftable_table *subtabs = reftable_calloc(
		sizeof(struct reftable_table) * (last - first + 1));
	struct reftable_merged_table *mt = NULL;
	int err = 0;
	struct reftable_iterator it = { NULL };
	struct reftable_ref_record ref = { NULL };
	struct reftable_log_record log = { NULL };

	uint64_t entries = 0;

	int i = 0, j = 0;
	for (i = first, j = 0; i <= last; i++) {
		struct reftable_reader *t = st->readers[i];
		reftable_table_from_reader(&subtabs[j++], t);
		st->stats.bytes += t->size;
	}
	reftable_writer_set_limits(wr, st->readers[first]->min_update_index,
				   st->readers[last]->max_update_index);

	err = reftable_new_merged_table(&mt, subtabs, subtabs_len,
					st->config.hash_id);
	if (err < 0) {
		reftable_free(subtabs);
		goto done;
	}

	err = reftable_merged_table_seek_ref(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0)
			goto done;

		if (first == 0 && reftable_ref_record_is_deletion(&ref)) {
			continue;
		}

		err = reftable_writer_add_ref(wr, &ref);
		if (err < 0)
			goto done;
		entries++;
	}
	reftable_iterator_destroy(&it);

	err = reftable_merged_table_seek_log(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_log(&it, &log);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0)
			goto done;
		if (first == 0 && reftable_log_record_is_deletion(&log)) {
			continue;
		}

		if (config && config->min_update_index > 0 &&
		    log.update_index < config->min_update_index) {
			continue;
		}

		if (config && config->time > 0 &&
		    log.value.update.time < config->time) {
			continue;
		}

		err = reftable_writer_add_log(wr, &log);
		if (err < 0)
			goto done;
		entries++;
	}

done:
	reftable_iterator_destroy(&it);
	if (mt) {
		merged_table_release(mt);
		reftable_merged_table_free(mt);
	}
	reftable_ref_record_release(&ref);
	reftable_log_record_release(&log);
	st->stats.entries_written += entries;
	return err;
}

/* < 0: error. 0 == OK, > 0 attempt failed; could retry. */
static int stack_compact_range(struct reftable_stack *st, int first, int last,
			       struct reftable_log_expiry_config *expiry)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf new_table_name = STRBUF_INIT;
	struct strbuf lock_file_name = STRBUF_INIT;
	struct strbuf ref_list_contents = STRBUF_INIT;
	struct strbuf new_table_path = STRBUF_INIT;
	int err = 0;
	int have_lock = 0;
	int lock_file_fd = -1;
	int compact_count = last - first + 1;
	char **listp = NULL;
	char **delete_on_success =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	char **subtable_locks =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	int i = 0;
	int j = 0;
	int is_empty_table = 0;

	if (first > last || (!expiry && first == last)) {
		err = 0;
		goto done;
	}

	st->stats.attempts++;

	strbuf_reset(&lock_file_name);
	strbuf_addstr(&lock_file_name, st->list_file);
	strbuf_addstr(&lock_file_name, ".lock");

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0666);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	/* Don't want to write to the lock for now.  */
	close(lock_file_fd);
	lock_file_fd = -1;

	have_lock = 1;
	err = stack_uptodate(st);
	if (err != 0)
		goto done;

	for (i = first, j = 0; i <= last; i++) {
		struct strbuf subtab_file_name = STRBUF_INIT;
		struct strbuf subtab_lock = STRBUF_INIT;
		int sublock_file_fd = -1;

		stack_filename(&subtab_file_name, st,
			       reader_name(st->readers[i]));

		strbuf_reset(&subtab_lock);
		strbuf_addbuf(&subtab_lock, &subtab_file_name);
		strbuf_addstr(&subtab_lock, ".lock");

		sublock_file_fd = open(subtab_lock.buf,
				       O_EXCL | O_CREAT | O_WRONLY, 0666);
		if (sublock_file_fd >= 0) {
			close(sublock_file_fd);
		} else if (sublock_file_fd < 0) {
			if (errno == EEXIST) {
				err = 1;
			} else {
				err = REFTABLE_IO_ERROR;
			}
		}

		subtable_locks[j] = subtab_lock.buf;
		delete_on_success[j] = subtab_file_name.buf;
		j++;

		if (err != 0)
			goto done;
	}

	err = unlink(lock_file_name.buf);
	if (err < 0)
		goto done;
	have_lock = 0;

	err = stack_compact_locked(st, first, last, &temp_tab_file_name,
				   expiry);
	/* Compaction + tombstones can create an empty table out of non-empty
	 * tables. */
	is_empty_table = (err == REFTABLE_EMPTY_TABLE_ERROR);
	if (is_empty_table) {
		err = 0;
	}
	if (err < 0)
		goto done;

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0666);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	have_lock = 1;
	if (st->config.default_permissions) {
		if (chmod(lock_file_name.buf,
			  st->config.default_permissions) < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	format_name(&new_table_name, st->readers[first]->min_update_index,
		    st->readers[last]->max_update_index);
	strbuf_addstr(&new_table_name, ".ref");

	stack_filename(&new_table_path, st, new_table_name.buf);

	if (!is_empty_table) {
		err = rename(temp_tab_file_name.buf, new_table_path.buf);
		if (err < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	for (i = 0; i < first; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	if (!is_empty_table) {
		strbuf_addbuf(&ref_list_contents, &new_table_name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	for (i = last + 1; i < st->merged->stack_len; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}

	err = write_in_full(lock_file_fd, ref_list_contents.buf, ref_list_contents.len);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}
	err = close(lock_file_fd);
	lock_file_fd = -1;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}

	err = rename(lock_file_name.buf, st->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}
	have_lock = 0;

	/* Reload the stack before deleting. On Windows, we can only delete the
	   files after we closed them.
	 */
	err = reftable_stack_reload_maybe_reuse(st, first < last);
	if (err < 0)
		goto done;

	listp = delete_on_success;
	while (*listp) {
		if (strcmp(*listp, new_table_path.buf)) {
			unlink(*listp);
		}
		listp++;
	}

done:
	free_names(delete_on_success);

	listp = subtable_locks;
	while (*listp) {
		unlink(*listp);
		listp++;
	}
	free_names(subtable_locks);
	if (lock_file_fd >= 0) {
		close(lock_file_fd);
		lock_file_fd = -1;
	}
	if (have_lock) {
		unlink(lock_file_name.buf);
	}
	strbuf_release(&new_table_name);
	strbuf_release(&new_table_path);
	strbuf_release(&ref_list_contents);
	strbuf_release(&temp_tab_file_name);
	strbuf_release(&lock_file_name);
	return err;
}

int reftable_stack_compact_all(struct reftable_stack *st,
			       struct reftable_log_expiry_config *config)
{
	return stack_compact_range(st, 0, st->merged->stack_len - 1, config);
}

static int stack_compact_range_stats(struct reftable_stack *st, int first,
				     int last,
				     struct reftable_log_expiry_config *config)
{
	int err = stack_compact_range(st, first, last, config);
	if (err > 0) {
		st->stats.failures++;
	}
	return err;
}

static int segment_size(struct segment *s)
{
	return s->end - s->start;
}

int fastlog2(uint64_t sz)
{
	int l = 0;
	if (sz == 0)
		return 0;
	for (; sz; sz /= 2) {
		l++;
	}
	return l - 1;
}

struct segment *sizes_to_segments(int *seglen, uint64_t *sizes, int n)
{
	struct segment *segs = reftable_calloc(sizeof(struct segment) * n);
	int next = 0;
	struct segment cur = { 0 };
	int i = 0;

	if (n == 0) {
		*seglen = 0;
		return segs;
	}

	for (i = 0; i < n; i++) {
		int log = fastlog2(sizes[i]);
		if (cur.log != log && cur.bytes > 0) {
			struct segment fresh = {
				.start = i,
			};

			segs[next++] = cur;
			cur = fresh;
		}

		cur.log = log;
		cur.end = i + 1;
		cur.bytes += sizes[i];
	}
	segs[next++] = cur;
	*seglen = next;
	return segs;
}

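/* Picks the segment with the smallest size class, then extends it toward the
   bottom of the stack for as long as the accumulated bytes are at least as
   large, by log2 size class, as the next-lower table. */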
struct segment suggest_compaction_segment(uint64_t *sizes, int n)
{
	int seglen = 0;
	struct segment *segs = sizes_to_segments(&seglen, sizes, n);
	struct segment min_seg = {
		.log = 64,
	};
	int i = 0;
	for (i = 0; i < seglen; i++) {
		if (segment_size(&segs[i]) == 1) {
			continue;
		}

		if (segs[i].log < min_seg.log) {
			min_seg = segs[i];
		}
	}

	while (min_seg.start > 0) {
		int prev = min_seg.start - 1;
		if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev])) {
			break;
		}

		min_seg.start = prev;
		min_seg.bytes += sizes[prev];
	}

	reftable_free(segs);
	return min_seg;
}

static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
{
	uint64_t *sizes =
		reftable_calloc(sizeof(uint64_t) * st->merged->stack_len);
	int version = (st->config.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
	int overhead = header_size(version) - 1;
	int i = 0;
	for (i = 0; i < st->merged->stack_len; i++) {
		sizes[i] = st->readers[i]->size - overhead;
	}
	return sizes;
}

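/* Compacts the range of adjacent tables suggested by the size heuristic; a
   no-op when no suitable segment exists. */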
int reftable_stack_auto_compact(struct reftable_stack *st)
{
	uint64_t *sizes = stack_table_sizes_for_compaction(st);
	struct segment seg =
		suggest_compaction_segment(sizes, st->merged->stack_len);
	reftable_free(sizes);
	if (segment_size(&seg) > 0)
		return stack_compact_range_stats(st, seg.start, seg.end - 1,
						 NULL);

	return 0;
}

struct reftable_compaction_stats *
reftable_stack_compaction_stats(struct reftable_stack *st)
{
	return &st->stats;
}

int reftable_stack_read_ref(struct reftable_stack *st, const char *refname,
			    struct reftable_ref_record *ref)
{
	struct reftable_table tab = { NULL };
	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));
	return reftable_table_read_ref(&tab, refname, ref);
}

int reftable_stack_read_log(struct reftable_stack *st, const char *refname,
			    struct reftable_log_record *log)
{
	struct reftable_iterator it = { NULL };
	struct reftable_merged_table *mt = reftable_stack_merged_table(st);
	int err = reftable_merged_table_seek_log(mt, &it, refname);
	if (err)
		goto done;

	err = reftable_iterator_next_log(&it, log);
	if (err)
		goto done;

	if (strcmp(log->refname, refname) ||
	    reftable_log_record_is_deletion(log)) {
		err = 1;
		goto done;
	}

done:
	if (err) {
		reftable_log_record_release(log);
	}
	reftable_iterator_destroy(&it);
	return err;
}

static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name)
{
	int err = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct reftable_table tab = { NULL };
	struct reftable_ref_record *refs = NULL;
	struct reftable_iterator it = { NULL };
	int cap = 0;
	int len = 0;
	int i = 0;

	if (st->config.skip_name_check)
		return 0;

	err = reftable_block_source_from_file(&src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_reader_seek_ref(rd, &it, "");
	if (err > 0) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	while (1) {
		struct reftable_ref_record ref = { NULL };
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			break;
		}
		if (err < 0) {
			err = REFTABLE_API_ERROR;
			goto done;
		}

		if (len >= cap) {
			cap = 2 * cap + 1;
			refs = reftable_realloc(refs, cap * sizeof(refs[0]));
		}

		refs[len++] = ref;
	}

	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));

	err = validate_ref_record_addition(tab, refs, len);

done:
	for (i = 0; i < len; i++) {
		reftable_ref_record_release(&refs[i]);
	}

	reftable_free(refs);
	reftable_iterator_destroy(&it);
	reftable_reader_free(rd);
	return err;
}

static int is_table_name(const char *s)
{
	const char *dot = strrchr(s, '.');
	return dot && !strcmp(dot, ".ref");
}

static void remove_maybe_stale_table(struct reftable_stack *st, uint64_t max,
				     const char *name)
{
	int err = 0;
	uint64_t update_idx = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct strbuf table_path = STRBUF_INIT;
	stack_filename(&table_path, st, name);

	err = reftable_block_source_from_file(&src, table_path.buf);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, name);
	if (err < 0)
		goto done;

	update_idx = reftable_reader_max_update_index(rd);
	reftable_reader_free(rd);

	if (update_idx <= max) {
		unlink(table_path.buf);
	}
done:
	strbuf_release(&table_path);
}

static int reftable_stack_clean_locked(struct reftable_stack *st)
{
	uint64_t max = reftable_merged_table_max_update_index(
		reftable_stack_merged_table(st));
	DIR *dir = opendir(st->reftable_dir);
	struct dirent *d = NULL;
	if (!dir) {
		return REFTABLE_IO_ERROR;
	}

	while ((d = readdir(dir))) {
		int i = 0;
		int found = 0;
		if (!is_table_name(d->d_name))
			continue;

		for (i = 0; !found && i < st->readers_len; i++) {
			found = !strcmp(reader_name(st->readers[i]), d->d_name);
		}
		if (found)
			continue;

		remove_maybe_stale_table(st, max, d->d_name);
	}

	closedir(dir);
	return 0;
}

int reftable_stack_clean(struct reftable_stack *st)
{
	struct reftable_addition *add = NULL;
	int err = reftable_stack_new_addition(&add, st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_reload(st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_clean_locked(st);

done:
	reftable_addition_destroy(add);
	return err;
}

int reftable_stack_print_directory(const char *stackdir, uint32_t hash_id)
{
	struct reftable_stack *stack = NULL;
	struct reftable_write_options cfg = { .hash_id = hash_id };
	struct reftable_merged_table *merged = NULL;
	struct reftable_table table = { NULL };

	int err = reftable_new_stack(&stack, stackdir, cfg);
	if (err < 0)
		goto done;

	merged = reftable_stack_merged_table(stack);
	reftable_table_from_merged_table(&table, merged);
	err = reftable_table_print(&table);
done:
	if (stack)
		reftable_stack_destroy(stack);
	return err;
}