reftable/stack: use stat info to avoid re-reading stack list
[git/gitster.git] / reftable / stack.c
blob c28d82299d8082ebb1d3c19d9dbc763b2ab9f8ad
/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "stack.h"

#include "system.h"
#include "merged.h"
#include "reader.h"
#include "refname.h"
#include "reftable-error.h"
#include "reftable-record.h"
#include "reftable-merged.h"
#include "writer.h"

#include "tempfile.h"

static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg);
static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config);
static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name);
static void reftable_addition_close(struct reftable_addition *add);
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open);

static void stack_filename(struct strbuf *dest, struct reftable_stack *st,
			   const char *name)
{
	strbuf_reset(dest);
	strbuf_addstr(dest, st->reftable_dir);
	strbuf_addstr(dest, "/");
	strbuf_addstr(dest, name);
}

static ssize_t reftable_fd_write(void *arg, const void *data, size_t sz)
{
	int *fdp = (int *)arg;
	return write_in_full(*fdp, data, sz);
}

int reftable_new_stack(struct reftable_stack **dest, const char *dir,
		       struct reftable_write_options config)
{
	struct reftable_stack *p =
		reftable_calloc(sizeof(struct reftable_stack));
	struct strbuf list_file_name = STRBUF_INIT;
	int err = 0;

	if (config.hash_id == 0) {
		config.hash_id = GIT_SHA1_FORMAT_ID;
	}

	*dest = NULL;

	strbuf_reset(&list_file_name);
	strbuf_addstr(&list_file_name, dir);
	strbuf_addstr(&list_file_name, "/tables.list");

	p->list_file = strbuf_detach(&list_file_name, NULL);
	p->reftable_dir = xstrdup(dir);
	p->config = config;

	err = reftable_stack_reload_maybe_reuse(p, 1);
	if (err < 0) {
		reftable_stack_destroy(p);
	} else {
		*dest = p;
	}
	return err;
}

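/* Reads the file pointed to by `fd` in full and parses its contents into a
   NULL-terminated list of table names. */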
static int fd_read_lines(int fd, char ***namesp)
{
	off_t size = lseek(fd, 0, SEEK_END);
	char *buf = NULL;
	int err = 0;
	if (size < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	err = lseek(fd, 0, SEEK_SET);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	buf = reftable_malloc(size + 1);
	if (read_in_full(fd, buf, size) != size) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	buf[size] = 0;

	parse_names(buf, size, namesp);

done:
	reftable_free(buf);
	return err;
}

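/* Reads `filename` into a NULL-terminated list of names. A missing file is
   treated as an empty list. */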
int read_lines(const char *filename, char ***namesp)
{
	int fd = open(filename, O_RDONLY);
	int err = 0;
	if (fd < 0) {
		if (errno == ENOENT) {
			*namesp = reftable_calloc(sizeof(char *));
			return 0;
		}

		return REFTABLE_IO_ERROR;
	}
	err = fd_read_lines(fd, namesp);
	close(fd);
	return err;
}

struct reftable_merged_table *
reftable_stack_merged_table(struct reftable_stack *st)
{
	return st->merged;
}

static int has_name(char **names, const char *name)
{
	while (*names) {
		if (!strcmp(*names, name))
			return 1;
		names++;
	}
	return 0;
}

/* Close and free the stack */
void reftable_stack_destroy(struct reftable_stack *st)
{
	char **names = NULL;
	int err = 0;
	if (st->merged) {
		reftable_merged_table_free(st->merged);
		st->merged = NULL;
	}

	err = read_lines(st->list_file, &names);
	if (err < 0) {
		FREE_AND_NULL(names);
	}

	if (st->readers) {
		int i = 0;
		struct strbuf filename = STRBUF_INIT;
		for (i = 0; i < st->readers_len; i++) {
			const char *name = reader_name(st->readers[i]);
			strbuf_reset(&filename);
			if (names && !has_name(names, name)) {
				stack_filename(&filename, st, name);
			}
			reftable_reader_free(st->readers[i]);

			if (filename.len) {
				/* On Windows, can only unlink after closing. */
				unlink(filename.buf);
			}
		}
		strbuf_release(&filename);
		st->readers_len = 0;
		FREE_AND_NULL(st->readers);
	}
	stat_validity_clear(&st->list_validity);
	FREE_AND_NULL(st->list_file);
	FREE_AND_NULL(st->reftable_dir);
	reftable_free(st);
	free_names(names);
}

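/* Returns a shallow copy of the current reader pointers, so the reload logic
   below can track which open readers were reused. */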
static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
						   int cur_len)
{
	struct reftable_reader **cur =
		reftable_calloc(sizeof(struct reftable_reader *) * cur_len);
	int i = 0;
	for (i = 0; i < cur_len; i++) {
		cur[i] = st->readers[i];
	}
	return cur;
}

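/* Reopens the stack for the table list in `names`. Already-open readers are
   reused if `reuse_open` is set; readers that dropped out of the list are
   closed and their files unlinked. */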
static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
				      int reuse_open)
{
	int cur_len = !st->merged ? 0 : st->merged->stack_len;
	struct reftable_reader **cur = stack_copy_readers(st, cur_len);
	int err = 0;
	int names_len = names_length(names);
	struct reftable_reader **new_readers =
		reftable_calloc(sizeof(struct reftable_reader *) * names_len);
	struct reftable_table *new_tables =
		reftable_calloc(sizeof(struct reftable_table) * names_len);
	int new_readers_len = 0;
	struct reftable_merged_table *new_merged = NULL;
	struct strbuf table_path = STRBUF_INIT;
	int i;

	while (*names) {
		struct reftable_reader *rd = NULL;
		char *name = *names++;

		/* this is linear; we assume compaction keeps the number of
		   tables under control so this is not quadratic. */
		int j = 0;
		for (j = 0; reuse_open && j < cur_len; j++) {
			if (cur[j] && 0 == strcmp(cur[j]->name, name)) {
				rd = cur[j];
				cur[j] = NULL;
				break;
			}
		}

		if (!rd) {
			struct reftable_block_source src = { NULL };
			stack_filename(&table_path, st, name);

			err = reftable_block_source_from_file(&src,
							      table_path.buf);
			if (err < 0)
				goto done;

			err = reftable_new_reader(&rd, &src, name);
			if (err < 0)
				goto done;
		}

		new_readers[new_readers_len] = rd;
		reftable_table_from_reader(&new_tables[new_readers_len], rd);
		new_readers_len++;
	}

	/* success! */
	err = reftable_new_merged_table(&new_merged, new_tables,
					new_readers_len, st->config.hash_id);
	if (err < 0)
		goto done;

	new_tables = NULL;
	st->readers_len = new_readers_len;
	if (st->merged) {
		merged_table_release(st->merged);
		reftable_merged_table_free(st->merged);
	}
	if (st->readers) {
		reftable_free(st->readers);
	}
	st->readers = new_readers;
	new_readers = NULL;
	new_readers_len = 0;

	new_merged->suppress_deletions = 1;
	st->merged = new_merged;
	for (i = 0; i < cur_len; i++) {
		if (cur[i]) {
			const char *name = reader_name(cur[i]);
			stack_filename(&table_path, st, name);

			reader_close(cur[i]);
			reftable_reader_free(cur[i]);

			/* On Windows, can only unlink after closing. */
			unlink(table_path.buf);
		}
	}

done:
	for (i = 0; i < new_readers_len; i++) {
		reader_close(new_readers[i]);
		reftable_reader_free(new_readers[i]);
	}
	reftable_free(new_readers);
	reftable_free(new_tables);
	reftable_free(cur);
	strbuf_release(&table_path);
	return err;
}

/* return negative if a before b. */
static int tv_cmp(struct timeval *a, struct timeval *b)
{
	time_t diff = a->tv_sec - b->tv_sec;
	int udiff = a->tv_usec - b->tv_usec;

	if (diff != 0)
		return diff;

	return udiff;
}

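/* Reloads the stack from disk, retrying with backoff for a few seconds if a
   concurrent writer changes tables.list underneath us. On success the stat
   info of tables.list is recorded so stack_uptodate() can avoid re-reading
   it. */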
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open)
{
	char **names = NULL, **names_after = NULL;
	struct timeval deadline;
	int64_t delay = 0;
	int tries = 0, err;
	int fd = -1;

	err = gettimeofday(&deadline, NULL);
	if (err < 0)
		goto out;
	deadline.tv_sec += 3;

	while (1) {
		struct timeval now;

		err = gettimeofday(&now, NULL);
		if (err < 0)
			goto out;

		/*
		 * Only look at deadlines after the first few times. This
		 * simplifies debugging in GDB.
		 */
		tries++;
		if (tries > 3 && tv_cmp(&now, &deadline) >= 0)
			goto out;

		fd = open(st->list_file, O_RDONLY);
		if (fd < 0) {
			if (errno != ENOENT) {
				err = REFTABLE_IO_ERROR;
				goto out;
			}

			names = reftable_calloc(sizeof(char *));
		} else {
			err = fd_read_lines(fd, &names);
			if (err < 0)
				goto out;
		}

		err = reftable_stack_reload_once(st, names, reuse_open);
		if (!err)
			break;
		if (err != REFTABLE_NOT_EXIST_ERROR)
			goto out;

		/*
		 * REFTABLE_NOT_EXIST_ERROR can be caused by a concurrent
		 * writer. Check if there was one by checking if the name list
		 * changed.
		 */
		err = read_lines(st->list_file, &names_after);
		if (err < 0)
			goto out;
		if (names_equal(names_after, names)) {
			err = REFTABLE_NOT_EXIST_ERROR;
			goto out;
		}

		free_names(names);
		names = NULL;
		free_names(names_after);
		names_after = NULL;
		close(fd);
		fd = -1;

		delay = delay + (delay * rand()) / RAND_MAX + 1;
		sleep_millisec(delay);
	}

	stat_validity_update(&st->list_validity, fd);

out:
	if (err)
		stat_validity_clear(&st->list_validity);
	if (fd >= 0)
		close(fd);
	free_names(names);
	free_names(names_after);
	return err;
}

/* -1 = error
   0 = up to date
   1 = changed. */
static int stack_uptodate(struct reftable_stack *st)
{
	char **names = NULL;
	int err;
	int i = 0;

	if (stat_validity_check(&st->list_validity, st->list_file))
		return 0;

	err = read_lines(st->list_file, &names);
	if (err < 0)
		return err;

	for (i = 0; i < st->readers_len; i++) {
		if (!names[i]) {
			err = 1;
			goto done;
		}

		if (strcmp(st->readers[i]->name, names[i])) {
			err = 1;
			goto done;
		}
	}

	if (names[st->merged->stack_len]) {
		err = 1;
		goto done;
	}

done:
	free_names(names);
	return err;
}

int reftable_stack_reload(struct reftable_stack *st)
{
	int err = stack_uptodate(st);
	if (err > 0)
		return reftable_stack_reload_maybe_reuse(st, 1);
	return err;
}

int reftable_stack_add(struct reftable_stack *st,
		       int (*write)(struct reftable_writer *wr, void *arg),
		       void *arg)
{
	int err = stack_try_add(st, write, arg);
	if (err < 0) {
		if (err == REFTABLE_LOCK_ERROR) {
			/* Ignore error return, we want to propagate
			   REFTABLE_LOCK_ERROR.
			*/
			reftable_stack_reload(st);
		}
		return err;
	}

	if (!st->disable_auto_compact)
		return reftable_stack_auto_compact(st);

	return 0;
}

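/* Formats a table name covering the update index range [min, max], with a
   random suffix to avoid collisions. */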
static void format_name(struct strbuf *dest, uint64_t min, uint64_t max)
{
	char buf[100];
	uint32_t rnd = (uint32_t)git_rand();
	snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
		 min, max, rnd);
	strbuf_reset(dest);
	strbuf_addstr(dest, buf);
}

struct reftable_addition {
	struct tempfile *lock_file;
	struct reftable_stack *stack;

	char **new_tables;
	int new_tables_len;
	uint64_t next_update_index;
};

#define REFTABLE_ADDITION_INIT {0}

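/* Locks the stack by creating tables.list.lock as a tempfile, and records the
   update index that newly added tables must start at. Returns
   REFTABLE_LOCK_ERROR if the lock is already held. */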
static int reftable_stack_init_addition(struct reftable_addition *add,
					struct reftable_stack *st)
{
	struct strbuf lock_file_name = STRBUF_INIT;
	int err = 0;
	add->stack = st;

	strbuf_addf(&lock_file_name, "%s.lock", st->list_file);

	add->lock_file = create_tempfile(lock_file_name.buf);
	if (!add->lock_file) {
		if (errno == EEXIST) {
			err = REFTABLE_LOCK_ERROR;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	if (st->config.default_permissions) {
		if (chmod(add->lock_file->filename.buf, st->config.default_permissions) < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	err = stack_uptodate(st);
	if (err < 0)
		goto done;

	if (err > 1) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	add->next_update_index = reftable_stack_next_update_index(st);
done:
	if (err) {
		reftable_addition_close(add);
	}
	strbuf_release(&lock_file_name);
	return err;
}

static void reftable_addition_close(struct reftable_addition *add)
{
	int i = 0;
	struct strbuf nm = STRBUF_INIT;
	for (i = 0; i < add->new_tables_len; i++) {
		stack_filename(&nm, add->stack, add->new_tables[i]);
		unlink(nm.buf);
		reftable_free(add->new_tables[i]);
		add->new_tables[i] = NULL;
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	delete_tempfile(&add->lock_file);
	strbuf_release(&nm);
}

void reftable_addition_destroy(struct reftable_addition *add)
{
	if (!add) {
		return;
	}
	reftable_addition_close(add);
	reftable_free(add);
}

int reftable_addition_commit(struct reftable_addition *add)
{
	struct strbuf table_list = STRBUF_INIT;
	int lock_file_fd = get_tempfile_fd(add->lock_file);
	int i = 0;
	int err = 0;

	if (add->new_tables_len == 0)
		goto done;

	for (i = 0; i < add->stack->merged->stack_len; i++) {
		strbuf_addstr(&table_list, add->stack->readers[i]->name);
		strbuf_addstr(&table_list, "\n");
	}
	for (i = 0; i < add->new_tables_len; i++) {
		strbuf_addstr(&table_list, add->new_tables[i]);
		strbuf_addstr(&table_list, "\n");
	}

	err = write_in_full(lock_file_fd, table_list.buf, table_list.len);
	strbuf_release(&table_list);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = rename_tempfile(&add->lock_file, add->stack->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	/* success, no more state to clean up. */
	for (i = 0; i < add->new_tables_len; i++) {
		reftable_free(add->new_tables[i]);
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	err = reftable_stack_reload(add->stack);
	if (err)
		goto done;

	if (!add->stack->disable_auto_compact)
		err = reftable_stack_auto_compact(add->stack);

done:
	reftable_addition_close(add);
	return err;
}

int reftable_stack_new_addition(struct reftable_addition **dest,
				struct reftable_stack *st)
{
	int err = 0;
	struct reftable_addition empty = REFTABLE_ADDITION_INIT;
	*dest = reftable_calloc(sizeof(**dest));
	**dest = empty;
	err = reftable_stack_init_addition(*dest, st);
	if (err) {
		reftable_free(*dest);
		*dest = NULL;
	}
	return err;
}

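/* Performs a single add attempt: take the lock, write the new table, and
   commit. Returns REFTABLE_LOCK_ERROR if the stack was locked or out of
   date. */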
static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg)
{
	struct reftable_addition add = REFTABLE_ADDITION_INIT;
	int err = reftable_stack_init_addition(&add, st);
	if (err < 0)
		goto done;
	if (err > 0) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	err = reftable_addition_add(&add, write_table, arg);
	if (err < 0)
		goto done;

	err = reftable_addition_commit(&add);
done:
	reftable_addition_close(&add);
	return err;
}

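/* Writes a single new table via `write_table` into a temporary file, renames
   it into place, and queues its name to be added to tables.list on commit. */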
int reftable_addition_add(struct reftable_addition *add,
			  int (*write_table)(struct reftable_writer *wr,
					     void *arg),
			  void *arg)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf tab_file_name = STRBUF_INIT;
	struct strbuf next_name = STRBUF_INIT;
	struct reftable_writer *wr = NULL;
	int err = 0;
	int tab_fd = 0;

	strbuf_reset(&next_name);
	format_name(&next_name, add->next_update_index, add->next_update_index);

	stack_filename(&temp_tab_file_name, add->stack, next_name.buf);
	strbuf_addstr(&temp_tab_file_name, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab_file_name.buf);
	if (tab_fd < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	if (add->stack->config.default_permissions) {
		if (chmod(temp_tab_file_name.buf, add->stack->config.default_permissions)) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}
	wr = reftable_new_writer(reftable_fd_write, &tab_fd,
				 &add->stack->config);
	err = write_table(wr, arg);
	if (err < 0)
		goto done;

	err = reftable_writer_close(wr);
	if (err == REFTABLE_EMPTY_TABLE_ERROR) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = stack_check_addition(add->stack, temp_tab_file_name.buf);
	if (err < 0)
		goto done;

	if (wr->min_update_index < add->next_update_index) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	format_name(&next_name, wr->min_update_index, wr->max_update_index);
	strbuf_addstr(&next_name, ".ref");

	stack_filename(&tab_file_name, add->stack, next_name.buf);

	/*
	  On windows, this relies on rand() picking a unique destination name.
	  Maybe we should do retry loop as well?
	 */
	err = rename(temp_tab_file_name.buf, tab_file_name.buf);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	add->new_tables = reftable_realloc(add->new_tables,
					   sizeof(*add->new_tables) *
						   (add->new_tables_len + 1));
	add->new_tables[add->new_tables_len] = strbuf_detach(&next_name, NULL);
	add->new_tables_len++;
done:
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (temp_tab_file_name.len > 0) {
		unlink(temp_tab_file_name.buf);
	}

	strbuf_release(&temp_tab_file_name);
	strbuf_release(&tab_file_name);
	strbuf_release(&next_name);
	reftable_writer_free(wr);
	return err;
}

uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
{
	int sz = st->merged->stack_len;
	if (sz > 0)
		return reftable_reader_max_update_index(st->readers[sz - 1]) +
		       1;
	return 1;
}

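/* Writes the compacted contents of readers [first, last] into a temporary
   table file, returning its name in `temp_tab`. */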
static int stack_compact_locked(struct reftable_stack *st, int first, int last,
				struct strbuf *temp_tab,
				struct reftable_log_expiry_config *config)
{
	struct strbuf next_name = STRBUF_INIT;
	int tab_fd = -1;
	struct reftable_writer *wr = NULL;
	int err = 0;

	format_name(&next_name,
		    reftable_reader_min_update_index(st->readers[first]),
		    reftable_reader_max_update_index(st->readers[last]));

	stack_filename(temp_tab, st, next_name.buf);
	strbuf_addstr(temp_tab, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab->buf);
	wr = reftable_new_writer(reftable_fd_write, &tab_fd, &st->config);

	err = stack_write_compact(st, wr, first, last, config);
	if (err < 0)
		goto done;
	err = reftable_writer_close(wr);
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;

done:
	reftable_writer_free(wr);
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (err != 0 && temp_tab->len > 0) {
		unlink(temp_tab->buf);
		strbuf_release(temp_tab);
	}
	strbuf_release(&next_name);
	return err;
}

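/* Merges the refs and logs of readers [first, last] into `wr`. Deletion
   tombstones are dropped when compacting from the bottom of the stack, and
   log records are filtered according to the expiry config. */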
static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config)
{
	int subtabs_len = last - first + 1;
	struct reftable_table *subtabs = reftable_calloc(
		sizeof(struct reftable_table) * (last - first + 1));
	struct reftable_merged_table *mt = NULL;
	int err = 0;
	struct reftable_iterator it = { NULL };
	struct reftable_ref_record ref = { NULL };
	struct reftable_log_record log = { NULL };

	uint64_t entries = 0;

	int i = 0, j = 0;
	for (i = first, j = 0; i <= last; i++) {
		struct reftable_reader *t = st->readers[i];
		reftable_table_from_reader(&subtabs[j++], t);
		st->stats.bytes += t->size;
	}
	reftable_writer_set_limits(wr, st->readers[first]->min_update_index,
				   st->readers[last]->max_update_index);

	err = reftable_new_merged_table(&mt, subtabs, subtabs_len,
					st->config.hash_id);
	if (err < 0) {
		reftable_free(subtabs);
		goto done;
	}

	err = reftable_merged_table_seek_ref(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0) {
			break;
		}

		if (first == 0 && reftable_ref_record_is_deletion(&ref)) {
			continue;
		}

		err = reftable_writer_add_ref(wr, &ref);
		if (err < 0) {
			break;
		}
		entries++;
	}
	reftable_iterator_destroy(&it);

	err = reftable_merged_table_seek_log(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_log(&it, &log);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0) {
			break;
		}
		if (first == 0 && reftable_log_record_is_deletion(&log)) {
			continue;
		}

		if (config && config->min_update_index > 0 &&
		    log.update_index < config->min_update_index) {
			continue;
		}

		if (config && config->time > 0 &&
		    log.value.update.time < config->time) {
			continue;
		}

		err = reftable_writer_add_log(wr, &log);
		if (err < 0) {
			break;
		}
		entries++;
	}

done:
	reftable_iterator_destroy(&it);
	if (mt) {
		merged_table_release(mt);
		reftable_merged_table_free(mt);
	}
	reftable_ref_record_release(&ref);
	reftable_log_record_release(&log);
	st->stats.entries_written += entries;
	return err;
}

/* < 0: error. 0 == OK, > 0 attempt failed; could retry. */
static int stack_compact_range(struct reftable_stack *st, int first, int last,
			       struct reftable_log_expiry_config *expiry)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf new_table_name = STRBUF_INIT;
	struct strbuf lock_file_name = STRBUF_INIT;
	struct strbuf ref_list_contents = STRBUF_INIT;
	struct strbuf new_table_path = STRBUF_INIT;
	int err = 0;
	int have_lock = 0;
	int lock_file_fd = -1;
	int compact_count = last - first + 1;
	char **listp = NULL;
	char **delete_on_success =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	char **subtable_locks =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	int i = 0;
	int j = 0;
	int is_empty_table = 0;

	if (first > last || (!expiry && first == last)) {
		err = 0;
		goto done;
	}

	st->stats.attempts++;

	strbuf_reset(&lock_file_name);
	strbuf_addstr(&lock_file_name, st->list_file);
	strbuf_addstr(&lock_file_name, ".lock");

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0666);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	/* Don't want to write to the lock for now. */
	close(lock_file_fd);
	lock_file_fd = -1;

	have_lock = 1;
	err = stack_uptodate(st);
	if (err != 0)
		goto done;

	for (i = first, j = 0; i <= last; i++) {
		struct strbuf subtab_file_name = STRBUF_INIT;
		struct strbuf subtab_lock = STRBUF_INIT;
		int sublock_file_fd = -1;

		stack_filename(&subtab_file_name, st,
			       reader_name(st->readers[i]));

		strbuf_reset(&subtab_lock);
		strbuf_addbuf(&subtab_lock, &subtab_file_name);
		strbuf_addstr(&subtab_lock, ".lock");

		sublock_file_fd = open(subtab_lock.buf,
				       O_EXCL | O_CREAT | O_WRONLY, 0666);
		if (sublock_file_fd >= 0) {
			close(sublock_file_fd);
		} else if (sublock_file_fd < 0) {
			if (errno == EEXIST) {
				err = 1;
			} else {
				err = REFTABLE_IO_ERROR;
			}
		}

		subtable_locks[j] = subtab_lock.buf;
		delete_on_success[j] = subtab_file_name.buf;
		j++;

		if (err != 0)
			goto done;
	}

	err = unlink(lock_file_name.buf);
	if (err < 0)
		goto done;
	have_lock = 0;

	err = stack_compact_locked(st, first, last, &temp_tab_file_name,
				   expiry);
	/* Compaction + tombstones can create an empty table out of non-empty
	 * tables. */
	is_empty_table = (err == REFTABLE_EMPTY_TABLE_ERROR);
	if (is_empty_table) {
		err = 0;
	}
	if (err < 0)
		goto done;

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0666);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	have_lock = 1;
	if (st->config.default_permissions) {
		if (chmod(lock_file_name.buf, st->config.default_permissions) < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	format_name(&new_table_name, st->readers[first]->min_update_index,
		    st->readers[last]->max_update_index);
	strbuf_addstr(&new_table_name, ".ref");

	stack_filename(&new_table_path, st, new_table_name.buf);

	if (!is_empty_table) {
		/* retry? */
		err = rename(temp_tab_file_name.buf, new_table_path.buf);
		if (err < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	for (i = 0; i < first; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	if (!is_empty_table) {
		strbuf_addbuf(&ref_list_contents, &new_table_name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	for (i = last + 1; i < st->merged->stack_len; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}

	err = write_in_full(lock_file_fd, ref_list_contents.buf, ref_list_contents.len);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}
	err = close(lock_file_fd);
	lock_file_fd = -1;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}

	err = rename(lock_file_name.buf, st->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}
	have_lock = 0;

	/* Reload the stack before deleting. On windows, we can only delete the
	   files after we closed them.
	*/
	err = reftable_stack_reload_maybe_reuse(st, first < last);

	listp = delete_on_success;
	while (*listp) {
		if (strcmp(*listp, new_table_path.buf)) {
			unlink(*listp);
		}
		listp++;
	}

done:
	free_names(delete_on_success);

	listp = subtable_locks;
	while (*listp) {
		unlink(*listp);
		listp++;
	}
	free_names(subtable_locks);
	if (lock_file_fd >= 0) {
		close(lock_file_fd);
		lock_file_fd = -1;
	}
	if (have_lock) {
		unlink(lock_file_name.buf);
	}
	strbuf_release(&new_table_name);
	strbuf_release(&new_table_path);
	strbuf_release(&ref_list_contents);
	strbuf_release(&temp_tab_file_name);
	strbuf_release(&lock_file_name);
	return err;
}

int reftable_stack_compact_all(struct reftable_stack *st,
			       struct reftable_log_expiry_config *config)
{
	return stack_compact_range(st, 0, st->merged->stack_len - 1, config);
}

static int stack_compact_range_stats(struct reftable_stack *st, int first,
				     int last,
				     struct reftable_log_expiry_config *config)
{
	int err = stack_compact_range(st, first, last, config);
	if (err > 0) {
		st->stats.failures++;
	}
	return err;
}

static int segment_size(struct segment *s)
{
	return s->end - s->start;
}

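/* Integer log2, rounding down; returns 0 for 0. */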
int fastlog2(uint64_t sz)
{
	int l = 0;
	if (sz == 0)
		return 0;
	for (; sz; sz /= 2) {
		l++;
	}
	return l - 1;
}

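/* Splits the table sizes into segments of adjacent tables whose sizes fall
   into the same log2 bucket. */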
struct segment *sizes_to_segments(int *seglen, uint64_t *sizes, int n)
{
	struct segment *segs = reftable_calloc(sizeof(struct segment) * n);
	int next = 0;
	struct segment cur = { 0 };
	int i = 0;

	if (n == 0) {
		*seglen = 0;
		return segs;
	}
	for (i = 0; i < n; i++) {
		int log = fastlog2(sizes[i]);
		if (cur.log != log && cur.bytes > 0) {
			struct segment fresh = {
				.start = i,
			};

			segs[next++] = cur;
			cur = fresh;
		}

		cur.log = log;
		cur.end = i + 1;
		cur.bytes += sizes[i];
	}
	segs[next++] = cur;
	*seglen = next;
	return segs;
}

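/* Picks the segment with the smallest log2 size that spans more than one
   table, and extends it towards the bottom of the stack as long as the
   combined segment is at least as large (in log2 terms) as the table below
   it. */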
struct segment suggest_compaction_segment(uint64_t *sizes, int n)
{
	int seglen = 0;
	struct segment *segs = sizes_to_segments(&seglen, sizes, n);
	struct segment min_seg = {
		.log = 64,
	};
	int i = 0;
	for (i = 0; i < seglen; i++) {
		if (segment_size(&segs[i]) == 1) {
			continue;
		}

		if (segs[i].log < min_seg.log) {
			min_seg = segs[i];
		}
	}

	while (min_seg.start > 0) {
		int prev = min_seg.start - 1;
		if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev])) {
			break;
		}

		min_seg.start = prev;
		min_seg.bytes += sizes[prev];
	}

	reftable_free(segs);
	return min_seg;
}

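/* Returns the sizes of all tables in the stack, with the per-table header
   overhead subtracted, for use in picking a compaction segment. */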
static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
{
	uint64_t *sizes =
		reftable_calloc(sizeof(uint64_t) * st->merged->stack_len);
	int version = (st->config.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
	int overhead = header_size(version) - 1;
	int i = 0;
	for (i = 0; i < st->merged->stack_len; i++) {
		sizes[i] = st->readers[i]->size - overhead;
	}
	return sizes;
}

int reftable_stack_auto_compact(struct reftable_stack *st)
{
	uint64_t *sizes = stack_table_sizes_for_compaction(st);
	struct segment seg =
		suggest_compaction_segment(sizes, st->merged->stack_len);
	reftable_free(sizes);
	if (segment_size(&seg) > 0)
		return stack_compact_range_stats(st, seg.start, seg.end - 1,
						 NULL);

	return 0;
}

struct reftable_compaction_stats *
reftable_stack_compaction_stats(struct reftable_stack *st)
{
	return &st->stats;
}

int reftable_stack_read_ref(struct reftable_stack *st, const char *refname,
			    struct reftable_ref_record *ref)
{
	struct reftable_table tab = { NULL };
	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));
	return reftable_table_read_ref(&tab, refname, ref);
}

int reftable_stack_read_log(struct reftable_stack *st, const char *refname,
			    struct reftable_log_record *log)
{
	struct reftable_iterator it = { NULL };
	struct reftable_merged_table *mt = reftable_stack_merged_table(st);
	int err = reftable_merged_table_seek_log(mt, &it, refname);
	if (err)
		goto done;

	err = reftable_iterator_next_log(&it, log);
	if (err)
		goto done;

	if (strcmp(log->refname, refname) ||
	    reftable_log_record_is_deletion(log)) {
		err = 1;
		goto done;
	}

done:
	if (err) {
		reftable_log_record_release(log);
	}
	reftable_iterator_destroy(&it);
	return err;
}

static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name)
{
	int err = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct reftable_table tab = { NULL };
	struct reftable_ref_record *refs = NULL;
	struct reftable_iterator it = { NULL };
	int cap = 0;
	int len = 0;
	int i = 0;

	if (st->config.skip_name_check)
		return 0;

	err = reftable_block_source_from_file(&src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_reader_seek_ref(rd, &it, "");
	if (err > 0) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	while (1) {
		struct reftable_ref_record ref = { NULL };
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			break;
		}
		if (err < 0)
			goto done;

		if (len >= cap) {
			cap = 2 * cap + 1;
			refs = reftable_realloc(refs, cap * sizeof(refs[0]));
		}

		refs[len++] = ref;
	}

	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));

	err = validate_ref_record_addition(tab, refs, len);

done:
	for (i = 0; i < len; i++) {
		reftable_ref_record_release(&refs[i]);
	}

	free(refs);
	reftable_iterator_destroy(&it);
	reftable_reader_free(rd);
	return err;
}

static int is_table_name(const char *s)
{
	const char *dot = strrchr(s, '.');
	return dot && !strcmp(dot, ".ref");
}

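/* Deletes the named table file if its maximum update index shows that it is
   already covered by the current stack. */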
static void remove_maybe_stale_table(struct reftable_stack *st, uint64_t max,
				     const char *name)
{
	int err = 0;
	uint64_t update_idx = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct strbuf table_path = STRBUF_INIT;
	stack_filename(&table_path, st, name);

	err = reftable_block_source_from_file(&src, table_path.buf);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, name);
	if (err < 0)
		goto done;

	update_idx = reftable_reader_max_update_index(rd);
	reftable_reader_free(rd);

	if (update_idx <= max) {
		unlink(table_path.buf);
	}
done:
	strbuf_release(&table_path);
}

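/* Removes *.ref files in the reftable directory that are not part of the
   current stack and whose updates are already reflected in it. */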
static int reftable_stack_clean_locked(struct reftable_stack *st)
{
	uint64_t max = reftable_merged_table_max_update_index(
		reftable_stack_merged_table(st));
	DIR *dir = opendir(st->reftable_dir);
	struct dirent *d = NULL;
	if (!dir) {
		return REFTABLE_IO_ERROR;
	}

	while ((d = readdir(dir))) {
		int i = 0;
		int found = 0;
		if (!is_table_name(d->d_name))
			continue;

		for (i = 0; !found && i < st->readers_len; i++) {
			found = !strcmp(reader_name(st->readers[i]), d->d_name);
		}
		if (found)
			continue;

		remove_maybe_stale_table(st, max, d->d_name);
	}

	closedir(dir);
	return 0;
}

int reftable_stack_clean(struct reftable_stack *st)
{
	struct reftable_addition *add = NULL;
	int err = reftable_stack_new_addition(&add, st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_reload(st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_clean_locked(st);

done:
	reftable_addition_destroy(add);
	return err;
}

int reftable_stack_print_directory(const char *stackdir, uint32_t hash_id)
{
	struct reftable_stack *stack = NULL;
	struct reftable_write_options cfg = { .hash_id = hash_id };
	struct reftable_merged_table *merged = NULL;
	struct reftable_table table = { NULL };

	int err = reftable_new_stack(&stack, stackdir, cfg);
	if (err < 0)
		goto done;

	merged = reftable_stack_merged_table(stack);
	reftable_table_from_merged_table(&table, merged);
	err = reftable_table_print(&table);
done:
	if (stack)
		reftable_stack_destroy(stack);
	return err;
}