reftable/stack.c
/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "stack.h"

#include "../write-or-die.h"
#include "system.h"
#include "merged.h"
#include "reader.h"
#include "refname.h"
#include "reftable-error.h"
#include "reftable-record.h"
#include "reftable-merged.h"
#include "writer.h"
#include "tempfile.h"

static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg);
static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config);
static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name);
static void reftable_addition_close(struct reftable_addition *add);
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open);

static void stack_filename(struct strbuf *dest, struct reftable_stack *st,
			   const char *name)
{
	strbuf_reset(dest);
	strbuf_addstr(dest, st->reftable_dir);
	strbuf_addstr(dest, "/");
	strbuf_addstr(dest, name);
}

static ssize_t reftable_fd_write(void *arg, const void *data, size_t sz)
{
	int *fdp = (int *)arg;
	return write_in_full(*fdp, data, sz);
}

static int reftable_fd_flush(void *arg)
{
	int *fdp = (int *)arg;

	return fsync_component(FSYNC_COMPONENT_REFERENCE, *fdp);
}

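/*
 * Rough usage sketch of the stack API (illustrative only; the directory,
 * callback and error handling below are made-up placeholders):
 *
 *	struct reftable_stack *st = NULL;
 *	struct reftable_write_options cfg = { 0 };
 *	int err = reftable_new_stack(&st, ".git/reftable", cfg);
 *	if (!err)
 *		err = reftable_stack_add(st, &write_my_table, my_arg);
 *	if (st)
 *		reftable_stack_destroy(st);
 *
 * write_my_table() stands for a caller-provided callback that emits records
 * through the reftable_writer handed to it.
 */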
int reftable_new_stack(struct reftable_stack **dest, const char *dir,
		       struct reftable_write_options config)
{
	struct reftable_stack *p =
		reftable_calloc(sizeof(struct reftable_stack));
	struct strbuf list_file_name = STRBUF_INIT;
	int err = 0;

	if (config.hash_id == 0) {
		config.hash_id = GIT_SHA1_FORMAT_ID;
	}

	*dest = NULL;

	strbuf_reset(&list_file_name);
	strbuf_addstr(&list_file_name, dir);
	strbuf_addstr(&list_file_name, "/tables.list");

	p->list_file = strbuf_detach(&list_file_name, NULL);
	p->list_fd = -1;
	p->reftable_dir = xstrdup(dir);
	p->config = config;

	err = reftable_stack_reload_maybe_reuse(p, 1);
	if (err < 0) {
		reftable_stack_destroy(p);
	} else {
		*dest = p;
	}
	return err;
}

static int fd_read_lines(int fd, char ***namesp)
{
	off_t size = lseek(fd, 0, SEEK_END);
	char *buf = NULL;
	int err = 0;
	if (size < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	err = lseek(fd, 0, SEEK_SET);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	buf = reftable_malloc(size + 1);
	if (read_in_full(fd, buf, size) != size) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	buf[size] = 0;

	parse_names(buf, size, namesp);

done:
	reftable_free(buf);
	return err;
}

int read_lines(const char *filename, char ***namesp)
{
	int fd = open(filename, O_RDONLY);
	int err = 0;
	if (fd < 0) {
		if (errno == ENOENT) {
			*namesp = reftable_calloc(sizeof(char *));
			return 0;
		}

		return REFTABLE_IO_ERROR;
	}
	err = fd_read_lines(fd, namesp);
	close(fd);
	return err;
}

struct reftable_merged_table *
reftable_stack_merged_table(struct reftable_stack *st)
{
	return st->merged;
}

static int has_name(char **names, const char *name)
{
	while (*names) {
		if (!strcmp(*names, name))
			return 1;
		names++;
	}
	return 0;
}

/* Close and free the stack */
void reftable_stack_destroy(struct reftable_stack *st)
{
	char **names = NULL;
	int err = 0;
	if (st->merged) {
		reftable_merged_table_free(st->merged);
		st->merged = NULL;
	}

	err = read_lines(st->list_file, &names);
	if (err < 0) {
		FREE_AND_NULL(names);
	}

	if (st->readers) {
		int i = 0;
		struct strbuf filename = STRBUF_INIT;
		for (i = 0; i < st->readers_len; i++) {
			const char *name = reader_name(st->readers[i]);
			strbuf_reset(&filename);
			if (names && !has_name(names, name)) {
				stack_filename(&filename, st, name);
			}
			reftable_reader_free(st->readers[i]);

			if (filename.len) {
				/* On Windows, can only unlink after closing. */
				unlink(filename.buf);
			}
		}
		strbuf_release(&filename);
		st->readers_len = 0;
		FREE_AND_NULL(st->readers);
	}

	if (st->list_fd >= 0) {
		close(st->list_fd);
		st->list_fd = -1;
	}

	FREE_AND_NULL(st->list_file);
	FREE_AND_NULL(st->reftable_dir);
	reftable_free(st);
	free_names(names);
}

static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
						   int cur_len)
{
	struct reftable_reader **cur =
		reftable_calloc(sizeof(struct reftable_reader *) * cur_len);
	int i = 0;
	for (i = 0; i < cur_len; i++) {
		cur[i] = st->readers[i];
	}
	return cur;
}

static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
				      int reuse_open)
{
	int cur_len = !st->merged ? 0 : st->merged->stack_len;
	struct reftable_reader **cur = stack_copy_readers(st, cur_len);
	int err = 0;
	int names_len = names_length(names);
	struct reftable_reader **new_readers =
		reftable_calloc(sizeof(struct reftable_reader *) * names_len);
	struct reftable_table *new_tables =
		reftable_calloc(sizeof(struct reftable_table) * names_len);
	int new_readers_len = 0;
	struct reftable_merged_table *new_merged = NULL;
	struct strbuf table_path = STRBUF_INIT;
	int i;

	while (*names) {
		struct reftable_reader *rd = NULL;
		char *name = *names++;

		/* this is linear; we assume compaction keeps the number of
		   tables under control so this is not quadratic. */
		int j = 0;
		for (j = 0; reuse_open && j < cur_len; j++) {
			if (cur[j] && 0 == strcmp(cur[j]->name, name)) {
				rd = cur[j];
				cur[j] = NULL;
				break;
			}
		}

		if (!rd) {
			struct reftable_block_source src = { NULL };
			stack_filename(&table_path, st, name);

			err = reftable_block_source_from_file(&src,
							      table_path.buf);
			if (err < 0)
				goto done;

			err = reftable_new_reader(&rd, &src, name);
			if (err < 0)
				goto done;
		}

		new_readers[new_readers_len] = rd;
		reftable_table_from_reader(&new_tables[new_readers_len], rd);
		new_readers_len++;
	}

	/* success! */
	err = reftable_new_merged_table(&new_merged, new_tables,
					new_readers_len, st->config.hash_id);
	if (err < 0)
		goto done;

	new_tables = NULL;
	st->readers_len = new_readers_len;
	if (st->merged) {
		merged_table_release(st->merged);
		reftable_merged_table_free(st->merged);
	}
	if (st->readers) {
		reftable_free(st->readers);
	}
	st->readers = new_readers;
	new_readers = NULL;
	new_readers_len = 0;

	new_merged->suppress_deletions = 1;
	st->merged = new_merged;
	for (i = 0; i < cur_len; i++) {
		if (cur[i]) {
			const char *name = reader_name(cur[i]);
			stack_filename(&table_path, st, name);

			reader_close(cur[i]);
			reftable_reader_free(cur[i]);

			/* On Windows, can only unlink after closing. */
			unlink(table_path.buf);
		}
	}

done:
	for (i = 0; i < new_readers_len; i++) {
		reader_close(new_readers[i]);
		reftable_reader_free(new_readers[i]);
	}
	reftable_free(new_readers);
	reftable_free(new_tables);
	reftable_free(cur);
	strbuf_release(&table_path);
	return err;
}

/* return negative if a before b. */
static int tv_cmp(struct timeval *a, struct timeval *b)
{
	time_t diff = a->tv_sec - b->tv_sec;
	int udiff = a->tv_usec - b->tv_usec;

	if (diff != 0)
		return diff;

	return udiff;
}

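/*
 * Load the stack from "tables.list". When a concurrent writer swaps tables
 * underneath us (surfacing as REFTABLE_NOT_EXIST_ERROR with a changed name
 * list), retry with randomized, growing backoff until the ~3 second deadline
 * below expires.
 */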
static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
					     int reuse_open)
{
	char **names = NULL, **names_after = NULL;
	struct timeval deadline;
	int64_t delay = 0;
	int tries = 0, err;
	int fd = -1;

	err = gettimeofday(&deadline, NULL);
	if (err < 0)
		goto out;
	deadline.tv_sec += 3;

	while (1) {
		struct timeval now;

		err = gettimeofday(&now, NULL);
		if (err < 0)
			goto out;

		/*
		 * Only look at deadlines after the first few times. This
		 * simplifies debugging in GDB.
		 */
		tries++;
		if (tries > 3 && tv_cmp(&now, &deadline) >= 0)
			goto out;

		fd = open(st->list_file, O_RDONLY);
		if (fd < 0) {
			if (errno != ENOENT) {
				err = REFTABLE_IO_ERROR;
				goto out;
			}

			names = reftable_calloc(sizeof(char *));
		} else {
			err = fd_read_lines(fd, &names);
			if (err < 0)
				goto out;
		}

		err = reftable_stack_reload_once(st, names, reuse_open);
		if (!err)
			break;
		if (err != REFTABLE_NOT_EXIST_ERROR)
			goto out;

		/*
		 * REFTABLE_NOT_EXIST_ERROR can be caused by a concurrent
		 * writer. Check if there was one by checking if the name list
		 * changed.
		 */
		err = read_lines(st->list_file, &names_after);
		if (err < 0)
			goto out;
		if (names_equal(names_after, names)) {
			err = REFTABLE_NOT_EXIST_ERROR;
			goto out;
		}

		free_names(names);
		names = NULL;
		free_names(names_after);
		names_after = NULL;
		close(fd);
		fd = -1;

		delay = delay + (delay * rand()) / RAND_MAX + 1;
		sleep_millisec(delay);
	}

out:
	/*
	 * Invalidate the stat cache. It is sufficient to only close the file
	 * descriptor and keep the cached stat info because we never use the
	 * latter when the former is negative.
	 */
	if (st->list_fd >= 0) {
		close(st->list_fd);
		st->list_fd = -1;
	}

	/*
	 * Cache stat information in case it provides a useful signal to us.
	 * According to POSIX, "The st_ino and st_dev fields taken together
	 * uniquely identify the file within the system." That being said,
	 * Windows is not POSIX compliant and we do not have these fields
	 * available. So the information we have there is insufficient to
	 * determine whether two file descriptors point to the same file.
	 *
	 * While we could fall back to using other signals like the file's
	 * mtime, those are not sufficient to avoid races. We thus refrain from
	 * using the stat cache on such systems and fall back to the secondary
	 * caching mechanism, which is to check whether contents of the file
	 * have changed.
	 *
	 * On other systems which are POSIX compliant we must keep the file
	 * descriptor open. This is to avoid a race condition where two
	 * processes access the reftable stack at the same point in time:
	 *
	 *   1. A reads the reftable stack and caches its stat info.
	 *
	 *   2. B updates the stack, appending a new table to "tables.list".
	 *      This will both use a new inode and result in a different file
	 *      size, thus invalidating A's cache in theory.
	 *
	 *   3. B decides to auto-compact the stack and merges two tables. The
	 *      file size now matches what A has cached again. Furthermore, the
	 *      filesystem may decide to recycle the inode number of the file
	 *      we have replaced in (2) because it is not in use anymore.
	 *
	 *   4. A reloads the reftable stack. Neither the inode number nor the
	 *      file size changed. If the timestamps did not change either then
	 *      we think the cached copy of our stack is up-to-date.
	 *
	 * By keeping the file descriptor open the inode number cannot be
	 * recycled, mitigating the race.
	 */
	if (!err && fd >= 0 && !fstat(fd, &st->list_st) &&
	    st->list_st.st_dev && st->list_st.st_ino) {
		st->list_fd = fd;
		fd = -1;
	}

	if (fd >= 0)
		close(fd);
	free_names(names);
	free_names(names_after);
	return err;
}

/* -1 = error
   0 = up to date
   1 = changed. */
static int stack_uptodate(struct reftable_stack *st)
{
	char **names = NULL;
	int err;
	int i = 0;

	/*
	 * When we have cached stat information available then we use it to
	 * verify whether the file has been rewritten.
	 *
	 * Note that we explicitly do not want to use `stat_validity_check()`
	 * and friends here because they may end up not comparing the `st_dev`
	 * and `st_ino` fields. These functions thus cannot guarantee that we
	 * indeed still have the same file.
	 */
	if (st->list_fd >= 0) {
		struct stat list_st;

		if (stat(st->list_file, &list_st) < 0) {
			/*
			 * It's fine for "tables.list" to not exist. In that
			 * case, we have to refresh when the loaded stack has
			 * any readers.
			 */
			if (errno == ENOENT)
				return !!st->readers_len;
			return REFTABLE_IO_ERROR;
		}

		/*
		 * When "tables.list" refers to the same file we can assume
		 * that it didn't change. This is because we always use
		 * rename(3P) to update the file and never write to it
		 * directly.
		 */
		if (st->list_st.st_dev == list_st.st_dev &&
		    st->list_st.st_ino == list_st.st_ino)
			return 0;
	}

	err = read_lines(st->list_file, &names);
	if (err < 0)
		return err;

	for (i = 0; i < st->readers_len; i++) {
		if (!names[i]) {
			err = 1;
			goto done;
		}

		if (strcmp(st->readers[i]->name, names[i])) {
			err = 1;
			goto done;
		}
	}

	if (names[st->merged->stack_len]) {
		err = 1;
		goto done;
	}

done:
	free_names(names);
	return err;
}

int reftable_stack_reload(struct reftable_stack *st)
{
	int err = stack_uptodate(st);
	if (err > 0)
		return reftable_stack_reload_maybe_reuse(st, 1);
	return err;
}

int reftable_stack_add(struct reftable_stack *st,
		       int (*write)(struct reftable_writer *wr, void *arg),
		       void *arg)
{
	int err = stack_try_add(st, write, arg);
	if (err < 0) {
		if (err == REFTABLE_LOCK_ERROR) {
			/* Ignore error return, we want to propagate
			   REFTABLE_LOCK_ERROR.
			*/
			reftable_stack_reload(st);
		}
		return err;
	}

	return 0;
}

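/*
 * Format a table name as "0x<min>-0x<max>-<random>", where min/max are the
 * update-index range covered by the table and the random suffix avoids
 * collisions between concurrently written tables.
 */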
static void format_name(struct strbuf *dest, uint64_t min, uint64_t max)
{
	char buf[100];
	uint32_t rnd = (uint32_t)git_rand();
	snprintf(buf, sizeof(buf), "0x%012" PRIx64 "-0x%012" PRIx64 "-%08x",
		 min, max, rnd);
	strbuf_reset(dest);
	strbuf_addstr(dest, buf);
}

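/*
 * An addition is a transaction against the stack: it holds "tables.list.lock"
 * for its lifetime, records the names of newly written tables, and publishes
 * them atomically by renaming the lock file over "tables.list" on commit.
 * Rough lifecycle (illustrative only):
 *
 *	struct reftable_addition *add = NULL;
 *	int err = reftable_stack_new_addition(&add, st);
 *	if (!err)
 *		err = reftable_addition_add(add, &write_my_table, my_arg);
 *	if (!err)
 *		err = reftable_addition_commit(add);
 *	reftable_addition_destroy(add);
 */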
struct reftable_addition {
	struct tempfile *lock_file;
	struct reftable_stack *stack;

	char **new_tables;
	int new_tables_len;
	uint64_t next_update_index;
};

#define REFTABLE_ADDITION_INIT {0}

static int reftable_stack_init_addition(struct reftable_addition *add,
					struct reftable_stack *st)
{
	struct strbuf lock_file_name = STRBUF_INIT;
	int err = 0;
	add->stack = st;

	strbuf_addf(&lock_file_name, "%s.lock", st->list_file);

	add->lock_file = create_tempfile(lock_file_name.buf);
	if (!add->lock_file) {
		if (errno == EEXIST) {
			err = REFTABLE_LOCK_ERROR;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	if (st->config.default_permissions) {
		if (chmod(add->lock_file->filename.buf, st->config.default_permissions) < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	err = stack_uptodate(st);
	if (err < 0)
		goto done;

	if (err > 1) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	add->next_update_index = reftable_stack_next_update_index(st);
done:
	if (err) {
		reftable_addition_close(add);
	}
	strbuf_release(&lock_file_name);
	return err;
}

static void reftable_addition_close(struct reftable_addition *add)
{
	int i = 0;
	struct strbuf nm = STRBUF_INIT;
	for (i = 0; i < add->new_tables_len; i++) {
		stack_filename(&nm, add->stack, add->new_tables[i]);
		unlink(nm.buf);
		reftable_free(add->new_tables[i]);
		add->new_tables[i] = NULL;
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	delete_tempfile(&add->lock_file);
	strbuf_release(&nm);
}

void reftable_addition_destroy(struct reftable_addition *add)
{
	if (!add) {
		return;
	}
	reftable_addition_close(add);
	reftable_free(add);
}

int reftable_addition_commit(struct reftable_addition *add)
{
	struct strbuf table_list = STRBUF_INIT;
	int lock_file_fd = get_tempfile_fd(add->lock_file);
	int i = 0;
	int err = 0;

	if (add->new_tables_len == 0)
		goto done;

	for (i = 0; i < add->stack->merged->stack_len; i++) {
		strbuf_addstr(&table_list, add->stack->readers[i]->name);
		strbuf_addstr(&table_list, "\n");
	}
	for (i = 0; i < add->new_tables_len; i++) {
		strbuf_addstr(&table_list, add->new_tables[i]);
		strbuf_addstr(&table_list, "\n");
	}

	err = write_in_full(lock_file_fd, table_list.buf, table_list.len);
	strbuf_release(&table_list);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	fsync_component_or_die(FSYNC_COMPONENT_REFERENCE, lock_file_fd,
			       get_tempfile_path(add->lock_file));

	err = rename_tempfile(&add->lock_file, add->stack->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	/* success, no more state to clean up. */
	for (i = 0; i < add->new_tables_len; i++) {
		reftable_free(add->new_tables[i]);
	}
	reftable_free(add->new_tables);
	add->new_tables = NULL;
	add->new_tables_len = 0;

	err = reftable_stack_reload_maybe_reuse(add->stack, 1);
	if (err)
		goto done;

	if (!add->stack->disable_auto_compact)
		err = reftable_stack_auto_compact(add->stack);

done:
	reftable_addition_close(add);
	return err;
}

int reftable_stack_new_addition(struct reftable_addition **dest,
				struct reftable_stack *st)
{
	int err = 0;
	struct reftable_addition empty = REFTABLE_ADDITION_INIT;
	*dest = reftable_calloc(sizeof(**dest));
	**dest = empty;
	err = reftable_stack_init_addition(*dest, st);
	if (err) {
		reftable_free(*dest);
		*dest = NULL;
	}
	return err;
}

static int stack_try_add(struct reftable_stack *st,
			 int (*write_table)(struct reftable_writer *wr,
					    void *arg),
			 void *arg)
{
	struct reftable_addition add = REFTABLE_ADDITION_INIT;
	int err = reftable_stack_init_addition(&add, st);
	if (err < 0)
		goto done;
	if (err > 0) {
		err = REFTABLE_LOCK_ERROR;
		goto done;
	}

	err = reftable_addition_add(&add, write_table, arg);
	if (err < 0)
		goto done;

	err = reftable_addition_commit(&add);
done:
	reftable_addition_close(&add);
	return err;
}

int reftable_addition_add(struct reftable_addition *add,
			  int (*write_table)(struct reftable_writer *wr,
					     void *arg),
			  void *arg)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf tab_file_name = STRBUF_INIT;
	struct strbuf next_name = STRBUF_INIT;
	struct reftable_writer *wr = NULL;
	int err = 0;
	int tab_fd = 0;

	strbuf_reset(&next_name);
	format_name(&next_name, add->next_update_index, add->next_update_index);

	stack_filename(&temp_tab_file_name, add->stack, next_name.buf);
	strbuf_addstr(&temp_tab_file_name, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab_file_name.buf);
	if (tab_fd < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}
	if (add->stack->config.default_permissions) {
		if (chmod(temp_tab_file_name.buf, add->stack->config.default_permissions)) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}
	wr = reftable_new_writer(reftable_fd_write, reftable_fd_flush, &tab_fd,
				 &add->stack->config);
	err = write_table(wr, arg);
	if (err < 0)
		goto done;

	err = reftable_writer_close(wr);
	if (err == REFTABLE_EMPTY_TABLE_ERROR) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = stack_check_addition(add->stack, temp_tab_file_name.buf);
	if (err < 0)
		goto done;

	if (wr->min_update_index < add->next_update_index) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	format_name(&next_name, wr->min_update_index, wr->max_update_index);
	strbuf_addstr(&next_name, ".ref");

	stack_filename(&tab_file_name, add->stack, next_name.buf);

	/*
	  On windows, this relies on rand() picking a unique destination name.
	  Maybe we should do retry loop as well?
	 */
	err = rename(temp_tab_file_name.buf, tab_file_name.buf);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	add->new_tables = reftable_realloc(add->new_tables,
					   sizeof(*add->new_tables) *
						   (add->new_tables_len + 1));
	add->new_tables[add->new_tables_len] = strbuf_detach(&next_name, NULL);
	add->new_tables_len++;
done:
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (temp_tab_file_name.len > 0) {
		unlink(temp_tab_file_name.buf);
	}

	strbuf_release(&temp_tab_file_name);
	strbuf_release(&tab_file_name);
	strbuf_release(&next_name);
	reftable_writer_free(wr);
	return err;
}

uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
{
	int sz = st->merged->stack_len;
	if (sz > 0)
		return reftable_reader_max_update_index(st->readers[sz - 1]) +
		       1;
	return 1;
}

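/*
 * Merge readers [first, last] into a single table written to a fresh
 * ".temp.XXXXXX" file next to the stack. On success *temp_tab holds the name
 * of the temporary file, which the caller renames into place.
 */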
static int stack_compact_locked(struct reftable_stack *st, int first, int last,
				struct strbuf *temp_tab,
				struct reftable_log_expiry_config *config)
{
	struct strbuf next_name = STRBUF_INIT;
	int tab_fd = -1;
	struct reftable_writer *wr = NULL;
	int err = 0;

	format_name(&next_name,
		    reftable_reader_min_update_index(st->readers[first]),
		    reftable_reader_max_update_index(st->readers[last]));

	stack_filename(temp_tab, st, next_name.buf);
	strbuf_addstr(temp_tab, ".temp.XXXXXX");

	tab_fd = mkstemp(temp_tab->buf);
	if (st->config.default_permissions &&
	    chmod(temp_tab->buf, st->config.default_permissions) < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	wr = reftable_new_writer(reftable_fd_write, reftable_fd_flush, &tab_fd, &st->config);

	err = stack_write_compact(st, wr, first, last, config);
	if (err < 0)
		goto done;
	err = reftable_writer_close(wr);
	if (err < 0)
		goto done;

	err = close(tab_fd);
	tab_fd = 0;

done:
	reftable_writer_free(wr);
	if (tab_fd > 0) {
		close(tab_fd);
		tab_fd = 0;
	}
	if (err != 0 && temp_tab->len > 0) {
		unlink(temp_tab->buf);
		strbuf_release(temp_tab);
	}
	strbuf_release(&next_name);
	return err;
}

static int stack_write_compact(struct reftable_stack *st,
			       struct reftable_writer *wr, int first, int last,
			       struct reftable_log_expiry_config *config)
{
	int subtabs_len = last - first + 1;
	struct reftable_table *subtabs = reftable_calloc(
		sizeof(struct reftable_table) * (last - first + 1));
	struct reftable_merged_table *mt = NULL;
	int err = 0;
	struct reftable_iterator it = { NULL };
	struct reftable_ref_record ref = { NULL };
	struct reftable_log_record log = { NULL };

	uint64_t entries = 0;

	int i = 0, j = 0;
	for (i = first, j = 0; i <= last; i++) {
		struct reftable_reader *t = st->readers[i];
		reftable_table_from_reader(&subtabs[j++], t);
		st->stats.bytes += t->size;
	}
	reftable_writer_set_limits(wr, st->readers[first]->min_update_index,
				   st->readers[last]->max_update_index);

	err = reftable_new_merged_table(&mt, subtabs, subtabs_len,
					st->config.hash_id);
	if (err < 0) {
		reftable_free(subtabs);
		goto done;
	}

	err = reftable_merged_table_seek_ref(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0)
			goto done;

		if (first == 0 && reftable_ref_record_is_deletion(&ref)) {
			continue;
		}

		err = reftable_writer_add_ref(wr, &ref);
		if (err < 0)
			goto done;
		entries++;
	}
	reftable_iterator_destroy(&it);

	err = reftable_merged_table_seek_log(mt, &it, "");
	if (err < 0)
		goto done;

	while (1) {
		err = reftable_iterator_next_log(&it, &log);
		if (err > 0) {
			err = 0;
			break;
		}
		if (err < 0)
			goto done;
		if (first == 0 && reftable_log_record_is_deletion(&log)) {
			continue;
		}

		if (config && config->min_update_index > 0 &&
		    log.update_index < config->min_update_index) {
			continue;
		}

		if (config && config->time > 0 &&
		    log.value.update.time < config->time) {
			continue;
		}

		err = reftable_writer_add_log(wr, &log);
		if (err < 0)
			goto done;
		entries++;
	}

done:
	reftable_iterator_destroy(&it);
	if (mt) {
		merged_table_release(mt);
		reftable_merged_table_free(mt);
	}
	reftable_ref_record_release(&ref);
	reftable_log_record_release(&log);
	st->stats.entries_written += entries;
	return err;
}

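/*
 * Compaction locking, as implemented below: take "tables.list.lock", then one
 * ".lock" file per table in [first, last], drop the list lock while the merged
 * replacement is written out, re-acquire it to write the updated table list,
 * and finally rename it over "tables.list". The per-table lock files are
 * removed again in the cleanup path.
 */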
/* < 0: error. 0 == OK, > 0 attempt failed; could retry. */
static int stack_compact_range(struct reftable_stack *st, int first, int last,
			       struct reftable_log_expiry_config *expiry)
{
	struct strbuf temp_tab_file_name = STRBUF_INIT;
	struct strbuf new_table_name = STRBUF_INIT;
	struct strbuf lock_file_name = STRBUF_INIT;
	struct strbuf ref_list_contents = STRBUF_INIT;
	struct strbuf new_table_path = STRBUF_INIT;
	int err = 0;
	int have_lock = 0;
	int lock_file_fd = -1;
	int compact_count = last - first + 1;
	char **listp = NULL;
	char **delete_on_success =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	char **subtable_locks =
		reftable_calloc(sizeof(char *) * (compact_count + 1));
	int i = 0;
	int j = 0;
	int is_empty_table = 0;

	if (first > last || (!expiry && first == last)) {
		err = 0;
		goto done;
	}

	st->stats.attempts++;

	strbuf_reset(&lock_file_name);
	strbuf_addstr(&lock_file_name, st->list_file);
	strbuf_addstr(&lock_file_name, ".lock");

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0666);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	/* Don't want to write to the lock for now. */
	close(lock_file_fd);
	lock_file_fd = -1;

	have_lock = 1;
	err = stack_uptodate(st);
	if (err != 0)
		goto done;

	for (i = first, j = 0; i <= last; i++) {
		struct strbuf subtab_file_name = STRBUF_INIT;
		struct strbuf subtab_lock = STRBUF_INIT;
		int sublock_file_fd = -1;

		stack_filename(&subtab_file_name, st,
			       reader_name(st->readers[i]));

		strbuf_reset(&subtab_lock);
		strbuf_addbuf(&subtab_lock, &subtab_file_name);
		strbuf_addstr(&subtab_lock, ".lock");

		sublock_file_fd = open(subtab_lock.buf,
				       O_EXCL | O_CREAT | O_WRONLY, 0666);
		if (sublock_file_fd >= 0) {
			close(sublock_file_fd);
		} else if (sublock_file_fd < 0) {
			if (errno == EEXIST) {
				err = 1;
			} else {
				err = REFTABLE_IO_ERROR;
			}
		}

		subtable_locks[j] = subtab_lock.buf;
		delete_on_success[j] = subtab_file_name.buf;
		j++;

		if (err != 0)
			goto done;
	}

	err = unlink(lock_file_name.buf);
	if (err < 0)
		goto done;
	have_lock = 0;

	err = stack_compact_locked(st, first, last, &temp_tab_file_name,
				   expiry);
	/* Compaction + tombstones can create an empty table out of non-empty
	 * tables. */
	is_empty_table = (err == REFTABLE_EMPTY_TABLE_ERROR);
	if (is_empty_table) {
		err = 0;
	}
	if (err < 0)
		goto done;

	lock_file_fd =
		open(lock_file_name.buf, O_EXCL | O_CREAT | O_WRONLY, 0666);
	if (lock_file_fd < 0) {
		if (errno == EEXIST) {
			err = 1;
		} else {
			err = REFTABLE_IO_ERROR;
		}
		goto done;
	}
	have_lock = 1;
	if (st->config.default_permissions) {
		if (chmod(lock_file_name.buf, st->config.default_permissions) < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	format_name(&new_table_name, st->readers[first]->min_update_index,
		    st->readers[last]->max_update_index);
	strbuf_addstr(&new_table_name, ".ref");

	stack_filename(&new_table_path, st, new_table_name.buf);

	if (!is_empty_table) {
		/* retry? */
		err = rename(temp_tab_file_name.buf, new_table_path.buf);
		if (err < 0) {
			err = REFTABLE_IO_ERROR;
			goto done;
		}
	}

	for (i = 0; i < first; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	if (!is_empty_table) {
		strbuf_addbuf(&ref_list_contents, &new_table_name);
		strbuf_addstr(&ref_list_contents, "\n");
	}
	for (i = last + 1; i < st->merged->stack_len; i++) {
		strbuf_addstr(&ref_list_contents, st->readers[i]->name);
		strbuf_addstr(&ref_list_contents, "\n");
	}

	err = write_in_full(lock_file_fd, ref_list_contents.buf, ref_list_contents.len);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}

	err = fsync_component(FSYNC_COMPONENT_REFERENCE, lock_file_fd);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}

	err = close(lock_file_fd);
	lock_file_fd = -1;
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}

	err = rename(lock_file_name.buf, st->list_file);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		unlink(new_table_path.buf);
		goto done;
	}
	have_lock = 0;

	/* Reload the stack before deleting. On windows, we can only delete the
	   files after we closed them.
	 */
	err = reftable_stack_reload_maybe_reuse(st, first < last);

	listp = delete_on_success;
	while (*listp) {
		if (strcmp(*listp, new_table_path.buf)) {
			unlink(*listp);
		}
		listp++;
	}

done:
	free_names(delete_on_success);

	listp = subtable_locks;
	while (*listp) {
		unlink(*listp);
		listp++;
	}
	free_names(subtable_locks);
	if (lock_file_fd >= 0) {
		close(lock_file_fd);
		lock_file_fd = -1;
	}
	if (have_lock) {
		unlink(lock_file_name.buf);
	}
	strbuf_release(&new_table_name);
	strbuf_release(&new_table_path);
	strbuf_release(&ref_list_contents);
	strbuf_release(&temp_tab_file_name);
	strbuf_release(&lock_file_name);
	return err;
}

int reftable_stack_compact_all(struct reftable_stack *st,
			       struct reftable_log_expiry_config *config)
{
	return stack_compact_range(st, 0, st->merged->stack_len - 1, config);
}

static int stack_compact_range_stats(struct reftable_stack *st, int first,
				     int last,
				     struct reftable_log_expiry_config *config)
{
	int err = stack_compact_range(st, first, last, config);
	if (err > 0) {
		st->stats.failures++;
	}
	return err;
}

static int segment_size(struct segment *s)
{
	return s->end - s->start;
}

int fastlog2(uint64_t sz)
{
	int l = 0;
	if (sz == 0)
		return 0;
	for (; sz; sz /= 2) {
		l++;
	}
	return l - 1;
}

struct segment *sizes_to_segments(int *seglen, uint64_t *sizes, int n)
{
	struct segment *segs = reftable_calloc(sizeof(struct segment) * n);
	int next = 0;
	struct segment cur = { 0 };
	int i = 0;

	if (n == 0) {
		*seglen = 0;
		return segs;
	}

	for (i = 0; i < n; i++) {
		int log = fastlog2(sizes[i]);
		if (cur.log != log && cur.bytes > 0) {
			struct segment fresh = {
				.start = i,
			};

			segs[next++] = cur;
			cur = fresh;
		}

		cur.log = log;
		cur.end = i + 1;
		cur.bytes += sizes[i];
	}
	segs[next++] = cur;
	*seglen = next;
	return segs;
}

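/*
 * Choose the segment with the smallest log2 bucket that spans more than one
 * table, then extend it towards older tables for as long as the combined size
 * does not fall into a smaller bucket than the preceding table.
 *
 * Illustrative example (sizes in arbitrary units): for
 * { 128, 64, 17, 16, 9, 9, 2 } the multi-table segments are { 17, 16 } (log 4)
 * and { 9, 9 } (log 3); the latter wins and is then extended over 16 and 17,
 * so tables 2..5 end up being compacted.
 */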
struct segment suggest_compaction_segment(uint64_t *sizes, int n)
{
	int seglen = 0;
	struct segment *segs = sizes_to_segments(&seglen, sizes, n);
	struct segment min_seg = {
		.log = 64,
	};
	int i = 0;
	for (i = 0; i < seglen; i++) {
		if (segment_size(&segs[i]) == 1) {
			continue;
		}

		if (segs[i].log < min_seg.log) {
			min_seg = segs[i];
		}
	}

	while (min_seg.start > 0) {
		int prev = min_seg.start - 1;
		if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev])) {
			break;
		}

		min_seg.start = prev;
		min_seg.bytes += sizes[prev];
	}

	reftable_free(segs);
	return min_seg;
}

static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
{
	uint64_t *sizes =
		reftable_calloc(sizeof(uint64_t) * st->merged->stack_len);
	int version = (st->config.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
	int overhead = header_size(version) - 1;
	int i = 0;
	for (i = 0; i < st->merged->stack_len; i++) {
		sizes[i] = st->readers[i]->size - overhead;
	}
	return sizes;
}

int reftable_stack_auto_compact(struct reftable_stack *st)
{
	uint64_t *sizes = stack_table_sizes_for_compaction(st);
	struct segment seg =
		suggest_compaction_segment(sizes, st->merged->stack_len);
	reftable_free(sizes);
	if (segment_size(&seg) > 0)
		return stack_compact_range_stats(st, seg.start, seg.end - 1,
						 NULL);

	return 0;
}

struct reftable_compaction_stats *
reftable_stack_compaction_stats(struct reftable_stack *st)
{
	return &st->stats;
}

int reftable_stack_read_ref(struct reftable_stack *st, const char *refname,
			    struct reftable_ref_record *ref)
{
	struct reftable_table tab = { NULL };
	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));
	return reftable_table_read_ref(&tab, refname, ref);
}

int reftable_stack_read_log(struct reftable_stack *st, const char *refname,
			    struct reftable_log_record *log)
{
	struct reftable_iterator it = { NULL };
	struct reftable_merged_table *mt = reftable_stack_merged_table(st);
	int err = reftable_merged_table_seek_log(mt, &it, refname);
	if (err)
		goto done;

	err = reftable_iterator_next_log(&it, log);
	if (err)
		goto done;

	if (strcmp(log->refname, refname) ||
	    reftable_log_record_is_deletion(log)) {
		err = 1;
		goto done;
	}

done:
	if (err) {
		reftable_log_record_release(log);
	}
	reftable_iterator_destroy(&it);
	return err;
}

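/*
 * Sanity-check a freshly written table before it is admitted to the stack:
 * collect its refs and let validate_ref_record_addition() (see refname.h)
 * verify they do not clash with names already in the stack. Skipped when
 * skip_name_check is set.
 */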
static int stack_check_addition(struct reftable_stack *st,
				const char *new_tab_name)
{
	int err = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct reftable_table tab = { NULL };
	struct reftable_ref_record *refs = NULL;
	struct reftable_iterator it = { NULL };
	int cap = 0;
	int len = 0;
	int i = 0;

	if (st->config.skip_name_check)
		return 0;

	err = reftable_block_source_from_file(&src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, new_tab_name);
	if (err < 0)
		goto done;

	err = reftable_reader_seek_ref(rd, &it, "");
	if (err > 0) {
		err = 0;
		goto done;
	}
	if (err < 0)
		goto done;

	while (1) {
		struct reftable_ref_record ref = { NULL };
		err = reftable_iterator_next_ref(&it, &ref);
		if (err > 0) {
			break;
		}
		if (err < 0)
			goto done;

		if (len >= cap) {
			cap = 2 * cap + 1;
			refs = reftable_realloc(refs, cap * sizeof(refs[0]));
		}

		refs[len++] = ref;
	}

	reftable_table_from_merged_table(&tab, reftable_stack_merged_table(st));

	err = validate_ref_record_addition(tab, refs, len);

done:
	for (i = 0; i < len; i++) {
		reftable_ref_record_release(&refs[i]);
	}

	free(refs);
	reftable_iterator_destroy(&it);
	reftable_reader_free(rd);
	return err;
}

static int is_table_name(const char *s)
{
	const char *dot = strrchr(s, '.');
	return dot && !strcmp(dot, ".ref");
}

static void remove_maybe_stale_table(struct reftable_stack *st, uint64_t max,
				     const char *name)
{
	int err = 0;
	uint64_t update_idx = 0;
	struct reftable_block_source src = { NULL };
	struct reftable_reader *rd = NULL;
	struct strbuf table_path = STRBUF_INIT;
	stack_filename(&table_path, st, name);

	err = reftable_block_source_from_file(&src, table_path.buf);
	if (err < 0)
		goto done;

	err = reftable_new_reader(&rd, &src, name);
	if (err < 0)
		goto done;

	update_idx = reftable_reader_max_update_index(rd);
	reftable_reader_free(rd);

	if (update_idx <= max) {
		unlink(table_path.buf);
	}
done:
	strbuf_release(&table_path);
}

static int reftable_stack_clean_locked(struct reftable_stack *st)
{
	uint64_t max = reftable_merged_table_max_update_index(
		reftable_stack_merged_table(st));
	DIR *dir = opendir(st->reftable_dir);
	struct dirent *d = NULL;
	if (!dir) {
		return REFTABLE_IO_ERROR;
	}

	while ((d = readdir(dir))) {
		int i = 0;
		int found = 0;
		if (!is_table_name(d->d_name))
			continue;

		for (i = 0; !found && i < st->readers_len; i++) {
			found = !strcmp(reader_name(st->readers[i]), d->d_name);
		}
		if (found)
			continue;

		remove_maybe_stale_table(st, max, d->d_name);
	}

	closedir(dir);
	return 0;
}

int reftable_stack_clean(struct reftable_stack *st)
{
	struct reftable_addition *add = NULL;
	int err = reftable_stack_new_addition(&add, st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_reload(st);
	if (err < 0) {
		goto done;
	}

	err = reftable_stack_clean_locked(st);

done:
	reftable_addition_destroy(add);
	return err;
}

int reftable_stack_print_directory(const char *stackdir, uint32_t hash_id)
{
	struct reftable_stack *stack = NULL;
	struct reftable_write_options cfg = { .hash_id = hash_id };
	struct reftable_merged_table *merged = NULL;
	struct reftable_table table = { NULL };

	int err = reftable_new_stack(&stack, stackdir, cfg);
	if (err < 0)
		goto done;

	merged = reftable_stack_merged_table(stack);
	reftable_table_from_merged_table(&table, merged);
	err = reftable_table_print(&table);
done:
	if (stack)
		reftable_stack_destroy(stack);
	return err;
}