/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/
#include "writer.h"

#include "system.h"

#include "block.h"
#include "constants.h"
#include "record.h"
#include "tree.h"
#include "reftable-error.h"

/* finishes a block, and writes it to storage */
static int writer_flush_block(struct reftable_writer *w);

/* deallocates memory related to the index */
static void writer_clear_index(struct reftable_writer *w);

/* finishes writing a 'r' (refs) or 'g' (reflogs) section */
static int writer_finish_public_section(struct reftable_writer *w);

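/* returns the stats counter for the given block type ('r', 'o', 'i' or 'g') */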
static struct reftable_block_stats *
writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
{
	switch (typ) {
	case 'r':
		return &w->stats.ref_stats;
	case 'o':
		return &w->stats.obj_stats;
	case 'i':
		return &w->stats.idx_stats;
	case 'g':
		return &w->stats.log_stats;
	}
	abort();
	return NULL;
}

/* write data, queuing the padding for the next write. Returns negative for
 * error. */
static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
			int padding)
{
	int n = 0;
	if (w->pending_padding > 0) {
		uint8_t *zeroed = reftable_calloc(w->pending_padding, sizeof(*zeroed));
		int n = w->write(w->write_arg, zeroed, w->pending_padding);
		if (n < 0)
			return n;

		w->pending_padding = 0;
		reftable_free(zeroed);
	}

	w->pending_padding = padding;
	n = w->write(w->write_arg, data, len);
	if (n < 0)
		return n;
	n += padding;
	return 0;
}

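/* fill in defaults for any options the caller left at zero */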
static void options_set_defaults(struct reftable_write_options *opts)
{
	if (opts->restart_interval == 0) {
		opts->restart_interval = 16;
	}

	if (opts->hash_id == 0) {
		opts->hash_id = GIT_SHA1_FORMAT_ID;
	}
	if (opts->block_size == 0) {
		opts->block_size = DEFAULT_BLOCK_SIZE;
	}
}

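/* format version: 1 for SHA-1 tables, 2 for tables that record the hash ID */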
static int writer_version(struct reftable_writer *w)
{
	return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
			     1 :
			     2;
}

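/*
 * The header consists of the magic "REFT", a version byte, a 24-bit block
 * size, the min/max update index and, for version 2, the hash ID.
 */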
static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
{
	memcpy(dest, "REFT", 4);

	dest[4] = writer_version(w);

	put_be24(dest + 5, w->opts.block_size);
	put_be64(dest + 8, w->min_update_index);
	put_be64(dest + 16, w->max_update_index);
	if (writer_version(w) == 2) {
		put_be32(dest + 24, w->opts.hash_id);
	}
	return header_size(writer_version(w));
}

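/* set up the block writer for a fresh block of the given type; the very
 * first block of the file leaves room for the header */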
static void writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
{
	int block_start = 0;
	if (w->next == 0) {
		block_start = header_size(writer_version(w));
	}

	strbuf_reset(&w->last_key);
	block_writer_init(&w->block_writer_data, typ, w->block,
			  w->opts.block_size, block_start,
			  hash_size(w->opts.hash_id));
	w->block_writer = &w->block_writer_data;
	w->block_writer->restart_interval = w->opts.restart_interval;
}

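/*
 * Allocate a new writer. A typical caller wires up the write/flush callbacks
 * and then drives the writer roughly like this (illustrative sketch only;
 * `write_cb`, `flush_cb` and `out` are placeholder names):
 *
 *   struct reftable_writer *w = reftable_new_writer(write_cb, flush_cb, out, &opts);
 *   reftable_writer_set_limits(w, min_update_index, max_update_index);
 *   reftable_writer_add_ref(w, &ref);
 *   reftable_writer_close(w);
 *   reftable_writer_free(w);
 */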
struct reftable_writer *
reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
		    int (*flush_func)(void *),
		    void *writer_arg, const struct reftable_write_options *_opts)
{
	struct reftable_writer *wp = reftable_calloc(1, sizeof(*wp));
	struct reftable_write_options opts = {0};

	if (_opts)
		opts = *_opts;
	options_set_defaults(&opts);
	if (opts.block_size >= (1 << 24))
		BUG("configured block size exceeds 16MB");

	strbuf_init(&wp->block_writer_data.last_key, 0);
	strbuf_init(&wp->last_key, 0);
	REFTABLE_CALLOC_ARRAY(wp->block, opts.block_size);
	wp->write = writer_func;
	wp->write_arg = writer_arg;
	wp->opts = opts;
	wp->flush = flush_func;
	writer_reinit_block_writer(wp, BLOCK_TYPE_REF);

	return wp;
}

void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
				uint64_t max)
{
	w->min_update_index = min;
	w->max_update_index = max;
}

static void writer_release(struct reftable_writer *w)
{
	if (w) {
		reftable_free(w->block);
		w->block = NULL;
		block_writer_release(&w->block_writer_data);
		w->block_writer = NULL;
		writer_clear_index(w);
		strbuf_release(&w->last_key);
	}
}

void reftable_writer_free(struct reftable_writer *w)
{
	writer_release(w);
	reftable_free(w);
}

struct obj_index_tree_node {
	struct strbuf hash;
	uint64_t *offsets;
	size_t offset_len;
	size_t offset_cap;
};

#define OBJ_INDEX_TREE_NODE_INIT    \
	{                           \
		.hash = STRBUF_INIT \
	}

static int obj_index_tree_node_compare(const void *a, const void *b)
{
	return strbuf_cmp(&((const struct obj_index_tree_node *)a)->hash,
			  &((const struct obj_index_tree_node *)b)->hash);
}

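/* remember that the object with this hash is referenced by the block that
 * starts at the current write offset, so it can later be emitted into the
 * OID ('o') index */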
static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
{
	uint64_t off = w->next;

	struct obj_index_tree_node want = { .hash = *hash };

	struct tree_node *node = tree_search(&want, &w->obj_index_tree,
					     &obj_index_tree_node_compare, 0);
	struct obj_index_tree_node *key = NULL;
	if (!node) {
		struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
		key = reftable_malloc(sizeof(struct obj_index_tree_node));
		*key = empty;

		strbuf_reset(&key->hash);
		strbuf_addbuf(&key->hash, hash);
		tree_search((void *)key, &w->obj_index_tree,
			    &obj_index_tree_node_compare, 1);
	} else {
		key = node->key;
	}

	if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) {
		return;
	}

	REFTABLE_ALLOC_GROW(key->offsets, key->offset_len + 1, key->offset_cap);
	key->offsets[key->offset_len++] = off;
}

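/* append a record to the current block; records must arrive in strictly
 * increasing key order, and a full block is flushed and retried once */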
static int writer_add_record(struct reftable_writer *w,
			     struct reftable_record *rec)
{
	struct strbuf key = STRBUF_INIT;
	int err;

	reftable_record_key(rec, &key);
	if (strbuf_cmp(&w->last_key, &key) >= 0) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	strbuf_reset(&w->last_key);
	strbuf_addbuf(&w->last_key, &key);
	if (!w->block_writer)
		writer_reinit_block_writer(w, reftable_record_type(rec));

	if (block_writer_type(w->block_writer) != reftable_record_type(rec))
		BUG("record of type %d added to writer of type %d",
		    reftable_record_type(rec), block_writer_type(w->block_writer));

	/*
	 * Try to add the record to the writer. If this succeeds then we're
	 * done. Otherwise the block writer may have hit the block size limit
	 * and needs to be flushed.
	 */
	if (!block_writer_add(w->block_writer, rec)) {
		err = 0;
		goto done;
	}

	/*
	 * The current block is full, so we need to flush and reinitialize the
	 * writer to start writing the next block.
	 */
	err = writer_flush_block(w);
	if (err < 0)
		goto done;
	writer_reinit_block_writer(w, reftable_record_type(rec));

	/*
	 * Try to add the record to the writer again. If this still fails then
	 * the record does not fit into the block size.
	 *
	 * TODO: it would be great to have `block_writer_add()` return proper
	 * error codes so that we don't have to second-guess the failure
	 * mode here.
	 */
	err = block_writer_add(w->block_writer, rec);
	if (err) {
		err = REFTABLE_ENTRY_TOO_BIG_ERROR;
		goto done;
	}

done:
	strbuf_release(&key);
	return err;
}

int reftable_writer_add_ref(struct reftable_writer *w,
			    struct reftable_ref_record *ref)
{
	struct reftable_record rec = {
		.type = BLOCK_TYPE_REF,
		.u = {
			.ref = *ref
		},
	};
	int err = 0;

	if (!ref->refname)
		return REFTABLE_API_ERROR;
	if (ref->update_index < w->min_update_index ||
	    ref->update_index > w->max_update_index)
		return REFTABLE_API_ERROR;

	rec.u.ref.update_index -= w->min_update_index;

	err = writer_add_record(w, &rec);
	if (err < 0)
		return err;

	if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, (char *)reftable_ref_record_val1(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}

	if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, reftable_ref_record_val2(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}
	return 0;
}

int reftable_writer_add_refs(struct reftable_writer *w,
			     struct reftable_ref_record *refs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(refs, n, reftable_ref_record_compare_name);
	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_ref(w, &refs[i]);
	}
	return err;
}

static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
					    struct reftable_log_record *log)
{
	struct reftable_record rec = {
		.type = BLOCK_TYPE_LOG,
		.u = {
			.log = *log,
		},
	};
	if (w->block_writer &&
	    block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
		int err = writer_finish_public_section(w);
		if (err < 0)
			return err;
	}

	w->next -= w->pending_padding;
	w->pending_padding = 0;
	return writer_add_record(w, &rec);
}

int reftable_writer_add_log(struct reftable_writer *w,
			    struct reftable_log_record *log)
{
	char *input_log_message = NULL;
	struct strbuf cleaned_message = STRBUF_INIT;
	int err = 0;

	if (log->value_type == REFTABLE_LOG_DELETION)
		return reftable_writer_add_log_verbatim(w, log);

	if (!log->refname)
		return REFTABLE_API_ERROR;

	input_log_message = log->value.update.message;
	if (!w->opts.exact_log_message && log->value.update.message) {
		strbuf_addstr(&cleaned_message, log->value.update.message);
		while (cleaned_message.len &&
		       cleaned_message.buf[cleaned_message.len - 1] == '\n')
			strbuf_setlen(&cleaned_message,
				      cleaned_message.len - 1);
		if (strchr(cleaned_message.buf, '\n')) {
			/* multiple lines not allowed. */
			err = REFTABLE_API_ERROR;
			goto done;
		}
		strbuf_addstr(&cleaned_message, "\n");
		log->value.update.message = cleaned_message.buf;
	}

	err = reftable_writer_add_log_verbatim(w, log);
	log->value.update.message = input_log_message;
done:
	strbuf_release(&cleaned_message);
	return err;
}

int reftable_writer_add_logs(struct reftable_writer *w,
			     struct reftable_log_record *logs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(logs, n, reftable_log_record_compare_key);

	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_log(w, &logs[i]);
	}
	return err;
}

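/* flush the last block of the current section and, if the section spans
 * enough blocks, write a (possibly multi-level) index for it */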
static int writer_finish_section(struct reftable_writer *w)
{
	struct reftable_block_stats *bstats = NULL;
	uint8_t typ = block_writer_type(w->block_writer);
	uint64_t index_start = 0;
	int max_level = 0;
	size_t threshold = w->opts.unpadded ? 1 : 3;
	int before_blocks = w->stats.idx_stats.blocks;
	int err;

	err = writer_flush_block(w);
	if (err < 0)
		return err;

	/*
	 * When the section we are about to index has a lot of blocks then the
	 * index itself may span across multiple blocks, as well. This would
	 * require a linear scan over index blocks only to find the desired
	 * indexed block, which is inefficient. Instead, we write a multi-level
	 * index where index records of level N+1 will refer to index blocks of
	 * level N. This isn't constant time, either, but at least logarithmic.
	 *
	 * This loop handles writing this multi-level index. Note that we write
	 * the lowest-level index pointing to the indexed blocks first. We then
	 * continue writing additional index levels until the current level has
	 * fewer blocks than the threshold so that the highest level will be at
	 * the end of the index section.
	 *
	 * Readers are thus required to start reading the index section from
	 * its end, which is why we set `index_start` to the beginning of the
	 * last index section.
	 */
	while (w->index_len > threshold) {
		struct reftable_index_record *idx = NULL;
		size_t i, idx_len;

		max_level++;
		index_start = w->next;
		writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

		idx = w->index;
		idx_len = w->index_len;

		w->index = NULL;
		w->index_len = 0;
		w->index_cap = 0;
		for (i = 0; i < idx_len; i++) {
			struct reftable_record rec = {
				.type = BLOCK_TYPE_INDEX,
				.u = {
					.idx = idx[i],
				},
			};

			err = writer_add_record(w, &rec);
			if (err < 0)
				return err;
		}

		err = writer_flush_block(w);
		if (err < 0)
			return err;

		for (i = 0; i < idx_len; i++)
			strbuf_release(&idx[i].last_key);
		reftable_free(idx);
	}

	/*
	 * The index may still contain a number of index blocks lower than the
	 * threshold. Clear it so that these entries don't leak into the next
	 * index section.
	 */
	writer_clear_index(w);

	bstats = writer_reftable_block_stats(w, typ);
	bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
	bstats->index_offset = index_start;
	bstats->max_index_level = max_level;

	/* Reinit lastKey, as the next section can start with any key. */
	strbuf_reset(&w->last_key);

	return 0;
}

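/* callback state for computing the longest common prefix among the indexed
 * object hashes, which determines the abbreviated object ID length */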
struct common_prefix_arg {
	struct strbuf *last;
	int max;
};

static void update_common(void *void_arg, void *key)
{
	struct common_prefix_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	if (arg->last) {
		int n = common_prefix_size(&entry->hash, arg->last);
		if (n > arg->max) {
			arg->max = n;
		}
	}
	arg->last = &entry->hash;
}

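/* callback state for writing object ('o') records while walking the OID
 * index tree */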
struct write_record_arg {
	struct reftable_writer *w;
	int err;
};

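/* write a single obj record; if it does not fit even into a fresh block,
 * drop its offset list and store the hash prefix alone */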
static void write_object_record(void *void_arg, void *key)
{
	struct write_record_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	struct reftable_record
		rec = { .type = BLOCK_TYPE_OBJ,
			.u.obj = {
				.hash_prefix = (uint8_t *)entry->hash.buf,
				.hash_prefix_len = arg->w->stats.object_id_len,
				.offsets = entry->offsets,
				.offset_len = entry->offset_len,
			} };
	if (arg->err < 0)
		goto done;

	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	arg->err = writer_flush_block(arg->w);
	if (arg->err < 0)
		goto done;

	writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	rec.u.obj.offset_len = 0;
	arg->err = block_writer_add(arg->w->block_writer, &rec);

	/* Should be able to write into a fresh block. */
	assert(arg->err == 0);

done:;
}

static void object_record_free(void *void_arg, void *key)
{
	struct obj_index_tree_node *entry = key;

	FREE_AND_NULL(entry->offsets);
	strbuf_release(&entry->hash);
	reftable_free(entry);
}

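/* write the OID index ('o') section: size the abbreviated object ID by the
 * longest common hash prefix, then emit one record per indexed object */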
static int writer_dump_object_index(struct reftable_writer *w)
{
	struct write_record_arg closure = { .w = w };
	struct common_prefix_arg common = {
		.max = 1,		/* obj_id_len should be >= 2. */
	};
	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &update_common, &common);
	}
	w->stats.object_id_len = common.max + 1;

	writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &write_object_record, &closure);
	}

	if (closure.err < 0)
		return closure.err;
	return writer_finish_section(w);
}

static int writer_finish_public_section(struct reftable_writer *w)
{
	uint8_t typ = 0;
	int err = 0;

	if (!w->block_writer)
		return 0;

	typ = block_writer_type(w->block_writer);
	err = writer_finish_section(w);
	if (err < 0)
		return err;
	if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
	    w->stats.ref_stats.index_blocks > 0) {
		err = writer_dump_object_index(w);
		if (err < 0)
			return err;
	}

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &object_record_free, NULL);
		tree_free(w->obj_index_tree);
		w->obj_index_tree = NULL;
	}

	w->block_writer = NULL;
	return 0;
}

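/*
 * Finalize the table: finish the open section and append the footer, which
 * repeats the header and then records the ref index offset, the obj section
 * offset combined with the object ID length, the obj index offset, the log
 * section and log index offsets, and finally a CRC-32 over the footer.
 */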
int reftable_writer_close(struct reftable_writer *w)
{
	uint8_t footer[72];
	uint8_t *p = footer;
	int err = writer_finish_public_section(w);
	int empty_table = w->next == 0;
	if (err != 0)
		goto done;
	w->pending_padding = 0;
	if (empty_table) {
		/* Empty tables need a header anyway. */
		uint8_t header[28];
		int n = writer_write_header(w, header);
		err = padded_write(w, header, n, 0);
		if (err < 0)
			goto done;
	}

	p += writer_write_header(w, footer);
	put_be64(p, w->stats.ref_stats.index_offset);
	p += 8;
	put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
	p += 8;
	put_be64(p, w->stats.obj_stats.index_offset);
	p += 8;

	put_be64(p, w->stats.log_stats.offset);
	p += 8;
	put_be64(p, w->stats.log_stats.index_offset);
	p += 8;

	put_be32(p, crc32(0, footer, p - footer));
	p += 4;

	err = w->flush(w->write_arg);
	if (err < 0) {
		err = REFTABLE_IO_ERROR;
		goto done;
	}

	err = padded_write(w, footer, footer_size(writer_version(w)), 0);
	if (err < 0)
		goto done;

	if (empty_table) {
		err = REFTABLE_EMPTY_TABLE_ERROR;
		goto done;
	}

done:
	writer_release(w);
	return err;
}

static void writer_clear_index(struct reftable_writer *w)
{
	for (size_t i = 0; w->index && i < w->index_len; i++)
		strbuf_release(&w->index[i].last_key);
	FREE_AND_NULL(w->index);
	w->index_len = 0;
	w->index_cap = 0;
}

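/* serialize the current block to storage, update per-type statistics and
 * queue an index record pointing at the block */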
static int writer_flush_nonempty_block(struct reftable_writer *w)
{
	struct reftable_index_record index_record = {
		.last_key = STRBUF_INIT,
	};
	uint8_t typ = block_writer_type(w->block_writer);
	struct reftable_block_stats *bstats;
	int raw_bytes, padding = 0, err;
	uint64_t block_typ_off;

	/*
	 * Finish the current block. This will cause the block writer to emit
	 * restart points and potentially compress records in case we are
	 * writing a log block.
	 *
	 * Note that this is still happening in memory.
	 */
	raw_bytes = block_writer_finish(w->block_writer);
	if (raw_bytes < 0)
		return raw_bytes;

	/*
	 * By default, all records except for log records are padded to the
	 * block size.
	 */
	if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG)
		padding = w->opts.block_size - raw_bytes;

	bstats = writer_reftable_block_stats(w, typ);
	block_typ_off = (bstats->blocks == 0) ? w->next : 0;
	if (block_typ_off > 0)
		bstats->offset = block_typ_off;
	bstats->entries += w->block_writer->entries;
	bstats->restarts += w->block_writer->restart_len;
	bstats->blocks++;
	w->stats.blocks++;

	/*
	 * If this is the first block we're writing to the table then we need
	 * to also write the reftable header.
	 */
	if (!w->next)
		writer_write_header(w, w->block);

	err = padded_write(w, w->block, raw_bytes, padding);
	if (err < 0)
		return err;

	/*
	 * Add an index record for every block that we're writing. If we end up
	 * having more than a threshold of index records we will end up writing
	 * an index section in `writer_finish_section()`. Each index record
	 * contains the last record key of the block it is indexing as well as
	 * the offset of that block.
	 *
	 * Note that this also applies when flushing index blocks, in which
	 * case we will end up with a multi-level index.
	 */
	REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
	index_record.offset = w->next;
	strbuf_reset(&index_record.last_key);
	strbuf_addbuf(&index_record.last_key, &w->block_writer->last_key);
	w->index[w->index_len] = index_record;
	w->index_len++;

	w->next += padding + raw_bytes;
	w->block_writer = NULL;

	return 0;
}

static int writer_flush_block(struct reftable_writer *w)
{
	if (!w->block_writer)
		return 0;
	if (w->block_writer->entries == 0)
		return 0;
	return writer_flush_nonempty_block(w);
}

const struct reftable_stats *reftable_writer_stats(struct reftable_writer *w)
{
	return &w->stats;
}