[git/debian.git] / reftable / writer.c
blob 944c2329ab568aa17cb1243507cc7a352faaf8ad

/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/

#include "writer.h"

#include "system.h"

#include "block.h"
#include "constants.h"
#include "record.h"
#include "tree.h"
#include "reftable-error.h"

/* finishes a block, and writes it to storage */
static int writer_flush_block(struct reftable_writer *w);

/* deallocates memory related to the index */
static void writer_clear_index(struct reftable_writer *w);

/* finishes writing a 'r' (refs) or 'g' (reflogs) section */
static int writer_finish_public_section(struct reftable_writer *w);

static struct reftable_block_stats *
writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
{
	switch (typ) {
	case 'r':
		return &w->stats.ref_stats;
	case 'o':
		return &w->stats.obj_stats;
	case 'i':
		return &w->stats.idx_stats;
	case 'g':
		return &w->stats.log_stats;
	}
	abort();
	return NULL;
}

/* write data, queuing the padding for the next write. Returns negative for
 * error. */
static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
			int padding)
{
	int n = 0;
	if (w->pending_padding > 0) {
		uint8_t *zeroed = reftable_calloc(w->pending_padding);
		int n = w->write(w->write_arg, zeroed, w->pending_padding);
		if (n < 0)
			return n;

		w->pending_padding = 0;
		reftable_free(zeroed);
	}

	w->pending_padding = padding;
	n = w->write(w->write_arg, data, len);
	if (n < 0)
		return n;
	n += padding;
	return 0;
}

static void options_set_defaults(struct reftable_write_options *opts)
{
	if (opts->restart_interval == 0) {
		opts->restart_interval = 16;
	}

	if (opts->hash_id == 0) {
		opts->hash_id = GIT_SHA1_FORMAT_ID;
	}
	if (opts->block_size == 0) {
		opts->block_size = DEFAULT_BLOCK_SIZE;
	}
}

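/*
 * Table format version: tables with an unset or SHA-1 hash id are written as
 * version 1; any other hash id is written as version 2, whose header
 * additionally carries the hash id (see writer_write_header below).
 */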
static int writer_version(struct reftable_writer *w)
{
	return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
			       1 :
			       2;
}

static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
{
	memcpy(dest, "REFT", 4);

	dest[4] = writer_version(w);

	put_be24(dest + 5, w->opts.block_size);
	put_be64(dest + 8, w->min_update_index);
	put_be64(dest + 16, w->max_update_index);
	if (writer_version(w) == 2) {
		put_be32(dest + 24, w->opts.hash_id);
	}
	return header_size(writer_version(w));
}

static void writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
{
	int block_start = 0;
	if (w->next == 0) {
		block_start = header_size(writer_version(w));
	}

	strbuf_release(&w->last_key);
	block_writer_init(&w->block_writer_data, typ, w->block,
			  w->opts.block_size, block_start,
			  hash_size(w->opts.hash_id));
	w->block_writer = &w->block_writer_data;
	w->block_writer->restart_interval = w->opts.restart_interval;
}

static struct strbuf reftable_empty_strbuf = STRBUF_INIT;

struct reftable_writer *
reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
		    void *writer_arg, struct reftable_write_options *opts)
{
	struct reftable_writer *wp =
		reftable_calloc(sizeof(struct reftable_writer));
	strbuf_init(&wp->block_writer_data.last_key, 0);
	options_set_defaults(opts);
	if (opts->block_size >= (1 << 24)) {
		/* TODO - error return? */
		abort();
	}
	wp->last_key = reftable_empty_strbuf;
	wp->block = reftable_calloc(opts->block_size);
	wp->write = writer_func;
	wp->write_arg = writer_arg;
	wp->opts = *opts;
	writer_reinit_block_writer(wp, BLOCK_TYPE_REF);

	return wp;
}

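/*
 * A minimal usage sketch of the calling sequence (illustrative only;
 * `out_func`, `out_arg`, `update_index` and the populated `ref` record are
 * placeholders supplied by the caller, not part of this file):
 *
 *	struct reftable_write_options opts = { 0 };
 *	struct reftable_writer *w = reftable_new_writer(out_func, out_arg, &opts);
 *	int err;
 *
 *	reftable_writer_set_limits(w, update_index, update_index);
 *	err = reftable_writer_add_ref(w, &ref);    // records must arrive in sorted order
 *	if (!err)
 *		err = reftable_writer_close(w);    // emits indexes, obj section and footer
 *	reftable_writer_free(w);
 */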
void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
				uint64_t max)
{
	w->min_update_index = min;
	w->max_update_index = max;
}

void reftable_writer_free(struct reftable_writer *w)
{
	if (!w)
		return;
	reftable_free(w->block);
	reftable_free(w);
}

struct obj_index_tree_node {
	struct strbuf hash;
	uint64_t *offsets;
	size_t offset_len;
	size_t offset_cap;
};

#define OBJ_INDEX_TREE_NODE_INIT    \
	{                           \
		.hash = STRBUF_INIT \
	}

static int obj_index_tree_node_compare(const void *a, const void *b)
{
	return strbuf_cmp(&((const struct obj_index_tree_node *)a)->hash,
			  &((const struct obj_index_tree_node *)b)->hash);
}

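/*
 * Records that the given object hash is referenced by the ref block currently
 * being assembled, which will start at offset w->next. The per-hash offset
 * lists collected here are later emitted as the OBJ ('o') section by
 * writer_dump_object_index().
 */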
static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
{
	uint64_t off = w->next;

	struct obj_index_tree_node want = { .hash = *hash };

	struct tree_node *node = tree_search(&want, &w->obj_index_tree,
					     &obj_index_tree_node_compare, 0);
	struct obj_index_tree_node *key = NULL;
	if (node == NULL) {
		struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
		key = reftable_malloc(sizeof(struct obj_index_tree_node));
		*key = empty;

		strbuf_reset(&key->hash);
		strbuf_addbuf(&key->hash, hash);
		tree_search((void *)key, &w->obj_index_tree,
			    &obj_index_tree_node_compare, 1);
	} else {
		key = node->key;
	}

	if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) {
		return;
	}

	if (key->offset_len == key->offset_cap) {
		key->offset_cap = 2 * key->offset_cap + 1;
		key->offsets = reftable_realloc(
			key->offsets, sizeof(uint64_t) * key->offset_cap);
	}

	key->offsets[key->offset_len++] = off;
}

static int writer_add_record(struct reftable_writer *w,
			     struct reftable_record *rec)
{
	struct strbuf key = STRBUF_INIT;
	int err = -1;
	reftable_record_key(rec, &key);
	if (strbuf_cmp(&w->last_key, &key) >= 0) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	strbuf_reset(&w->last_key);
	strbuf_addbuf(&w->last_key, &key);
	if (w->block_writer == NULL) {
		writer_reinit_block_writer(w, reftable_record_type(rec));
	}

	assert(block_writer_type(w->block_writer) == reftable_record_type(rec));

	if (block_writer_add(w->block_writer, rec) == 0) {
		err = 0;
		goto done;
	}

	err = writer_flush_block(w);
	if (err < 0) {
		goto done;
	}

	writer_reinit_block_writer(w, reftable_record_type(rec));
	err = block_writer_add(w->block_writer, rec);
	if (err < 0) {
		/* we are writing into memory, so an error can only mean it
		 * doesn't fit. */
		err = REFTABLE_ENTRY_TOO_BIG_ERROR;
		goto done;
	}

	err = 0;
done:
	strbuf_release(&key);
	return err;
}

int reftable_writer_add_ref(struct reftable_writer *w,
			    struct reftable_ref_record *ref)
{
	struct reftable_record rec = {
		.type = BLOCK_TYPE_REF,
		.u.ref = *ref,
	};
	int err = 0;

	if (ref->refname == NULL)
		return REFTABLE_API_ERROR;
	if (ref->update_index < w->min_update_index ||
	    ref->update_index > w->max_update_index)
		return REFTABLE_API_ERROR;

	rec.u.ref.update_index -= w->min_update_index;

	err = writer_add_record(w, &rec);
	if (err < 0)
		return err;

	if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, (char *)reftable_ref_record_val1(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}

	if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, reftable_ref_record_val2(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}
	return 0;
}

int reftable_writer_add_refs(struct reftable_writer *w,
			     struct reftable_ref_record *refs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(refs, n, reftable_ref_record_compare_name);
	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_ref(w, &refs[i]);
	}
	return err;
}

static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
					    struct reftable_log_record *log)
{
	struct reftable_record rec = {
		.type = BLOCK_TYPE_LOG,
		.u.log = *log,
	};
	if (w->block_writer &&
	    block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
		int err = writer_finish_public_section(w);
		if (err < 0)
			return err;
	}

	w->next -= w->pending_padding;
	w->pending_padding = 0;
	return writer_add_record(w, &rec);
}

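/*
 * Unless opts.exact_log_message is set, the log message is normalized to a
 * single line: trailing newlines are stripped, an embedded newline is
 * rejected with REFTABLE_API_ERROR, and one trailing '\n' is appended.
 */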
int reftable_writer_add_log(struct reftable_writer *w,
			    struct reftable_log_record *log)
{
	char *input_log_message = NULL;
	struct strbuf cleaned_message = STRBUF_INIT;
	int err = 0;

	if (log->value_type == REFTABLE_LOG_DELETION)
		return reftable_writer_add_log_verbatim(w, log);

	if (log->refname == NULL)
		return REFTABLE_API_ERROR;

	input_log_message = log->value.update.message;
	if (!w->opts.exact_log_message && log->value.update.message) {
		strbuf_addstr(&cleaned_message, log->value.update.message);
		while (cleaned_message.len &&
		       cleaned_message.buf[cleaned_message.len - 1] == '\n')
			strbuf_setlen(&cleaned_message,
				      cleaned_message.len - 1);
		if (strchr(cleaned_message.buf, '\n')) {
			/* multiple lines not allowed. */
			err = REFTABLE_API_ERROR;
			goto done;
		}
		strbuf_addstr(&cleaned_message, "\n");
		log->value.update.message = cleaned_message.buf;
	}

	err = reftable_writer_add_log_verbatim(w, log);
	log->value.update.message = input_log_message;
done:
	strbuf_release(&cleaned_message);
	return err;
}

int reftable_writer_add_logs(struct reftable_writer *w,
			     struct reftable_log_record *logs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(logs, n, reftable_log_record_compare_key);

	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_log(w, &logs[i]);
	}
	return err;
}

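/*
 * Flushes the current block and then builds the section's index bottom-up:
 * while more than `threshold` index records are pending, they are written out
 * as a new level of index blocks, whose own index records feed the level
 * above, until the top level is small enough.
 */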
static int writer_finish_section(struct reftable_writer *w)
{
	uint8_t typ = block_writer_type(w->block_writer);
	uint64_t index_start = 0;
	int max_level = 0;
	int threshold = w->opts.unpadded ? 1 : 3;
	int before_blocks = w->stats.idx_stats.blocks;
	int err = writer_flush_block(w);
	int i = 0;
	struct reftable_block_stats *bstats = NULL;
	if (err < 0)
		return err;

	while (w->index_len > threshold) {
		struct reftable_index_record *idx = NULL;
		int idx_len = 0;

		max_level++;
		index_start = w->next;
		writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

		idx = w->index;
		idx_len = w->index_len;

		w->index = NULL;
		w->index_len = 0;
		w->index_cap = 0;
		for (i = 0; i < idx_len; i++) {
			struct reftable_record rec = {
				.type = BLOCK_TYPE_INDEX,
				.u.idx = idx[i],
			};
			if (block_writer_add(w->block_writer, &rec) == 0) {
				continue;
			}

			err = writer_flush_block(w);
			if (err < 0)
				return err;

			writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

			err = block_writer_add(w->block_writer, &rec);
			if (err != 0) {
				/* write into fresh block should always
				 * succeed
				 */
				abort();
			}
		}
		for (i = 0; i < idx_len; i++) {
			strbuf_release(&idx[i].last_key);
		}
		reftable_free(idx);
	}

	writer_clear_index(w);

	err = writer_flush_block(w);
	if (err < 0)
		return err;

	bstats = writer_reftable_block_stats(w, typ);
	bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
	bstats->index_offset = index_start;
	bstats->max_index_level = max_level;

	/* Reinit lastKey, as the next section can start with any key. */
	w->last_key.len = 0;

	return 0;
}

struct common_prefix_arg {
	struct strbuf *last;
	int max;
};

static void update_common(void *void_arg, void *key)
{
	struct common_prefix_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	if (arg->last) {
		int n = common_prefix_size(&entry->hash, arg->last);
		if (n > arg->max) {
			arg->max = n;
		}
	}
	arg->last = &entry->hash;
}

struct write_record_arg {
	struct reftable_writer *w;
	int err;
};

static void write_object_record(void *void_arg, void *key)
{
	struct write_record_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	struct reftable_record
		rec = { .type = BLOCK_TYPE_OBJ,
			.u.obj = {
				.hash_prefix = (uint8_t *)entry->hash.buf,
				.hash_prefix_len = arg->w->stats.object_id_len,
				.offsets = entry->offsets,
				.offset_len = entry->offset_len,
			} };
	if (arg->err < 0)
		goto done;

	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	arg->err = writer_flush_block(arg->w);
	if (arg->err < 0)
		goto done;

	writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	/* If even a fresh block cannot hold all offsets, store only the hash
	 * prefix with an empty offset list so the record still fits. */
	rec.u.obj.offset_len = 0;
	arg->err = block_writer_add(arg->w->block_writer, &rec);

	/* Should be able to write into a fresh block. */
	assert(arg->err == 0);
done:;
}

static void object_record_free(void *void_arg, void *key)
{
	struct obj_index_tree_node *entry = key;

	FREE_AND_NULL(entry->offsets);
	strbuf_release(&entry->hash);
	reftable_free(entry);
}

static int writer_dump_object_index(struct reftable_writer *w)
{
	struct write_record_arg closure = { .w = w };
	struct common_prefix_arg common = { NULL };
	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &update_common, &common);
	}
	w->stats.object_id_len = common.max + 1;

	writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &write_object_record, &closure);
	}
	if (closure.err < 0)
		return closure.err;
	return writer_finish_section(w);
}

static int writer_finish_public_section(struct reftable_writer *w)
{
	uint8_t typ = 0;
	int err = 0;

	if (w->block_writer == NULL)
		return 0;

	typ = block_writer_type(w->block_writer);
	err = writer_finish_section(w);
	if (err < 0)
		return err;
	if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
	    w->stats.ref_stats.index_blocks > 0) {
		err = writer_dump_object_index(w);
		if (err < 0)
			return err;
	}

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &object_record_free, NULL);
		tree_free(w->obj_index_tree);
		w->obj_index_tree = NULL;
	}

	w->block_writer = NULL;
	return 0;
}

int reftable_writer_close(struct reftable_writer *w)
{
	uint8_t footer[72];
	uint8_t *p = footer;
	int err = writer_finish_public_section(w);
	int empty_table = w->next == 0;
	if (err != 0)
		goto done;
	w->pending_padding = 0;
	if (empty_table) {
		/* Empty tables need a header anyway. */
		uint8_t header[28];
		int n = writer_write_header(w, header);
		err = padded_write(w, header, n, 0);
		if (err < 0)
			goto done;
	}

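	/*
	 * The footer repeats the header and then records, in order: the ref
	 * index offset, the obj section offset combined with the object id
	 * prefix length, the obj index offset, the log section offset, the
	 * log index offset, and finally a CRC-32 over the footer itself.
	 */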
	p += writer_write_header(w, footer);
	put_be64(p, w->stats.ref_stats.index_offset);
	p += 8;
	put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
	p += 8;
	put_be64(p, w->stats.obj_stats.index_offset);
	p += 8;

	put_be64(p, w->stats.log_stats.offset);
	p += 8;
	put_be64(p, w->stats.log_stats.index_offset);
	p += 8;

	put_be32(p, crc32(0, footer, p - footer));
	p += 4;

	err = padded_write(w, footer, footer_size(writer_version(w)), 0);
	if (err < 0)
		goto done;

	if (empty_table) {
		err = REFTABLE_EMPTY_TABLE_ERROR;
		goto done;
	}

done:
	/* free up memory. */
	block_writer_release(&w->block_writer_data);
	writer_clear_index(w);
	strbuf_release(&w->last_key);
	return err;
}

static void writer_clear_index(struct reftable_writer *w)
{
	int i = 0;
	for (i = 0; i < w->index_len; i++) {
		strbuf_release(&w->index[i].last_key);
	}

	FREE_AND_NULL(w->index);
	w->index_len = 0;
	w->index_cap = 0;
}

static const int debug = 0;

static int writer_flush_nonempty_block(struct reftable_writer *w)
{
	uint8_t typ = block_writer_type(w->block_writer);
	struct reftable_block_stats *bstats =
		writer_reftable_block_stats(w, typ);
	uint64_t block_typ_off = (bstats->blocks == 0) ? w->next : 0;
	int raw_bytes = block_writer_finish(w->block_writer);
	int padding = 0;
	int err = 0;
	struct reftable_index_record ir = { .last_key = STRBUF_INIT };
	if (raw_bytes < 0)
		return raw_bytes;

	if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG) {
		padding = w->opts.block_size - raw_bytes;
	}

	if (block_typ_off > 0) {
		bstats->offset = block_typ_off;
	}

	bstats->entries += w->block_writer->entries;
	bstats->restarts += w->block_writer->restart_len;
	bstats->blocks++;
	w->stats.blocks++;

	if (debug) {
		fprintf(stderr, "block %c off %" PRIu64 " sz %d (%d)\n", typ,
			w->next, raw_bytes,
			get_be24(w->block + w->block_writer->header_off + 1));
	}

	if (w->next == 0) {
		writer_write_header(w, w->block);
	}

	err = padded_write(w, w->block, raw_bytes, padding);
	if (err < 0)
		return err;

	if (w->index_cap == w->index_len) {
		w->index_cap = 2 * w->index_cap + 1;
		w->index = reftable_realloc(
			w->index,
			sizeof(struct reftable_index_record) * w->index_cap);
	}

	ir.offset = w->next;
	strbuf_reset(&ir.last_key);
	strbuf_addbuf(&ir.last_key, &w->block_writer->last_key);
	w->index[w->index_len] = ir;

	w->index_len++;
	w->next += padding + raw_bytes;
	w->block_writer = NULL;
	return 0;
}

static int writer_flush_block(struct reftable_writer *w)
{
	if (w->block_writer == NULL)
		return 0;
	if (w->block_writer->entries == 0)
		return 0;
	return writer_flush_nonempty_block(w);
}

const struct reftable_stats *writer_stats(struct reftable_writer *w)
{
	return &w->stats;
}