/*
Copyright 2020 Google LLC

Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
*/
#include "writer.h"

#include "system.h"

#include "block.h"
#include "constants.h"
#include "record.h"
#include "tree.h"
#include "reftable-error.h"
/* finishes a block, and writes it to storage */
static int writer_flush_block(struct reftable_writer *w);

/* deallocates memory related to the index */
static void writer_clear_index(struct reftable_writer *w);

/* finishes writing a 'r' (refs) or 'g' (reflogs) section */
static int writer_finish_public_section(struct reftable_writer *w);
static struct reftable_block_stats *
writer_reftable_block_stats(struct reftable_writer *w, uint8_t typ)
{
	switch (typ) {
	case 'r':
		return &w->stats.ref_stats;
	case 'o':
		return &w->stats.obj_stats;
	case 'i':
		return &w->stats.idx_stats;
	case 'g':
		return &w->stats.log_stats;
	}
	abort();
	return NULL;
}
/* write data, queuing the padding for the next write. Returns negative for
   error. */
static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
			int padding)
{
	int n = 0;
	if (w->pending_padding > 0) {
		uint8_t *zeroed = reftable_calloc(w->pending_padding);
		int n = w->write(w->write_arg, zeroed, w->pending_padding);
		if (n < 0)
			return n;

		w->pending_padding = 0;
		reftable_free(zeroed);
	}

	w->pending_padding = padding;
	n = w->write(w->write_arg, data, len);
	if (n < 0)
		return n;
	return 0;
}
static void options_set_defaults(struct reftable_write_options *opts)
{
	if (opts->restart_interval == 0) {
		opts->restart_interval = 16;
	}

	if (opts->hash_id == 0) {
		opts->hash_id = GIT_SHA1_FORMAT_ID;
	}
	if (opts->block_size == 0) {
		opts->block_size = DEFAULT_BLOCK_SIZE;
	}
}
static int writer_version(struct reftable_writer *w)
{
	return (w->opts.hash_id == 0 || w->opts.hash_id == GIT_SHA1_FORMAT_ID) ?
			     1 :
			     2;
}
static int writer_write_header(struct reftable_writer *w, uint8_t *dest)
{
	memcpy(dest, "REFT", 4);

	dest[4] = writer_version(w);

	put_be24(dest + 5, w->opts.block_size);
	put_be64(dest + 8, w->min_update_index);
	put_be64(dest + 16, w->max_update_index);
	if (writer_version(w) == 2) {
		put_be32(dest + 24, w->opts.hash_id);
	}
	return header_size(writer_version(w));
}
static void writer_reinit_block_writer(struct reftable_writer *w, uint8_t typ)
{
	int block_start = 0;
	if (w->next == 0) {
		block_start = header_size(writer_version(w));
	}

	strbuf_release(&w->last_key);
	block_writer_init(&w->block_writer_data, typ, w->block,
			  w->opts.block_size, block_start,
			  hash_size(w->opts.hash_id));
	w->block_writer = &w->block_writer_data;
	w->block_writer->restart_interval = w->opts.restart_interval;
}
static struct strbuf reftable_empty_strbuf = STRBUF_INIT;
struct reftable_writer *
reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
		    void *writer_arg, struct reftable_write_options *opts)
{
	struct reftable_writer *wp =
		reftable_calloc(sizeof(struct reftable_writer));
	strbuf_init(&wp->block_writer_data.last_key, 0);
	options_set_defaults(opts);
	if (opts->block_size >= (1 << 24)) {
		/* TODO - error return? */
		abort();
	}
	wp->last_key = reftable_empty_strbuf;
	wp->block = reftable_calloc(opts->block_size);
	wp->write = writer_func;
	wp->write_arg = writer_arg;
	wp->opts = *opts;
	writer_reinit_block_writer(wp, BLOCK_TYPE_REF);

	return wp;
}
void reftable_writer_set_limits(struct reftable_writer *w, uint64_t min,
				uint64_t max)
{
	w->min_update_index = min;
	w->max_update_index = max;
}
void reftable_writer_free(struct reftable_writer *w)
{
	reftable_free(w->block);
	reftable_free(w);
}
struct obj_index_tree_node {
	struct strbuf hash;
	uint64_t *offsets;
	size_t offset_len;
	size_t offset_cap;
};

#define OBJ_INDEX_TREE_NODE_INIT    \
	{                           \
		.hash = STRBUF_INIT \
	}
static int obj_index_tree_node_compare(const void *a, const void *b)
{
	return strbuf_cmp(&((const struct obj_index_tree_node *)a)->hash,
			  &((const struct obj_index_tree_node *)b)->hash);
}
static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
{
	uint64_t off = w->next;

	struct obj_index_tree_node want = { .hash = *hash };

	struct tree_node *node = tree_search(&want, &w->obj_index_tree,
					     &obj_index_tree_node_compare, 0);
	struct obj_index_tree_node *key = NULL;
	if (node == NULL) {
		struct obj_index_tree_node empty = OBJ_INDEX_TREE_NODE_INIT;
		key = reftable_malloc(sizeof(struct obj_index_tree_node));
		*key = empty;

		strbuf_reset(&key->hash);
		strbuf_addbuf(&key->hash, hash);
		tree_search((void *)key, &w->obj_index_tree,
			    &obj_index_tree_node_compare, 1);
	} else {
		key = node->key;
	}

	if (key->offset_len > 0 && key->offsets[key->offset_len - 1] == off) {
		return;
	}

	if (key->offset_len == key->offset_cap) {
		key->offset_cap = 2 * key->offset_cap + 1;
		key->offsets = reftable_realloc(
			key->offsets, sizeof(uint64_t) * key->offset_cap);
	}

	key->offsets[key->offset_len++] = off;
}
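/*
 * In short: writer_index_hash() records, per referenced object hash, the
 * offsets of the blocks holding refs that point at that object. The tree
 * built here is serialized as the OBJ section by writer_dump_object_index().
 */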
static int writer_add_record(struct reftable_writer *w,
			     struct reftable_record *rec)
{
	struct strbuf key = STRBUF_INIT;
	int err = -1;
	reftable_record_key(rec, &key);
	if (strbuf_cmp(&w->last_key, &key) >= 0) {
		err = REFTABLE_API_ERROR;
		goto done;
	}

	strbuf_reset(&w->last_key);
	strbuf_addbuf(&w->last_key, &key);
	if (w->block_writer == NULL) {
		writer_reinit_block_writer(w, reftable_record_type(rec));
	}

	assert(block_writer_type(w->block_writer) == reftable_record_type(rec));

	if (block_writer_add(w->block_writer, rec) == 0) {
		err = 0;
		goto done;
	}

	err = writer_flush_block(w);
	if (err < 0) {
		goto done;
	}

	writer_reinit_block_writer(w, reftable_record_type(rec));
	err = block_writer_add(w->block_writer, rec);
	if (err < 0) {
		goto done;
	}

	err = 0;
done:
	strbuf_release(&key);
	return err;
}
int reftable_writer_add_ref(struct reftable_writer *w,
			    struct reftable_ref_record *ref)
{
	struct reftable_record rec = { NULL };
	struct reftable_ref_record copy = *ref;
	int err = 0;

	if (ref->refname == NULL)
		return REFTABLE_API_ERROR;
	if (ref->update_index < w->min_update_index ||
	    ref->update_index > w->max_update_index)
		return REFTABLE_API_ERROR;

	reftable_record_from_ref(&rec, &copy);
	copy.update_index -= w->min_update_index;

	err = writer_add_record(w, &rec);
	if (err < 0)
		return err;

	if (!w->opts.skip_index_objects && reftable_ref_record_val1(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, (char *)reftable_ref_record_val1(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}

	if (!w->opts.skip_index_objects && reftable_ref_record_val2(ref)) {
		struct strbuf h = STRBUF_INIT;
		strbuf_add(&h, reftable_ref_record_val2(ref),
			   hash_size(w->opts.hash_id));
		writer_index_hash(w, &h);
		strbuf_release(&h);
	}
	return 0;
}
int reftable_writer_add_refs(struct reftable_writer *w,
			     struct reftable_ref_record *refs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(refs, n, reftable_ref_record_compare_name);
	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_ref(w, &refs[i]);
	}
	return err;
}
static int reftable_writer_add_log_verbatim(struct reftable_writer *w,
					    struct reftable_log_record *log)
{
	struct reftable_record rec = { NULL };
	if (w->block_writer &&
	    block_writer_type(w->block_writer) == BLOCK_TYPE_REF) {
		int err = writer_finish_public_section(w);
		if (err < 0)
			return err;
	}

	w->next -= w->pending_padding;
	w->pending_padding = 0;

	reftable_record_from_log(&rec, log);
	return writer_add_record(w, &rec);
}
int reftable_writer_add_log(struct reftable_writer *w,
			    struct reftable_log_record *log)
{
	char *input_log_message = NULL;
	struct strbuf cleaned_message = STRBUF_INIT;
	int err = 0;

	if (log->value_type == REFTABLE_LOG_DELETION)
		return reftable_writer_add_log_verbatim(w, log);

	if (log->refname == NULL)
		return REFTABLE_API_ERROR;

	input_log_message = log->value.update.message;
	if (!w->opts.exact_log_message && log->value.update.message) {
		strbuf_addstr(&cleaned_message, log->value.update.message);
		while (cleaned_message.len &&
		       cleaned_message.buf[cleaned_message.len - 1] == '\n')
			strbuf_setlen(&cleaned_message,
				      cleaned_message.len - 1);
		if (strchr(cleaned_message.buf, '\n')) {
			/* multiple lines not allowed. */
			err = REFTABLE_API_ERROR;
			goto done;
		}
		strbuf_addstr(&cleaned_message, "\n");
		log->value.update.message = cleaned_message.buf;
	}

	err = reftable_writer_add_log_verbatim(w, log);
	log->value.update.message = input_log_message;
done:
	strbuf_release(&cleaned_message);
	return err;
}
int reftable_writer_add_logs(struct reftable_writer *w,
			     struct reftable_log_record *logs, int n)
{
	int err = 0;
	int i = 0;
	QSORT(logs, n, reftable_log_record_compare_key);

	for (i = 0; err == 0 && i < n; i++) {
		err = reftable_writer_add_log(w, &logs[i]);
	}
	return err;
}
static int writer_finish_section(struct reftable_writer *w)
{
	uint8_t typ = block_writer_type(w->block_writer);
	uint64_t index_start = 0;
	int max_level = 0;
	int threshold = w->opts.unpadded ? 1 : 3;
	int before_blocks = w->stats.idx_stats.blocks;
	int err = writer_flush_block(w);
	int i = 0;
	struct reftable_block_stats *bstats = NULL;
	if (err < 0)
		return err;

	while (w->index_len > threshold) {
		struct reftable_index_record *idx = NULL;
		int idx_len = 0;

		max_level++;
		index_start = w->next;
		writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

		idx = w->index;
		idx_len = w->index_len;

		w->index = NULL;
		w->index_len = 0;
		w->index_cap = 0;
		for (i = 0; i < idx_len; i++) {
			struct reftable_record rec = { NULL };
			reftable_record_from_index(&rec, idx + i);
			if (block_writer_add(w->block_writer, &rec) == 0) {
				continue;
			}

			err = writer_flush_block(w);
			if (err < 0)
				return err;

			writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);

			err = block_writer_add(w->block_writer, &rec);
			if (err != 0) {
				/* write into fresh block should always succeed
				 */
				abort();
			}
		}
		for (i = 0; i < idx_len; i++) {
			strbuf_release(&idx[i].last_key);
		}
		reftable_free(idx);
	}

	writer_clear_index(w);

	err = writer_flush_block(w);
	if (err < 0)
		return err;

	bstats = writer_reftable_block_stats(w, typ);
	bstats->index_blocks = w->stats.idx_stats.blocks - before_blocks;
	bstats->index_offset = index_start;
	bstats->max_index_level = max_level;

	/* Reinit lastKey, as the next section can start with any key. */
	w->last_key.len = 0;

	return 0;
}
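/*
 * Note on the loop above: each pass rewrites the accumulated index records
 * as index blocks one level up, and levels are added until at most
 * `threshold` top-level index records remain.
 */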
struct common_prefix_arg {
	struct strbuf *last;
	int max;
};

static void update_common(void *void_arg, void *key)
{
	struct common_prefix_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	if (arg->last) {
		int n = common_prefix_size(&entry->hash, arg->last);
		if (n > arg->max) {
			arg->max = n;
		}
	}
	arg->last = &entry->hash;
}
struct write_record_arg {
	struct reftable_writer *w;
	int err;
};

static void write_object_record(void *void_arg, void *key)
{
	struct write_record_arg *arg = void_arg;
	struct obj_index_tree_node *entry = key;
	struct reftable_obj_record obj_rec = {
		.hash_prefix = (uint8_t *)entry->hash.buf,
		.hash_prefix_len = arg->w->stats.object_id_len,
		.offsets = entry->offsets,
		.offset_len = entry->offset_len,
	};
	struct reftable_record rec = { NULL };
	if (arg->err < 0)
		goto done;

	reftable_record_from_obj(&rec, &obj_rec);
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	arg->err = writer_flush_block(arg->w);
	if (arg->err < 0)
		goto done;

	writer_reinit_block_writer(arg->w, BLOCK_TYPE_OBJ);
	arg->err = block_writer_add(arg->w->block_writer, &rec);
	if (arg->err == 0)
		goto done;

	obj_rec.offset_len = 0;
	arg->err = block_writer_add(arg->w->block_writer, &rec);

	/* Should be able to write into a fresh block. */
	assert(arg->err == 0);
done:;
}
static void object_record_free(void *void_arg, void *key)
{
	struct obj_index_tree_node *entry = key;

	FREE_AND_NULL(entry->offsets);
	strbuf_release(&entry->hash);
	reftable_free(entry);
}
static int writer_dump_object_index(struct reftable_writer *w)
{
	struct write_record_arg closure = { .w = w };
	struct common_prefix_arg common = { NULL };
	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &update_common, &common);
	}
	w->stats.object_id_len = common.max + 1;

	writer_reinit_block_writer(w, BLOCK_TYPE_OBJ);

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &write_object_record, &closure);
	}
	if (closure.err < 0)
		return closure.err;
	return writer_finish_section(w);
}
static int writer_finish_public_section(struct reftable_writer *w)
{
	uint8_t typ = 0;
	int err = 0;

	if (w->block_writer == NULL)
		return 0;

	typ = block_writer_type(w->block_writer);
	err = writer_finish_section(w);
	if (err < 0)
		return err;
	if (typ == BLOCK_TYPE_REF && !w->opts.skip_index_objects &&
	    w->stats.ref_stats.index_blocks > 0) {
		err = writer_dump_object_index(w);
		if (err < 0)
			return err;
	}

	if (w->obj_index_tree) {
		infix_walk(w->obj_index_tree, &object_record_free, NULL);
		tree_free(w->obj_index_tree);
		w->obj_index_tree = NULL;
	}

	w->block_writer = NULL;
	return 0;
}
int reftable_writer_close(struct reftable_writer *w)
{
	uint8_t footer[72];
	uint8_t *p = footer;
	int err = writer_finish_public_section(w);
	int empty_table = w->next == 0;
	if (err != 0)
		goto done;
	w->pending_padding = 0;
	if (empty_table) {
		/* Empty tables need a header anyway. */
		uint8_t header[28];
		int n = writer_write_header(w, header);
		err = padded_write(w, header, n, 0);
		if (err < 0)
			goto done;
	}

	p += writer_write_header(w, footer);
	put_be64(p, w->stats.ref_stats.index_offset);
	p += 8;

	put_be64(p, (w->stats.obj_stats.offset) << 5 | w->stats.object_id_len);
	p += 8;
	put_be64(p, w->stats.obj_stats.index_offset);
	p += 8;

	put_be64(p, w->stats.log_stats.offset);
	p += 8;
	put_be64(p, w->stats.log_stats.index_offset);
	p += 8;

	put_be32(p, crc32(0, footer, p - footer));
	p += 4;

	err = padded_write(w, footer, footer_size(writer_version(w)), 0);
	if (err < 0)
		goto done;

	if (empty_table) {
		err = REFTABLE_EMPTY_TABLE_ERROR;
		goto done;
	}

done:
	/* free up memory. */
	block_writer_release(&w->block_writer_data);
	writer_clear_index(w);
	strbuf_release(&w->last_key);
	return err;
}
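/*
 * Footer layout produced by reftable_writer_close() above (derived from the
 * put_be* calls): a repeat of the header, then the ref index offset,
 * (obj offset << 5 | object id length), the obj index offset, the log
 * offset and the log index offset (all 64-bit big-endian), followed by a
 * CRC-32 over the footer bytes written so far.
 */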
static void writer_clear_index(struct reftable_writer *w)
{
	int i = 0;
	for (i = 0; i < w->index_len; i++) {
		strbuf_release(&w->index[i].last_key);
	}

	FREE_AND_NULL(w->index);
	w->index_len = 0;
	w->index_cap = 0;
}
static const int debug = 0;
static int writer_flush_nonempty_block(struct reftable_writer *w)
{
	uint8_t typ = block_writer_type(w->block_writer);
	struct reftable_block_stats *bstats =
		writer_reftable_block_stats(w, typ);
	uint64_t block_typ_off = (bstats->blocks == 0) ? w->next : 0;
	int raw_bytes = block_writer_finish(w->block_writer);
	int padding = 0;
	int err = 0;
	struct reftable_index_record ir = { .last_key = STRBUF_INIT };
	if (raw_bytes < 0)
		return raw_bytes;

	if (!w->opts.unpadded && typ != BLOCK_TYPE_LOG) {
		padding = w->opts.block_size - raw_bytes;
	}

	if (block_typ_off > 0) {
		bstats->offset = block_typ_off;
	}

	bstats->entries += w->block_writer->entries;
	bstats->restarts += w->block_writer->restart_len;
	bstats->blocks++;
	w->stats.blocks++;

	if (debug) {
		fprintf(stderr, "block %c off %" PRIu64 " sz %d (%d)\n", typ,
			w->next, raw_bytes,
			get_be24(w->block + w->block_writer->header_off + 1));
	}

	if (w->next == 0) {
		writer_write_header(w, w->block);
	}

	err = padded_write(w, w->block, raw_bytes, padding);
	if (err < 0)
		return err;

	if (w->index_cap == w->index_len) {
		w->index_cap = 2 * w->index_cap + 1;
		w->index = reftable_realloc(
			w->index,
			sizeof(struct reftable_index_record) * w->index_cap);
	}

	ir.offset = w->next;
	strbuf_reset(&ir.last_key);
	strbuf_addbuf(&ir.last_key, &w->block_writer->last_key);
	w->index[w->index_len] = ir;

	w->index_len++;
	w->next += padding + raw_bytes;
	w->block_writer = NULL;
	return 0;
}
static int writer_flush_block(struct reftable_writer *w)
{
	if (w->block_writer == NULL)
		return 0;
	if (w->block_writer->entries == 0)
		return 0;
	return writer_flush_nonempty_block(w);
}
const struct reftable_stats *writer_stats(struct reftable_writer *w)
{
	return &w->stats;
}