/*
 * Copyright (c) 2011, Google Inc.
 */
#include "cache.h"
#include "bulk-checkin.h"
#include "repository.h"
#include "csum-file.h"
#include "pack.h"
#include "strbuf.h"
#include "packfile.h"
#include "object-store.h"
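
/*
 * Singleton state for the in-flight bulk-checkin pack: the temporary
 * packfile being written, the running write offset, and the index
 * entries for the objects written into it so far.
 */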
static struct bulk_checkin_state {
	unsigned plugged:1;

	char *pack_tmp_name;
	struct hashfile *f;
	off_t offset;
	struct pack_idx_option pack_idx_opts;

	struct pack_idx_entry **written;
	uint32_t alloc_written;
	uint32_t nr_written;
} state;
static void finish_bulk_checkin(struct bulk_checkin_state *state)
{
	struct object_id oid;
	struct strbuf packname = STRBUF_INIT;
	int i;

	if (!state->f)
		return;

	if (state->nr_written == 0) {
		close(state->f->fd);
		unlink(state->pack_tmp_name);
		goto clear_exit;
	} else if (state->nr_written == 1) {
		finalize_hashfile(state->f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
	} else {
		int fd = finalize_hashfile(state->f, oid.hash, 0);
		fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
					 state->nr_written, oid.hash,
					 state->offset);
		close(fd);
	}

	strbuf_addf(&packname, "%s/pack/pack-", get_object_directory());
	finish_tmp_packfile(&packname, state->pack_tmp_name,
			    state->written, state->nr_written,
			    &state->pack_idx_opts, oid.hash);
	for (i = 0; i < state->nr_written; i++)
		free(state->written[i]);

clear_exit:
	free(state->written);
	memset(state, 0, sizeof(*state));

	strbuf_release(&packname);
	/* Make objects we just wrote available to ourselves */
	reprepare_packed_git(the_repository);
}
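
/*
 * Check whether the object is already available, either in the
 * repository's object store or among the objects already written to
 * the pack we are building; returns 1 if so, 0 if it is a new object
 * that still needs to be kept.
 */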
static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
{
	int i;

	/* The object may already exist in the repository */
	if (has_sha1_file(oid->hash))
		return 1;

	/* Might want to keep the list sorted */
	for (i = 0; i < state->nr_written; i++)
		if (oideq(&state->written[i]->oid, oid))
			return 1;

	/* This is a new object we need to keep */
	return 0;
}

/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_to_pack(struct bulk_checkin_state *state,
			  git_hash_ctx *ctx, off_t *already_hashed_to,
			  int fd, size_t size, enum object_type type,
			  const char *path, unsigned flags)
{
	git_zstream s;
	unsigned char obuf[16384];
	unsigned hdrlen;
	int status = Z_OK;
	int write_object = (flags & HASH_WRITE_OBJECT);
	off_t offset = 0;

	git_deflate_init(&s, pack_compression_level);

	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), type, size);
	s.next_out = obuf + hdrlen;
	s.avail_out = sizeof(obuf) - hdrlen;

	while (status != Z_STREAM_END) {
		unsigned char ibuf[16384];

		if (size && !s.avail_in) {
			ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
			ssize_t read_result = read_in_full(fd, ibuf, rsize);
			if (read_result < 0)
				die_errno("failed to read from '%s'", path);
			if (read_result != rsize)
				die("failed to read %d bytes from '%s'",
				    (int)rsize, path);
			offset += rsize;
			if (*already_hashed_to < offset) {
				size_t hsize = offset - *already_hashed_to;
				if (rsize < hsize)
					hsize = rsize;
				if (hsize)
					the_hash_algo->update_fn(ctx, ibuf, hsize);
				*already_hashed_to = offset;
			}
			s.next_in = ibuf;
			s.avail_in = rsize;
			size -= rsize;
		}

		status = git_deflate(&s, size ? 0 : Z_FINISH);

		if (!s.avail_out || status == Z_STREAM_END) {
			if (write_object) {
				size_t written = s.next_out - obuf;

				/* would we bust the size limit? */
				if (state->nr_written &&
				    pack_size_limit_cfg &&
				    pack_size_limit_cfg < state->offset + written) {
					git_deflate_abort(&s);
					return -1;
				}

				hashwrite(state->f, obuf, written);
				state->offset += written;
			}
			s.next_out = obuf;
			s.avail_out = sizeof(obuf);
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	return 0;
}

/* Lazily create backing packfile for the state */
static void prepare_to_stream(struct bulk_checkin_state *state,
			      unsigned flags)
{
	if (!(flags & HASH_WRITE_OBJECT) || state->f)
		return;

	state->f = create_tmp_packfile(&state->pack_tmp_name);
	reset_pack_idx_option(&state->pack_idx_opts);

	/* Pretend we are going to write only one object */
	state->offset = write_pack_header(state->f, 1);
	if (!state->offset)
		die_errno("unable to write pack header");
}
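
/*
 * Deflate the object read from fd into the bulk-checkin pack while
 * computing its object name into result_oid.  If the object would push
 * the pack past pack_size_limit_cfg, the partial write is truncated
 * away, the current pack is flushed, fd is rewound to where we started,
 * and the object is streamed again into a fresh pack.
 */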
static int deflate_to_pack(struct bulk_checkin_state *state,
			   struct object_id *result_oid,
			   int fd, size_t size,
			   enum object_type type, const char *path,
			   unsigned flags)
{
	off_t seekback, already_hashed_to;
	git_hash_ctx ctx;
	unsigned char obuf[16384];
	unsigned header_len;
	struct hashfile_checkpoint checkpoint;
	struct pack_idx_entry *idx = NULL;

	seekback = lseek(fd, 0, SEEK_CUR);
	if (seekback == (off_t) -1)
		return error("cannot find the current offset");

	header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
			       type_name(type), (uintmax_t)size) + 1;
	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, obuf, header_len);

	/* Note: idx is non-NULL when we are writing */
	if ((flags & HASH_WRITE_OBJECT) != 0)
		idx = xcalloc(1, sizeof(*idx));

	already_hashed_to = 0;

	while (1) {
		prepare_to_stream(state, flags);
		if (idx) {
			hashfile_checkpoint(state->f, &checkpoint);
			idx->offset = state->offset;
			crc32_begin(state->f);
		}
		if (!stream_to_pack(state, &ctx, &already_hashed_to,
				    fd, size, type, path, flags))
			break;
		/*
		 * Writing this object to the current pack will make
		 * it too big; we need to truncate it, start a new
		 * pack, and write into it.
		 */
		if (!idx)
			BUG("should not happen");
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		finish_bulk_checkin(state);
		if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
			return error("cannot seek back");
	}

	the_hash_algo->final_fn(result_oid->hash, &ctx);
	if (!idx)
		return 0;

	idx->crc32 = crc32_end(state->f);
	if (already_written(state, result_oid)) {
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		free(idx);
	} else {
		oidcpy(&idx->oid, result_oid);
		ALLOC_GROW(state->written,
			   state->nr_written + 1,
			   state->alloc_written);
		state->written[state->nr_written++] = idx;
	}
	return 0;
}
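
/*
 * Write the object read from fd into the bulk-checkin pack and store
 * its object name in oid.  Unless bulk checkin has been plugged, the
 * pack is flushed into the object store immediately afterwards.
 */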
int index_bulk_checkin(struct object_id *oid,
		       int fd, size_t size, enum object_type type,
		       const char *path, unsigned flags)
{
	int status = deflate_to_pack(&state, oid, fd, size, type,
				     path, flags);
	if (!state.plugged)
		finish_bulk_checkin(&state);
	return status;
}
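
/*
 * plug_bulk_checkin() and unplug_bulk_checkin() bracket a series of
 * index_bulk_checkin() calls so that many large objects share one
 * packfile instead of each call flushing its own.  An illustrative
 * sketch of a caller (fd, st and path are hypothetical and not taken
 * from this file):
 *
 *	struct object_id oid;
 *
 *	plug_bulk_checkin();
 *	if (index_bulk_checkin(&oid, fd, st.st_size, OBJ_BLOB,
 *			       path, HASH_WRITE_OBJECT))
 *		die("unable to stream '%s' into a pack", path);
 *	unplug_bulk_checkin();
 */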
void plug_bulk_checkin(void)
{
	state.plugged = 1;
}

void unplug_bulk_checkin(void)
{
	state.plugged = 0;
	if (state.f)
		finish_bulk_checkin(&state);
}