Merge branch 'jt/p4-spell-re-with-raw-string'
[alt-git.git] / bulk-checkin.c
blobeb46b8863793e25b82ca6df27eb435476c4807a5
1 /*
2 * Copyright (c) 2011, Google Inc.
3 */
4 #include "git-compat-util.h"
5 #include "bulk-checkin.h"
6 #include "environment.h"
7 #include "gettext.h"
8 #include "hex.h"
9 #include "lockfile.h"
10 #include "repository.h"
11 #include "csum-file.h"
12 #include "pack.h"
13 #include "strbuf.h"
14 #include "tmp-objdir.h"
15 #include "packfile.h"
16 #include "object-file.h"
17 #include "object-store-ll.h"
/*
 * Depth of nested begin_odb_transaction() calls.  While non-zero,
 * index_blob_bulk_checkin() defers flushing the packfile, and
 * end_odb_transaction() only flushes when this returns to zero.
 */
static int odb_transaction_nesting;

/*
 * Temporary object directory used for batched fsync; lazily created by
 * prepare_loose_object_bulk_checkin() and migrated into the primary ODB
 * by flush_batch_fsync().  NULL when batch fsync is not active.
 */
static struct tmp_objdir *bulk_fsync_objdir;

/*
 * Accumulated state for streaming blobs into one temporary packfile.
 * The singleton instance below backs index_blob_bulk_checkin().
 */
static struct bulk_checkin_packfile {
	char *pack_tmp_name;		/* path of the temporary pack being written */
	struct hashfile *f;		/* writer over the temp pack; NULL until lazily created */
	off_t offset;			/* current write offset within the pack */
	struct pack_idx_option pack_idx_opts;

	struct pack_idx_entry **written;	/* entries for objects written so far */
	uint32_t alloc_written;			/* allocated length of "written" */
	uint32_t nr_written;			/* number of valid entries in "written" */
} bulk_checkin_packfile;
34 static void finish_tmp_packfile(struct strbuf *basename,
35 const char *pack_tmp_name,
36 struct pack_idx_entry **written_list,
37 uint32_t nr_written,
38 struct pack_idx_option *pack_idx_opts,
39 unsigned char hash[])
41 char *idx_tmp_name = NULL;
43 stage_tmp_packfiles(basename, pack_tmp_name, written_list, nr_written,
44 NULL, pack_idx_opts, hash, &idx_tmp_name);
45 rename_tmp_packfile_idx(basename, &idx_tmp_name);
47 free(idx_tmp_name);
/*
 * Finalize and install the in-progress bulk-checkin packfile, then reset
 * *state to empty.  No-op when no pack was ever started (state->f is NULL).
 */
static void flush_bulk_checkin_packfile(struct bulk_checkin_packfile *state)
{
	unsigned char hash[GIT_MAX_RAWSZ];
	struct strbuf packname = STRBUF_INIT;
	int i;

	if (!state->f)
		return;

	if (state->nr_written == 0) {
		/* Nothing made it into the pack; just discard the temp file. */
		close(state->f->fd);
		unlink(state->pack_tmp_name);
		goto clear_exit;
	} else if (state->nr_written == 1) {
		/* Header already claims one object; finalize with fsync. */
		finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK,
				  CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
	} else {
		/*
		 * The header was written claiming a single object (see
		 * prepare_to_stream()); rewrite it with the real object
		 * count and recompute the trailing checksum.
		 */
		int fd = finalize_hashfile(state->f, hash, FSYNC_COMPONENT_PACK, 0);
		fixup_pack_header_footer(fd, hash, state->pack_tmp_name,
					 state->nr_written, hash,
					 state->offset);
		close(fd);
	}

	strbuf_addf(&packname, "%s/pack/pack-%s.", get_object_directory(),
		    hash_to_hex(hash));
	finish_tmp_packfile(&packname, state->pack_tmp_name,
			    state->written, state->nr_written,
			    &state->pack_idx_opts, hash);
	for (i = 0; i < state->nr_written; i++)
		free(state->written[i]);

clear_exit:
	free(state->written);
	memset(state, 0, sizeof(*state));

	strbuf_release(&packname);
	/* Make objects we just wrote available to ourselves */
	reprepare_packed_git(the_repository);
}
/*
 * Cleanup after batch-mode fsync_object_files: issue one hardware flush
 * as a durability barrier, then make the batched objects visible.
 */
static void flush_batch_fsync(void)
{
	struct strbuf temp_path = STRBUF_INIT;
	struct tempfile *temp;

	if (!bulk_fsync_objdir)
		return;

	/*
	 * Issue a full hardware flush against a temporary file to ensure
	 * that all objects are durable before any renames occur. The code in
	 * fsync_loose_object_bulk_checkin has already issued a writeout
	 * request, but it has not flushed any writeback cache in the storage
	 * hardware or any filesystem logs. This fsync call acts as a barrier
	 * to ensure that the data in each new object file is durable before
	 * the final name is visible.
	 */
	strbuf_addf(&temp_path, "%s/bulk_fsync_XXXXXX", get_object_directory());
	temp = xmks_tempfile(temp_path.buf);
	fsync_or_die(get_tempfile_fd(temp), get_tempfile_path(temp));
	delete_tempfile(&temp);
	strbuf_release(&temp_path);

	/*
	 * Make the object files visible in the primary ODB after their data is
	 * fully durable.
	 */
	tmp_objdir_migrate(bulk_fsync_objdir);
	bulk_fsync_objdir = NULL;
}
125 static int already_written(struct bulk_checkin_packfile *state, struct object_id *oid)
127 int i;
129 /* The object may already exist in the repository */
130 if (repo_has_object_file(the_repository, oid))
131 return 1;
133 /* Might want to keep the list sorted */
134 for (i = 0; i < state->nr_written; i++)
135 if (oideq(&state->written[i]->oid, oid))
136 return 1;
138 /* This is a new object we need to keep */
139 return 0;
/*
 * Read the contents from fd for size bytes, streaming it to the
 * packfile in state while updating the hash in ctx. Signal a failure
 * by returning a negative value when the resulting pack would exceed
 * the pack size limit and this is not the first object in the pack,
 * so that the caller can discard what we wrote from the current pack
 * by truncating it and opening a new one. The caller will then call
 * us again after rewinding the input fd.
 *
 * The already_hashed_to pointer is kept untouched by the caller to
 * make sure we do not hash the same byte when we are called
 * again. This way, the caller does not have to checkpoint its hash
 * status before calling us just in case we ask it to call us again
 * with a new pack.
 */
static int stream_blob_to_pack(struct bulk_checkin_packfile *state,
			       git_hash_ctx *ctx, off_t *already_hashed_to,
			       int fd, size_t size, const char *path,
			       unsigned flags)
{
	git_zstream s;
	unsigned char ibuf[16384];	/* raw input read from fd */
	unsigned char obuf[16384];	/* deflated output headed for the pack */
	unsigned hdrlen;
	int status = Z_OK;
	int write_object = (flags & HASH_WRITE_OBJECT);
	off_t offset = 0;		/* bytes consumed from fd so far */

	git_deflate_init(&s, pack_compression_level);

	/* The in-pack object header precedes the deflated data in obuf. */
	hdrlen = encode_in_pack_object_header(obuf, sizeof(obuf), OBJ_BLOB, size);
	s.next_out = obuf + hdrlen;
	s.avail_out = sizeof(obuf) - hdrlen;

	while (status != Z_STREAM_END) {
		if (size && !s.avail_in) {
			/* Refill the input buffer from fd. */
			ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
			ssize_t read_result = read_in_full(fd, ibuf, rsize);
			if (read_result < 0)
				die_errno("failed to read from '%s'", path);
			if (read_result != rsize)
				die("failed to read %d bytes from '%s'",
				    (int)rsize, path);
			offset += rsize;
			/*
			 * Feed the object hash only the bytes not hashed on a
			 * previous attempt (see already_hashed_to above).
			 */
			if (*already_hashed_to < offset) {
				size_t hsize = offset - *already_hashed_to;
				if (rsize < hsize)
					hsize = rsize;
				if (hsize)
					the_hash_algo->update_fn(ctx, ibuf, hsize);
				*already_hashed_to = offset;
			}
			s.next_in = ibuf;
			s.avail_in = rsize;
			size -= rsize;
		}

		/* Z_FINISH once all input has been handed to zlib. */
		status = git_deflate(&s, size ? 0 : Z_FINISH);

		if (!s.avail_out || status == Z_STREAM_END) {
			if (write_object) {
				size_t written = s.next_out - obuf;

				/* would we bust the size limit? */
				if (state->nr_written &&
				    pack_size_limit_cfg &&
				    pack_size_limit_cfg < state->offset + written) {
					git_deflate_abort(&s);
					return -1;
				}

				hashwrite(state->f, obuf, written);
				state->offset += written;
			}
			s.next_out = obuf;
			s.avail_out = sizeof(obuf);
		}

		switch (status) {
		case Z_OK:
		case Z_BUF_ERROR:
		case Z_STREAM_END:
			continue;
		default:
			die("unexpected deflate failure: %d", status);
		}
	}
	git_deflate_end(&s);
	return 0;
}
233 /* Lazily create backing packfile for the state */
234 static void prepare_to_stream(struct bulk_checkin_packfile *state,
235 unsigned flags)
237 if (!(flags & HASH_WRITE_OBJECT) || state->f)
238 return;
240 state->f = create_tmp_packfile(&state->pack_tmp_name);
241 reset_pack_idx_option(&state->pack_idx_opts);
243 /* Pretend we are going to write only one object */
244 state->offset = write_pack_header(state->f, 1);
245 if (!state->offset)
246 die_errno("unable to write pack header");
/*
 * Stream the blob of "size" bytes readable from "fd" into the bulk
 * checkin packfile, computing its object id into "result_oid".  If the
 * pack size limit would be exceeded, the partially written object is
 * truncated away, the current pack is flushed, and the write is retried
 * into a fresh pack (after seeking fd back to where we started).
 * Returns 0 on success, negative on error.
 */
static int deflate_blob_to_pack(struct bulk_checkin_packfile *state,
				struct object_id *result_oid,
				int fd, size_t size,
				const char *path, unsigned flags)
{
	off_t seekback, already_hashed_to;
	git_hash_ctx ctx;
	unsigned char obuf[16384];
	unsigned header_len;
	struct hashfile_checkpoint checkpoint = {0};
	struct pack_idx_entry *idx = NULL;

	/* Remember where fd is, so a retry can rewind to it. */
	seekback = lseek(fd, 0, SEEK_CUR);
	if (seekback == (off_t) -1)
		return error("cannot find the current offset");

	/* The object hash covers the loose-format header plus contents. */
	header_len = format_object_header((char *)obuf, sizeof(obuf),
					  OBJ_BLOB, size);
	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, obuf, header_len);
	the_hash_algo->init_fn(&checkpoint.ctx);

	/* Note: idx is non-NULL when we are writing */
	if ((flags & HASH_WRITE_OBJECT) != 0)
		CALLOC_ARRAY(idx, 1);

	already_hashed_to = 0;

	while (1) {
		prepare_to_stream(state, flags);
		if (idx) {
			/* Checkpoint so a failed attempt can be truncated away. */
			hashfile_checkpoint(state->f, &checkpoint);
			idx->offset = state->offset;
			crc32_begin(state->f);
		}
		if (!stream_blob_to_pack(state, &ctx, &already_hashed_to,
					 fd, size, path, flags))
			break;
		/*
		 * Writing this object to the current pack will make
		 * it too big; we need to truncate it, start a new
		 * pack, and write into it.
		 */
		if (!idx)
			BUG("should not happen");
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		flush_bulk_checkin_packfile(state);
		if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
			return error("cannot seek back");
	}
	the_hash_algo->final_oid_fn(result_oid, &ctx);
	if (!idx)
		return 0;

	idx->crc32 = crc32_end(state->f);
	if (already_written(state, result_oid)) {
		/* Duplicate: drop the bytes we just wrote for it. */
		hashfile_truncate(state->f, &checkpoint);
		state->offset = checkpoint.offset;
		free(idx);
	} else {
		oidcpy(&idx->oid, result_oid);
		ALLOC_GROW(state->written,
			   state->nr_written + 1,
			   state->alloc_written);
		state->written[state->nr_written++] = idx;
	}
	return 0;
}
319 void prepare_loose_object_bulk_checkin(void)
322 * We lazily create the temporary object directory
323 * the first time an object might be added, since
324 * callers may not know whether any objects will be
325 * added at the time they call begin_odb_transaction.
327 if (!odb_transaction_nesting || bulk_fsync_objdir)
328 return;
330 bulk_fsync_objdir = tmp_objdir_create("bulk-fsync");
331 if (bulk_fsync_objdir)
332 tmp_objdir_replace_primary_odb(bulk_fsync_objdir, 0);
335 void fsync_loose_object_bulk_checkin(int fd, const char *filename)
338 * If we have an active ODB transaction, we issue a call that
339 * cleans the filesystem page cache but avoids a hardware flush
340 * command. Later on we will issue a single hardware flush
341 * before renaming the objects to their final names as part of
342 * flush_batch_fsync.
344 if (!bulk_fsync_objdir ||
345 git_fsync(fd, FSYNC_WRITEOUT_ONLY) < 0) {
346 if (errno == ENOSYS)
347 warning(_("core.fsyncMethod = batch is unsupported on this platform"));
348 fsync_or_die(fd, filename);
352 int index_blob_bulk_checkin(struct object_id *oid,
353 int fd, size_t size,
354 const char *path, unsigned flags)
356 int status = deflate_blob_to_pack(&bulk_checkin_packfile, oid, fd, size,
357 path, flags);
358 if (!odb_transaction_nesting)
359 flush_bulk_checkin_packfile(&bulk_checkin_packfile);
360 return status;
363 void begin_odb_transaction(void)
365 odb_transaction_nesting += 1;
/*
 * Flush all pending transaction state: first the batched-fsync barrier
 * and object-directory migration, then the bulk-checkin packfile.
 */
void flush_odb_transaction(void)
{
	flush_batch_fsync();
	flush_bulk_checkin_packfile(&bulk_checkin_packfile);
}
374 void end_odb_transaction(void)
376 odb_transaction_nesting -= 1;
377 if (odb_transaction_nesting < 0)
378 BUG("Unbalanced ODB transaction nesting");
380 if (odb_transaction_nesting)
381 return;
383 flush_odb_transaction();