parallel-checkout.c
#include "git-compat-util.h"
#include "config.h"
#include "entry.h"
#include "gettext.h"
#include "hash.h"
#include "hex.h"
#include "parallel-checkout.h"
#include "pkt-line.h"
#include "progress.h"
#include "read-cache-ll.h"
#include "run-command.h"
#include "sigchain.h"
#include "streaming.h"
#include "symlinks.h"
#include "thread-utils.h"
#include "trace2.h"

struct pc_worker {
	struct child_process cp;
	size_t next_item_to_complete, nr_items_to_complete;
};

struct parallel_checkout {
	enum pc_status status;
	struct parallel_checkout_item *items; /* The parallel checkout queue. */
	size_t nr, alloc;
	struct progress *progress;
	unsigned int *progress_cnt;
};

static struct parallel_checkout parallel_checkout;
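
/*
 * The status field above follows a simple lifecycle: PC_UNINITIALIZED
 * until init_parallel_checkout() moves it to PC_ACCEPTING_ENTRIES, then
 * PC_RUNNING while run_parallel_checkout() does its work, and finally
 * back to zero (PC_UNINITIALIZED) when finish_parallel_checkout()
 * clears the whole struct.
 */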

enum pc_status parallel_checkout_status(void)
{
	return parallel_checkout.status;
}

static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;

void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
	char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

	if (env_workers && *env_workers) {
		if (strtol_i(env_workers, 10, num_workers)) {
			die(_("invalid value for '%s': '%s'"),
			    "GIT_TEST_CHECKOUT_WORKERS", env_workers);
		}
		if (*num_workers < 1)
			*num_workers = online_cpus();

		*threshold = 0;
		return;
	}

	if (git_config_get_int("checkout.workers", num_workers))
		*num_workers = DEFAULT_NUM_WORKERS;
	else if (*num_workers < 1)
		*num_workers = online_cpus();

	if (git_config_get_int("checkout.thresholdForParallelism", threshold))
		*threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}
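
/*
 * Example: `git -c checkout.workers=0 checkout ...` normalizes the zero
 * to online_cpus(), while the unset default of checkout.workers=1 keeps
 * checkout sequential. When GIT_TEST_CHECKOUT_WORKERS is set, the
 * threshold is forced to 0 so that even tiny checkouts exercise the
 * parallel code path.
 */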

void init_parallel_checkout(void)
{
	if (parallel_checkout.status != PC_UNINITIALIZED)
		BUG("parallel checkout already initialized");

	parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}

static void finish_parallel_checkout(void)
{
	if (parallel_checkout.status == PC_UNINITIALIZED)
		BUG("cannot finish parallel checkout: not initialized yet");

	free(parallel_checkout.items);
	memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}

static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
					     const struct conv_attrs *ca)
{
	enum conv_attrs_classification c;
	size_t packed_item_size;

	/*
	 * Symlinks cannot be checked out in parallel as, in case of path
	 * collision, they could racily replace leading directories of other
	 * entries being checked out. Submodules are checked out in child
	 * processes, which have their own parallel checkout queues.
	 */
	if (!S_ISREG(ce->ce_mode))
		return 0;

	packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
		(ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

	/*
	 * The amount of data we send to the workers per checkout item is
	 * typically small (75~300B). So unless we find an insanely huge path
	 * of 64KB, we should never reach the 65KB limit of one pkt-line. If
	 * that does happen, we let the sequential code handle the item.
	 */
	if (packed_item_size > LARGE_PACKET_DATA_MAX)
		return 0;

	c = classify_conv_attrs(ca);
	switch (c) {
	case CA_CLASS_INCORE:
		return 1;

	case CA_CLASS_INCORE_FILTER:
		/*
		 * It would be safe to allow concurrent instances of
		 * single-file smudge filters, like rot13, but we should not
		 * assume that all filters are parallel-process safe. So we
		 * don't allow this.
		 */
		return 0;

	case CA_CLASS_INCORE_PROCESS:
		/*
		 * The parallel queue and the delayed queue are not compatible,
		 * so they must be kept completely separated. And we can't tell
		 * if a long-running process will delay its response without
		 * actually asking it to perform the filtering. Therefore, this
		 * type of filter is not allowed in parallel checkout.
		 *
		 * Furthermore, there should only be one instance of the
		 * long-running process filter as we don't know how it is
		 * managing its own concurrency. So, spreading the entries that
		 * require such a filter among the parallel workers would
		 * require a lot more inter-process communication. We would
		 * probably have to designate a single process to interact with
		 * the filter and send all the necessary data to it, for each
		 * entry.
		 */
		return 0;

	case CA_CLASS_STREAMABLE:
		return 1;

	default:
		BUG("unsupported conv_attrs classification '%d'", c);
	}
}

int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
		     int *checkout_counter)
{
	struct parallel_checkout_item *pc_item;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
	    !is_eligible_for_parallel_checkout(ce, ca))
		return -1;

	ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
		   parallel_checkout.alloc);

	pc_item = &parallel_checkout.items[parallel_checkout.nr];
	pc_item->ce = ce;
	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
	pc_item->status = PC_ITEM_PENDING;
	pc_item->id = parallel_checkout.nr;
	pc_item->checkout_counter = checkout_counter;
	parallel_checkout.nr++;

	return 0;
}
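
/*
 * Note on ownership: the queue borrows `ce` by pointer, so the cache
 * entry must outlive the parallel checkout run, whereas `ca` is copied
 * by value into the item and may be discarded by the caller as soon as
 * enqueue_checkout() returns.
 */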

size_t pc_queue_size(void)
{
	return parallel_checkout.nr;
}

static void advance_progress_meter(void)
{
	if (parallel_checkout.progress) {
		(*parallel_checkout.progress_cnt)++;
		display_progress(parallel_checkout.progress,
				 *parallel_checkout.progress_cnt);
	}
}

static int handle_results(struct checkout *state)
{
	int ret = 0;
	size_t i;
	int have_pending = 0;

	/*
	 * We first update the successfully written entries with the collected
	 * stat() data, so that they can be found by mark_colliding_entries(),
	 * in the next loop, when necessary.
	 */
	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		if (pc_item->status == PC_ITEM_WRITTEN)
			update_ce_after_write(state, pc_item->ce, &pc_item->st);
	}

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

		switch (pc_item->status) {
		case PC_ITEM_WRITTEN:
			if (pc_item->checkout_counter)
				(*pc_item->checkout_counter)++;
			break;
		case PC_ITEM_COLLIDED:
			/*
			 * The entry could not be checked out due to a path
			 * collision with another entry. Since there can only
			 * be one entry of each colliding group on the disk, we
			 * could skip trying to check out this one and move on.
			 * However, this would leave the unwritten entries with
			 * null stat() fields on the index, which could
			 * potentially slow down subsequent operations that
			 * require refreshing it: git would not be able to
			 * trust st_size and would have to go to the filesystem
			 * to see if the contents match (see ie_modified()).
			 *
			 * Instead, let's pay the overhead only once, now, and
			 * call checkout_entry_ca() again for this file, to
			 * have its stat() data stored in the index. This also
			 * has the benefit of adding this entry and its
			 * colliding pair to the collision report message.
			 * Additionally, this overwriting behavior is consistent
			 * with what the sequential checkout does, so it doesn't
			 * add any extra overhead.
			 */
			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
						 state, NULL,
						 pc_item->checkout_counter);
			advance_progress_meter();
			break;
		case PC_ITEM_PENDING:
			have_pending = 1;
			/* fall through */
		case PC_ITEM_FAILED:
			ret = -1;
			break;
		default:
			BUG("unknown checkout item status in parallel checkout");
		}
	}

	if (have_pending)
		error("parallel checkout finished with pending entries");

	return ret;
}

static int reset_fd(int fd, const char *path)
{
	if (lseek(fd, 0, SEEK_SET) != 0)
		return error_errno("failed to rewind descriptor of '%s'", path);
	if (ftruncate(fd, 0))
		return error_errno("failed to truncate file '%s'", path);
	return 0;
}

static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
			       const char *path)
{
	int ret;
	struct stream_filter *filter;
	struct strbuf buf = STRBUF_INIT;
	char *blob;
	size_t size;
	ssize_t wrote;

	/* Sanity check */
	assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

	filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
	if (filter) {
		if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
			/* On error, reset fd to try writing without streaming */
			if (reset_fd(fd, path))
				return -1;
		} else {
			return 0;
		}
	}

	blob = read_blob_entry(pc_item->ce, &size);
	if (!blob)
		return error("cannot read object %s '%s'",
			     oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

	/*
	 * checkout metadata is used to give context for external process
	 * filters. Files requiring such filters are not eligible for parallel
	 * checkout, so pass NULL. Note: if that changes, the metadata must also
	 * be passed from the main process to the workers.
	 */
	ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
					 blob, size, &buf, NULL);

	if (ret) {
		size_t newsize;
		free(blob);
		blob = strbuf_detach(&buf, &newsize);
		size = newsize;
	}

	wrote = write_in_full(fd, blob, size);
	free(blob);
	if (wrote < 0)
		return error("unable to write file '%s'", path);

	return 0;
}
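
/*
 * In short, write_pc_item_to_fd() prefers the streaming path when a
 * stream filter is available, and otherwise falls back to reading the
 * whole blob in core and converting it (also when streaming fails
 * midway, after rewinding and truncating the partially written file).
 */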

static int close_and_clear(int *fd)
{
	int ret = 0;

	if (*fd >= 0) {
		ret = close(*fd);
		*fd = -1;
	}

	return ret;
}

void write_pc_item(struct parallel_checkout_item *pc_item,
		   struct checkout *state)
{
	unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
	int fd = -1, fstat_done = 0;
	struct strbuf path = STRBUF_INIT;
	const char *dir_sep;

	strbuf_add(&path, state->base_dir, state->base_dir_len);
	strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

	dir_sep = find_last_dir_sep(path.buf);

	/*
	 * The leading dirs should have been already created by now. But, in
	 * case of path collisions, one of the dirs could have been replaced by
	 * a symlink (checked out after we enqueued this entry for parallel
	 * checkout). Thus, we must check the leading dirs again.
	 */
	if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
					   state->base_dir_len)) {
		pc_item->status = PC_ITEM_COLLIDED;
		trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
		goto out;
	}

	fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

	if (fd < 0) {
		if (errno == EEXIST || errno == EISDIR) {
			/*
			 * Errors which probably represent a path collision.
			 * Suppress the error message and mark the item to be
			 * retried later, sequentially. ENOTDIR and ENOENT are
			 * also interesting, but the above has_dirs_only_path()
			 * call should have already caught these cases.
			 */
			pc_item->status = PC_ITEM_COLLIDED;
			trace2_data_string("pcheckout", NULL,
					   "collision/basename", path.buf);
		} else {
			error_errno("failed to open file '%s'", path.buf);
			pc_item->status = PC_ITEM_FAILED;
		}
		goto out;
	}

	if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
		/* Error was already reported. */
		pc_item->status = PC_ITEM_FAILED;
		close_and_clear(&fd);
		unlink(path.buf);
		goto out;
	}

	fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

	if (close_and_clear(&fd)) {
		error_errno("unable to close file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
		error_errno("unable to stat just-written file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	pc_item->status = PC_ITEM_WRITTEN;

out:
	strbuf_release(&path);
}

static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
	size_t len_data;
	char *data, *variant;
	struct pc_item_fixed_portion *fixed_portion;
	const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
	size_t name_len = pc_item->ce->ce_namelen;
	size_t working_tree_encoding_len = working_tree_encoding ?
					   strlen(working_tree_encoding) : 0;

	/*
	 * Any changes in the calculation of the message size must also be made
	 * in is_eligible_for_parallel_checkout().
	 */
	len_data = sizeof(struct pc_item_fixed_portion) + name_len +
		   working_tree_encoding_len;

	data = xmalloc(len_data);

	fixed_portion = (struct pc_item_fixed_portion *)data;
	fixed_portion->id = pc_item->id;
	fixed_portion->ce_mode = pc_item->ce->ce_mode;
	fixed_portion->crlf_action = pc_item->ca.crlf_action;
	fixed_portion->ident = pc_item->ca.ident;
	fixed_portion->name_len = name_len;
	fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
	/*
	 * We pad the unused bytes in the hash array because, otherwise,
	 * Valgrind would complain about passing uninitialized bytes to a
	 * write() syscall. The warning doesn't represent any real risk here,
	 * but it could hinder the detection of actual errors.
	 */
	oidcpy_with_padding(&fixed_portion->oid, &pc_item->ce->oid);

	variant = data + sizeof(*fixed_portion);
	if (working_tree_encoding_len) {
		memcpy(variant, working_tree_encoding, working_tree_encoding_len);
		variant += working_tree_encoding_len;
	}
	memcpy(variant, pc_item->ce->name, name_len);

	packet_write(fd, data, len_data);

	free(data);
}
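
/*
 * Wire format recap: each item travels as a single pkt-line whose
 * payload is the fixed-size struct followed by two variable-length
 * strings (neither is NUL-terminated; their lengths are recorded in
 * the fixed portion):
 *
 *   | struct pc_item_fixed_portion | working tree encoding? | name |
 */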

static void send_batch(int fd, size_t start, size_t nr)
{
	size_t i;
	sigchain_push(SIGPIPE, SIG_IGN);
	for (i = 0; i < nr; i++)
		send_one_item(fd, &parallel_checkout.items[start + i]);
	packet_flush(fd);
	sigchain_pop(SIGPIPE);
}

static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
	struct pc_worker *workers;
	int i, workers_with_one_extra_item;
	size_t base_batch_size, batch_beginning = 0;

	ALLOC_ARRAY(workers, num_workers);

	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;

		child_process_init(cp);
		cp->git_cmd = 1;
		cp->in = -1;
		cp->out = -1;
		cp->clean_on_exit = 1;
		strvec_push(&cp->args, "checkout--worker");
		if (state->base_dir_len)
			strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
		if (start_command(cp))
			die("failed to spawn checkout worker");
	}

	base_batch_size = parallel_checkout.nr / num_workers;
	workers_with_one_extra_item = parallel_checkout.nr % num_workers;

	for (i = 0; i < num_workers; i++) {
		struct pc_worker *worker = &workers[i];
		size_t batch_size = base_batch_size;

		/* distribute the extra work evenly */
		if (i < workers_with_one_extra_item)
			batch_size++;

		send_batch(worker->cp.in, batch_beginning, batch_size);
		worker->next_item_to_complete = batch_beginning;
		worker->nr_items_to_complete = batch_size;

		batch_beginning += batch_size;
	}

	return workers;
}
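
/*
 * Worked example: with parallel_checkout.nr == 10 and num_workers == 4,
 * base_batch_size is 2 and workers_with_one_extra_item is 2, so the
 * workers receive batches of 3, 3, 2 and 2 items, beginning at queue
 * offsets 0, 3, 6 and 8, respectively.
 */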

static void finish_workers(struct pc_worker *workers, int num_workers)
{
	int i;

	/*
	 * Close pipes before calling finish_command() to let the workers
	 * exit asynchronously and avoid spending extra time on wait().
	 */
	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;
		if (cp->in >= 0)
			close(cp->in);
		if (cp->out >= 0)
			close(cp->out);
	}

	for (i = 0; i < num_workers; i++) {
		int rc = finish_command(&workers[i].cp);
		if (rc > 128) {
			/*
			 * For a normal non-zero exit, the worker should have
			 * already printed something useful to stderr. But a
			 * death by signal should be mentioned to the user.
			 */
			error("checkout worker %d died of signal %d", i, rc - 128);
		}
	}

	free(workers);
}

static inline void assert_pc_item_result_size(int got, int exp)
{
	if (got != exp)
		BUG("wrong result size from checkout worker (got %dB, exp %dB)",
		    got, exp);
}

static void parse_and_save_result(const char *buffer, int len,
				  struct pc_worker *worker)
{
	struct pc_item_result *res;
	struct parallel_checkout_item *pc_item;
	struct stat *st = NULL;

	if (len < PC_ITEM_RESULT_BASE_SIZE)
		BUG("too short result from checkout worker (got %dB, exp >=%dB)",
		    len, (int)PC_ITEM_RESULT_BASE_SIZE);

	res = (struct pc_item_result *)buffer;

	/*
	 * Worker should send either the full result struct on success, or
	 * just the base (i.e. no stat data), otherwise.
	 */
	if (res->status == PC_ITEM_WRITTEN) {
		assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
		st = &res->st;
	} else {
		assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
	}

	if (!worker->nr_items_to_complete)
		BUG("received result from supposedly finished checkout worker");
	if (res->id != worker->next_item_to_complete)
		BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
		    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

	worker->next_item_to_complete++;
	worker->nr_items_to_complete--;

	pc_item = &parallel_checkout.items[res->id];
	pc_item->status = res->status;
	if (st)
		pc_item->st = *st;

	if (res->status != PC_ITEM_COLLIDED)
		advance_progress_meter();
}
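
/*
 * Note that results are expected not only in full batches per worker
 * but also in queue order within each batch: the id check above
 * enforces that a worker reports item N of its batch before item N+1,
 * mirroring the order in which send_batch() dispatched them.
 */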

static void gather_results_from_workers(struct pc_worker *workers,
					int num_workers)
{
	int i, active_workers = num_workers;
	struct pollfd *pfds;

	CALLOC_ARRAY(pfds, num_workers);
	for (i = 0; i < num_workers; i++) {
		pfds[i].fd = workers[i].cp.out;
		pfds[i].events = POLLIN;
	}

	while (active_workers) {
		int nr = poll(pfds, num_workers, -1);

		if (nr < 0) {
			if (errno == EINTR)
				continue;
			die_errno("failed to poll checkout workers");
		}

		for (i = 0; i < num_workers && nr > 0; i++) {
			struct pc_worker *worker = &workers[i];
			struct pollfd *pfd = &pfds[i];

			if (!pfd->revents)
				continue;

			if (pfd->revents & POLLIN) {
				int len = packet_read(pfd->fd, packet_buffer,
						      sizeof(packet_buffer), 0);

				if (len < 0) {
					BUG("packet_read() returned negative value");
				} else if (!len) {
					pfd->fd = -1;
					active_workers--;
				} else {
					parse_and_save_result(packet_buffer,
							      len, worker);
				}
			} else if (pfd->revents & POLLHUP) {
				pfd->fd = -1;
				active_workers--;
			} else if (pfd->revents & (POLLNVAL | POLLERR)) {
				die("error polling from checkout worker");
			}

			nr--;
		}
	}

	free(pfds);
}

static void write_items_sequentially(struct checkout *state)
{
	size_t i;

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		write_pc_item(pc_item, state);
		if (pc_item->status != PC_ITEM_COLLIDED)
			advance_progress_meter();
	}
}

int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
			  struct progress *progress, unsigned int *progress_cnt)
{
	int ret;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
		BUG("cannot run parallel checkout: uninitialized or already running");

	parallel_checkout.status = PC_RUNNING;
	parallel_checkout.progress = progress;
	parallel_checkout.progress_cnt = progress_cnt;

	if (parallel_checkout.nr < num_workers)
		num_workers = parallel_checkout.nr;

	if (num_workers <= 1 || parallel_checkout.nr < threshold) {
		write_items_sequentially(state);
	} else {
		struct pc_worker *workers = setup_workers(state, num_workers);
		gather_results_from_workers(workers, num_workers);
		finish_workers(workers, num_workers);
	}

	ret = handle_results(state);

	finish_parallel_checkout();
	return ret;
}
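
/*
 * A minimal caller sketch (illustrative only; the real call sites live
 * in the checkout machinery, which also handles the sequential fallback
 * for entries that enqueue_checkout() rejects):
 *
 *	int num_workers, threshold;
 *
 *	get_parallel_checkout_configs(&num_workers, &threshold);
 *	init_parallel_checkout();
 *	// ... call enqueue_checkout(ce, ca, counter) per eligible entry;
 *	// a -1 return means "write this one sequentially instead" ...
 *	run_parallel_checkout(state, num_workers, threshold,
 *			      progress, &progress_cnt);
 */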