/* parallel-checkout.c */
#include "cache.h"
#include "config.h"
#include "entry.h"
#include "parallel-checkout.h"
#include "pkt-line.h"
#include "progress.h"
#include "run-command.h"
#include "sigchain.h"
#include "streaming.h"
#include "thread-utils.h"
#include "trace2.h"

struct pc_worker {
	struct child_process cp;
	size_t next_item_to_complete, nr_items_to_complete;
};

struct parallel_checkout {
	enum pc_status status;
	struct parallel_checkout_item *items; /* The parallel checkout queue. */
	size_t nr, alloc;
	struct progress *progress;
	unsigned int *progress_cnt;
};

static struct parallel_checkout parallel_checkout;
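
/*
 * A rough sketch of how callers are expected to drive this machinery
 * (error handling omitted; enqueue_checkout() returns non-zero for
 * entries that must instead be written sequentially):
 *
 *	init_parallel_checkout();
 *	... enqueue_checkout(ce, &ca, &counter) for each candidate entry ...
 *	run_parallel_checkout(state, num_workers, threshold,
 *			      progress, &progress_cnt);
 */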

enum pc_status parallel_checkout_status(void)
{
	return parallel_checkout.status;
}

static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;
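
/*
 * In short: checkout.workers <= 0 means one worker per available core
 * (online_cpus()), while leaving it unset gives DEFAULT_NUM_WORKERS,
 * i.e. sequential checkout. GIT_TEST_CHECKOUT_WORKERS, when set,
 * overrides the config and also zeroes the threshold, so that even
 * tiny checkouts exercise the parallel code in test runs.
 */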

void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
	char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

	if (env_workers && *env_workers) {
		if (strtol_i(env_workers, 10, num_workers)) {
			die(_("invalid value for '%s': '%s'"),
			    "GIT_TEST_CHECKOUT_WORKERS", env_workers);
		}
		if (*num_workers < 1)
			*num_workers = online_cpus();

		*threshold = 0;
		return;
	}

	if (git_config_get_int("checkout.workers", num_workers))
		*num_workers = DEFAULT_NUM_WORKERS;
	else if (*num_workers < 1)
		*num_workers = online_cpus();

	if (git_config_get_int("checkout.thresholdForParallelism", threshold))
		*threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}

void init_parallel_checkout(void)
{
	if (parallel_checkout.status != PC_UNINITIALIZED)
		BUG("parallel checkout already initialized");

	parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}

static void finish_parallel_checkout(void)
{
	if (parallel_checkout.status == PC_UNINITIALIZED)
		BUG("cannot finish parallel checkout: not initialized yet");

	free(parallel_checkout.items);
	memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}
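
/*
 * Only regular files whose content conversion can be performed entirely
 * in-core, or streamed, may be checked out in parallel. The function
 * below spells out the reasoning for each conversion class; notably,
 * long-running filter processes (such as the protocol Git LFS uses)
 * always take the sequential path.
 */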

static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
					     const struct conv_attrs *ca)
{
	enum conv_attrs_classification c;
	size_t packed_item_size;

	/*
	 * Symlinks cannot be checked out in parallel as, in case of path
	 * collision, they could racily replace leading directories of other
	 * entries being checked out. Submodules are checked out in child
	 * processes, which have their own parallel checkout queues.
	 */
	if (!S_ISREG(ce->ce_mode))
		return 0;

	packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
		(ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

	/*
	 * The amount of data we send to the workers per checkout item is
	 * typically small (75~300B). So unless we find an insanely huge path
	 * of 64KB, we should never reach the 65KB limit of one pkt-line. If
	 * that does happen, we let the sequential code handle the item.
	 */
	if (packed_item_size > LARGE_PACKET_DATA_MAX)
		return 0;

	c = classify_conv_attrs(ca);
	switch (c) {
	case CA_CLASS_INCORE:
		return 1;

	case CA_CLASS_INCORE_FILTER:
		/*
		 * It would be safe to allow concurrent instances of
		 * single-file smudge filters, like rot13, but we should not
		 * assume that all filters are parallel-process safe. So we
		 * don't allow this.
		 */
		return 0;

	case CA_CLASS_INCORE_PROCESS:
		/*
		 * The parallel queue and the delayed queue are not compatible,
		 * so they must be kept completely separated. And we can't tell
		 * if a long-running process will delay its response without
		 * actually asking it to perform the filtering. Therefore, this
		 * type of filter is not allowed in parallel checkout.
		 *
		 * Furthermore, there should only be one instance of the
		 * long-running process filter as we don't know how it is
		 * managing its own concurrency. So, spreading the entries that
		 * need such a filter among the parallel workers would require
		 * a lot more inter-process communication. We would probably
		 * have to designate a single process to interact with the
		 * filter and send all the necessary data to it, for each
		 * entry.
		 */
		return 0;

	case CA_CLASS_STREAMABLE:
		return 1;

	default:
		BUG("unsupported conv_attrs classification '%d'", c);
	}
}

int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
		     int *checkout_counter)
{
	struct parallel_checkout_item *pc_item;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
	    !is_eligible_for_parallel_checkout(ce, ca))
		return -1;

	ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
		   parallel_checkout.alloc);

	pc_item = &parallel_checkout.items[parallel_checkout.nr];
	pc_item->ce = ce;
	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
	pc_item->status = PC_ITEM_PENDING;
	pc_item->id = parallel_checkout.nr;
	pc_item->checkout_counter = checkout_counter;
	parallel_checkout.nr++;

	return 0;
}

size_t pc_queue_size(void)
{
	return parallel_checkout.nr;
}

static void advance_progress_meter(void)
{
	if (parallel_checkout.progress) {
		(*parallel_checkout.progress_cnt)++;
		display_progress(parallel_checkout.progress,
				 *parallel_checkout.progress_cnt);
	}
}
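
/*
 * Once all items have been processed (in parallel or through the
 * sequential fallback), handle_results() merges the per-item outcomes
 * back: it updates the index with the collected stat() data, retries
 * collided entries sequentially, and reduces everything into a single
 * return code.
 */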

static int handle_results(struct checkout *state)
{
	int ret = 0;
	size_t i;
	int have_pending = 0;

	/*
	 * We first update the successfully written entries with the collected
	 * stat() data, so that they can be found by mark_colliding_entries(),
	 * in the next loop, when necessary.
	 */
	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		if (pc_item->status == PC_ITEM_WRITTEN)
			update_ce_after_write(state, pc_item->ce, &pc_item->st);
	}

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

		switch (pc_item->status) {
		case PC_ITEM_WRITTEN:
			if (pc_item->checkout_counter)
				(*pc_item->checkout_counter)++;
			break;
		case PC_ITEM_COLLIDED:
			/*
			 * The entry could not be checked out due to a path
			 * collision with another entry. Since there can only
			 * be one entry of each colliding group on the disk, we
			 * could skip trying to check out this one and move on.
			 * However, this would leave the unwritten entries with
			 * null stat() fields on the index, which could
			 * potentially slow down subsequent operations that
			 * require refreshing it: git would not be able to
			 * trust st_size and would have to go to the filesystem
			 * to see if the contents match (see ie_modified()).
			 *
			 * Instead, let's pay the overhead only once, now, and
			 * call checkout_entry_ca() again for this file, to
			 * have its stat() data stored in the index. This also
			 * has the benefit of adding this entry and its
			 * colliding pair to the collision report message.
			 * Additionally, this overwriting behavior is consistent
			 * with what the sequential checkout does, so it doesn't
			 * add any extra overhead.
			 */
			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
						 state, NULL,
						 pc_item->checkout_counter);
			advance_progress_meter();
			break;
		case PC_ITEM_PENDING:
			have_pending = 1;
			/* fall through */
		case PC_ITEM_FAILED:
			ret = -1;
			break;
		default:
			BUG("unknown checkout item status in parallel checkout");
		}
	}

	if (have_pending)
		error("parallel checkout finished with pending entries");

	return ret;
}

static int reset_fd(int fd, const char *path)
{
	if (lseek(fd, 0, SEEK_SET) != 0)
		return error_errno("failed to rewind descriptor of '%s'", path);
	if (ftruncate(fd, 0))
		return error_errno("failed to truncate file '%s'", path);
	return 0;
}
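
/*
 * Write the blob of a single item to the given file descriptor: first
 * try the streaming machinery and, if the entry cannot be streamed or
 * streaming fails midway (in which case reset_fd() above discards any
 * partially written data), fall back to loading and converting the
 * blob in memory.
 */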

static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
			       const char *path)
{
	int ret;
	struct stream_filter *filter;
	struct strbuf buf = STRBUF_INIT;
	char *blob;
	size_t size;
	ssize_t wrote;

	/* Sanity check */
	assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

	filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
	if (filter) {
		if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
			/* On error, reset fd to try writing without streaming */
			if (reset_fd(fd, path))
				return -1;
		} else {
			return 0;
		}
	}

	blob = read_blob_entry(pc_item->ce, &size);
	if (!blob)
		return error("cannot read object %s '%s'",
			     oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

	/*
	 * Checkout metadata is used to give context for external process
	 * filters. Files requiring such filters are not eligible for parallel
	 * checkout, so pass NULL. Note: if that changes, the metadata must also
	 * be passed from the main process to the workers.
	 */
	ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
					 blob, size, &buf, NULL);

	if (ret) {
		size_t newsize;
		free(blob);
		blob = strbuf_detach(&buf, &newsize);
		size = newsize;
	}

	wrote = write_in_full(fd, blob, size);
	free(blob);
	if (wrote < 0)
		return error("unable to write file '%s'", path);

	return 0;
}

static int close_and_clear(int *fd)
{
	int ret = 0;

	if (*fd >= 0) {
		ret = close(*fd);
		*fd = -1;
	}

	return ret;
}

void write_pc_item(struct parallel_checkout_item *pc_item,
		   struct checkout *state)
{
	unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
	int fd = -1, fstat_done = 0;
	struct strbuf path = STRBUF_INIT;
	const char *dir_sep;

	strbuf_add(&path, state->base_dir, state->base_dir_len);
	strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

	dir_sep = find_last_dir_sep(path.buf);

	/*
	 * The leading dirs should have been already created by now. But, in
	 * case of path collisions, one of the dirs could have been replaced by
	 * a symlink (checked out after we enqueued this entry for parallel
	 * checkout). Thus, we must check the leading dirs again.
	 */
	if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
					   state->base_dir_len)) {
		pc_item->status = PC_ITEM_COLLIDED;
		trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
		goto out;
	}

	fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

	if (fd < 0) {
		if (errno == EEXIST || errno == EISDIR) {
			/*
			 * Errors which probably represent a path collision.
			 * Suppress the error message and mark the item to be
			 * retried later, sequentially. ENOTDIR and ENOENT are
			 * also interesting, but the above has_dirs_only_path()
			 * call should have already caught these cases.
			 */
			pc_item->status = PC_ITEM_COLLIDED;
			trace2_data_string("pcheckout", NULL,
					   "collision/basename", path.buf);
		} else {
			error_errno("failed to open file '%s'", path.buf);
			pc_item->status = PC_ITEM_FAILED;
		}
		goto out;
	}

	if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
		/* Error was already reported. */
		pc_item->status = PC_ITEM_FAILED;
		close_and_clear(&fd);
		unlink(path.buf);
		goto out;
	}

	fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

	if (close_and_clear(&fd)) {
		error_errno("unable to close file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
		error_errno("unable to stat just-written file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	pc_item->status = PC_ITEM_WRITTEN;

out:
	strbuf_release(&path);
}
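
/*
 * Note that write_pc_item() detects path collisions at two points: a
 * leading directory replaced by a symlink is caught by the
 * has_dirs_only_path() re-check, and a colliding basename is caught by
 * the O_CREAT | O_EXCL open(). Both cases are merely recorded here;
 * handle_results() retries them sequentially later.
 */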

static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
	size_t len_data;
	char *data, *variant;
	struct pc_item_fixed_portion *fixed_portion;
	const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
	size_t name_len = pc_item->ce->ce_namelen;
	size_t working_tree_encoding_len = working_tree_encoding ?
					   strlen(working_tree_encoding) : 0;

	/*
	 * Any changes in the calculation of the message size must also be made
	 * in is_eligible_for_parallel_checkout().
	 */
	len_data = sizeof(struct pc_item_fixed_portion) + name_len +
		   working_tree_encoding_len;

	data = xmalloc(len_data);

	fixed_portion = (struct pc_item_fixed_portion *)data;
	fixed_portion->id = pc_item->id;
	fixed_portion->ce_mode = pc_item->ce->ce_mode;
	fixed_portion->crlf_action = pc_item->ca.crlf_action;
	fixed_portion->ident = pc_item->ca.ident;
	fixed_portion->name_len = name_len;
	fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
	/*
	 * We pad the unused bytes in the hash array because, otherwise,
	 * Valgrind would complain about passing uninitialized bytes to a
	 * write() syscall. The warning doesn't represent any real risk here,
	 * but it could hinder the detection of actual errors.
	 */
	oidcpy_with_padding(&fixed_portion->oid, &pc_item->ce->oid);

	variant = data + sizeof(*fixed_portion);
	if (working_tree_encoding_len) {
		memcpy(variant, working_tree_encoding, working_tree_encoding_len);
		variant += working_tree_encoding_len;
	}
	memcpy(variant, pc_item->ce->name, name_len);

	packet_write(fd, data, len_data);

	free(data);
}
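
/*
 * The message assembled above is sent as a single pkt-line whose
 * payload looks like this (the encoding part is present only when
 * working_tree_encoding_len is non-zero, and neither string is
 * NUL-terminated; the receiver relies on the lengths stored in the
 * fixed portion):
 *
 *	| struct pc_item_fixed_portion | encoding | name |
 *	'------------ fixed -----------'---- variant ----'
 */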

static void send_batch(int fd, size_t start, size_t nr)
{
	size_t i;
	sigchain_push(SIGPIPE, SIG_IGN);
	for (i = 0; i < nr; i++)
		send_one_item(fd, &parallel_checkout.items[start + i]);
	packet_flush(fd);
	sigchain_pop(SIGPIPE);
}

static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
	struct pc_worker *workers;
	int i, workers_with_one_extra_item;
	size_t base_batch_size, batch_beginning = 0;

	ALLOC_ARRAY(workers, num_workers);

	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;

		child_process_init(cp);
		cp->git_cmd = 1;
		cp->in = -1;
		cp->out = -1;
		cp->clean_on_exit = 1;
		strvec_push(&cp->args, "checkout--worker");
		if (state->base_dir_len)
			strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
		if (start_command(cp))
			die("failed to spawn checkout worker");
	}
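
	/*
	 * Distribute the queued items in contiguous batches: e.g. with 8
	 * items and 3 workers, base_batch_size is 2, two workers get one
	 * extra item each, and the batches end up as [0-2], [3-5] and [6-7].
	 */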
	base_batch_size = parallel_checkout.nr / num_workers;
	workers_with_one_extra_item = parallel_checkout.nr % num_workers;

	for (i = 0; i < num_workers; i++) {
		struct pc_worker *worker = &workers[i];
		size_t batch_size = base_batch_size;

		/* distribute the extra work evenly */
		if (i < workers_with_one_extra_item)
			batch_size++;

		send_batch(worker->cp.in, batch_beginning, batch_size);
		worker->next_item_to_complete = batch_beginning;
		worker->nr_items_to_complete = batch_size;

		batch_beginning += batch_size;
	}

	return workers;
}

static void finish_workers(struct pc_worker *workers, int num_workers)
{
	int i;

	/*
	 * Close pipes before calling finish_command() to let the workers
	 * exit asynchronously and avoid spending extra time on wait().
	 */
	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;
		if (cp->in >= 0)
			close(cp->in);
		if (cp->out >= 0)
			close(cp->out);
	}

	for (i = 0; i < num_workers; i++) {
		int rc = finish_command(&workers[i].cp);
		if (rc > 128) {
			/*
			 * For a normal non-zero exit, the worker should have
			 * already printed something useful to stderr. But a
			 * death by signal should be mentioned to the user.
			 */
			error("checkout worker %d died of signal %d", i, rc - 128);
		}
	}

	free(workers);
}

static inline void assert_pc_item_result_size(int got, int exp)
{
	if (got != exp)
		BUG("wrong result size from checkout worker (got %dB, exp %dB)",
		    got, exp);
}

static void parse_and_save_result(const char *buffer, int len,
				  struct pc_worker *worker)
{
	struct pc_item_result *res;
	struct parallel_checkout_item *pc_item;
	struct stat *st = NULL;

	if (len < PC_ITEM_RESULT_BASE_SIZE)
		BUG("too short result from checkout worker (got %dB, exp >=%dB)",
		    len, (int)PC_ITEM_RESULT_BASE_SIZE);

	res = (struct pc_item_result *)buffer;

	/*
	 * Worker should send either the full result struct on success, or
	 * just the base (i.e. no stat data), otherwise.
	 */
	if (res->status == PC_ITEM_WRITTEN) {
		assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
		st = &res->st;
	} else {
		assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
	}

	if (!worker->nr_items_to_complete)
		BUG("received result from supposedly finished checkout worker");
	if (res->id != worker->next_item_to_complete)
		BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
		    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

	worker->next_item_to_complete++;
	worker->nr_items_to_complete--;

	pc_item = &parallel_checkout.items[res->id];
	pc_item->status = res->status;
	if (st)
		pc_item->st = *st;

	if (res->status != PC_ITEM_COLLIDED)
		advance_progress_meter();
}
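
/*
 * Since each worker writes its batch in order, results from any given
 * worker arrive in the same order its items were sent, which is what
 * the next_item_to_complete bookkeeping above verifies. Results from
 * different workers, however, may interleave freely.
 */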

static void gather_results_from_workers(struct pc_worker *workers,
					int num_workers)
{
	int i, active_workers = num_workers;
	struct pollfd *pfds;

	CALLOC_ARRAY(pfds, num_workers);
	for (i = 0; i < num_workers; i++) {
		pfds[i].fd = workers[i].cp.out;
		pfds[i].events = POLLIN;
	}

	while (active_workers) {
		int nr = poll(pfds, num_workers, -1);

		if (nr < 0) {
			if (errno == EINTR)
				continue;
			die_errno("failed to poll checkout workers");
		}

		for (i = 0; i < num_workers && nr > 0; i++) {
			struct pc_worker *worker = &workers[i];
			struct pollfd *pfd = &pfds[i];

			if (!pfd->revents)
				continue;

			if (pfd->revents & POLLIN) {
				int len = packet_read(pfd->fd, packet_buffer,
						      sizeof(packet_buffer), 0);

				if (len < 0) {
					BUG("packet_read() returned negative value");
				} else if (!len) {
					pfd->fd = -1;
					active_workers--;
				} else {
					parse_and_save_result(packet_buffer,
							      len, worker);
				}
			} else if (pfd->revents & POLLHUP) {
				pfd->fd = -1;
				active_workers--;
			} else if (pfd->revents & (POLLNVAL | POLLERR)) {
				die("error polling from checkout worker");
			}

			nr--;
		}
	}

	free(pfds);
}

static void write_items_sequentially(struct checkout *state)
{
	size_t i;

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		write_pc_item(pc_item, state);
		if (pc_item->status != PC_ITEM_COLLIDED)
			advance_progress_meter();
	}
}

int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
			  struct progress *progress, unsigned int *progress_cnt)
{
	int ret;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
		BUG("cannot run parallel checkout: uninitialized or already running");

	parallel_checkout.status = PC_RUNNING;
	parallel_checkout.progress = progress;
	parallel_checkout.progress_cnt = progress_cnt;

	if (parallel_checkout.nr < num_workers)
		num_workers = parallel_checkout.nr;
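
	/*
	 * Too few items or a single worker make the inter-process overhead
	 * not worth it: e.g. with the default threshold of 100, a checkout
	 * of 42 eligible entries is written sequentially even if
	 * checkout.workers would allow more workers.
	 */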
	if (num_workers <= 1 || parallel_checkout.nr < threshold) {
		write_items_sequentially(state);
	} else {
		struct pc_worker *workers = setup_workers(state, num_workers);
		gather_results_from_workers(workers, num_workers);
		finish_workers(workers, num_workers);
	}

	ret = handle_results(state);

	finish_parallel_checkout();
	return ret;
}