7 #include "parallel-checkout.h"
10 #include "run-command.h"
12 #include "streaming.h"
13 #include "thread-utils.h"
struct pc_worker {
	struct child_process cp;
	size_t next_item_to_complete, nr_items_to_complete;
};
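/*
 * The two counters above track, for each worker, the id of the next
 * result we expect it to report and how many results from its batch are
 * still outstanding (see parse_and_save_result()).
 */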
struct parallel_checkout {
	enum pc_status status;
	struct parallel_checkout_item *items; /* The parallel checkout queue. */
	size_t nr, alloc;
	struct progress *progress;
	unsigned int *progress_cnt;
};
static struct parallel_checkout parallel_checkout;
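/*
 * Note: keeping the queue in a single static instance means at most one
 * parallel checkout session can exist per process, which the
 * PC_UNINITIALIZED/PC_ACCEPTING_ENTRIES status checks below enforce.
 */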
enum pc_status parallel_checkout_status(void)
{
	return parallel_checkout.status;
}
static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;
void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
	char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

	if (env_workers && *env_workers) {
		if (strtol_i(env_workers, 10, num_workers)) {
			die(_("invalid value for '%s': '%s'"),
			    "GIT_TEST_CHECKOUT_WORKERS", env_workers);
		}
		if (*num_workers < 1)
			*num_workers = online_cpus();

		*threshold = 0;
		return;
	}

	if (git_config_get_int("checkout.workers", num_workers))
		*num_workers = DEFAULT_NUM_WORKERS;
	else if (*num_workers < 1)
		*num_workers = online_cpus();

	if (git_config_get_int("checkout.thresholdForParallelism", threshold))
		*threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}
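/*
 * A rough summary of the resolution rules above:
 *
 *   GIT_TEST_CHECKOUT_WORKERS=2            -> 2 workers, threshold 0
 *   checkout.workers unset                 -> 1 worker (i.e. sequential)
 *   checkout.workers <= 0                  -> one worker per online CPU
 *   checkout.thresholdForParallelism unset -> parallelize at 100+ items
 */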
void init_parallel_checkout(void)
{
	if (parallel_checkout.status != PC_UNINITIALIZED)
		BUG("parallel checkout already initialized");

	parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}
static void finish_parallel_checkout(void)
{
	if (parallel_checkout.status == PC_UNINITIALIZED)
		BUG("cannot finish parallel checkout: not initialized yet");

	free(parallel_checkout.items);
	memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}
static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
					     const struct conv_attrs *ca)
{
	enum conv_attrs_classification c;
	size_t packed_item_size;

	/*
	 * Symlinks cannot be checked out in parallel as, in case of path
	 * collision, they could racily replace leading directories of other
	 * entries being checked out. Submodules are checked out in child
	 * processes, which have their own parallel checkout queues.
	 */
	if (!S_ISREG(ce->ce_mode))
		return 0;

	packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
		(ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

	/*
	 * The amount of data we send to the workers per checkout item is
	 * typically small (75~300B). So unless we find an insanely huge path
	 * of 64KB, we should never reach the 65KB limit of one pkt-line. If
	 * that does happen, we let the sequential code handle the item.
	 */
	if (packed_item_size > LARGE_PACKET_DATA_MAX)
		return 0;

	c = classify_conv_attrs(ca);
	switch (c) {
	case CA_CLASS_INCORE:
		return 1;

	case CA_CLASS_INCORE_FILTER:
		/*
		 * It would be safe to allow concurrent instances of
		 * single-file smudge filters, like rot13, but we should not
		 * assume that all filters are parallel-process safe. So we
		 * don't allow this.
		 */
		return 0;

	case CA_CLASS_INCORE_PROCESS:
		/*
		 * The parallel queue and the delayed queue are not compatible,
		 * so they must be kept completely separated. And we can't tell
		 * if a long-running process will delay its response without
		 * actually asking it to perform the filtering. Therefore, this
		 * type of filter is not allowed in parallel checkout.
		 *
		 * Furthermore, there should only be one instance of the
		 * long-running process filter as we don't know how it is
		 * managing its own concurrency. So, spreading the entries that
		 * require such a filter among the parallel workers would
		 * require a lot more inter-process communication. We would
		 * probably have to designate a single process to interact with
		 * the filter and send all the necessary data to it, for each
		 * entry.
		 */
		return 0;

	case CA_CLASS_STREAMABLE:
		return 1;

	default:
		BUG("unsupported conv_attrs classification '%d'", c);
	}
}
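/*
 * Eligibility at a glance (as implemented above):
 *
 *   regular file, in-core or streamable conversion -> parallel
 *   symlink, submodule, smudge/process filter, or
 *   pkt-line payload over LARGE_PACKET_DATA_MAX    -> sequential fallback
 */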
int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
		     int *checkout_counter)
{
	struct parallel_checkout_item *pc_item;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
	    !is_eligible_for_parallel_checkout(ce, ca))
		return -1;

	ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
		   parallel_checkout.alloc);

	pc_item = &parallel_checkout.items[parallel_checkout.nr];
	pc_item->ce = ce;
	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
	pc_item->status = PC_ITEM_PENDING;
	pc_item->id = parallel_checkout.nr;
	pc_item->checkout_counter = checkout_counter;
	parallel_checkout.nr++;

	return 0;
}
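/*
 * A rough sketch of the expected calling sequence (the precise call
 * sites live elsewhere in the checkout machinery, around
 * checkout_entry()). A non-zero return from enqueue_checkout() means
 * the entry must be written sequentially by the caller:
 *
 *	init_parallel_checkout();
 *	for each entry to write:
 *		if (enqueue_checkout(ce, &ca, &counter))
 *			write the entry sequentially;
 *	run_parallel_checkout(state, num_workers, threshold,
 *			      progress, &progress_cnt);
 */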
size_t pc_queue_size(void)
{
	return parallel_checkout.nr;
}
static void advance_progress_meter(void)
{
	if (parallel_checkout.progress) {
		(*parallel_checkout.progress_cnt)++;
		display_progress(parallel_checkout.progress,
				 *parallel_checkout.progress_cnt);
	}
}
static int handle_results(struct checkout *state)
{
	int ret = 0;
	size_t i;
	int have_pending = 0;

	/*
	 * We first update the successfully written entries with the collected
	 * stat() data, so that they can be found by mark_colliding_entries(),
	 * in the next loop, when necessary.
	 */
	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		if (pc_item->status == PC_ITEM_WRITTEN)
			update_ce_after_write(state, pc_item->ce, &pc_item->st);
	}

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

		switch (pc_item->status) {
		case PC_ITEM_WRITTEN:
			if (pc_item->checkout_counter)
				(*pc_item->checkout_counter)++;
			break;
		case PC_ITEM_COLLIDED:
			/*
			 * The entry could not be checked out due to a path
			 * collision with another entry. Since there can only
			 * be one entry of each colliding group on the disk, we
			 * could skip trying to check out this one and move on.
			 * However, this would leave the unwritten entries with
			 * null stat() fields in the index, which could
			 * potentially slow down subsequent operations that
			 * require refreshing it: git would not be able to
			 * trust st_size and would have to go to the filesystem
			 * to see if the contents match (see ie_modified()).
			 *
			 * Instead, let's pay the overhead only once, now, and
			 * call checkout_entry_ca() again for this file, to
			 * have its stat() data stored in the index. This also
			 * has the benefit of adding this entry and its
			 * colliding pair to the collision report message.
			 * Additionally, this overwriting behavior is consistent
			 * with what the sequential checkout does, so it doesn't
			 * add any extra overhead.
			 */
			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
						 state, NULL,
						 pc_item->checkout_counter);
			advance_progress_meter();
			break;
		case PC_ITEM_PENDING:
			have_pending = 1;
			/* fall through */
		case PC_ITEM_FAILED:
			ret = -1;
			break;
		default:
			BUG("unknown checkout item status in parallel checkout");
		}
	}

	if (have_pending)
		error("parallel checkout finished with pending entries");

	return ret;
}
static int reset_fd(int fd, const char *path)
{
	if (lseek(fd, 0, SEEK_SET) != 0)
		return error_errno("failed to rewind descriptor of '%s'", path);
	if (ftruncate(fd, 0))
		return error_errno("failed to truncate file '%s'", path);
	return 0;
}
static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
			       const char *path)
{
	int ret;
	struct stream_filter *filter;
	struct strbuf buf = STRBUF_INIT;
	char *blob;
	unsigned long size;
	ssize_t wrote;

	/* Sanity check */
	assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

	filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
	if (filter) {
		if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
			/* On error, reset fd to try writing without streaming */
			if (reset_fd(fd, path))
				return -1;
		} else {
			return 0;
		}
	}

	blob = read_blob_entry(pc_item->ce, &size);
	if (!blob)
		return error("cannot read object %s '%s'",
			     oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

	/*
	 * checkout metadata is used to give context for external process
	 * filters. Files requiring such filters are not eligible for parallel
	 * checkout, so pass NULL. Note: if that changes, the metadata must also
	 * be passed from the main process to the workers.
	 */
	ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
					 blob, size, &buf, NULL);

	if (ret) {
		size_t newsize;
		free(blob);
		blob = strbuf_detach(&buf, &newsize);
		size = newsize;
	}

	wrote = write_in_full(fd, blob, size);
	free(blob);
	if (wrote < 0)
		return error("unable to write file '%s'", path);

	return 0;
}
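/*
 * Design note: streaming is attempted first because it avoids loading
 * the whole blob into memory; only if it fails do we truncate any
 * partial output with reset_fd() and fall back to the in-core
 * read-convert-write path.
 */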
static int close_and_clear(int *fd)
{
	int ret = 0;

	if (*fd >= 0) {
		ret = close(*fd);
		*fd = -1;
	}

	return ret;
}
void write_pc_item(struct parallel_checkout_item *pc_item,
		   struct checkout *state)
{
	unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
	int fd = -1, fstat_done = 0;
	struct strbuf path = STRBUF_INIT;
	const char *dir_sep;

	strbuf_add(&path, state->base_dir, state->base_dir_len);
	strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

	dir_sep = find_last_dir_sep(path.buf);

	/*
	 * The leading dirs should have been already created by now. But, in
	 * case of path collisions, one of the dirs could have been replaced by
	 * a symlink (checked out after we enqueued this entry for parallel
	 * checkout). Thus, we must check the leading dirs again.
	 */
	if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
					   state->base_dir_len)) {
		pc_item->status = PC_ITEM_COLLIDED;
		trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
		goto out;
	}

	fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

	if (fd < 0) {
		if (errno == EEXIST || errno == EISDIR) {
			/*
			 * Errors which probably represent a path collision.
			 * Suppress the error message and mark the item to be
			 * retried later, sequentially. ENOTDIR and ENOENT are
			 * also interesting, but the above has_dirs_only_path()
			 * call should have already caught these cases.
			 */
			pc_item->status = PC_ITEM_COLLIDED;
			trace2_data_string("pcheckout", NULL,
					   "collision/basename", path.buf);
		} else {
			error_errno("failed to open file '%s'", path.buf);
			pc_item->status = PC_ITEM_FAILED;
		}
		goto out;
	}

	if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
		/* Error was already reported. */
		pc_item->status = PC_ITEM_FAILED;
		close_and_clear(&fd);
		unlink(path.buf);
		goto out;
	}

	fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

	if (close_and_clear(&fd)) {
		error_errno("unable to close file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
		error_errno("unable to stat just-written file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	pc_item->status = PC_ITEM_WRITTEN;

out:
	strbuf_release(&path);
}
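/*
 * Design note: opening with O_CREAT | O_EXCL makes open() itself the
 * collision detector. If another entry (or a symlink created by one)
 * already occupies the path, we get EEXIST or EISDIR and defer the item
 * to the sequential retry in handle_results().
 */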
static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
	size_t len_data;
	char *data, *variant;
	struct pc_item_fixed_portion *fixed_portion;
	const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
	size_t name_len = pc_item->ce->ce_namelen;
	size_t working_tree_encoding_len = working_tree_encoding ?
					   strlen(working_tree_encoding) : 0;

	/*
	 * Any changes in the calculation of the message size must also be made
	 * in is_eligible_for_parallel_checkout().
	 */
	len_data = sizeof(struct pc_item_fixed_portion) + name_len +
		   working_tree_encoding_len;

	data = xmalloc(len_data);

	fixed_portion = (struct pc_item_fixed_portion *)data;
	fixed_portion->id = pc_item->id;
	fixed_portion->ce_mode = pc_item->ce->ce_mode;
	fixed_portion->crlf_action = pc_item->ca.crlf_action;
	fixed_portion->ident = pc_item->ca.ident;
	fixed_portion->name_len = name_len;
	fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
	/*
	 * We pad the unused bytes in the hash array because, otherwise,
	 * Valgrind would complain about passing uninitialized bytes to a
	 * write() syscall. The warning doesn't represent any real risk here,
	 * but it could hinder the detection of actual errors.
	 */
	oidcpy_with_padding(&fixed_portion->oid, &pc_item->ce->oid);

	variant = data + sizeof(*fixed_portion);
	if (working_tree_encoding_len) {
		memcpy(variant, working_tree_encoding, working_tree_encoding_len);
		variant += working_tree_encoding_len;
	}
	memcpy(variant, pc_item->ce->name, name_len);

	packet_write(fd, data, len_data);

	free(data);
}
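/*
 * Wire layout of one item, as assembled above and sent as a single
 * pkt-line payload:
 *
 *   | struct pc_item_fixed_portion | encoding (optional) | ce->name |
 *
 * The name_len and working_tree_encoding_len fields in the fixed
 * portion let the worker split the variant part back up.
 */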
static void send_batch(int fd, size_t start, size_t nr)
{
	size_t i;
	sigchain_push(SIGPIPE, SIG_IGN);
	for (i = 0; i < nr; i++)
		send_one_item(fd, &parallel_checkout.items[start + i]);
	packet_flush(fd);
	sigchain_pop(SIGPIPE);
}
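/*
 * SIGPIPE is ignored around the writes so that a worker dying mid-batch
 * surfaces as a write error in the main process instead of killing it.
 */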
static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
	struct pc_worker *workers;
	int i, workers_with_one_extra_item;
	size_t base_batch_size, batch_beginning = 0;

	ALLOC_ARRAY(workers, num_workers);

	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;

		child_process_init(cp);
		cp->git_cmd = 1;
		cp->in = -1;
		cp->out = -1;
		cp->clean_on_exit = 1;
		strvec_push(&cp->args, "checkout--worker");
		if (state->base_dir_len)
			strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
		if (start_command(cp))
			die("failed to spawn checkout worker");
	}

	base_batch_size = parallel_checkout.nr / num_workers;
	workers_with_one_extra_item = parallel_checkout.nr % num_workers;

	for (i = 0; i < num_workers; i++) {
		struct pc_worker *worker = &workers[i];
		size_t batch_size = base_batch_size;

		/* distribute the extra work evenly */
		if (i < workers_with_one_extra_item)
			batch_size++;

		send_batch(worker->cp.in, batch_beginning, batch_size);
		worker->next_item_to_complete = batch_beginning;
		worker->nr_items_to_complete = batch_size;

		batch_beginning += batch_size;
	}

	return workers;
}
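/*
 * Worked example of the batch split above: 11 queued items across 3
 * workers gives base_batch_size = 3 and workers_with_one_extra_item = 2,
 * so the workers get items 0-3, 4-7 and 8-10 (batch sizes 4, 4, 3).
 */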
static void finish_workers(struct pc_worker *workers, int num_workers)
{
	int i;

	/*
	 * Close pipes before calling finish_command() to let the workers
	 * exit asynchronously and avoid spending extra time on wait().
	 */
	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;
		if (cp->in >= 0)
			close(cp->in);
		if (cp->out >= 0)
			close(cp->out);
	}

	for (i = 0; i < num_workers; i++) {
		int rc = finish_command(&workers[i].cp);
		if (rc > 128) {
			/*
			 * For a normal non-zero exit, the worker should have
			 * already printed something useful to stderr. But a
			 * death by signal should be mentioned to the user.
			 */
			error("checkout worker %d died of signal %d", i, rc - 128);
		}
	}

	free(workers);
}
static inline void assert_pc_item_result_size(int got, int exp)
{
	if (got != exp)
		BUG("wrong result size from checkout worker (got %dB, exp %dB)",
		    got, exp);
}
static void parse_and_save_result(const char *buffer, int len,
				  struct pc_worker *worker)
{
	struct pc_item_result *res;
	struct parallel_checkout_item *pc_item;
	struct stat *st = NULL;

	if (len < PC_ITEM_RESULT_BASE_SIZE)
		BUG("too short result from checkout worker (got %dB, exp >=%dB)",
		    len, (int)PC_ITEM_RESULT_BASE_SIZE);

	res = (struct pc_item_result *)buffer;

	/*
	 * Worker should send either the full result struct on success, or
	 * just the base (i.e. no stat data), otherwise.
	 */
	if (res->status == PC_ITEM_WRITTEN) {
		assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
		st = &res->st;
	} else {
		assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
	}

	if (!worker->nr_items_to_complete)
		BUG("received result from supposedly finished checkout worker");
	if (res->id != worker->next_item_to_complete)
		BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
		    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

	worker->next_item_to_complete++;
	worker->nr_items_to_complete--;

	pc_item = &parallel_checkout.items[res->id];
	pc_item->status = res->status;
	if (st)
		pc_item->st = *st;

	if (res->status != PC_ITEM_COLLIDED)
		advance_progress_meter();
}
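/*
 * The id checks above rely on each worker processing its batch strictly
 * in the order it was sent, so the results of a given worker must
 * arrive in consecutive id order.
 */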
static void gather_results_from_workers(struct pc_worker *workers,
					int num_workers)
{
	int i, active_workers = num_workers;
	struct pollfd *pfds;

	CALLOC_ARRAY(pfds, num_workers);
	for (i = 0; i < num_workers; i++) {
		pfds[i].fd = workers[i].cp.out;
		pfds[i].events = POLLIN;
	}

	while (active_workers) {
		int nr = poll(pfds, num_workers, -1);

		if (nr < 0) {
			if (errno == EINTR)
				continue;
			die_errno("failed to poll checkout workers");
		}

		for (i = 0; i < num_workers && nr > 0; i++) {
			struct pc_worker *worker = &workers[i];
			struct pollfd *pfd = &pfds[i];

			if (!pfd->revents)
				continue;

			if (pfd->revents & POLLIN) {
				int len = packet_read(pfd->fd, packet_buffer,
						      sizeof(packet_buffer), 0);

				if (len < 0) {
					BUG("packet_read() returned negative value");
				} else if (!len) {
					/* Worker finished. */
					active_workers--;
					pfd->fd = -1;
				} else {
					parse_and_save_result(packet_buffer,
							      len, worker);
				}
			} else if (pfd->revents & POLLHUP) {
				/*
				 * The worker closed its end of the pipe;
				 * stop polling it.
				 */
				active_workers--;
				pfd->fd = -1;
			} else if (pfd->revents & (POLLNVAL | POLLERR)) {
				die("error polling from checkout worker");
			}

			nr--;
		}
	}

	free(pfds);
}
static void write_items_sequentially(struct checkout *state)
{
	size_t i;

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		write_pc_item(pc_item, state);
		if (pc_item->status != PC_ITEM_COLLIDED)
			advance_progress_meter();
	}
}
int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
			  struct progress *progress, unsigned int *progress_cnt)
{
	int ret;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
		BUG("cannot run parallel checkout: uninitialized or already running");

	parallel_checkout.status = PC_RUNNING;
	parallel_checkout.progress = progress;
	parallel_checkout.progress_cnt = progress_cnt;

	if (parallel_checkout.nr < num_workers)
		num_workers = parallel_checkout.nr;

	if (num_workers <= 1 || parallel_checkout.nr < threshold) {
		write_items_sequentially(state);
	} else {
		struct pc_worker *workers = setup_workers(state, num_workers);
		gather_results_from_workers(workers, num_workers);
		finish_workers(workers, num_workers);
	}

	ret = handle_results(state);

	finish_parallel_checkout();
	return ret;
}
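/*
 * Note the fallback above: when fewer items than the threshold were
 * enqueued, or only one worker would be used, spawning subprocesses
 * costs more than it saves, so the queue is drained in-process via
 * write_items_sequentially().
 */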