#include "cache.h"
#include "config.h"
#include "entry.h"
#include "parallel-checkout.h"
#include "pkt-line.h"
#include "progress.h"
#include "run-command.h"
#include "sigchain.h"
#include "streaming.h"
#include "thread-utils.h"
#include "trace2.h"
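
/*
 * Tracks the state of one checkout worker: its child process handle and
 * the contiguous window of queue items it has yet to report back on.
 */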
struct pc_worker {
	struct child_process cp;
	size_t next_item_to_complete, nr_items_to_complete;
};

struct parallel_checkout {
	enum pc_status status;
	struct parallel_checkout_item *items; /* The parallel checkout queue. */
	size_t nr, alloc;
	struct progress *progress;
	unsigned int *progress_cnt;
};

static struct parallel_checkout parallel_checkout;

enum pc_status parallel_checkout_status(void)
{
	return parallel_checkout.status;
}

static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;
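
/*
 * Resolve the effective number of workers and the parallelism threshold.
 * The GIT_TEST_CHECKOUT_WORKERS environment variable (used by the test
 * suite) takes precedence and disables the threshold; otherwise we read
 * checkout.workers and checkout.thresholdForParallelism, treating any
 * workers value below 1 as "one worker per online CPU".
 */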
void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
	char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

	if (env_workers && *env_workers) {
		if (strtol_i(env_workers, 10, num_workers)) {
			die(_("invalid value for '%s': '%s'"),
			    "GIT_TEST_CHECKOUT_WORKERS", env_workers);
		}
		if (*num_workers < 1)
			*num_workers = online_cpus();

		*threshold = 0;
		return;
	}

	if (git_config_get_int("checkout.workers", num_workers))
		*num_workers = DEFAULT_NUM_WORKERS;
	else if (*num_workers < 1)
		*num_workers = online_cpus();

	if (git_config_get_int("checkout.thresholdForParallelism", threshold))
		*threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}
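
/*
 * A sketch of the expected calling sequence (the real call sites live in
 * the checkout machinery; the pseudocode below is illustrative only):
 *
 *	init_parallel_checkout();
 *	for each entry:
 *		if (enqueue_checkout(ce, ca, &counter))
 *			check the entry out sequentially;
 *	run_parallel_checkout(state, num_workers, threshold, progress, &cnt);
 */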
void init_parallel_checkout(void)
{
	if (parallel_checkout.status != PC_UNINITIALIZED)
		BUG("parallel checkout already initialized");

	parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}

static void finish_parallel_checkout(void)
{
	if (parallel_checkout.status == PC_UNINITIALIZED)
		BUG("cannot finish parallel checkout: not initialized yet");

	free(parallel_checkout.items);
	memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}

static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
					     const struct conv_attrs *ca)
{
	enum conv_attrs_classification c;
	size_t packed_item_size;

	/*
	 * Symlinks cannot be checked out in parallel as, in case of path
	 * collision, they could racily replace leading directories of other
	 * entries being checked out. Submodules are checked out in child
	 * processes, which have their own parallel checkout queues.
	 */
	if (!S_ISREG(ce->ce_mode))
		return 0;

	packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
		(ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

	/*
	 * The amount of data we send to the workers per checkout item is
	 * typically small (75~300B). So unless we find an insanely huge path
	 * of 64KB, we should never reach the 65KB limit of one pkt-line. If
	 * that does happen, we let the sequential code handle the item.
	 */
	if (packed_item_size > LARGE_PACKET_DATA_MAX)
		return 0;

	c = classify_conv_attrs(ca);
	switch (c) {
	case CA_CLASS_INCORE:
		return 1;

	case CA_CLASS_INCORE_FILTER:
		/*
		 * It would be safe to allow concurrent instances of
		 * single-file smudge filters, like rot13, but we should not
		 * assume that all filters are parallel-process safe. So we
		 * don't allow this.
		 */
		return 0;

	case CA_CLASS_INCORE_PROCESS:
		/*
		 * The parallel queue and the delayed queue are not compatible,
		 * so they must be kept completely separated. And we can't tell
		 * if a long-running process will delay its response without
		 * actually asking it to perform the filtering. Therefore, this
		 * type of filter is not allowed in parallel checkout.
		 *
		 * Furthermore, there should only be one instance of the
		 * long-running process filter as we don't know how it is
		 * managing its own concurrency. So, spreading the entries that
		 * require such a filter among the parallel workers would
		 * require a lot more inter-process communication. We would
		 * probably have to designate a single process to interact with
		 * the filter and send all the necessary data to it, for each
		 * entry.
		 */
		return 0;

	case CA_CLASS_STREAMABLE:
		return 1;

	default:
		BUG("unsupported conv_attrs classification '%d'", c);
	}
}

int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca,
		     int *checkout_counter)
{
	struct parallel_checkout_item *pc_item;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
	    !is_eligible_for_parallel_checkout(ce, ca))
		return -1;

	ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
		   parallel_checkout.alloc);

	pc_item = &parallel_checkout.items[parallel_checkout.nr];
	pc_item->ce = ce;
	memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
	pc_item->status = PC_ITEM_PENDING;
	pc_item->id = parallel_checkout.nr;
	pc_item->checkout_counter = checkout_counter;
	parallel_checkout.nr++;

	return 0;
}

size_t pc_queue_size(void)
{
	return parallel_checkout.nr;
}

static void advance_progress_meter(void)
{
	if (parallel_checkout.progress) {
		(*parallel_checkout.progress_cnt)++;
		display_progress(parallel_checkout.progress,
				 *parallel_checkout.progress_cnt);
	}
}
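
/*
 * After all items have been processed (in parallel or sequentially), walk
 * the queue once more: bump the checkout counters of written entries,
 * retry collided ones sequentially, and report anything still pending or
 * failed.
 */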
static int handle_results(struct checkout *state)
{
	int ret = 0;
	size_t i;
	int have_pending = 0;

	/*
	 * We first update the successfully written entries with the collected
	 * stat() data, so that they can be found by mark_colliding_entries(),
	 * in the next loop, when necessary.
	 */
	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		if (pc_item->status == PC_ITEM_WRITTEN)
			update_ce_after_write(state, pc_item->ce, &pc_item->st);
	}

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

		switch (pc_item->status) {
		case PC_ITEM_WRITTEN:
			if (pc_item->checkout_counter)
				(*pc_item->checkout_counter)++;
			break;
		case PC_ITEM_COLLIDED:
			/*
			 * The entry could not be checked out due to a path
			 * collision with another entry. Since there can only
			 * be one entry of each colliding group on the disk, we
			 * could skip trying to check out this one and move on.
			 * However, this would leave the unwritten entries with
			 * null stat() fields in the index, which could
			 * potentially slow down subsequent operations that
			 * require refreshing it: git would not be able to
			 * trust st_size and would have to go to the filesystem
			 * to see if the contents match (see ie_modified()).
			 *
			 * Instead, let's pay the overhead only once, now, and
			 * call checkout_entry_ca() again for this file, to
			 * have its stat() data stored in the index. This also
			 * has the benefit of adding this entry and its
			 * colliding pair to the collision report message.
			 * Additionally, this overwriting behavior is consistent
			 * with what the sequential checkout does, so it doesn't
			 * add any extra overhead.
			 */
			ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
						 state, NULL,
						 pc_item->checkout_counter);
			advance_progress_meter();
			break;
		case PC_ITEM_PENDING:
			have_pending = 1;
			/* fall through */
		case PC_ITEM_FAILED:
			ret = -1;
			break;
		default:
			BUG("unknown checkout item status in parallel checkout");
		}
	}

	if (have_pending)
		error("parallel checkout finished with pending entries");

	return ret;
}

static int reset_fd(int fd, const char *path)
{
	if (lseek(fd, 0, SEEK_SET) != 0)
		return error_errno("failed to rewind descriptor of '%s'", path);
	if (ftruncate(fd, 0))
		return error_errno("failed to truncate file '%s'", path);
	return 0;
}
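
/*
 * Write the blob contents of pc_item to fd. We first try the streaming
 * machinery, which filters and writes the blob without loading it entirely
 * in core. If no stream filter is available, or streaming fails midway, we
 * rewind and truncate fd (see reset_fd() above) and fall back to the
 * in-core conversion path.
 */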
static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
			       const char *path)
{
	int ret;
	struct stream_filter *filter;
	struct strbuf buf = STRBUF_INIT;
	char *blob;
	size_t size;
	ssize_t wrote;

	/* Sanity check */
	assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

	filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
	if (filter) {
		if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
			/* On error, reset fd to try writing without streaming */
			if (reset_fd(fd, path))
				return -1;
		} else {
			return 0;
		}
	}

	blob = read_blob_entry(pc_item->ce, &size);
	if (!blob)
		return error("cannot read object %s '%s'",
			     oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

	/*
	 * Checkout metadata is used to give context for external process
	 * filters. Files requiring such filters are not eligible for parallel
	 * checkout, so pass NULL. Note: if that changes, the metadata must also
	 * be passed from the main process to the workers.
	 */
	ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
					 blob, size, &buf, NULL);

	if (ret) {
		size_t newsize;
		free(blob);
		blob = strbuf_detach(&buf, &newsize);
		size = newsize;
	}

	wrote = write_in_full(fd, blob, size);
	free(blob);
	if (wrote < 0)
		return error("unable to write file '%s'", path);

	return 0;
}

static int close_and_clear(int *fd)
{
	int ret = 0;

	if (*fd >= 0) {
		ret = close(*fd);
		*fd = -1;
	}

	return ret;
}
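
/*
 * Check out one queue item: re-validate the leading directories (a
 * colliding symlink may have been checked out after this entry was
 * enqueued), create the file with O_CREAT | O_EXCL so that basename
 * collisions surface as EEXIST/EISDIR, write the contents, and collect
 * the stat() data that handle_results() will later store in the index.
 */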
void write_pc_item(struct parallel_checkout_item *pc_item,
		   struct checkout *state)
{
	unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
	int fd = -1, fstat_done = 0;
	struct strbuf path = STRBUF_INIT;
	const char *dir_sep;

	strbuf_add(&path, state->base_dir, state->base_dir_len);
	strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

	dir_sep = find_last_dir_sep(path.buf);

	/*
	 * The leading dirs should have been already created by now. But, in
	 * case of path collisions, one of the dirs could have been replaced by
	 * a symlink (checked out after we enqueued this entry for parallel
	 * checkout). Thus, we must check the leading dirs again.
	 */
	if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
					   state->base_dir_len)) {
		pc_item->status = PC_ITEM_COLLIDED;
		trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
		goto out;
	}

	fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

	if (fd < 0) {
		if (errno == EEXIST || errno == EISDIR) {
			/*
			 * Errors which probably represent a path collision.
			 * Suppress the error message and mark the item to be
			 * retried later, sequentially. ENOTDIR and ENOENT are
			 * also interesting, but the above has_dirs_only_path()
			 * call should have already caught these cases.
			 */
			pc_item->status = PC_ITEM_COLLIDED;
			trace2_data_string("pcheckout", NULL,
					   "collision/basename", path.buf);
		} else {
			error_errno("failed to open file '%s'", path.buf);
			pc_item->status = PC_ITEM_FAILED;
		}
		goto out;
	}

	if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
		/* Error was already reported. */
		pc_item->status = PC_ITEM_FAILED;
		close_and_clear(&fd);
		unlink(path.buf);
		goto out;
	}

	fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

	if (close_and_clear(&fd)) {
		error_errno("unable to close file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
		error_errno("unable to stat just-written file '%s'", path.buf);
		pc_item->status = PC_ITEM_FAILED;
		goto out;
	}

	pc_item->status = PC_ITEM_WRITTEN;

out:
	strbuf_release(&path);
}
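
/*
 * Send one item down the pkt-line channel to a worker. The payload layout
 * is:
 *
 *	| struct pc_item_fixed_portion | encoding name (optional) | path |
 *
 * where the two trailing variable-length fields are not NUL-terminated;
 * their sizes are given by working_tree_encoding_len and name_len in the
 * fixed portion.
 */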
static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
	size_t len_data;
	char *data, *variant;
	struct pc_item_fixed_portion *fixed_portion;
	const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
	size_t name_len = pc_item->ce->ce_namelen;
	size_t working_tree_encoding_len = working_tree_encoding ?
					   strlen(working_tree_encoding) : 0;

	/*
	 * Any changes in the calculation of the message size must also be made
	 * in is_eligible_for_parallel_checkout().
	 */
	len_data = sizeof(struct pc_item_fixed_portion) + name_len +
		   working_tree_encoding_len;

	data = xmalloc(len_data);

	fixed_portion = (struct pc_item_fixed_portion *)data;
	fixed_portion->id = pc_item->id;
	fixed_portion->ce_mode = pc_item->ce->ce_mode;
	fixed_portion->crlf_action = pc_item->ca.crlf_action;
	fixed_portion->ident = pc_item->ca.ident;
	fixed_portion->name_len = name_len;
	fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
	/*
	 * We pad the unused bytes in the hash array because, otherwise,
	 * Valgrind would complain about passing uninitialized bytes to a
	 * write() syscall. The warning doesn't represent any real risk here,
	 * but it could hinder the detection of actual errors.
	 */
	oidcpy_with_padding(&fixed_portion->oid, &pc_item->ce->oid);

	variant = data + sizeof(*fixed_portion);
	if (working_tree_encoding_len) {
		memcpy(variant, working_tree_encoding, working_tree_encoding_len);
		variant += working_tree_encoding_len;
	}
	memcpy(variant, pc_item->ce->name, name_len);

	packet_write(fd, data, len_data);

	free(data);
}

static void send_batch(int fd, size_t start, size_t nr)
{
	size_t i;
	sigchain_push(SIGPIPE, SIG_IGN);
	for (i = 0; i < nr; i++)
		send_one_item(fd, &parallel_checkout.items[start + i]);
	packet_flush(fd);
	sigchain_pop(SIGPIPE);
}
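
/*
 * Spawn the workers and distribute the queue among them in contiguous
 * batches of floor(nr / num_workers) items, with the remainder spread one
 * extra item each over the first workers. E.g. 11 items across 3 workers
 * yield batches of 4, 4, and 3.
 */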
static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
	struct pc_worker *workers;
	int i, workers_with_one_extra_item;
	size_t base_batch_size, batch_beginning = 0;

	ALLOC_ARRAY(workers, num_workers);

	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;

		child_process_init(cp);
		cp->git_cmd = 1;
		cp->in = -1;
		cp->out = -1;
		cp->clean_on_exit = 1;
		strvec_push(&cp->args, "checkout--worker");
		if (state->base_dir_len)
			strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
		if (start_command(cp))
			die("failed to spawn checkout worker");
	}

	base_batch_size = parallel_checkout.nr / num_workers;
	workers_with_one_extra_item = parallel_checkout.nr % num_workers;

	for (i = 0; i < num_workers; i++) {
		struct pc_worker *worker = &workers[i];
		size_t batch_size = base_batch_size;

		/* distribute the extra work evenly */
		if (i < workers_with_one_extra_item)
			batch_size++;

		send_batch(worker->cp.in, batch_beginning, batch_size);
		worker->next_item_to_complete = batch_beginning;
		worker->nr_items_to_complete = batch_size;

		batch_beginning += batch_size;
	}

	return workers;
}

static void finish_workers(struct pc_worker *workers, int num_workers)
{
	int i;

	/*
	 * Close pipes before calling finish_command() to let the workers
	 * exit asynchronously and avoid spending extra time on wait().
	 */
	for (i = 0; i < num_workers; i++) {
		struct child_process *cp = &workers[i].cp;
		if (cp->in >= 0)
			close(cp->in);
		if (cp->out >= 0)
			close(cp->out);
	}

	for (i = 0; i < num_workers; i++) {
		int rc = finish_command(&workers[i].cp);
		if (rc > 128) {
			/*
			 * For a normal non-zero exit, the worker should have
			 * already printed something useful to stderr. But a
			 * death by signal should be mentioned to the user.
			 */
			error("checkout worker %d died of signal %d", i, rc - 128);
		}
	}

	free(workers);
}

static inline void assert_pc_item_result_size(int got, int exp)
{
	if (got != exp)
		BUG("wrong result size from checkout worker (got %dB, exp %dB)",
		    got, exp);
}

static void parse_and_save_result(const char *buffer, int len,
				  struct pc_worker *worker)
{
	struct pc_item_result *res;
	struct parallel_checkout_item *pc_item;
	struct stat *st = NULL;

	if (len < PC_ITEM_RESULT_BASE_SIZE)
		BUG("too short result from checkout worker (got %dB, exp >=%dB)",
		    len, (int)PC_ITEM_RESULT_BASE_SIZE);

	res = (struct pc_item_result *)buffer;

	/*
	 * Worker should send either the full result struct on success, or
	 * just the base (i.e. no stat data), otherwise.
	 */
	if (res->status == PC_ITEM_WRITTEN) {
		assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
		st = &res->st;
	} else {
		assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
	}

	if (!worker->nr_items_to_complete)
		BUG("received result from supposedly finished checkout worker");
	if (res->id != worker->next_item_to_complete)
		BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
		    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

	worker->next_item_to_complete++;
	worker->nr_items_to_complete--;

	pc_item = &parallel_checkout.items[res->id];
	pc_item->status = res->status;
	if (st)
		pc_item->st = *st;

	if (res->status != PC_ITEM_COLLIDED)
		advance_progress_meter();
}
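
/*
 * poll() the workers until they all finish. Each worker reports its
 * results over pkt-lines, in the same order the items were sent, which is
 * what allows parse_and_save_result() to match incoming ids against
 * next_item_to_complete. A flush packet (or a hangup) marks a worker as
 * done.
 */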
static void gather_results_from_workers(struct pc_worker *workers,
					int num_workers)
{
	int i, active_workers = num_workers;
	struct pollfd *pfds;

	CALLOC_ARRAY(pfds, num_workers);
	for (i = 0; i < num_workers; i++) {
		pfds[i].fd = workers[i].cp.out;
		pfds[i].events = POLLIN;
	}

	while (active_workers) {
		int nr = poll(pfds, num_workers, -1);

		if (nr < 0) {
			if (errno == EINTR)
				continue;
			die_errno("failed to poll checkout workers");
		}

		for (i = 0; i < num_workers && nr > 0; i++) {
			struct pc_worker *worker = &workers[i];
			struct pollfd *pfd = &pfds[i];

			if (!pfd->revents)
				continue;

			if (pfd->revents & POLLIN) {
				int len = packet_read(pfd->fd, packet_buffer,
						      sizeof(packet_buffer), 0);

				if (len < 0) {
					BUG("packet_read() returned negative value");
				} else if (!len) {
					/* The worker is done with its batch. */
					pfd->fd = -1;
					active_workers--;
				} else {
					parse_and_save_result(packet_buffer,
							      len, worker);
				}
			} else if (pfd->revents & POLLHUP) {
				pfd->fd = -1;
				active_workers--;
			} else if (pfd->revents & (POLLNVAL | POLLERR)) {
				die("error polling from checkout worker");
			}

			nr--;
		}
	}

	free(pfds);
}

static void write_items_sequentially(struct checkout *state)
{
	size_t i;

	for (i = 0; i < parallel_checkout.nr; i++) {
		struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
		write_pc_item(pc_item, state);
		if (pc_item->status != PC_ITEM_COLLIDED)
			advance_progress_meter();
	}
}
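
/*
 * If we have at most one effective worker, or fewer items than the
 * threshold, the pkt-line round trip is not worth it and the items are
 * written sequentially in-process; otherwise the queue is split among
 * worker processes. Either way, handle_results() does the final
 * accounting and the collision retries.
 */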
int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
			  struct progress *progress, unsigned int *progress_cnt)
{
	int ret;

	if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
		BUG("cannot run parallel checkout: uninitialized or already running");

	parallel_checkout.status = PC_RUNNING;
	parallel_checkout.progress = progress;
	parallel_checkout.progress_cnt = progress_cnt;

	if (parallel_checkout.nr < num_workers)
		num_workers = parallel_checkout.nr;

	if (num_workers <= 1 || parallel_checkout.nr < threshold) {
		write_items_sequentially(state);
	} else {
		struct pc_worker *workers = setup_workers(state, num_workers);
		gather_results_from_workers(workers, num_workers);
		finish_workers(workers, num_workers);
	}

	ret = handle_results(state);

	finish_parallel_checkout();
	return ret;
}