#include "cache.h"
#include "config.h"
#include "entry.h"
#include "parallel-checkout.h"
#include "pkt-line.h"
#include "progress.h"
#include "run-command.h"
#include "sigchain.h"
#include "streaming.h"
#include "thread-utils.h"
#include "trace2.h"

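/*
 * Overview: the main process enqueues eligible cache entries, spawns a pool
 * of `git checkout--worker` subprocesses, and streams each worker a batch of
 * items over pkt-line. The workers write the files and report per-item
 * results back, which the main process polls for and saves. Items that could
 * not be written in parallel (e.g. due to path collisions) are retried
 * sequentially afterwards.
 */
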
struct pc_worker {
        struct child_process cp;
        size_t next_item_to_complete, nr_items_to_complete;
};

struct parallel_checkout {
        enum pc_status status;
        struct parallel_checkout_item *items; /* The parallel checkout queue. */
        size_t nr, alloc;
        struct progress *progress;
        unsigned int *progress_cnt;
};

static struct parallel_checkout parallel_checkout;

enum pc_status parallel_checkout_status(void)
{
        return parallel_checkout.status;
}

static const int DEFAULT_THRESHOLD_FOR_PARALLELISM = 100;
static const int DEFAULT_NUM_WORKERS = 1;

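/*
 * Resolve the effective worker count and parallelism threshold. The
 * GIT_TEST_CHECKOUT_WORKERS environment variable, when set, overrides both
 * the checkout.workers and checkout.thresholdForParallelism settings; a
 * worker count below 1, from either source, means one worker per online CPU.
 */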
void get_parallel_checkout_configs(int *num_workers, int *threshold)
{
        char *env_workers = getenv("GIT_TEST_CHECKOUT_WORKERS");

        if (env_workers && *env_workers) {
                if (strtol_i(env_workers, 10, num_workers)) {
                        die("invalid value for GIT_TEST_CHECKOUT_WORKERS: '%s'",
                            env_workers);
                }
                if (*num_workers < 1)
                        *num_workers = online_cpus();

                /* The test override skips the threshold check entirely. */
                *threshold = 0;
                return;
        }

        if (git_config_get_int("checkout.workers", num_workers))
                *num_workers = DEFAULT_NUM_WORKERS;
        else if (*num_workers < 1)
                *num_workers = online_cpus();

        if (git_config_get_int("checkout.thresholdForParallelism", threshold))
                *threshold = DEFAULT_THRESHOLD_FOR_PARALLELISM;
}

void init_parallel_checkout(void)
{
        if (parallel_checkout.status != PC_UNINITIALIZED)
                BUG("parallel checkout already initialized");

        parallel_checkout.status = PC_ACCEPTING_ENTRIES;
}

static void finish_parallel_checkout(void)
{
        if (parallel_checkout.status == PC_UNINITIALIZED)
                BUG("cannot finish parallel checkout: not initialized yet");

        free(parallel_checkout.items);
        memset(&parallel_checkout, 0, sizeof(parallel_checkout));
}

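/*
 * Decide whether an entry can be safely checked out by a parallel worker.
 * Only regular files whose conversion attributes can be applied without
 * coordination between processes, and whose serialized form fits in a single
 * pkt-line, are eligible; everything else falls back to the sequential code.
 */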
static int is_eligible_for_parallel_checkout(const struct cache_entry *ce,
                                             const struct conv_attrs *ca)
{
        enum conv_attrs_classification c;
        size_t packed_item_size;

        /*
         * Symlinks cannot be checked out in parallel as, in case of path
         * collision, they could racily replace leading directories of other
         * entries being checked out. Submodules are checked out in child
         * processes, which have their own parallel checkout queues.
         */
        if (!S_ISREG(ce->ce_mode))
                return 0;

        packed_item_size = sizeof(struct pc_item_fixed_portion) + ce->ce_namelen +
                (ca->working_tree_encoding ? strlen(ca->working_tree_encoding) : 0);

        /*
         * The amount of data we send to the workers per checkout item is
         * typically small (75~300B). So unless we find an insanely huge path
         * of 64KB, we should never reach the 65KB limit of one pkt-line. If
         * that does happen, we let the sequential code handle the item.
         */
        if (packed_item_size > LARGE_PACKET_DATA_MAX)
                return 0;

        c = classify_conv_attrs(ca);
        switch (c) {
        case CA_CLASS_INCORE:
                return 1;

        case CA_CLASS_INCORE_FILTER:
                /*
                 * It would be safe to allow concurrent instances of
                 * single-file smudge filters, like rot13, but we should not
                 * assume that all filters are parallel-process safe. So we
                 * don't allow this.
                 */
                return 0;

        case CA_CLASS_INCORE_PROCESS:
                /*
                 * The parallel queue and the delayed queue are not compatible,
                 * so they must be kept completely separated. And we can't tell
                 * if a long-running process will delay its response without
                 * actually asking it to perform the filtering. Therefore, this
                 * type of filter is not allowed in parallel checkout.
                 *
                 * Furthermore, there should only be one instance of the
                 * long-running process filter as we don't know how it is
                 * managing its own concurrency. So, spreading the entries that
                 * need such a filter among the parallel workers would require
                 * a lot more inter-process communication. We would probably
                 * have to designate a single process to interact with the
                 * filter and send all the necessary data to it, for each
                 * entry.
                 */
                return 0;

        case CA_CLASS_STREAMABLE:
                return 1;

        default:
                BUG("unsupported conv_attrs classification '%d'", c);
        }
}

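/*
 * Try to add an entry to the parallel checkout queue. Returns 0 on success
 * and -1 if the entry is not eligible or the queue is not accepting entries,
 * in which case the caller must check the entry out sequentially.
 */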
int enqueue_checkout(struct cache_entry *ce, struct conv_attrs *ca)
{
        struct parallel_checkout_item *pc_item;

        if (parallel_checkout.status != PC_ACCEPTING_ENTRIES ||
            !is_eligible_for_parallel_checkout(ce, ca))
                return -1;

        ALLOC_GROW(parallel_checkout.items, parallel_checkout.nr + 1,
                   parallel_checkout.alloc);

        pc_item = &parallel_checkout.items[parallel_checkout.nr];
        pc_item->ce = ce;
        memcpy(&pc_item->ca, ca, sizeof(pc_item->ca));
        pc_item->status = PC_ITEM_PENDING;
        pc_item->id = parallel_checkout.nr;
        parallel_checkout.nr++;

        return 0;
}

size_t pc_queue_size(void)
{
        return parallel_checkout.nr;
}

static void advance_progress_meter(void)
{
        if (parallel_checkout.progress) {
                (*parallel_checkout.progress_cnt)++;
                display_progress(parallel_checkout.progress,
                                 *parallel_checkout.progress_cnt);
        }
}

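/*
 * Post-process the queue after the workers (or the sequential fallback) have
 * run: store the collected stat() data in the corresponding cache entries,
 * retry collided entries sequentially, and report failures. Returns 0 if
 * every entry was written, nonzero otherwise.
 */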
static int handle_results(struct checkout *state)
{
        int ret = 0;
        size_t i;
        int have_pending = 0;

        /*
         * We first update the successfully written entries with the collected
         * stat() data, so that they can be found by mark_colliding_entries(),
         * in the next loop, when necessary.
         */
        for (i = 0; i < parallel_checkout.nr; i++) {
                struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
                if (pc_item->status == PC_ITEM_WRITTEN)
                        update_ce_after_write(state, pc_item->ce, &pc_item->st);
        }

        for (i = 0; i < parallel_checkout.nr; i++) {
                struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];

                switch (pc_item->status) {
                case PC_ITEM_WRITTEN:
                        /* Already handled */
                        break;
                case PC_ITEM_COLLIDED:
                        /*
                         * The entry could not be checked out due to a path
                         * collision with another entry. Since there can only
                         * be one entry of each colliding group on the disk, we
                         * could skip trying to check out this one and move on.
                         * However, this would leave the unwritten entries with
                         * null stat() fields in the index, which could
                         * potentially slow down subsequent operations that
                         * require refreshing it: git would not be able to
                         * trust st_size and would have to go to the filesystem
                         * to see if the contents match (see ie_modified()).
                         *
                         * Instead, let's pay the overhead only once, now, and
                         * call checkout_entry_ca() again for this file, to
                         * have its stat() data stored in the index. This also
                         * has the benefit of adding this entry and its
                         * colliding pair to the collision report message.
                         * Additionally, this overwriting behavior is consistent
                         * with what the sequential checkout does, so it doesn't
                         * add any extra overhead.
                         */
                        ret |= checkout_entry_ca(pc_item->ce, &pc_item->ca,
                                                 state, NULL, NULL);
                        advance_progress_meter();
                        break;
                case PC_ITEM_PENDING:
                        have_pending = 1;
                        /* fall through */
                case PC_ITEM_FAILED:
                        ret = -1;
                        break;
                default:
                        BUG("unknown checkout item status in parallel checkout");
                }
        }

        if (have_pending)
                error("parallel checkout finished with pending entries");

        return ret;
}

static int reset_fd(int fd, const char *path)
{
        if (lseek(fd, 0, SEEK_SET) != 0)
                return error_errno("failed to rewind descriptor of '%s'", path);
        if (ftruncate(fd, 0))
                return error_errno("failed to truncate file '%s'", path);
        return 0;
}

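/*
 * Write the blob contents for pc_item to the already-open fd, preferring the
 * streaming interface when a stream filter is available, and falling back to
 * in-core conversion (after rewinding and truncating fd) when streaming
 * fails. Returns 0 on success and a negative value on error.
 */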
static int write_pc_item_to_fd(struct parallel_checkout_item *pc_item, int fd,
                               const char *path)
{
        int ret;
        struct stream_filter *filter;
        struct strbuf buf = STRBUF_INIT;
        char *blob;
        unsigned long size;
        ssize_t wrote;

        /* Sanity check: this should have been validated at enqueue time. */
        assert(is_eligible_for_parallel_checkout(pc_item->ce, &pc_item->ca));

        filter = get_stream_filter_ca(&pc_item->ca, &pc_item->ce->oid);
        if (filter) {
                if (stream_blob_to_fd(fd, &pc_item->ce->oid, filter, 1)) {
                        /* On error, reset fd to try writing without streaming */
                        if (reset_fd(fd, path))
                                return -1;
                } else {
                        return 0;
                }
        }

        blob = read_blob_entry(pc_item->ce, &size);
        if (!blob)
                return error("cannot read object %s '%s'",
                             oid_to_hex(&pc_item->ce->oid), pc_item->ce->name);

        /*
         * checkout metadata is used to give context for external process
         * filters. Files requiring such filters are not eligible for parallel
         * checkout, so pass NULL. Note: if that changes, the metadata must also
         * be passed from the main process to the workers.
         */
        ret = convert_to_working_tree_ca(&pc_item->ca, pc_item->ce->name,
                                         blob, size, &buf, NULL);

        if (ret) {
                size_t newsize;
                free(blob);
                blob = strbuf_detach(&buf, &newsize);
                size = newsize;
        }

        wrote = write_in_full(fd, blob, size);
        free(blob);
        if (wrote < 0)
                return error("unable to write file '%s'", path);

        return 0;
}

static int close_and_clear(int *fd)
{
        int ret = 0;

        if (*fd >= 0) {
                ret = close(*fd);
                *fd = -1;
        }

        return ret;
}

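/*
 * Check out a single item: open the file with O_CREAT | O_EXCL so that an
 * already-existing path is detected as a collision rather than silently
 * overwritten, write and convert the contents, and record the resulting
 * status (and stat data) in pc_item for handle_results() to consume.
 */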
void write_pc_item(struct parallel_checkout_item *pc_item,
                   struct checkout *state)
{
        unsigned int mode = (pc_item->ce->ce_mode & 0100) ? 0777 : 0666;
        int fd = -1, fstat_done = 0;
        struct strbuf path = STRBUF_INIT;
        const char *dir_sep;

        strbuf_add(&path, state->base_dir, state->base_dir_len);
        strbuf_add(&path, pc_item->ce->name, pc_item->ce->ce_namelen);

        dir_sep = find_last_dir_sep(path.buf);

        /*
         * The leading dirs should have been already created by now. But, in
         * case of path collisions, one of the dirs could have been replaced by
         * a symlink (checked out after we enqueued this entry for parallel
         * checkout). Thus, we must check the leading dirs again.
         */
        if (dir_sep && !has_dirs_only_path(path.buf, dir_sep - path.buf,
                                           state->base_dir_len)) {
                pc_item->status = PC_ITEM_COLLIDED;
                trace2_data_string("pcheckout", NULL, "collision/dirname", path.buf);
                goto out;
        }

        fd = open(path.buf, O_WRONLY | O_CREAT | O_EXCL, mode);

        if (fd < 0) {
                if (errno == EEXIST || errno == EISDIR) {
                        /*
                         * Errors which probably represent a path collision.
                         * Suppress the error message and mark the item to be
                         * retried later, sequentially. ENOTDIR and ENOENT are
                         * also interesting, but the above has_dirs_only_path()
                         * call should have already caught these cases.
                         */
                        pc_item->status = PC_ITEM_COLLIDED;
                        trace2_data_string("pcheckout", NULL,
                                           "collision/basename", path.buf);
                } else {
                        error_errno("failed to open file '%s'", path.buf);
                        pc_item->status = PC_ITEM_FAILED;
                }
                goto out;
        }

        if (write_pc_item_to_fd(pc_item, fd, path.buf)) {
                /* Error was already reported. */
                pc_item->status = PC_ITEM_FAILED;
                close_and_clear(&fd);
                unlink(path.buf);
                goto out;
        }

        fstat_done = fstat_checkout_output(fd, state, &pc_item->st);

        if (close_and_clear(&fd)) {
                error_errno("unable to close file '%s'", path.buf);
                pc_item->status = PC_ITEM_FAILED;
                goto out;
        }

        if (state->refresh_cache && !fstat_done && lstat(path.buf, &pc_item->st) < 0) {
                error_errno("unable to stat just-written file '%s'", path.buf);
                pc_item->status = PC_ITEM_FAILED;
                goto out;
        }

        pc_item->status = PC_ITEM_WRITTEN;

out:
        strbuf_release(&path);
}

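/*
 * Wire format of one queued item, sent to a worker in a single pkt-line:
 * a struct pc_item_fixed_portion, followed by the working tree encoding name
 * (if any, not NUL-terminated), followed by the path (not NUL-terminated).
 */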
static void send_one_item(int fd, struct parallel_checkout_item *pc_item)
{
        size_t len_data;
        char *data, *variant;
        struct pc_item_fixed_portion *fixed_portion;
        const char *working_tree_encoding = pc_item->ca.working_tree_encoding;
        size_t name_len = pc_item->ce->ce_namelen;
        size_t working_tree_encoding_len = working_tree_encoding ?
                                           strlen(working_tree_encoding) : 0;

        /*
         * Any changes in the calculation of the message size must also be made
         * in is_eligible_for_parallel_checkout().
         */
        len_data = sizeof(struct pc_item_fixed_portion) + name_len +
                   working_tree_encoding_len;

        data = xmalloc(len_data);

        fixed_portion = (struct pc_item_fixed_portion *)data;
        fixed_portion->id = pc_item->id;
        fixed_portion->ce_mode = pc_item->ce->ce_mode;
        fixed_portion->crlf_action = pc_item->ca.crlf_action;
        fixed_portion->ident = pc_item->ca.ident;
        fixed_portion->name_len = name_len;
        fixed_portion->working_tree_encoding_len = working_tree_encoding_len;
        /*
         * We pad the unused bytes in the hash array because, otherwise,
         * Valgrind would complain about passing uninitialized bytes to a
         * write() syscall. The warning doesn't represent any real risk here,
         * but it could hinder the detection of actual errors.
         */
        oidcpy_with_padding(&fixed_portion->oid, &pc_item->ce->oid);

        variant = data + sizeof(*fixed_portion);
        if (working_tree_encoding_len) {
                memcpy(variant, working_tree_encoding, working_tree_encoding_len);
                variant += working_tree_encoding_len;
        }
        memcpy(variant, pc_item->ce->name, name_len);

        packet_write(fd, data, len_data);

        free(data);
}

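/*
 * Send items [start, start + nr) to a worker. SIGPIPE is ignored around the
 * writes so that a worker that dies mid-batch surfaces as a write error in
 * the main process instead of killing it; the final flush packet marks the
 * end of the batch for the worker.
 */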
static void send_batch(int fd, size_t start, size_t nr)
{
        size_t i;

        sigchain_push(SIGPIPE, SIG_IGN);
        for (i = 0; i < nr; i++)
                send_one_item(fd, &parallel_checkout.items[start + i]);
        packet_flush(fd);
        sigchain_pop(SIGPIPE);
}

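/*
 * Spawn the worker subprocesses and statically partition the queue among
 * them in contiguous batches, front-loading the remainder so that batch
 * sizes differ by at most one item.
 */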
static struct pc_worker *setup_workers(struct checkout *state, int num_workers)
{
        struct pc_worker *workers;
        int i, workers_with_one_extra_item;
        size_t base_batch_size, batch_beginning = 0;

        ALLOC_ARRAY(workers, num_workers);

        for (i = 0; i < num_workers; i++) {
                struct child_process *cp = &workers[i].cp;

                child_process_init(cp);
                cp->git_cmd = 1;
                cp->in = -1;
                cp->out = -1;
                cp->clean_on_exit = 1;
                strvec_push(&cp->args, "checkout--worker");
                if (state->base_dir_len)
                        strvec_pushf(&cp->args, "--prefix=%s", state->base_dir);
                if (start_command(cp))
                        die("failed to spawn checkout worker");
        }

        base_batch_size = parallel_checkout.nr / num_workers;
        workers_with_one_extra_item = parallel_checkout.nr % num_workers;

        for (i = 0; i < num_workers; i++) {
                struct pc_worker *worker = &workers[i];
                size_t batch_size = base_batch_size;

                /* distribute the extra work evenly */
                if (i < workers_with_one_extra_item)
                        batch_size++;

                send_batch(worker->cp.in, batch_beginning, batch_size);
                worker->next_item_to_complete = batch_beginning;
                worker->nr_items_to_complete = batch_size;

                batch_beginning += batch_size;
        }

        return workers;
}

static void finish_workers(struct pc_worker *workers, int num_workers)
{
        int i;

        /*
         * Close pipes before calling finish_command() to let the workers
         * exit asynchronously and avoid spending extra time on wait().
         */
        for (i = 0; i < num_workers; i++) {
                struct child_process *cp = &workers[i].cp;
                if (cp->in >= 0)
                        close(cp->in);
                if (cp->out >= 0)
                        close(cp->out);
        }

        for (i = 0; i < num_workers; i++) {
                int rc = finish_command(&workers[i].cp);
                if (rc > 128) {
                        /*
                         * For a normal non-zero exit, the worker should have
                         * already printed something useful to stderr. But a
                         * death by signal should be mentioned to the user.
                         */
                        error("checkout worker %d died of signal %d", i, rc - 128);
                }
        }

        free(workers);
}

static inline void assert_pc_item_result_size(int got, int exp)
{
        if (got != exp)
                BUG("wrong result size from checkout worker (got %dB, exp %dB)",
                    got, exp);
}

static void parse_and_save_result(const char *buffer, int len,
                                  struct pc_worker *worker)
{
        struct pc_item_result *res;
        struct parallel_checkout_item *pc_item;
        struct stat *st = NULL;

        if (len < PC_ITEM_RESULT_BASE_SIZE)
                BUG("too short result from checkout worker (got %dB, exp >=%dB)",
                    len, (int)PC_ITEM_RESULT_BASE_SIZE);

        res = (struct pc_item_result *)buffer;

        /*
         * Worker should send either the full result struct on success, or
         * just the base (i.e. no stat data), otherwise.
         */
        if (res->status == PC_ITEM_WRITTEN) {
                assert_pc_item_result_size(len, (int)sizeof(struct pc_item_result));
                st = &res->st;
        } else {
                assert_pc_item_result_size(len, (int)PC_ITEM_RESULT_BASE_SIZE);
        }

        if (!worker->nr_items_to_complete)
                BUG("received result from supposedly finished checkout worker");
        if (res->id != worker->next_item_to_complete)
                BUG("unexpected item id from checkout worker (got %"PRIuMAX", exp %"PRIuMAX")",
                    (uintmax_t)res->id, (uintmax_t)worker->next_item_to_complete);

        worker->next_item_to_complete++;
        worker->nr_items_to_complete--;

        pc_item = &parallel_checkout.items[res->id];
        pc_item->status = res->status;
        if (st)
                pc_item->st = *st;

        /* Collided items will be retried sequentially, so don't count them yet. */
        if (res->status != PC_ITEM_COLLIDED)
                advance_progress_meter();
}

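/*
 * poll() the workers' output pipes until every worker has either reported
 * all the results it owes us or hung up, saving each result as it arrives.
 */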
static void gather_results_from_workers(struct pc_worker *workers,
                                        int num_workers)
{
        int i, active_workers = num_workers;
        struct pollfd *pfds;

        CALLOC_ARRAY(pfds, num_workers);
        for (i = 0; i < num_workers; i++) {
                pfds[i].fd = workers[i].cp.out;
                pfds[i].events = POLLIN;
        }

        while (active_workers) {
                int nr = poll(pfds, num_workers, -1);

                if (nr < 0) {
                        if (errno == EINTR)
                                continue;
                        die_errno("failed to poll checkout workers");
                }

                for (i = 0; i < num_workers && nr > 0; i++) {
                        struct pc_worker *worker = &workers[i];
                        struct pollfd *pfd = &pfds[i];

                        if (!pfd->revents)
                                continue;

                        if (pfd->revents & POLLIN) {
                                int len = packet_read(pfd->fd, packet_buffer,
                                                      sizeof(packet_buffer), 0);

                                if (len < 0) {
                                        BUG("packet_read() returned negative value");
                                } else if (!len) {
                                        pfd->fd = -1;
                                        active_workers--;
                                } else {
                                        parse_and_save_result(packet_buffer,
                                                              len, worker);
                                }
                        } else if (pfd->revents & POLLHUP) {
                                pfd->fd = -1;
                                active_workers--;
                        } else if (pfd->revents & (POLLNVAL | POLLERR)) {
                                die("error polling from checkout worker");
                        }

                        nr--;
                }
        }

        free(pfds);
}

static void write_items_sequentially(struct checkout *state)
{
        size_t i;

        for (i = 0; i < parallel_checkout.nr; i++) {
                struct parallel_checkout_item *pc_item = &parallel_checkout.items[i];
                write_pc_item(pc_item, state);
                if (pc_item->status != PC_ITEM_COLLIDED)
                        advance_progress_meter();
        }
}

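/*
 * Write all enqueued items and return the result of handle_results(). The
 * expected calling sequence, for illustration (checkout_entry() normally
 * drives the enqueueing step):
 *
 *      init_parallel_checkout();
 *      for each entry: if (enqueue_checkout(ce, ca)) write it sequentially;
 *      run_parallel_checkout(state, num_workers, threshold, progress, &cnt);
 *
 * If fewer than `threshold` items were enqueued, or only one worker would be
 * used, everything is written sequentially in the main process.
 */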
int run_parallel_checkout(struct checkout *state, int num_workers, int threshold,
                          struct progress *progress, unsigned int *progress_cnt)
{
        int ret;

        if (parallel_checkout.status != PC_ACCEPTING_ENTRIES)
                BUG("cannot run parallel checkout: uninitialized or already running");

        parallel_checkout.status = PC_RUNNING;
        parallel_checkout.progress = progress;
        parallel_checkout.progress_cnt = progress_cnt;

        if (parallel_checkout.nr < num_workers)
                num_workers = parallel_checkout.nr;

        if (num_workers <= 1 || parallel_checkout.nr < threshold) {
                write_items_sequentially(state);
        } else {
                struct pc_worker *workers = setup_workers(state, num_workers);
                gather_results_from_workers(workers, num_workers);
                finish_workers(workers, num_workers);
        }

        ret = handle_results(state);

        finish_parallel_checkout();

        return ret;
}