fetch-pack.c (alt-git.git, blob adc1b68dd3ae87e6ff7501e6f3f3760c7b923d5e)
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec_cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;

/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)
#define COMMON		(1U << 1)
#define COMMON_REF	(1U << 2)
#define SEEN		(1U << 3)
#define POPPED		(1U << 4)
#define ALTERNATE	(1U << 5)
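/*
 * Roughly: COMPLETE marks objects known to be reachable from our own refs,
 * COMMON marks commits the remote is known to have, COMMON_REF marks remote
 * ref tips that are already complete locally, SEEN and POPPED track the
 * negotiation walk through rev_list, and ALTERNATE marks tips borrowed from
 * alternate object stores.
 */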
static int marked;

/*
 * After sending this many "have"s, if we do not get any new ACK we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
static struct prio_queue rev_list = { compare_commits_by_commit_date };
static int non_common_revs, multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;
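/*
 * allow_unadvertised_object_request is filled in from the capabilities the
 * server advertises (allow-tip-sha1-in-want, allow-reachable-sha1-in-want)
 * in do_fetch_pack() below, and consulted in filter_refs() when a sought
 * entry names a raw object id.
 */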
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;

	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const char *refname,
				const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(oid);

	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}
static void for_each_cached_alternate(void (*cb)(struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(cache.items[i]);
}
static void rev_list_push(struct commit *commit, int mark)
{
	if (!(commit->object.flags & mark)) {
		commit->object.flags |= mark;

		if (parse_commit(commit))
			return;

		prio_queue_put(&rev_list, commit);

		if (!(commit->object.flags & COMMON))
			non_common_revs++;
	}
}
static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		rev_list_push((struct commit *)o, SEEN);

	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(refname, oid);
}
static int clear_marks(const char *refname, const struct object_id *oid,
		       int flag, void *cb_data)
{
	struct object *o = deref_tag(parse_object(oid), refname, 0);

	if (o && o->type == OBJ_COMMIT)
		clear_commit_marks((struct commit *)o,
				   COMMON | COMMON_REF | SEEN | POPPED);
	return 0;
}
/*
 * This function marks a rev and its ancestors as common.
 * In some cases, it is desirable to mark only the ancestors (for example
 * when only the server does not yet know that they are common).
 */
static void mark_common(struct commit *commit,
		int ancestors_only, int dont_parse)
{
	if (commit != NULL && !(commit->object.flags & COMMON)) {
		struct object *o = (struct object *)commit;

		if (!ancestors_only)
			o->flags |= COMMON;

		if (!(o->flags & SEEN))
			rev_list_push(commit, SEEN);
		else {
			struct commit_list *parents;

			if (!ancestors_only && !(o->flags & POPPED))
				non_common_revs--;
			if (!o->parsed && !dont_parse)
				if (parse_commit(commit))
					return;

			for (parents = commit->parents;
					parents;
					parents = parents->next)
				mark_common(parents->item, 0, dont_parse);
		}
	}
}
/*
 * Get the next rev to send, ignoring the common.
 */
static const struct object_id *get_rev(void)
{
	struct commit *commit = NULL;

	while (commit == NULL) {
		unsigned int mark;
		struct commit_list *parents;

		if (rev_list.nr == 0 || non_common_revs == 0)
			return NULL;

		commit = prio_queue_get(&rev_list);
		parse_commit(commit);
		parents = commit->parents;

		commit->object.flags |= POPPED;
		if (!(commit->object.flags & COMMON))
			non_common_revs--;

		if (commit->object.flags & COMMON) {
			/* do not send "have", and ignore ancestors */
			commit = NULL;
			mark = COMMON | SEEN;
		} else if (commit->object.flags & COMMON_REF)
			/* send "have", and ignore ancestors */
			mark = COMMON | SEEN;
		else
			/* send "have", also for its ancestors */
			mark = SEEN;

		while (parents) {
			if (!(parents->item->object.flags & SEEN))
				rev_list_push(parents->item, mark);
			if (mark & COMMON)
				mark_common(parents->item, 1, 0);
			parents = parents->next;
		}
	}

	return &commit->object.oid;
}
enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};
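/*
 * In rough protocol terms: NAK means the server found nothing new in this
 * batch of "have"s; a bare ACK (without multi_ack) means it found a common
 * commit and will send the pack; "continue", "common" and "ready" are the
 * multi_ack/multi_ack_detailed variants parsed by get_ack() below, with
 * "ready" meaning the server has enough information to generate the pack.
 */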
static void consume_shallow_list(struct fetch_pack_args *args, int fd)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		char *line;
		while ((line = packet_read_line(fd, NULL))) {
			if (starts_with(line, "shallow "))
				continue;
			if (starts_with(line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
	}
}
static enum ack_type get_ack(int fd, struct object_id *result_oid)
{
	int len;
	char *line = packet_read_line(fd, &len);
	const char *arg;

	if (!line)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	if (!strcmp(line, "NAK"))
		return NAK;
	if (skip_prefix(line, "ACK ", &arg)) {
		if (!get_oid_hex(arg, result_oid)) {
			arg += 40;
			len -= arg - line;
			if (len < 1)
				return ACK;
			if (strstr(arg, "continue"))
				return ACK_continue;
			if (strstr(arg, "common"))
				return ACK_common;
			if (strstr(arg, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	if (skip_prefix(line, "ERR ", &arg))
		die(_("remote error: %s"), arg);
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
}
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else
		write_or_die(fd, buf->buf, buf->len);
}

static void insert_one_alternate_object(struct object *obj)
{
	rev_list_insert_ref(NULL, &obj->oid);
}
#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
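/*
 * next_flush() grows the "have" batch size: over stateless RPC it doubles
 * up to LARGE_FLUSH and then grows by roughly 10% per round; over a
 * bidirectional connection it doubles up to PIPESAFE_FLUSH and then grows
 * linearly by PIPESAFE_FLUSH, presumably to avoid writing too far ahead of
 * the reader on a pipe.
 */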
static int next_flush(struct fetch_pack_args *args, int count)
{
	if (args->stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}
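/*
 * find_common() drives the negotiation with upload-pack.  An illustrative
 * (abbreviated) exchange looks roughly like this:
 *
 *   C: want <oid> multi_ack_detailed side-band-64k thin-pack ofs-delta ...
 *   C: want <oid>
 *   C: <flush>
 *   C: have <oid> ... have <oid>   (batches sized by next_flush())
 *   C: <flush>
 *   S: ACK <oid> common / ACK <oid> ready / NAK
 *   C: done
 *   S: ACK <oid> or NAK
 *
 * The client keeps one window of "have"s in flight ahead of the server's
 * answers and gives up after MAX_IN_VAIN unacknowledged "have"s once at
 * least one common commit has been found.
 */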
static int find_common(struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	if (marked)
		for_each_ref(clear_marks, NULL);
	marked = 1;

	for_each_ref(rev_list_insert_ref_oid, NULL);
	for_each_cached_alternate(insert_one_alternate_object);

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 */
		if (((o = lookup_object(remote->hash)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}
	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow())
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice)
		packet_buf_write(&req_buf, "filter %s",
				 args->filter_options.filter_spec);
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		char *line;
		const char *arg;
		struct object_id oid;

		send_request(args, fd[1], &req_buf);
		while ((line = packet_read_line(fd[0], NULL))) {
			if (skip_prefix(line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), line);
				register_shallow(&oid);
				continue;
			}
			if (skip_prefix(line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), line);
				if (!lookup_object(oid.hash))
					die(_("object not found: %s"), line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(&oid))
					die(_("error in object: %s"), line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}
	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = get_rev())) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, fd[0]);
			do {
				ack = get_ack(fd[0], result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(result_oid);
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !(commit->object.flags & COMMON)) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					mark_common(commit, 0, 1);
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready) {
						clear_prio_queue(&rev_list);
						got_ready = 1;
					}
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
		}
	}
done:
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, fd[0]);
	while (flushes || multi_ack) {
		int ack = get_ack(fd[0], result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(&t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int tip_oids_contain(struct oidset *tip_oids,
			    struct ref *unmatched, struct ref *newlist,
			    const struct object_id *id)
{
	/*
	 * Note that this only looks at the ref lists the first time it's
	 * called. This works out in filter_refs() because even though it may
	 * add to "newlist" between calls, the additions will always be for
	 * oids that are already in the set.
	 */
	if (!tip_oids->map.map.tablesize) {
		add_refs_to_oidset(tip_oids, unmatched);
		add_refs_to_oidset(tip_oids, newlist);
	}
	return oidset_contains(tip_oids, id);
}
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0))
			; /* trash */
		else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
		}

		if (!keep && args->fetch_all &&
		    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
			keep = 1;

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		struct object_id oid;
		const char *p;

		ref = sought[i];
		if (ref->match_status != REF_NOT_MATCHED)
			continue;
		if (parse_oid_hex(ref->name, &oid, &p) ||
		    *p != '\0' ||
		    oidcmp(&oid, &ref->old_oid))
			continue;

		if ((allow_unadvertised_object_request &
		     (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
		    tip_oids_contain(&tip_oids, unmatched, newlist,
				     &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	for (ref = unmatched; ref; ref = next) {
		next = ref->next;
		free(ref);
	}

	*refs = newlist;
}
static void mark_alternate_complete(struct object *obj)
{
	mark_complete(&obj->oid);
}

struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};
/*
 * Insert each loose object into the set; stop enumerating (by returning 1)
 * as soon as more loose objects have been seen than there are refs to
 * check, in which case the caller falls back to per-ref lookups.
 */
static int add_loose_objects_to_set(const struct object_id *oid,
				    const char *path,
				    void *data)
{
	struct loose_object_iter *iter = data;
	oidset_insert(iter->loose_object_set, oid);
	if (iter->refs == NULL)
		return 1;

	iter->refs = iter->refs->next;
	return 0;
}
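/*
 * Check which of the advertised refs we already have locally: record a
 * cutoff based on the newest such commit, mark our own refs (and alternate
 * tips) COMPLETE, push complete remote tips onto the negotiation queue, and
 * filter the ref list down to what was asked for.  Returns nonzero when
 * every remaining ref is already complete locally, i.e. nothing needs
 * fetching.
 */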
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs,
			    struct ref **sought, int nr_sought)
{
	struct ref *ref;
	int retval;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	struct oidset loose_oid_set = OIDSET_INIT;
	int use_oidset = 0;
	struct loose_object_iter iter = {&loose_oid_set, *refs};

	/* Enumerate all loose objects, unless they outnumber the refs we are checking. */
	use_oidset = !for_each_loose_object(add_loose_objects_to_set,
					    &iter, 0);

	save_commit_buffer = 0;

	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		unsigned int flags = OBJECT_INFO_QUICK;

		if (use_oidset &&
		    !oidset_contains(&loose_oid_set, &ref->old_oid)) {
			/*
			 * I know this does not exist in the loose form,
			 * so check if it exists in a non-loose form.
			 */
			flags |= OBJECT_INFO_IGNORE_LOOSE;
		}

		if (!has_object_file_with_flags(&ref->old_oid, flags))
			continue;
		o = parse_object(&ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	oidset_clear(&loose_oid_set);

	if (!args->no_dependents) {
		if (!args->deepen) {
			for_each_ref(mark_complete_oid, NULL);
			for_each_cached_alternate(mark_alternate_complete);
			commit_list_sort_by_date(&complete);
			if (cutoff)
				mark_recent_complete_commits(args, cutoff);
		}

		/*
		 * Mark all complete remote refs as common refs.
		 * Don't mark them common yet; the server has to be told so first.
		 */
		for (ref = *refs; ref; ref = ref->next) {
			struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
						     NULL, 0);

			if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
				continue;

			if (!(o->flags & SEEN)) {
				rev_list_push((struct commit *)o, COMMON_REF | SEEN);

				mark_common((struct commit *)o, 1, 1);
			}
		}
	}

	filter_refs(args, refs, sought, nr_sought);

	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;

		o = lookup_object(remote->hash);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}

	save_commit_buffer = old_save_commit_buffer;

	return retval;
}
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
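/*
 * Receive the pack from the other end: the stream is optionally demuxed
 * from sideband channel 1 by an async helper, and is then fed to either
 * "index-pack --stdin" (keeping the pack) or "unpack-objects", depending on
 * args->keep_pack, unpack_limit and the object count in the pack header;
 * packs from a promisor remote always go through index-pack.
 */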
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {

		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		if (args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_push(&cmd.args, "--strict");
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	return 0;
}
static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}
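/*
 * One round of the protocol: parse the capabilities the server advertised
 * alongside its refs, choose multi_ack/sideband/deepen/filter behaviour
 * accordingly, skip the fetch entirely when everything is already local,
 * otherwise negotiate common commits with find_common() and receive the
 * pack with get_pack().
 */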
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;

	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((args->depth > 0 || is_repository_shallow()) && !server_supports("shallow"))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports multi_ack_detailed"));
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports no-done"));
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports multi_ack"));
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports side-band-64k"));
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports side-band"));
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-tip-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports allow-reachable-sha1-in-want"));
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (!server_supports("thin-pack"))
		args->use_thin_pack = 0;
	if (!server_supports("no-progress"))
		args->no_progress = 0;
	if (!server_supports("include-tag"))
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports ofs-delta"));
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports filter"));
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("deepen-since"))
		deepen_since_ok = 1;
	else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not"))
		deepen_not_ok = 1;
	else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (!server_supports("deepen-relative") && args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (everything_local(args, &ref, sought, nr_sought)) {
		packet_flush(fd[1]);
		goto all_done;
	}
	if (find_common(args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	return ref;
}
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(git_default_config, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}
static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;

	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}
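/*
 * Reconcile .git/shallow with what the fetch brought in.  With --deepen the
 * lock taken earlier is committed (or, for --unshallow, the file is removed);
 * when cloning from a shallow remote, any advertised shallow point that made
 * it into the pack is accepted; with --update-shallow, shallow roots
 * reachable from the new refs are accepted; otherwise refs that would
 * require a .git/shallow update are flagged REF_STATUS_REJECT_SHALLOW.
 */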
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow());
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;

	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
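/*
 * fetch_pack() is the entry point used by the transport code: it loads the
 * fetch configuration, drops duplicate "sought" refs, runs do_fetch_pack()
 * and finally updates the local shallow state to match what was fetched.
 */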
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[], struct child_process *conn,
		       const struct ref *ref,
		       const char *dest,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile)
{
	struct ref *ref_cpy;
	struct shallow_info si;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (!ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	prepare_shallow_info(&si, shallow);
	ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
				&si, pack_lockfile);
	reprepare_packed_git(the_repository);
	update_shallow(args, sought, nr_sought, &si);
	clear_shallow_info(&si);
	return ref_cpy;
}
int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;

	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}