http-push: do not SEGV after fetching a bad pack idx file
[git/jrn.git] / http-walker.c
blob d6cc622e96d2aaa8ece45c517729b2d949471f4b

#include "cache.h"
#include "commit.h"
#include "pack.h"
#include "walker.h"
#include "http.h"

#define PREV_BUF_SIZE 4096
#define RANGE_HEADER_SIZE 30

struct alt_base
{
        char *base;
        int got_indices;
        struct packed_git *packs;
        struct alt_base *next;
};

enum object_request_state {
        WAITING,
        ABORTED,
        ACTIVE,
        COMPLETE,
};

struct object_request
{
        struct walker *walker;
        unsigned char sha1[20];
        struct alt_base *repo;
        char *url;
        char filename[PATH_MAX];
        char tmpfile[PATH_MAX];
        int local;
        enum object_request_state state;
        CURLcode curl_result;
        char errorstr[CURL_ERROR_SIZE];
        long http_code;
        unsigned char real_sha1[20];
        git_SHA_CTX c;
        z_stream stream;
        int zret;
        int rename;
        struct active_request_slot *slot;
        struct object_request *next;
};

struct alternates_request {
        struct walker *walker;
        const char *base;
        char *url;
        struct strbuf *buffer;
        struct active_request_slot *slot;
        int http_specific;
};

struct walker_data {
        const char *url;
        int got_alternates;
        struct alt_base *alt;
        struct curl_slist *no_pragma_header;
};

static struct object_request *object_queue_head;

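/*
 * libcurl write callback for loose object downloads: append the raw
 * bytes to the request's temporary file, then feed them through zlib
 * and SHA-1 so the object can be verified when the transfer completes.
 */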
static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb,
                               void *data)
{
        unsigned char expn[4096];
        size_t size = eltsize * nmemb;
        int posn = 0;
        struct object_request *obj_req = (struct object_request *)data;
        do {
                ssize_t retval = xwrite(obj_req->local,
                                        (char *) ptr + posn, size - posn);
                if (retval < 0)
                        return posn;
                posn += retval;
        } while (posn < size);

        obj_req->stream.avail_in = size;
        obj_req->stream.next_in = ptr;
        do {
                obj_req->stream.next_out = expn;
                obj_req->stream.avail_out = sizeof(expn);
                obj_req->zret = git_inflate(&obj_req->stream, Z_SYNC_FLUSH);
                git_SHA1_Update(&obj_req->c, expn,
                                sizeof(expn) - obj_req->stream.avail_out);
        } while (obj_req->stream.avail_in && obj_req->zret == Z_OK);
        data_received++;
        return size;
}

static void fetch_alternates(struct walker *walker, const char *base);

static void process_object_response(void *callback_data);

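/*
 * Prepare the HTTP request for a single loose object: pick a fresh
 * temporary file, replay any partially downloaded data from a previous
 * attempt, and start the transfer on an active request slot.
 */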
static void start_object_request(struct walker *walker,
                                 struct object_request *obj_req)
{
        char *hex = sha1_to_hex(obj_req->sha1);
        char prevfile[PATH_MAX];
        char *url;
        char *posn;
        int prevlocal;
        unsigned char prev_buf[PREV_BUF_SIZE];
        ssize_t prev_read = 0;
        long prev_posn = 0;
        char range[RANGE_HEADER_SIZE];
        struct curl_slist *range_header = NULL;
        struct active_request_slot *slot;
        struct walker_data *data = walker->data;

        snprintf(prevfile, sizeof(prevfile), "%s.prev", obj_req->filename);
        unlink_or_warn(prevfile);
        rename(obj_req->tmpfile, prevfile);
        unlink_or_warn(obj_req->tmpfile);

        if (obj_req->local != -1)
                error("fd leakage in start: %d", obj_req->local);
        obj_req->local = open(obj_req->tmpfile,
                              O_WRONLY | O_CREAT | O_EXCL, 0666);
        /*
         * This could have failed due to the "lazy directory creation";
         * try to mkdir the last path component.
         */
        if (obj_req->local < 0 && errno == ENOENT) {
                char *dir = strrchr(obj_req->tmpfile, '/');
                if (dir) {
                        *dir = 0;
                        mkdir(obj_req->tmpfile, 0777);
                        *dir = '/';
                }
                obj_req->local = open(obj_req->tmpfile,
                                      O_WRONLY | O_CREAT | O_EXCL, 0666);
        }

        if (obj_req->local < 0) {
                obj_req->state = ABORTED;
                error("Couldn't create temporary file %s for %s: %s",
                      obj_req->tmpfile, obj_req->filename, strerror(errno));
                return;
        }

        memset(&obj_req->stream, 0, sizeof(obj_req->stream));

        git_inflate_init(&obj_req->stream);

        git_SHA1_Init(&obj_req->c);

        url = xmalloc(strlen(obj_req->repo->base) + 51);
        obj_req->url = xmalloc(strlen(obj_req->repo->base) + 51);
        strcpy(url, obj_req->repo->base);
        posn = url + strlen(obj_req->repo->base);
        strcpy(posn, "/objects/");
        posn += 9;
        memcpy(posn, hex, 2);
        posn += 2;
        *(posn++) = '/';
        strcpy(posn, hex + 2);
        strcpy(obj_req->url, url);

        /*
         * If a previous temp file is present, process what was already
         * fetched.
         */
        prevlocal = open(prevfile, O_RDONLY);
        if (prevlocal != -1) {
                do {
                        prev_read = xread(prevlocal, prev_buf, PREV_BUF_SIZE);
                        if (prev_read > 0) {
                                if (fwrite_sha1_file(prev_buf,
                                                     1,
                                                     prev_read,
                                                     obj_req) == prev_read)
                                        prev_posn += prev_read;
                                else
                                        prev_read = -1;
                        }
                } while (prev_read > 0);
                close(prevlocal);
        }
        unlink_or_warn(prevfile);

        /*
         * Reset inflate/SHA1 if there was an error reading the previous temp
         * file; also rewind to the beginning of the local file.
         */
        if (prev_read == -1) {
                memset(&obj_req->stream, 0, sizeof(obj_req->stream));
                git_inflate_init(&obj_req->stream);
                git_SHA1_Init(&obj_req->c);
                if (prev_posn > 0) {
                        prev_posn = 0;
                        lseek(obj_req->local, 0, SEEK_SET);
                        ftruncate(obj_req->local, 0);
                }
        }

        slot = get_active_slot();
        slot->callback_func = process_object_response;
        slot->callback_data = obj_req;
        obj_req->slot = slot;

        curl_easy_setopt(slot->curl, CURLOPT_FILE, obj_req);
        curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file);
        curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, obj_req->errorstr);
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);

        /*
         * If we have successfully processed data from a previous fetch
         * attempt, only fetch the data we don't already have.
         */
        if (prev_posn > 0) {
                if (walker->get_verbosely)
                        fprintf(stderr,
                                "Resuming fetch of object %s at byte %ld\n",
                                hex, prev_posn);
                sprintf(range, "Range: bytes=%ld-", prev_posn);
                range_header = curl_slist_append(range_header, range);
                curl_easy_setopt(slot->curl,
                                 CURLOPT_HTTPHEADER, range_header);
        }

        /* Try to get the request started, abort the request on error */
        obj_req->state = ACTIVE;
        if (!start_active_slot(slot)) {
                obj_req->state = ABORTED;
                obj_req->slot = NULL;
                close(obj_req->local);
                obj_req->local = -1;
                free(obj_req->url);
                return;
        }
}

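/*
 * Validate a finished transfer: the inflated data must end cleanly and
 * hash to the requested SHA-1 before the temporary file is moved into
 * place; otherwise the temporary file is discarded.
 */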
static void finish_object_request(struct object_request *obj_req)
{
        struct stat st;

        close(obj_req->local);
        obj_req->local = -1;

        if (obj_req->http_code == 416) {
                fprintf(stderr, "Warning: requested range invalid; we may already have all the data.\n");
        } else if (obj_req->curl_result != CURLE_OK) {
                if (stat(obj_req->tmpfile, &st) == 0)
                        if (st.st_size == 0)
                                unlink_or_warn(obj_req->tmpfile);
                return;
        }

        git_inflate_end(&obj_req->stream);
        git_SHA1_Final(obj_req->real_sha1, &obj_req->c);
        if (obj_req->zret != Z_STREAM_END) {
                unlink_or_warn(obj_req->tmpfile);
                return;
        }
        if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
                unlink_or_warn(obj_req->tmpfile);
                return;
        }
        obj_req->rename =
                move_temp_to_file(obj_req->tmpfile, obj_req->filename);

        if (obj_req->rename == 0)
                walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1));
}

static void process_object_response(void *callback_data)
{
        struct object_request *obj_req =
                (struct object_request *)callback_data;
        struct walker *walker = obj_req->walker;
        struct walker_data *data = walker->data;
        struct alt_base *alt = data->alt;

        obj_req->curl_result = obj_req->slot->curl_result;
        obj_req->http_code = obj_req->slot->http_code;
        obj_req->slot = NULL;
        obj_req->state = COMPLETE;

        /* Use alternates if necessary */
        if (missing_target(obj_req)) {
                fetch_alternates(walker, alt->base);
                if (obj_req->repo->next != NULL) {
                        obj_req->repo =
                                obj_req->repo->next;
                        close(obj_req->local);
                        obj_req->local = -1;
                        start_object_request(walker, obj_req);
                        return;
                }
        }

        finish_object_request(obj_req);
}

static void release_object_request(struct object_request *obj_req)
{
        struct object_request *entry = object_queue_head;

        if (obj_req->local != -1)
                error("fd leakage in release: %d", obj_req->local);
        if (obj_req == object_queue_head) {
                object_queue_head = obj_req->next;
        } else {
                while (entry->next != NULL && entry->next != obj_req)
                        entry = entry->next;
                if (entry->next == obj_req)
                        entry->next = entry->next->next;
        }

        free(obj_req->url);
        free(obj_req);
}

#ifdef USE_CURL_MULTI
static int fill_active_slot(struct walker *walker)
{
        struct object_request *obj_req;

        for (obj_req = object_queue_head; obj_req; obj_req = obj_req->next) {
                if (obj_req->state == WAITING) {
                        if (has_sha1_file(obj_req->sha1))
                                obj_req->state = COMPLETE;
                        else {
                                start_object_request(walker, obj_req);
                                return 1;
                        }
                }
        }
        return 0;
}
#endif

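/*
 * Append a request for the given object to the queue; with the
 * curl-multi backend this also kicks the active slots so the download
 * can start right away.
 */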
static void prefetch(struct walker *walker, unsigned char *sha1)
{
        struct object_request *newreq;
        struct object_request *tail;
        struct walker_data *data = walker->data;
        char *filename = sha1_file_name(sha1);

        newreq = xmalloc(sizeof(*newreq));
        newreq->walker = walker;
        hashcpy(newreq->sha1, sha1);
        newreq->repo = data->alt;
        newreq->url = NULL;
        newreq->local = -1;
        newreq->state = WAITING;
        snprintf(newreq->filename, sizeof(newreq->filename), "%s", filename);
        snprintf(newreq->tmpfile, sizeof(newreq->tmpfile),
                 "%s.temp", filename);
        newreq->slot = NULL;
        newreq->next = NULL;

        if (object_queue_head == NULL) {
                object_queue_head = newreq;
        } else {
                tail = object_queue_head;
                while (tail->next != NULL)
                        tail = tail->next;
                tail->next = newreq;
        }

#ifdef USE_CURL_MULTI
        fill_active_slots();
        step_active_slots();
#endif
}

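/*
 * Download the .idx file for a pack, but only after a body-less probe
 * confirms the corresponding .pack is actually available on the server.
 */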
static int fetch_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
        int ret = 0;
        char *hex = xstrdup(sha1_to_hex(sha1));
        char *filename;
        char *url;
        char tmpfile[PATH_MAX];
        long prev_posn = 0;
        char range[RANGE_HEADER_SIZE];
        struct curl_slist *range_header = NULL;
        struct walker_data *data = walker->data;

        FILE *indexfile;
        struct active_request_slot *slot;
        struct slot_results results;

        /* Don't use the index if the pack isn't there */
        url = xmalloc(strlen(repo->base) + 64);
        sprintf(url, "%s/objects/pack/pack-%s.pack", repo->base, hex);
        slot = get_active_slot();
        slot->results = &results;
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);
        curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 1);
        if (start_active_slot(slot)) {
                run_active_slot(slot);
                if (results.curl_result != CURLE_OK) {
                        ret = error("Unable to verify pack %s is available",
                                    hex);
                        goto cleanup_pack;
                }
        } else {
                ret = error("Unable to start request");
                goto cleanup_pack;
        }

        if (has_pack_index(sha1)) {
                ret = 0;
                goto cleanup_pack;
        }

        if (walker->get_verbosely)
                fprintf(stderr, "Getting index for pack %s\n", hex);

        sprintf(url, "%s/objects/pack/pack-%s.idx", repo->base, hex);

        filename = sha1_pack_index_name(sha1);
        snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename);
        indexfile = fopen(tmpfile, "a");
        if (!indexfile) {
                ret = error("Unable to open local file %s for pack index",
                            tmpfile);
                goto cleanup_pack;
        }

        slot = get_active_slot();
        slot->results = &results;
        curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1);
        curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile);
        curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
        slot->local = indexfile;

        /*
         * If there is data present from a previous transfer attempt,
         * resume where it left off
         */
        prev_posn = ftell(indexfile);
        if (prev_posn > 0) {
                if (walker->get_verbosely)
                        fprintf(stderr,
                                "Resuming fetch of index for pack %s at byte %ld\n",
                                hex, prev_posn);
                sprintf(range, "Range: bytes=%ld-", prev_posn);
                range_header = curl_slist_append(range_header, range);
                curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header);
        }

        if (start_active_slot(slot)) {
                run_active_slot(slot);
                if (results.curl_result != CURLE_OK) {
                        ret = error("Unable to get pack index %s\n%s", url,
                                    curl_errorstr);
                        goto cleanup_index;
                }
        } else {
                ret = error("Unable to start request");
                goto cleanup_index;
        }

        ret = move_temp_to_file(tmpfile, filename);

cleanup_index:
        fclose(indexfile);
        slot->local = NULL;
cleanup_pack:
        free(url);
        free(hex);
        return ret;
}

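/*
 * Fetch and register the index for a pack we do not yet have; an index
 * that cannot be parsed is reported and skipped rather than added to
 * the repo's pack list.
 */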
static int setup_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
        struct packed_git *new_pack;
        if (has_pack_file(sha1))
                return 0; /* don't list this as something we can get */

        if (fetch_index(walker, repo, sha1))
                return -1;

        new_pack = parse_pack_index(sha1);
        if (!new_pack)
                return -1; /* parse_pack_index() already issued error message */
        new_pack->next = repo->packs;
        repo->packs = new_pack;
        return 0;
}

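/*
 * Parse the alternates file fetched from the remote (http-alternates
 * first, falling back to plain alternates) and append each usable entry
 * to the list of alternate object stores to try.
 */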
static void process_alternates_response(void *callback_data)
{
        struct alternates_request *alt_req =
                (struct alternates_request *)callback_data;
        struct walker *walker = alt_req->walker;
        struct walker_data *cdata = walker->data;
        struct active_request_slot *slot = alt_req->slot;
        struct alt_base *tail = cdata->alt;
        const char *base = alt_req->base;
        static const char null_byte = '\0';
        char *data;
        int i = 0;

        if (alt_req->http_specific) {
                if (slot->curl_result != CURLE_OK ||
                    !alt_req->buffer->len) {

                        /* Try reusing the slot to get non-http alternates */
                        alt_req->http_specific = 0;
                        sprintf(alt_req->url, "%s/objects/info/alternates",
                                base);
                        curl_easy_setopt(slot->curl, CURLOPT_URL,
                                         alt_req->url);
                        active_requests++;
                        slot->in_use = 1;
                        if (slot->finished != NULL)
                                (*slot->finished) = 0;
                        if (!start_active_slot(slot)) {
                                cdata->got_alternates = -1;
                                slot->in_use = 0;
                                if (slot->finished != NULL)
                                        (*slot->finished) = 1;
                        }
                        return;
                }
        } else if (slot->curl_result != CURLE_OK) {
                if (!missing_target(slot)) {
                        cdata->got_alternates = -1;
                        return;
                }
        }

        fwrite_buffer(&null_byte, 1, 1, alt_req->buffer);
        alt_req->buffer->len--;
        data = alt_req->buffer->buf;

        while (i < alt_req->buffer->len) {
                int posn = i;
                while (posn < alt_req->buffer->len && data[posn] != '\n')
                        posn++;
                if (data[posn] == '\n') {
                        int okay = 0;
                        int serverlen = 0;
                        struct alt_base *newalt;
                        char *target = NULL;
                        if (data[i] == '/') {
                                /*
                                 * This counts
                                 * http://git.host/pub/scm/linux.git/
                                 * -----------here^
                                 * so memcpy(dst, base, serverlen) will
                                 * copy up to "...git.host".
                                 */
                                const char *colon_ss = strstr(base,"://");
                                if (colon_ss) {
                                        serverlen = (strchr(colon_ss + 3, '/')
                                                     - base);
                                        okay = 1;
                                }
                        } else if (!memcmp(data + i, "../", 3)) {
                                /*
                                 * Relative URL; chop the corresponding
                                 * number of subpath from base (and ../
                                 * from data), and concatenate the result.
                                 *
                                 * The code first drops ../ from data, and
                                 * then drops one ../ from data and one path
                                 * from base.  IOW, one extra ../ is dropped
                                 * from data than path is dropped from base.
                                 *
                                 * This is not wrong.  The alternate in
                                 *      http://git.host/pub/scm/linux.git/
                                 * to borrow from
                                 *      http://git.host/pub/scm/linus.git/
                                 * is ../../linus.git/objects/.  You need
                                 * two ../../ to borrow from your direct
                                 * neighbour.
                                 */
                                i += 3;
                                serverlen = strlen(base);
                                while (i + 2 < posn &&
                                       !memcmp(data + i, "../", 3)) {
                                        do {
                                                serverlen--;
                                        } while (serverlen &&
                                                 base[serverlen - 1] != '/');
                                        i += 3;
                                }
                                /* If the server got removed, give up. */
                                okay = strchr(base, ':') - base + 3 <
                                        serverlen;
                        } else if (alt_req->http_specific) {
                                char *colon = strchr(data + i, ':');
                                char *slash = strchr(data + i, '/');
                                if (colon && slash && colon < data + posn &&
                                    slash < data + posn && colon < slash) {
                                        okay = 1;
                                }
                        }
                        /* skip "objects\n" at end */
                        if (okay) {
                                target = xmalloc(serverlen + posn - i - 6);
                                memcpy(target, base, serverlen);
                                memcpy(target + serverlen, data + i,
                                       posn - i - 7);
                                target[serverlen + posn - i - 7] = 0;
                                if (walker->get_verbosely)
                                        fprintf(stderr,
                                                "Also look at %s\n", target);
                                newalt = xmalloc(sizeof(*newalt));
                                newalt->next = NULL;
                                newalt->base = target;
                                newalt->got_indices = 0;
                                newalt->packs = NULL;

                                while (tail->next != NULL)
                                        tail = tail->next;
                                tail->next = newalt;
                        }
                }
                i = posn + 1;
        }

        cdata->got_alternates = 1;
}

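/*
 * Fetch objects/info/http-alternates for the given base URL, using a
 * callback so other in-flight requests can trigger or wait on the same
 * lookup.
 */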
static void fetch_alternates(struct walker *walker, const char *base)
{
        struct strbuf buffer = STRBUF_INIT;
        char *url;
        struct active_request_slot *slot;
        struct alternates_request alt_req;
        struct walker_data *cdata = walker->data;

        /*
         * If another request has already started fetching alternates,
         * wait for them to arrive and return to processing this request's
         * curl message
         */
#ifdef USE_CURL_MULTI
        while (cdata->got_alternates == 0) {
                step_active_slots();
        }
#endif

        /* Nothing to do if they've already been fetched */
        if (cdata->got_alternates == 1)
                return;

        /* Start the fetch */
        cdata->got_alternates = 0;

        if (walker->get_verbosely)
                fprintf(stderr, "Getting alternates list for %s\n", base);

        url = xmalloc(strlen(base) + 31);
        sprintf(url, "%s/objects/info/http-alternates", base);

        /*
         * Use a callback to process the result, since another request
         * may fail and need to have alternates loaded before continuing
         */
        slot = get_active_slot();
        slot->callback_func = process_alternates_response;
        alt_req.walker = walker;
        slot->callback_data = &alt_req;

        curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
        curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);

        alt_req.base = base;
        alt_req.url = url;
        alt_req.buffer = &buffer;
        alt_req.http_specific = 1;
        alt_req.slot = slot;

        if (start_active_slot(slot))
                run_active_slot(slot);
        else
                cdata->got_alternates = -1;

        strbuf_release(&buffer);
        free(url);
}

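/*
 * Fetch objects/info/packs for a repository and set up an index for
 * every pack listed there.
 */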
static int fetch_indices(struct walker *walker, struct alt_base *repo)
{
        unsigned char sha1[20];
        char *url;
        struct strbuf buffer = STRBUF_INIT;
        char *data;
        int i = 0;
        int ret = 0;

        struct active_request_slot *slot;
        struct slot_results results;

        if (repo->got_indices)
                return 0;

        if (walker->get_verbosely)
                fprintf(stderr, "Getting pack list for %s\n", repo->base);

        url = xmalloc(strlen(repo->base) + 21);
        sprintf(url, "%s/objects/info/packs", repo->base);

        slot = get_active_slot();
        slot->results = &results;
        curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
        curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
        if (start_active_slot(slot)) {
                run_active_slot(slot);
                if (results.curl_result != CURLE_OK) {
                        if (missing_target(&results)) {
                                repo->got_indices = 1;
                                goto cleanup;
                        } else {
                                repo->got_indices = 0;
                                ret = error("%s", curl_errorstr);
                                goto cleanup;
                        }
                }
        } else {
                repo->got_indices = 0;
                ret = error("Unable to start request");
                goto cleanup;
        }

        data = buffer.buf;
        while (i < buffer.len) {
                switch (data[i]) {
                case 'P':
                        i++;
                        if (i + 52 <= buffer.len &&
                            !prefixcmp(data + i, " pack-") &&
                            !prefixcmp(data + i + 46, ".pack\n")) {
                                get_sha1_hex(data + i + 6, sha1);
                                setup_index(walker, repo, sha1);
                                i += 51;
                                break;
                        }
                default:
                        while (i < buffer.len && data[i] != '\n')
                                i++;
                }
                i++;
        }

        repo->got_indices = 1;
cleanup:
        strbuf_release(&buffer);
        free(url);
        return ret;
}

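/*
 * Download the pack that contains the wanted object, resuming a partial
 * transfer if one exists, then verify it and install it.
 */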
static int fetch_pack(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
        char *url;
        struct packed_git *target;
        struct packed_git **lst;
        FILE *packfile;
        char *filename;
        char tmpfile[PATH_MAX];
        int ret;
        long prev_posn = 0;
        char range[RANGE_HEADER_SIZE];
        struct curl_slist *range_header = NULL;
        struct walker_data *data = walker->data;

        struct active_request_slot *slot;
        struct slot_results results;

        if (fetch_indices(walker, repo))
                return -1;
        target = find_sha1_pack(sha1, repo->packs);
        if (!target)
                return -1;

        if (walker->get_verbosely) {
                fprintf(stderr, "Getting pack %s\n",
                        sha1_to_hex(target->sha1));
                fprintf(stderr, " which contains %s\n",
                        sha1_to_hex(sha1));
        }

        url = xmalloc(strlen(repo->base) + 65);
        sprintf(url, "%s/objects/pack/pack-%s.pack",
                repo->base, sha1_to_hex(target->sha1));

        filename = sha1_pack_name(target->sha1);
        snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename);
        packfile = fopen(tmpfile, "a");
        if (!packfile)
                return error("Unable to open local file %s for pack",
                             tmpfile);

        slot = get_active_slot();
        slot->results = &results;
        curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile);
        curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
        curl_easy_setopt(slot->curl, CURLOPT_URL, url);
        curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
        slot->local = packfile;

        /*
         * If there is data present from a previous transfer attempt,
         * resume where it left off
         */
        prev_posn = ftell(packfile);
        if (prev_posn > 0) {
                if (walker->get_verbosely)
                        fprintf(stderr,
                                "Resuming fetch of pack %s at byte %ld\n",
                                sha1_to_hex(target->sha1), prev_posn);
                sprintf(range, "Range: bytes=%ld-", prev_posn);
                range_header = curl_slist_append(range_header, range);
                curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header);
        }

        if (start_active_slot(slot)) {
                run_active_slot(slot);
                if (results.curl_result != CURLE_OK) {
                        fclose(packfile);
                        slot->local = NULL;
                        return error("Unable to get pack file %s\n%s", url,
                                     curl_errorstr);
                }
        } else {
                fclose(packfile);
                slot->local = NULL;
                return error("Unable to start request");
        }

        target->pack_size = ftell(packfile);
        fclose(packfile);
        slot->local = NULL;

        ret = move_temp_to_file(tmpfile, filename);
        if (ret)
                return ret;

        lst = &repo->packs;
        while (*lst != target)
                lst = &((*lst)->next);
        *lst = (*lst)->next;

        if (verify_pack(target))
                return -1;
        install_packed_git(target);

        return 0;
}

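/*
 * Drop an in-flight object request: close and remove its temporary
 * file, release its request slot, and unlink it from the queue.
 */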
static void abort_object_request(struct object_request *obj_req)
{
        if (obj_req->local >= 0) {
                close(obj_req->local);
                obj_req->local = -1;
        }
        unlink_or_warn(obj_req->tmpfile);
        if (obj_req->slot) {
                release_active_slot(obj_req->slot);
                obj_req->slot = NULL;
        }
        release_object_request(obj_req);
}

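/*
 * Wait for the queued request for this object to finish and report the
 * result, translating transfer, inflate, and hash failures into errors.
 */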
static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
{
        char *hex = sha1_to_hex(sha1);
        int ret = 0;
        struct object_request *obj_req = object_queue_head;

        while (obj_req != NULL && hashcmp(obj_req->sha1, sha1))
                obj_req = obj_req->next;
        if (obj_req == NULL)
                return error("Couldn't find request for %s in the queue", hex);

        if (has_sha1_file(obj_req->sha1)) {
                abort_object_request(obj_req);
                return 0;
        }

#ifdef USE_CURL_MULTI
        while (obj_req->state == WAITING)
                step_active_slots();
#else
        start_object_request(walker, obj_req);
#endif

        while (obj_req->state == ACTIVE)
                run_active_slot(obj_req->slot);

        if (obj_req->local != -1) {
                close(obj_req->local);
                obj_req->local = -1;
        }

        if (obj_req->state == ABORTED) {
                ret = error("Request for %s aborted", hex);
        } else if (obj_req->curl_result != CURLE_OK &&
                   obj_req->http_code != 416) {
                if (missing_target(obj_req))
                        ret = -1; /* Be silent, it is probably in a pack. */
                else
                        ret = error("%s (curl_result = %d, http_code = %ld, sha1 = %s)",
                                    obj_req->errorstr, obj_req->curl_result,
                                    obj_req->http_code, hex);
        } else if (obj_req->zret != Z_STREAM_END) {
                walker->corrupt_object_found++;
                ret = error("File %s (%s) corrupt", hex, obj_req->url);
        } else if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
                ret = error("File %s has bad hash", hex);
        } else if (obj_req->rename < 0) {
                ret = error("unable to write sha1 filename %s",
                            obj_req->filename);
        }

        release_object_request(obj_req);
        return ret;
}

static int fetch(struct walker *walker, unsigned char *sha1)
{
        struct walker_data *data = walker->data;
        struct alt_base *altbase = data->alt;

        if (!fetch_object(walker, altbase, sha1))
                return 0;
        while (altbase) {
                if (!fetch_pack(walker, altbase, sha1))
                        return 0;
                fetch_alternates(walker, data->alt->base);
                altbase = altbase->next;
        }
        return error("Unable to find %s under %s", sha1_to_hex(sha1),
                     data->alt->base);
}

static int fetch_ref(struct walker *walker, struct ref *ref)
{
        struct walker_data *data = walker->data;
        return http_fetch_ref(data->alt->base, ref);
}

static void cleanup(struct walker *walker)
{
        struct walker_data *data = walker->data;
        http_cleanup();

        curl_slist_free_all(data->no_pragma_header);
}

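/*
 * Create a walker bound to the given remote URL: initialize the HTTP
 * layer, set up the primary alternate base, and wire up the fetch
 * callbacks.
 */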
struct walker *get_http_walker(const char *url, struct remote *remote)
{
        char *s;
        struct walker_data *data = xmalloc(sizeof(struct walker_data));
        struct walker *walker = xmalloc(sizeof(struct walker));

        http_init(remote);

        data->no_pragma_header = curl_slist_append(NULL, "Pragma:");

        data->alt = xmalloc(sizeof(*data->alt));
        data->alt->base = xmalloc(strlen(url) + 1);
        strcpy(data->alt->base, url);
        for (s = data->alt->base + strlen(data->alt->base) - 1; *s == '/'; --s)
                *s = 0;

        data->alt->got_indices = 0;
        data->alt->packs = NULL;
        data->alt->next = NULL;
        data->got_alternates = -1;

        walker->corrupt_object_found = 0;
        walker->fetch = fetch;
        walker->fetch_ref = fetch_ref;
        walker->prefetch = prefetch;
        walker->cleanup = cleanup;
        walker->data = data;

#ifdef USE_CURL_MULTI
        add_fill_function(walker, (int (*)(void *)) fill_active_slot);
#endif

        return walker;
}