Initial commit.
[CMakeLuaTailorHgBridge.git] / CMakeLua / Utilities / cmcurl / transfer.c
blob49e17bfd4b135d429eb41797d5f0f7e790fb6c3f
1 /***************************************************************************
2 * _ _ ____ _
3 * Project ___| | | | _ \| |
4 * / __| | | | |_) | |
5 * | (__| |_| | _ <| |___
6 * \___|\___/|_| \_\_____|
8 * Copyright (C) 1998 - 2007, Daniel Stenberg, <daniel@haxx.se>, et al.
10 * This software is licensed as described in the file COPYING, which
11 * you should have received as part of this distribution. The terms
12 * are also available at http://curl.haxx.se/docs/copyright.html.
14 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15 * copies of the Software, and permit persons to whom the Software is
16 * furnished to do so, under the terms of the COPYING file.
18 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19 * KIND, either express or implied.
21 * $Id: transfer.c,v 1.3 2007/03/18 20:18:50 andy Exp $
22 ***************************************************************************/
24 #include "setup.h"
26 /* -- WIN32 approved -- */
27 #include <stdio.h>
28 #include <string.h>
29 #include <stdarg.h>
30 #include <stdlib.h>
31 #include <ctype.h>
32 #ifdef HAVE_SYS_TYPES_H
33 #include <sys/types.h>
34 #endif
35 #ifdef HAVE_SYS_STAT_H
36 #include <sys/stat.h>
37 #endif
39 #include <errno.h>
41 #include "strtoofft.h"
42 #include "strequal.h"
44 #ifdef WIN32
45 #include <time.h>
46 #include <io.h>
47 #else
48 #ifdef HAVE_SYS_SOCKET_H
49 #include <sys/socket.h>
50 #endif
51 #ifdef HAVE_NETINET_IN_H
52 #include <netinet/in.h>
53 #endif
54 #ifdef HAVE_SYS_TIME_H
55 #include <sys/time.h>
56 #endif
57 #ifdef HAVE_UNISTD_H
58 #include <unistd.h>
59 #endif
60 #include <netdb.h>
61 #ifdef HAVE_ARPA_INET_H
62 #include <arpa/inet.h>
63 #endif
64 #ifdef HAVE_NET_IF_H
65 #include <net/if.h>
66 #endif
67 #ifdef HAVE_SYS_IOCTL_H
68 #include <sys/ioctl.h>
69 #endif
70 #include <signal.h>
72 #ifdef HAVE_SYS_PARAM_H
73 #include <sys/param.h>
74 #endif
76 #ifdef HAVE_SYS_SELECT_H
77 #include <sys/select.h>
78 #endif
80 #ifndef HAVE_SOCKET
81 #error "We can't compile without socket() support!"
82 #endif
84 #endif
86 #include "urldata.h"
87 #include <curl/curl.h>
88 #include "netrc.h"
90 #include "content_encoding.h"
91 #include "hostip.h"
92 #include "transfer.h"
93 #include "sendf.h"
94 #include "speedcheck.h"
95 #include "progress.h"
96 #include "http.h"
97 #include "url.h"
98 #include "getinfo.h"
99 #include "sslgen.h"
100 #include "http_digest.h"
101 #include "http_ntlm.h"
102 #include "http_negotiate.h"
103 #include "share.h"
104 #include "memory.h"
105 #include "select.h"
106 #include "multiif.h"
107 #include "easyif.h" /* for Curl_convert_to_network prototype */
109 #define _MPRINTF_REPLACE /* use our functions only */
110 #include <curl/mprintf.h>
112 /* The last #include file should be: */
113 #include "memdebug.h"
115 #define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
118 * This function will call the read callback to fill our buffer with data
119 * to upload.
121 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
123 struct SessionHandle *data = conn->data;
124 size_t buffersize = (size_t)bytes;
125 int nread;
127 if(conn->bits.upload_chunky) {
128 /* if chunked Transfer-Encoding */
129 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
130 data->reqdata.upload_fromhere += 10; /* 32bit hex + CRLF */
133 /* this function returns a size_t, so we typecast to int to prevent warnings
134 with picky compilers */
135 nread = (int)conn->fread(data->reqdata.upload_fromhere, 1,
136 buffersize, conn->fread_in);
138 if(nread == CURL_READFUNC_ABORT) {
139 failf(data, "operation aborted by callback\n");
140 return CURLE_ABORTED_BY_CALLBACK;
143 if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
144 /* if chunked Transfer-Encoding */
145 char hexbuffer[11];
146 int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
147 "%x\r\n", nread);
148 /* move buffer pointer */
149 data->reqdata.upload_fromhere -= hexlen;
150 nread += hexlen;
152 /* copy the prefix to the buffer */
153 memcpy(data->reqdata.upload_fromhere, hexbuffer, hexlen);
155 /* always append CRLF to the data */
156 memcpy(data->reqdata.upload_fromhere + nread, "\r\n", 2);
158 if((nread - hexlen) == 0) {
159 /* mark this as done once this chunk is transfered */
160 data->reqdata.keep.upload_done = TRUE;
163 nread+=2; /* for the added CRLF */
166 *nreadp = nread;
168 #ifdef CURL_DOES_CONVERSIONS
169 if(data->set.prefer_ascii) {
170 CURLcode res;
171 res = Curl_convert_to_network(data, data->reqdata.upload_fromhere, nread);
172 /* Curl_convert_to_network calls failf if unsuccessful */
173 if(res != CURLE_OK) {
174 return(res);
177 #endif /* CURL_DOES_CONVERSIONS */
179 return CURLE_OK;
183 * checkhttpprefix()
185 * Returns TRUE if member of the list matches prefix of string
187 static bool
188 checkhttpprefix(struct SessionHandle *data,
189 const char *s)
191 struct curl_slist *head = data->set.http200aliases;
192 bool rc = FALSE;
193 #ifdef CURL_DOES_CONVERSIONS
194 /* convert from the network encoding using a scratch area */
195 char *scratch = calloc(1, strlen(s)+1);
196 if (NULL == scratch) {
197 failf (data, "Failed to calloc memory for conversion!");
198 return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
200 strcpy(scratch, s);
201 if (CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
202 /* Curl_convert_from_network calls failf if unsuccessful */
203 free(scratch);
204 return FALSE; /* can't return CURLE_foobar so return FALSE */
206 s = scratch;
207 #endif /* CURL_DOES_CONVERSIONS */
209 while (head) {
210 if (checkprefix(head->data, s)) {
211 rc = TRUE;
212 break;
214 head = head->next;
217 if ((rc != TRUE) && (checkprefix("HTTP/", s))) {
218 rc = TRUE;
221 #ifdef CURL_DOES_CONVERSIONS
222 free(scratch);
223 #endif /* CURL_DOES_CONVERSIONS */
224 return rc;
228 * Curl_readrewind() rewinds the read stream. This typically (so far) only
229 * used for HTTP POST/PUT with multi-pass authentication when a sending was
230 * denied and a resend is necessary.
232 CURLcode Curl_readrewind(struct connectdata *conn)
234 struct SessionHandle *data = conn->data;
236 conn->bits.rewindaftersend = FALSE; /* we rewind now */
238 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
239 CURLOPT_HTTPPOST, call app to rewind
241 if(data->set.postfields ||
242 (data->set.httpreq == HTTPREQ_POST_FORM))
243 ; /* do nothing */
244 else {
245 if(data->set.ioctl) {
246 curlioerr err;
248 err = (data->set.ioctl) (data, CURLIOCMD_RESTARTREAD,
249 data->set.ioctl_client);
250 infof(data, "the ioctl callback returned %d\n", (int)err);
252 if(err) {
253 /* FIXME: convert to a human readable error message */
254 failf(data, "ioctl callback returned error %d\n", (int)err);
255 return CURLE_SEND_FAIL_REWIND;
258 else {
259 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
260 given FILE * stream and we can actually attempt to rewind that
261 ourself with fseek() */
262 if(data->set.fread == (curl_read_callback)fread) {
263 if(-1 != fseek(data->set.in, 0, SEEK_SET))
264 /* successful rewind */
265 return CURLE_OK;
268 /* no callback set or failure aboe, makes us fail at once */
269 failf(data, "necessary data rewind wasn't possible\n");
270 return CURLE_SEND_FAIL_REWIND;
273 return CURLE_OK;
276 static int data_pending(struct connectdata *conn)
278 return Curl_ssl_data_pending(conn, FIRSTSOCKET);
#ifndef MIN
/* parenthesize both arguments and the full expansion so the macro stays
   correct with compound expressions such as MIN(a | b, c), where the
   un-parenthesized form mis-associates with the ?: operator */
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
285 static void read_rewind(struct connectdata *conn,
286 size_t thismuch)
288 conn->read_pos -= thismuch;
289 conn->bits.stream_was_rewound = TRUE;
291 #ifdef CURLDEBUG
293 char buf[512 + 1];
294 size_t show;
296 show = MIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
297 memcpy(buf, conn->master_buffer + conn->read_pos, show);
298 buf[show] = '\0';
300 DEBUGF(infof(conn->data,
301 "Buffer after stream rewind (read_pos = %d): [%s]",
302 conn->read_pos, buf));
304 #endif
308 * Curl_readwrite() is the low-level function to be called when data is to
309 * be read and written to/from the connection.
311 CURLcode Curl_readwrite(struct connectdata *conn,
312 bool *done)
314 struct SessionHandle *data = conn->data;
315 struct Curl_transfer_keeper *k = &data->reqdata.keep;
316 CURLcode result;
317 ssize_t nread; /* number of bytes read */
318 int didwhat=0;
320 curl_socket_t fd_read;
321 curl_socket_t fd_write;
322 int select_res;
324 curl_off_t contentlength;
326 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
327 then we are in rate limiting state in that transfer direction */
329 if((k->keepon & (KEEP_READ|KEEP_READ_HOLD)) == KEEP_READ)
330 fd_read = conn->sockfd;
331 else
332 fd_read = CURL_SOCKET_BAD;
334 if((k->keepon & (KEEP_WRITE|KEEP_WRITE_HOLD)) == KEEP_WRITE)
335 fd_write = conn->writesockfd;
336 else
337 fd_write = CURL_SOCKET_BAD;
339 select_res = Curl_select(fd_read, fd_write, 0);
340 if(select_res == CSELECT_ERR) {
341 failf(data, "select/poll returned error");
342 return CURLE_SEND_ERROR;
345 do {
346 /* We go ahead and do a read if we have a readable socket or if
347 the stream was rewound (in which case we have data in a
348 buffer) */
349 if((k->keepon & KEEP_READ) &&
350 ((select_res & CSELECT_IN) || conn->bits.stream_was_rewound)) {
351 /* read */
352 bool is_empty_data = FALSE;
354 /* This is where we loop until we have read everything there is to
355 read or we get a EWOULDBLOCK */
356 do {
357 size_t buffersize = data->set.buffer_size?
358 data->set.buffer_size : BUFSIZE;
359 size_t bytestoread = buffersize;
360 int readrc;
362 if (k->size != -1 && !k->header) {
363 /* make sure we don't read "too much" if we can help it since we
364 might be pipelining and then someone else might want to read what
365 follows! */
366 curl_off_t totalleft = k->size - k->bytecount;
367 if(totalleft < (curl_off_t)bytestoread)
368 bytestoread = (size_t)totalleft;
371 /* receive data from the network! */
372 readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
374 /* subzero, this would've blocked */
375 if(0 > readrc)
376 break; /* get out of loop */
378 /* get the CURLcode from the int */
379 result = (CURLcode)readrc;
381 if(result>0)
382 return result;
384 if ((k->bytecount == 0) && (k->writebytecount == 0)) {
385 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
386 if(k->wait100_after_headers)
387 /* set time stamp to compare with when waiting for the 100 */
388 k->start100 = Curl_tvnow();
391 didwhat |= KEEP_READ;
392 /* indicates data of zero size, i.e. empty file */
393 is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));
395 /* NULL terminate, allowing string ops to be used */
396 if (0 < nread || is_empty_data) {
397 k->buf[nread] = 0;
399 else if (0 >= nread) {
400 /* if we receive 0 or less here, the server closed the connection
401 and we bail out from this! */
403 k->keepon &= ~KEEP_READ;
404 break;
407 /* Default buffer to use when we write the buffer, it may be changed
408 in the flow below before the actual storing is done. */
409 k->str = k->buf;
411 /* Since this is a two-state thing, we check if we are parsing
412 headers at the moment or not. */
413 if (k->header) {
414 /* we are in parse-the-header-mode */
415 bool stop_reading = FALSE;
417 /* header line within buffer loop */
418 do {
419 size_t hbufp_index;
420 size_t rest_length;
421 size_t full_length;
422 int writetype;
424 /* str_start is start of line within buf */
425 k->str_start = k->str;
427 /* data is in network encoding so use 0x0a instead of '\n' */
428 k->end_ptr = memchr(k->str_start, 0x0a, nread);
430 if (!k->end_ptr) {
431 /* Not a complete header line within buffer, append the data to
432 the end of the headerbuff. */
434 if (k->hbuflen + nread >= data->state.headersize) {
435 /* We enlarge the header buffer as it is too small */
436 char *newbuff;
437 size_t newsize=CURLMAX((k->hbuflen+nread)*3/2,
438 data->state.headersize*2);
439 hbufp_index = k->hbufp - data->state.headerbuff;
440 newbuff = (char *)realloc(data->state.headerbuff, newsize);
441 if(!newbuff) {
442 failf (data, "Failed to alloc memory for big header!");
443 return CURLE_OUT_OF_MEMORY;
445 data->state.headersize=newsize;
446 data->state.headerbuff = newbuff;
447 k->hbufp = data->state.headerbuff + hbufp_index;
449 memcpy(k->hbufp, k->str, nread);
450 k->hbufp += nread;
451 k->hbuflen += nread;
452 if (!k->headerline && (k->hbuflen>5)) {
453 /* make a first check that this looks like a HTTP header */
454 if(!checkhttpprefix(data, data->state.headerbuff)) {
455 /* this is not the beginning of a HTTP first header line */
456 k->header = FALSE;
457 k->badheader = HEADER_ALLBAD;
458 break;
462 break; /* read more and try again */
465 /* decrease the size of the remaining (supposed) header line */
466 rest_length = (k->end_ptr - k->str)+1;
467 nread -= (ssize_t)rest_length;
469 k->str = k->end_ptr + 1; /* move past new line */
471 full_length = k->str - k->str_start;
474 * We're about to copy a chunk of data to the end of the
475 * already received header. We make sure that the full string
476 * fit in the allocated header buffer, or else we enlarge
477 * it.
479 if (k->hbuflen + full_length >=
480 data->state.headersize) {
481 char *newbuff;
482 size_t newsize=CURLMAX((k->hbuflen+full_length)*3/2,
483 data->state.headersize*2);
484 hbufp_index = k->hbufp - data->state.headerbuff;
485 newbuff = (char *)realloc(data->state.headerbuff, newsize);
486 if(!newbuff) {
487 failf (data, "Failed to alloc memory for big header!");
488 return CURLE_OUT_OF_MEMORY;
490 data->state.headersize= newsize;
491 data->state.headerbuff = newbuff;
492 k->hbufp = data->state.headerbuff + hbufp_index;
495 /* copy to end of line */
496 memcpy(k->hbufp, k->str_start, full_length);
497 k->hbufp += full_length;
498 k->hbuflen += full_length;
499 *k->hbufp = 0;
500 k->end_ptr = k->hbufp;
502 k->p = data->state.headerbuff;
504 /****
505 * We now have a FULL header line that p points to
506 *****/
508 if(!k->headerline) {
509 /* the first read header */
510 if((k->hbuflen>5) &&
511 !checkhttpprefix(data, data->state.headerbuff)) {
512 /* this is not the beginning of a HTTP first header line */
513 k->header = FALSE;
514 if(nread)
515 /* since there's more, this is a partial bad header */
516 k->badheader = HEADER_PARTHEADER;
517 else {
518 /* this was all we read so its all a bad header */
519 k->badheader = HEADER_ALLBAD;
520 nread = (ssize_t)rest_length;
522 break;
526 /* headers are in network encoding so
527 use 0x0a and 0x0d instead of '\n' and '\r' */
528 if ((0x0a == *k->p) || (0x0d == *k->p)) {
529 size_t headerlen;
530 /* Zero-length header line means end of headers! */
532 #ifdef CURL_DOES_CONVERSIONS
533 if (0x0d == *k->p) {
534 *k->p = '\r'; /* replace with CR in host encoding */
535 k->p++; /* pass the CR byte */
537 if (0x0a == *k->p) {
538 *k->p = '\n'; /* replace with LF in host encoding */
539 k->p++; /* pass the LF byte */
541 #else
542 if ('\r' == *k->p)
543 k->p++; /* pass the \r byte */
544 if ('\n' == *k->p)
545 k->p++; /* pass the \n byte */
546 #endif /* CURL_DOES_CONVERSIONS */
548 if(100 == k->httpcode) {
550 * We have made a HTTP PUT or POST and this is 1.1-lingo
551 * that tells us that the server is OK with this and ready
552 * to receive the data.
553 * However, we'll get more headers now so we must get
554 * back into the header-parsing state!
556 k->header = TRUE;
557 k->headerline = 0; /* restart the header line counter */
558 /* if we did wait for this do enable write now! */
559 if (k->write_after_100_header) {
561 k->write_after_100_header = FALSE;
562 k->keepon |= KEEP_WRITE;
565 else {
566 k->header = FALSE; /* no more header to parse! */
568 if((k->size == -1) && !conn->bits.chunk && !conn->bits.close)
569 /* When connection is not to get closed, but no
570 Content-Length nor Content-Encoding chunked have been
571 received, there is no body in this response. We don't set
572 stop_reading TRUE since that would also prevent necessary
573 authentication actions to take place. */
574 conn->bits.no_body = TRUE;
578 if (417 == k->httpcode) {
580 * we got: "417 Expectation Failed" this means:
581 * we have made a HTTP call and our Expect Header
582 * seems to cause a problem => abort the write operations
583 * (or prevent them from starting).
585 k->write_after_100_header = FALSE;
586 k->keepon &= ~KEEP_WRITE;
589 #ifndef CURL_DISABLE_HTTP
591 * When all the headers have been parsed, see if we should give
592 * up and return an error.
594 if (Curl_http_should_fail(conn)) {
595 failf (data, "The requested URL returned error: %d",
596 k->httpcode);
597 return CURLE_HTTP_RETURNED_ERROR;
599 #endif /* CURL_DISABLE_HTTP */
601 /* now, only output this if the header AND body are requested:
603 writetype = CLIENTWRITE_HEADER;
604 if (data->set.include_header)
605 writetype |= CLIENTWRITE_BODY;
607 headerlen = k->p - data->state.headerbuff;
609 result = Curl_client_write(conn, writetype,
610 data->state.headerbuff,
611 headerlen);
612 if(result)
613 return result;
615 data->info.header_size += (long)headerlen;
616 conn->headerbytecount += (long)headerlen;
618 conn->deductheadercount =
619 (100 == k->httpcode)?conn->headerbytecount:0;
621 if (data->reqdata.resume_from &&
622 (data->set.httpreq==HTTPREQ_GET) &&
623 (k->httpcode == 416)) {
624 /* "Requested Range Not Satisfiable" */
625 stop_reading = TRUE;
628 #ifndef CURL_DISABLE_HTTP
629 if(!stop_reading) {
630 /* Curl_http_auth_act() checks what authentication methods
631 * that are available and decides which one (if any) to
632 * use. It will set 'newurl' if an auth metod was picked. */
633 result = Curl_http_auth_act(conn);
635 if(result)
636 return result;
638 if(conn->bits.rewindaftersend) {
639 /* We rewind after a complete send, so thus we continue
640 sending now */
641 infof(data, "Keep sending data to get tossed away!\n");
642 k->keepon |= KEEP_WRITE;
645 #endif /* CURL_DISABLE_HTTP */
647 if(!k->header) {
649 * really end-of-headers.
651 * If we requested a "no body", this is a good time to get
652 * out and return home.
654 if(conn->bits.no_body)
655 stop_reading = TRUE;
656 else {
657 /* If we know the expected size of this document, we set the
658 maximum download size to the size of the expected
659 document or else, we won't know when to stop reading!
661 Note that we set the download maximum even if we read a
662 "Connection: close" header, to make sure that
663 "Content-Length: 0" still prevents us from attempting to
664 read the (missing) response-body.
666 /* According to RFC2616 section 4.4, we MUST ignore
667 Content-Length: headers if we are now receiving data
668 using chunked Transfer-Encoding.
670 if(conn->bits.chunk)
671 k->size=-1;
674 if(-1 != k->size) {
675 /* We do this operation even if no_body is true, since this
676 data might be retrieved later with curl_easy_getinfo()
677 and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
679 Curl_pgrsSetDownloadSize(data, k->size);
680 k->maxdownload = k->size;
682 /* If max download size is *zero* (nothing) we already
683 have nothing and can safely return ok now! */
684 if(0 == k->maxdownload)
685 stop_reading = TRUE;
687 if(stop_reading) {
688 /* we make sure that this socket isn't read more now */
689 k->keepon &= ~KEEP_READ;
692 break; /* exit header line loop */
695 /* We continue reading headers, so reset the line-based
696 header parsing variables hbufp && hbuflen */
697 k->hbufp = data->state.headerbuff;
698 k->hbuflen = 0;
699 continue;
703 * Checks for special headers coming up.
706 if (!k->headerline++) {
707 /* This is the first header, it MUST be the error code line
708 or else we consider this to be the body right away! */
709 int httpversion_major;
710 int nc;
711 #ifdef CURL_DOES_CONVERSIONS
712 #define HEADER1 scratch
713 #define SCRATCHSIZE 21
714 CURLcode res;
715 char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
716 /* We can't really convert this yet because we
717 don't know if it's the 1st header line or the body.
718 So we do a partial conversion into a scratch area,
719 leaving the data at k->p as-is.
721 strncpy(&scratch[0], k->p, SCRATCHSIZE);
722 scratch[SCRATCHSIZE] = 0; /* null terminate */
723 res = Curl_convert_from_network(data,
724 &scratch[0],
725 SCRATCHSIZE);
726 if (CURLE_OK != res) {
727 /* Curl_convert_from_network calls failf if unsuccessful */
728 return res;
730 #else
731 #define HEADER1 k->p /* no conversion needed, just use k->p */
732 #endif /* CURL_DOES_CONVERSIONS */
734 nc = sscanf(HEADER1,
735 " HTTP/%d.%d %3d",
736 &httpversion_major,
737 &k->httpversion,
738 &k->httpcode);
739 if (nc==3) {
740 k->httpversion += 10 * httpversion_major;
742 else {
743 /* this is the real world, not a Nirvana
744 NCSA 1.5.x returns this crap when asked for HTTP/1.1
746 nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
747 k->httpversion = 10;
749 /* If user has set option HTTP200ALIASES,
750 compare header line against list of aliases
752 if (!nc) {
753 if (checkhttpprefix(data, k->p)) {
754 nc = 1;
755 k->httpcode = 200;
756 k->httpversion =
757 (data->set.httpversion==CURL_HTTP_VERSION_1_0)? 10 : 11;
762 if (nc) {
763 data->info.httpcode = k->httpcode;
764 data->info.httpversion = k->httpversion;
767 * This code executes as part of processing the header. As a
768 * result, it's not totally clear how to interpret the
769 * response code yet as that depends on what other headers may
770 * be present. 401 and 407 may be errors, but may be OK
771 * depending on how authentication is working. Other codes
772 * are definitely errors, so give up here.
774 if (data->set.http_fail_on_error && (k->httpcode >= 400) &&
775 ((k->httpcode != 401) || !data->set.userpwd) &&
776 ((k->httpcode != 407) || !data->set.proxyuserpwd) ) {
778 if (data->reqdata.resume_from &&
779 (data->set.httpreq==HTTPREQ_GET) &&
780 (k->httpcode == 416)) {
781 /* "Requested Range Not Satisfiable", just proceed and
782 pretend this is no error */
784 else {
785 /* serious error, go home! */
786 failf (data, "The requested URL returned error: %d",
787 k->httpcode);
788 return CURLE_HTTP_RETURNED_ERROR;
792 if(k->httpversion == 10)
793 /* Default action for HTTP/1.0 must be to close, unless
794 we get one of those fancy headers that tell us the
795 server keeps it open for us! */
796 conn->bits.close = TRUE;
798 switch(k->httpcode) {
799 case 204:
800 /* (quote from RFC2616, section 10.2.5): The server has
801 * fulfilled the request but does not need to return an
802 * entity-body ... The 204 response MUST NOT include a
803 * message-body, and thus is always terminated by the first
804 * empty line after the header fields. */
805 /* FALLTHROUGH */
806 case 416: /* Requested Range Not Satisfiable, it has the
807 Content-Length: set as the "real" document but no
808 actual response is sent. */
809 case 304:
810 /* (quote from RFC2616, section 10.3.5): The 304 response
811 * MUST NOT contain a message-body, and thus is always
812 * terminated by the first empty line after the header
813 * fields. */
814 k->size=0;
815 k->maxdownload=0;
816 k->ignorecl = TRUE; /* ignore Content-Length headers */
817 break;
818 default:
819 /* nothing */
820 break;
823 else {
824 k->header = FALSE; /* this is not a header line */
825 break;
829 #ifdef CURL_DOES_CONVERSIONS
830 /* convert from the network encoding */
831 result = Curl_convert_from_network(data, k->p, strlen(k->p));
832 if (CURLE_OK != result) {
833 return(result);
835 /* Curl_convert_from_network calls failf if unsuccessful */
836 #endif /* CURL_DOES_CONVERSIONS */
838 /* Check for Content-Length: header lines to get size. Ignore
839 the header completely if we get a 416 response as then we're
840 resuming a document that we don't get, and this header contains
841 info about the true size of the document we didn't get now. */
842 if (!k->ignorecl && !data->set.ignorecl &&
843 checkprefix("Content-Length:", k->p)) {
844 contentlength = curlx_strtoofft(k->p+15, NULL, 10);
845 if (data->set.max_filesize &&
846 contentlength > data->set.max_filesize) {
847 failf(data, "Maximum file size exceeded");
848 return CURLE_FILESIZE_EXCEEDED;
850 if(contentlength >= 0) {
851 k->size = contentlength;
852 k->maxdownload = k->size;
854 else {
855 /* Negative Content-Length is really odd, and we know it
856 happens for example when older Apache servers send large
857 files */
858 conn->bits.close = TRUE;
859 infof(data, "Negative content-length: %" FORMAT_OFF_T
860 ", closing after transfer\n", contentlength);
863 /* check for Content-Type: header lines to get the mime-type */
864 else if (checkprefix("Content-Type:", k->p)) {
865 char *start;
866 char *end;
867 size_t len;
869 /* Find the first non-space letter */
870 for(start=k->p+13;
871 *start && ISSPACE(*start);
872 start++)
873 ; /* empty loop */
875 /* data is now in the host encoding so
876 use '\r' and '\n' instead of 0x0d and 0x0a */
877 end = strchr(start, '\r');
878 if(!end)
879 end = strchr(start, '\n');
881 if(end) {
882 /* skip all trailing space letters */
883 for(; ISSPACE(*end) && (end > start); end--)
884 ; /* empty loop */
886 /* get length of the type */
887 len = end-start+1;
889 /* allocate memory of a cloned copy */
890 Curl_safefree(data->info.contenttype);
892 data->info.contenttype = malloc(len + 1);
893 if (NULL == data->info.contenttype)
894 return CURLE_OUT_OF_MEMORY;
896 /* copy the content-type string */
897 memcpy(data->info.contenttype, start, len);
898 data->info.contenttype[len] = 0; /* zero terminate */
901 #ifndef CURL_DISABLE_HTTP
902 else if((k->httpversion == 10) &&
903 conn->bits.httpproxy &&
904 Curl_compareheader(k->p,
905 "Proxy-Connection:", "keep-alive")) {
907 * When a HTTP/1.0 reply comes when using a proxy, the
908 * 'Proxy-Connection: keep-alive' line tells us the
909 * connection will be kept alive for our pleasure.
910 * Default action for 1.0 is to close.
912 conn->bits.close = FALSE; /* don't close when done */
913 infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
915 else if((k->httpversion == 11) &&
916 conn->bits.httpproxy &&
917 Curl_compareheader(k->p,
918 "Proxy-Connection:", "close")) {
920 * We get a HTTP/1.1 response from a proxy and it says it'll
921 * close down after this transfer.
923 conn->bits.close = TRUE; /* close when done */
924 infof(data, "HTTP/1.1 proxy connection set close!\n");
926 else if((k->httpversion == 10) &&
927 Curl_compareheader(k->p, "Connection:", "keep-alive")) {
929 * A HTTP/1.0 reply with the 'Connection: keep-alive' line
930 * tells us the connection will be kept alive for our
931 * pleasure. Default action for 1.0 is to close.
933 * [RFC2068, section 19.7.1] */
934 conn->bits.close = FALSE; /* don't close when done */
935 infof(data, "HTTP/1.0 connection set to keep alive!\n");
937 else if (Curl_compareheader(k->p, "Connection:", "close")) {
939 * [RFC 2616, section 8.1.2.1]
940 * "Connection: close" is HTTP/1.1 language and means that
941 * the connection will close when this request has been
942 * served.
944 conn->bits.close = TRUE; /* close when done */
946 else if (Curl_compareheader(k->p,
947 "Transfer-Encoding:", "chunked")) {
949 * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
950 * means that the server will send a series of "chunks". Each
951 * chunk starts with line with info (including size of the
952 * coming block) (terminated with CRLF), then a block of data
953 * with the previously mentioned size. There can be any amount
954 * of chunks, and a chunk-data set to zero signals the
955 * end-of-chunks. */
956 conn->bits.chunk = TRUE; /* chunks coming our way */
958 /* init our chunky engine */
959 Curl_httpchunk_init(conn);
962 else if (checkprefix("Trailer:", k->p) ||
963 checkprefix("Trailers:", k->p)) {
965 * This test helps Curl_httpchunk_read() to determine to look
966 * for well formed trailers after the zero chunksize record. In
967 * this case a CRLF is required after the zero chunksize record
968 * when no trailers are sent, or after the last trailer record.
970 * It seems both Trailer: and Trailers: occur in the wild.
972 conn->bits.trailerHdrPresent = TRUE;
975 else if (checkprefix("Content-Encoding:", k->p) &&
976 data->set.encoding) {
978 * Process Content-Encoding. Look for the values: identity,
979 * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
980 * x-compress are the same as gzip and compress. (Sec 3.5 RFC
981 * 2616). zlib cannot handle compress. However, errors are
982 * handled further down when the response body is processed
984 char *start;
986 /* Find the first non-space letter */
987 for(start=k->p+17;
988 *start && ISSPACE(*start);
989 start++)
990 ; /* empty loop */
992 /* Record the content-encoding for later use */
993 if (checkprefix("identity", start))
994 k->content_encoding = IDENTITY;
995 else if (checkprefix("deflate", start))
996 k->content_encoding = DEFLATE;
997 else if (checkprefix("gzip", start)
998 || checkprefix("x-gzip", start))
999 k->content_encoding = GZIP;
1000 else if (checkprefix("compress", start)
1001 || checkprefix("x-compress", start))
1002 k->content_encoding = COMPRESS;
1004 else if (checkprefix("Content-Range:", k->p)) {
1005 /* Content-Range: bytes [num]-
1006 Content-Range: bytes: [num]-
1007 Content-Range: [num]-
1009 The second format was added since Sun's webserver
1010 JavaWebServer/1.1.1 obviously sends the header this way!
1011 The third added since some servers use that!
1014 char *ptr = k->p + 14;
1016 /* Move forward until first digit */
1017 while(*ptr && !ISDIGIT(*ptr))
1018 ptr++;
1020 k->offset = curlx_strtoofft(ptr, NULL, 10);
1022 if (data->reqdata.resume_from == k->offset)
1023 /* we asked for a resume and we got it */
1024 k->content_range = TRUE;
1026 #if !defined(CURL_DISABLE_COOKIES)
1027 else if(data->cookies &&
1028 checkprefix("Set-Cookie:", k->p)) {
1029 Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
1030 CURL_LOCK_ACCESS_SINGLE);
1031 Curl_cookie_add(data,
1032 data->cookies, TRUE, k->p+11,
1033 /* If there is a custom-set Host: name, use it
1034 here, or else use real peer host name. */
1035 conn->allocptr.cookiehost?
1036 conn->allocptr.cookiehost:conn->host.name,
1037 data->reqdata.path);
1038 Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
1040 #endif
1041 else if(checkprefix("Last-Modified:", k->p) &&
1042 (data->set.timecondition || data->set.get_filetime) ) {
1043 time_t secs=time(NULL);
1044 k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
1045 &secs);
1046 if(data->set.get_filetime)
1047 data->info.filetime = (long)k->timeofdoc;
1049 else if((checkprefix("WWW-Authenticate:", k->p) &&
1050 (401 == k->httpcode)) ||
1051 (checkprefix("Proxy-authenticate:", k->p) &&
1052 (407 == k->httpcode))) {
1053 result = Curl_http_input_auth(conn, k->httpcode, k->p);
1054 if(result)
1055 return result;
1057 else if ((k->httpcode >= 300 && k->httpcode < 400) &&
1058 checkprefix("Location:", k->p)) {
1059 if(data->set.http_follow_location) {
1060 /* this is the URL that the server advices us to get instead */
1061 char *ptr;
1062 char *start=k->p;
1063 char backup;
1065 start += 9; /* pass "Location:" */
1067 /* Skip spaces and tabs. We do this to support multiple
1068 white spaces after the "Location:" keyword. */
1069 while(*start && ISSPACE(*start ))
1070 start++;
1072 /* Scan through the string from the end to find the last
1073 non-space. k->end_ptr points to the actual terminating zero
1074 letter, move pointer one letter back and start from
1075 there. This logic strips off trailing whitespace, but keeps
1076 any embedded whitespace. */
1077 ptr = k->end_ptr-1;
1078 while((ptr>=start) && ISSPACE(*ptr))
1079 ptr--;
1080 ptr++;
1082 backup = *ptr; /* store the ending letter */
1083 if(ptr != start) {
1084 *ptr = '\0'; /* zero terminate */
1085 data->reqdata.newurl = strdup(start); /* clone string */
1086 *ptr = backup; /* restore ending letter */
1087 if(!data->reqdata.newurl)
1088 return CURLE_OUT_OF_MEMORY;
1092 #endif /* CURL_DISABLE_HTTP */
1095 * End of header-checks. Write them to the client.
1098 writetype = CLIENTWRITE_HEADER;
1099 if (data->set.include_header)
1100 writetype |= CLIENTWRITE_BODY;
1102 if(data->set.verbose)
1103 Curl_debug(data, CURLINFO_HEADER_IN,
1104 k->p, (size_t)k->hbuflen, conn);
1106 result = Curl_client_write(conn, writetype, k->p, k->hbuflen);
1107 if(result)
1108 return result;
1110 data->info.header_size += (long)k->hbuflen;
1111 conn->headerbytecount += (long)k->hbuflen;
1113 /* reset hbufp pointer && hbuflen */
1114 k->hbufp = data->state.headerbuff;
1115 k->hbuflen = 0;
1117 while (!stop_reading && *k->str); /* header line within buffer */
1119 if(stop_reading)
1120 /* We've stopped dealing with input, get out of the do-while loop */
1121 break;
1123 /* We might have reached the end of the header part here, but
1124 there might be a non-header part left in the end of the read
1125 buffer. */
1127 } /* end if header mode */
1129 /* This is not an 'else if' since it may be a rest from the header
1130 parsing, where the beginning of the buffer is headers and the end
1131 is non-headers. */
1132 if (k->str && !k->header && (nread > 0 || is_empty_data)) {
1134 if(0 == k->bodywrites && !is_empty_data) {
1135 /* These checks are only made the first time we are about to
1136 write a piece of the body */
1137 if(conn->protocol&PROT_HTTP) {
1138 /* HTTP-only checks */
1140 if (data->reqdata.newurl) {
1141 if(conn->bits.close) {
1142 /* Abort after the headers if "follow Location" is set
1143 and we're set to close anyway. */
1144 k->keepon &= ~KEEP_READ;
1145 *done = TRUE;
1146 return CURLE_OK;
1148 /* We have a new url to load, but since we want to be able
1149 to re-use this connection properly, we read the full
1150 response in "ignore more" */
1151 k->ignorebody = TRUE;
1152 infof(data, "Ignoring the response-body\n");
1154 if (data->reqdata.resume_from && !k->content_range &&
1155 (data->set.httpreq==HTTPREQ_GET) &&
1156 !k->ignorebody) {
1157 /* we wanted to resume a download, although the server doesn't
1158 * seem to support this and we did this with a GET (if it
1159 * wasn't a GET we did a POST or PUT resume) */
1160 failf(data, "HTTP server doesn't seem to support "
1161 "byte ranges. Cannot resume.");
1162 return CURLE_HTTP_RANGE_ERROR;
1165 if(data->set.timecondition && !data->reqdata.range) {
1166 /* A time condition has been set AND no ranges have been
1167 requested. This seems to be what chapter 13.3.4 of
1168 RFC 2616 defines to be the correct action for a
1169 HTTP/1.1 client */
1170 if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
1171 switch(data->set.timecondition) {
1172 case CURL_TIMECOND_IFMODSINCE:
1173 default:
1174 if(k->timeofdoc < data->set.timevalue) {
1175 infof(data,
1176 "The requested document is not new enough\n");
1177 *done = TRUE;
1178 return CURLE_OK;
1180 break;
1181 case CURL_TIMECOND_IFUNMODSINCE:
1182 if(k->timeofdoc > data->set.timevalue) {
1183 infof(data,
1184 "The requested document is not old enough\n");
1185 *done = TRUE;
1186 return CURLE_OK;
1188 break;
1189 } /* switch */
1190 } /* two valid time strings */
1191 } /* we have a time condition */
1193 } /* this is HTTP */
1194 } /* this is the first time we write a body part */
1195 k->bodywrites++;
1197 /* pass data to the debug function before it gets "dechunked" */
1198 if(data->set.verbose) {
1199 if(k->badheader) {
1200 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
1201 (size_t)k->hbuflen, conn);
1202 if(k->badheader == HEADER_PARTHEADER)
1203 Curl_debug(data, CURLINFO_DATA_IN,
1204 k->str, (size_t)nread, conn);
1206 else
1207 Curl_debug(data, CURLINFO_DATA_IN,
1208 k->str, (size_t)nread, conn);
1211 #ifndef CURL_DISABLE_HTTP
1212 if(conn->bits.chunk) {
1214 * Bless me father for I have sinned. Here comes a chunked
1215 * transfer flying and we need to decode this properly. While
1216 * the name says read, this function both reads and writes away
1217 * the data. The returned 'nread' holds the number of actual
1218 * data it wrote to the client. */
1220 CHUNKcode res =
1221 Curl_httpchunk_read(conn, k->str, nread, &nread);
1223 if(CHUNKE_OK < res) {
1224 if(CHUNKE_WRITE_ERROR == res) {
1225 failf(data, "Failed writing data");
1226 return CURLE_WRITE_ERROR;
1228 failf(data, "Received problem %d in the chunky parser", res);
1229 return CURLE_RECV_ERROR;
1231 else if(CHUNKE_STOP == res) {
1232 /* we're done reading chunks! */
1233 k->keepon &= ~KEEP_READ; /* read no more */
1235 /* There are now possibly N number of bytes at the end of the
1236 str buffer that weren't written to the client, but we don't
1237 care about them right now. */
1239 /* If it returned OK, we just keep going */
1241 #endif /* CURL_DISABLE_HTTP */
1243 if((-1 != k->maxdownload) &&
1244 (k->bytecount + nread >= k->maxdownload)) {
1245 /* The 'excess' amount below can't be more than BUFSIZE which
1246 always will fit in a size_t */
1247 size_t excess = (size_t)(k->bytecount + nread - k->maxdownload);
1248 if (excess > 0 && !k->ignorebody) {
1249 infof(data,
1250 "Rewinding stream by : %d"
1251 " bytes on url %s (size = %" FORMAT_OFF_T
1252 ", maxdownload = %" FORMAT_OFF_T
1253 ", bytecount = %" FORMAT_OFF_T ", nread = %d)\n",
1254 excess, conn->data->reqdata.path,
1255 k->size, k->maxdownload, k->bytecount, nread);
1256 read_rewind(conn, excess);
1259 nread = (ssize_t) (k->maxdownload - k->bytecount);
1260 if(nread < 0 ) /* this should be unusual */
1261 nread = 0;
1263 k->keepon &= ~KEEP_READ; /* we're done reading */
1266 k->bytecount += nread;
1268 Curl_pgrsSetDownloadCounter(data, k->bytecount);
1270 if(!conn->bits.chunk && (nread || k->badheader || is_empty_data)) {
1271 /* If this is chunky transfer, it was already written */
1273 if(k->badheader && !k->ignorebody) {
1274 /* we parsed a piece of data wrongly assuming it was a header
1275 and now we output it as body instead */
1276 result = Curl_client_write(conn, CLIENTWRITE_BODY,
1277 data->state.headerbuff,
1278 k->hbuflen);
1279 if(result)
1280 return result;
1282 if(k->badheader < HEADER_ALLBAD) {
1283 /* This switch handles various content encodings. If there's an
1284 error here, be sure to check over the almost identical code
1285 in http_chunks.c.
1286 Make sure that ALL_CONTENT_ENCODINGS contains all the
1287 encodings handled here. */
1288 #ifdef HAVE_LIBZ
1289 switch (k->content_encoding) {
1290 case IDENTITY:
1291 #endif
1292 /* This is the default when the server sends no
1293 Content-Encoding header. See Curl_readwrite_init; the
1294 memset() call initializes k->content_encoding to zero. */
1295 if(!k->ignorebody)
1296 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
1297 nread);
1298 #ifdef HAVE_LIBZ
1299 break;
1301 case DEFLATE:
1302 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
1303 if(!k->ignorebody)
1304 result = Curl_unencode_deflate_write(conn, k, nread);
1305 break;
1307 case GZIP:
1308 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
1309 if(!k->ignorebody)
1310 result = Curl_unencode_gzip_write(conn, k, nread);
1311 break;
1313 case COMPRESS:
1314 default:
1315 failf (data, "Unrecognized content encoding type. "
1316 "libcurl understands `identity', `deflate' and `gzip' "
1317 "content encodings.");
1318 result = CURLE_BAD_CONTENT_ENCODING;
1319 break;
1321 #endif
1323 k->badheader = HEADER_NORMAL; /* taken care of now */
1325 if(result)
1326 return result;
1329 } /* if (! header and data to read ) */
1331 if (is_empty_data) {
1332 /* if we received nothing, the server closed the connection and we
1333 are done */
1334 k->keepon &= ~KEEP_READ;
1337 } while(data_pending(conn));
1339 } /* if( read from socket ) */
1341 /* If we still have writing to do, we check if we have a writable
1342 socket. */
1343 if((k->keepon & KEEP_WRITE) && (select_res & CSELECT_OUT)) {
1344 /* write */
1346 int i, si;
1347 ssize_t bytes_written;
1348 bool writedone=TRUE;
1350 if ((k->bytecount == 0) && (k->writebytecount == 0))
1351 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
1353 didwhat |= KEEP_WRITE;
1356 * We loop here to do the READ and SEND loop until we run out of
1357 * data to send or until we get EWOULDBLOCK back
1359 do {
1361 /* only read more data if there's no upload data already
1362 present in the upload buffer */
1363 if(0 == data->reqdata.upload_present) {
1364 /* init the "upload from here" pointer */
1365 data->reqdata.upload_fromhere = k->uploadbuf;
1367 if(!k->upload_done) {
1368 /* HTTP pollution, this should be written nicer to become more
1369 protocol agnostic. */
1370 int fillcount;
1372 if(k->wait100_after_headers &&
1373 (data->reqdata.proto.http->sending == HTTPSEND_BODY)) {
1374 /* If this call is to send body data, we must take some action:
1375 We have sent off the full HTTP 1.1 request, and we shall now
1376 go into the Expect: 100 state and await such a header */
1377 k->wait100_after_headers = FALSE; /* headers sent */
1378 k->write_after_100_header = TRUE; /* wait for the header */
1379 k->keepon &= ~KEEP_WRITE; /* disable writing */
1380 k->start100 = Curl_tvnow(); /* timeout count starts now */
1381 didwhat &= ~KEEP_WRITE; /* we didn't write anything actually */
1382 break;
1385 result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
1386 if(result)
1387 return result;
1389 nread = (ssize_t)fillcount;
1391 else
1392 nread = 0; /* we're done uploading/reading */
1394 /* the signed int typecast of nread is for systems that have an
1395 unsigned size_t */
1396 if (nread<=0) {
1397 /* done */
1398 k->keepon &= ~KEEP_WRITE; /* we're done writing */
1399 writedone = TRUE;
1401 if(conn->bits.rewindaftersend) {
1402 result = Curl_readrewind(conn);
1403 if(result)
1404 return result;
1406 break;
1409 /* store number of bytes available for upload */
1410 data->reqdata.upload_present = nread;
1412 /* convert LF to CRLF if so asked */
1413 #ifdef CURL_DO_LINEEND_CONV
1414 /* always convert if we're FTPing in ASCII mode */
1415 if ((data->set.crlf) || (data->set.prefer_ascii)) {
1416 #else
1417 if (data->set.crlf) {
1418 #endif /* CURL_DO_LINEEND_CONV */
1419 if(data->state.scratch == NULL)
1420 data->state.scratch = malloc(2*BUFSIZE);
1421 if(data->state.scratch == NULL) {
1422 failf (data, "Failed to alloc scratch buffer!");
1423 return CURLE_OUT_OF_MEMORY;
1426 * ASCII/EBCDIC Note: This is presumably a text (not binary)
1427 * transfer so the data should already be in ASCII.
1428 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
1429 * must be used instead of the escape sequences \r & \n.
1431 for(i = 0, si = 0; i < nread; i++, si++) {
1432 if (data->reqdata.upload_fromhere[i] == 0x0a) {
1433 data->state.scratch[si++] = 0x0d;
1434 data->state.scratch[si] = 0x0a;
1435 if (!data->set.crlf) {
1436 /* we're here only because FTP is in ASCII mode...
1437 bump infilesize for the LF we just added */
1438 data->set.infilesize++;
1441 else
1442 data->state.scratch[si] = data->reqdata.upload_fromhere[i];
1444 if(si != nread) {
1445 /* only perform the special operation if we really did replace
1446 anything */
1447 nread = si;
1449 /* upload from the new (replaced) buffer instead */
1450 data->reqdata.upload_fromhere = data->state.scratch;
1452 /* set the new amount too */
1453 data->reqdata.upload_present = nread;
1457 else {
1458 /* We have a partial buffer left from a previous "round". Use
1459 that instead of reading more data */
1462 /* write to socket (send away data) */
1463 result = Curl_write(conn,
1464 conn->writesockfd, /* socket to send to */
1465 data->reqdata.upload_fromhere, /* buffer pointer */
1466 data->reqdata.upload_present, /* buffer size */
1467 &bytes_written); /* actually send away */
1468 if(result)
1469 return result;
1471 if(data->set.verbose)
1472 /* show the data before we change the pointer upload_fromhere */
1473 Curl_debug(data, CURLINFO_DATA_OUT, data->reqdata.upload_fromhere,
1474 (size_t)bytes_written, conn);
1476 if(data->reqdata.upload_present != bytes_written) {
1477 /* we only wrote a part of the buffer (if anything), deal with it! */
1479 /* store the amount of bytes left in the buffer to write */
1480 data->reqdata.upload_present -= bytes_written;
1482 /* advance the pointer where to find the buffer when the next send
1483 is to happen */
1484 data->reqdata.upload_fromhere += bytes_written;
1486 writedone = TRUE; /* we are done, stop the loop */
1488 else {
1489 /* we've uploaded that buffer now */
1490 data->reqdata.upload_fromhere = k->uploadbuf;
1491 data->reqdata.upload_present = 0; /* no more bytes left */
1493 if(k->upload_done) {
1494 /* switch off writing, we're done! */
1495 k->keepon &= ~KEEP_WRITE; /* we're done writing */
1496 writedone = TRUE;
1500 k->writebytecount += bytes_written;
1501 Curl_pgrsSetUploadCounter(data, k->writebytecount);
1503 } while(!writedone); /* loop until we're done writing! */
1507 } while(0); /* just to break out from! */
1509 k->now = Curl_tvnow();
1510 if(didwhat) {
1511 /* Update read/write counters */
1512 if(k->bytecountp)
1513 *k->bytecountp = k->bytecount; /* read count */
1514 if(k->writebytecountp)
1515 *k->writebytecountp = k->writebytecount; /* write count */
1517 else {
1518 /* no read no write, this is a timeout? */
1519 if (k->write_after_100_header) {
1520 /* This should allow some time for the header to arrive, but only a
1521 very short time as otherwise it'll be too much wasted times too
1522 often. */
1524 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1526 Therefore, when a client sends this header field to an origin server
1527 (possibly via a proxy) from which it has never seen a 100 (Continue)
1528 status, the client SHOULD NOT wait for an indefinite period before
1529 sending the request body.
1533 long ms = Curl_tvdiff(k->now, k->start100);
1534 if(ms > CURL_TIMEOUT_EXPECT_100) {
1535 /* we've waited long enough, continue anyway */
1536 k->write_after_100_header = FALSE;
1537 k->keepon |= KEEP_WRITE;
1542 if(Curl_pgrsUpdate(conn))
1543 result = CURLE_ABORTED_BY_CALLBACK;
1544 else
1545 result = Curl_speedcheck(data, k->now);
1546 if (result)
1547 return result;
1549 if (data->set.timeout &&
1550 ((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
1551 if (k->size != -1) {
1552 failf(data, "Operation timed out after %d seconds with %"
1553 FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
1554 data->set.timeout, k->bytecount, k->size);
1555 } else {
1556 failf(data, "Operation timed out after %d seconds with %"
1557 FORMAT_OFF_T " bytes received",
1558 data->set.timeout, k->bytecount);
1560 return CURLE_OPERATION_TIMEOUTED;
1563 if(!k->keepon) {
1565 * The transfer has been performed. Just make some general checks before
1566 * returning.
1569 if(!(conn->bits.no_body) && (k->size != -1) &&
1570 (k->bytecount != k->size) &&
1571 #ifdef CURL_DO_LINEEND_CONV
1572 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1573 so we'll check to see if the discrepancy can be explained
1574 by the number of CRLFs we've changed to LFs.
1576 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1577 #endif /* CURL_DO_LINEEND_CONV */
1578 !data->reqdata.newurl) {
1579 failf(data, "transfer closed with %" FORMAT_OFF_T
1580 " bytes remaining to read",
1581 k->size - k->bytecount);
1582 return CURLE_PARTIAL_FILE;
1584 else if(!(conn->bits.no_body) &&
1585 conn->bits.chunk &&
1586 (data->reqdata.proto.http->chunk.state != CHUNK_STOP)) {
1588 * In chunked mode, return an error if the connection is closed prior to
1589 * the empty (terminating) chunk is read.
1591 * The condition above used to check for
1592 * conn->proto.http->chunk.datasize != 0 which is true after reading
1593 * *any* chunk, not just the empty chunk.
1596 failf(data, "transfer closed with outstanding read data remaining");
1597 return CURLE_PARTIAL_FILE;
1599 if(Curl_pgrsUpdate(conn))
1600 return CURLE_ABORTED_BY_CALLBACK;
1603 /* Now update the "done" boolean we return */
1604 *done = (bool)(0 == (k->keepon&(KEEP_READ|KEEP_WRITE)));
1606 return CURLE_OK;
1611 * Curl_readwrite_init() initializes the readwrite session. This is done once for each
1612 * transfer, sometimes multiple times on the same SessionHandle
/*
 * Curl_readwrite_init() resets the per-transfer bookkeeping kept in
 * data->reqdata.keep before a transfer starts. It may run several times on
 * the same SessionHandle (e.g. when following redirects). Always returns
 * CURLE_OK.
 */
1615 CURLcode Curl_readwrite_init(struct connectdata *conn)
1617 struct SessionHandle *data = conn->data;
1618 struct Curl_transfer_keeper *k = &data->reqdata.keep;
1620 /* NB: the content encoding software depends on this initialization of
1621 Curl_transfer_keeper.*/
1622 memset(k, 0, sizeof(struct Curl_transfer_keeper));
1624 k->start = Curl_tvnow(); /* start time */
1625 k->now = k->start; /* current time is now */
1626 k->header = TRUE; /* assume header */
1627 k->httpversion = -1; /* unknown at this point */
/* copy the expected transfer sizes and the caller's byte-counter output
   pointers from the request data into the keeper */
1629 k->size = data->reqdata.size;
1630 k->maxdownload = data->reqdata.maxdownload;
1631 k->bytecountp = data->reqdata.bytecountp;
1632 k->writebytecountp = data->reqdata.writebytecountp;
1634 k->bytecount = 0;
1636 k->buf = data->state.buffer;
1637 k->uploadbuf = data->state.uploadbuffer;
/* highest-numbered descriptor plus one, as select() wants it */
1638 k->maxfd = (conn->sockfd>conn->writesockfd?
1639 conn->sockfd:conn->writesockfd)+1;
1640 k->hbufp = data->state.headerbuff;
1641 k->ignorebody=FALSE;
/* restart the progress meter and the transfer-speed bookkeeping */
1643 Curl_pgrsTime(data, TIMER_PRETRANSFER);
1644 Curl_speedinit(data);
1646 Curl_pgrsSetUploadCounter(data, 0);
1647 Curl_pgrsSetDownloadCounter(data, 0);
/* protocols that deliver no header (e.g. FTP data) go straight to body mode
   and can announce the download size up front when it is known */
1649 if (!conn->bits.getheader) {
1650 k->header = FALSE;
1651 if(k->size > 0)
1652 Curl_pgrsSetDownloadSize(data, k->size);
1654 /* we want header and/or body, if neither then don't do this! */
1655 if(conn->bits.getheader || !conn->bits.no_body) {
1657 if(conn->sockfd != CURL_SOCKET_BAD) {
1658 k->keepon |= KEEP_READ;
1661 if(conn->writesockfd != CURL_SOCKET_BAD) {
1662 /* HTTP 1.1 magic:
1664 Even if we require a 100-return code before uploading data, we might
1665 need to write data before that since the REQUEST may not have been
1666 finished sent off just yet.
1668 Thus, we must check if the request has been sent before we set the
1669 state info where we wait for the 100-return code
1671 if (data->state.expect100header &&
1672 (data->reqdata.proto.http->sending == HTTPSEND_BODY)) {
1673 /* wait with write until we either got 100-continue or a timeout */
1674 k->write_after_100_header = TRUE;
1675 k->start100 = k->start;
1677 else {
1678 if(data->state.expect100header)
1679 /* when we've sent off the rest of the headers, we must await a
1680 100-continue */
1681 k->wait100_after_headers = TRUE;
1682 k->keepon |= KEEP_WRITE;
1687 return CURLE_OK;
1691 * Curl_single_getsock() gets called by the multi interface code when the app
1692 * has requested to get the sockets for the current connection. This function
1693 * will then be called once for every connection that the multi interface
1694 * keeps track of. This function will only be called for connections that are
1695 * in the proper state to have this information available.
1697 int Curl_single_getsock(struct connectdata *conn,
1698 curl_socket_t *sock, /* points to numsocks number
1699 of sockets */
1700 int numsocks)
1702 struct SessionHandle *data = conn->data;
1703 int bitmap = GETSOCK_BLANK;
1704 int index = 0;
1706 if(numsocks < 2)
1707 /* simple check but we might need two slots */
1708 return GETSOCK_BLANK;
1710 if(data->reqdata.keep.keepon & KEEP_READ) {
1711 bitmap |= GETSOCK_READSOCK(index);
1712 sock[index] = conn->sockfd;
1715 if(data->reqdata.keep.keepon & KEEP_WRITE) {
1717 if((conn->sockfd != conn->writesockfd) ||
1718 !(data->reqdata.keep.keepon & KEEP_READ)) {
1719 /* only if they are not the same socket or we didn't have a readable
1720 one, we increase index */
1721 if(data->reqdata.keep.keepon & KEEP_READ)
1722 index++; /* increase index if we need two entries */
1723 sock[index] = conn->writesockfd;
1726 bitmap |= GETSOCK_WRITESOCK(index);
1729 return bitmap;
1734 * Transfer()
1736 * This function is what performs the actual transfer. It is capable of
1737 * doing both ways simultaneously.
1738 * The transfer must already have been setup by a call to Curl_setup_transfer().
1740 * Note that headers are created in a preallocated buffer of a default size.
1741 * That buffer can be enlarged on demand, but it is never shrunken again.
1743 * Parts of this function was once written by the friendly Mark Butler
1744 * <butlerm@xmission.com>.
/*
 * Transfer() performs the actual data transfer for the easy interface: it
 * loops, waiting for socket readiness with Curl_select() and then letting
 * Curl_readwrite() move data in either (or both) directions, until the
 * transfer is done or an error occurs. The transfer must already have been
 * set up by a call to Curl_setup_transfer().
 */
1747 static CURLcode
1748 Transfer(struct connectdata *conn)
1750 CURLcode result;
1751 struct SessionHandle *data = conn->data;
1752 struct Curl_transfer_keeper *k = &data->reqdata.keep;
1753 bool done=FALSE;
1755 if(!(conn->protocol & PROT_FILE))
1756 /* Only do this if we are not transferring FILE:, since the file: treatment
1757 is different*/
1758 Curl_readwrite_init(conn);
1760 if((conn->sockfd == CURL_SOCKET_BAD) &&
1761 (conn->writesockfd == CURL_SOCKET_BAD))
1762 /* nothing to read, nothing to write, we're already OK! */
1763 return CURLE_OK;
1765 /* we want header and/or body, if neither then don't do this! */
1766 if(!conn->bits.getheader && conn->bits.no_body)
1767 return CURLE_OK;
1769 while (!done) {
1770 curl_socket_t fd_read;
1771 curl_socket_t fd_write;
1773 /* limit-rate logic: if speed exceeds threshold, then do not include fd in
1774 select set. The current speed is recalculated in each Curl_readwrite()
1775 call */
1776 if ((k->keepon & KEEP_WRITE) &&
1777 (!data->set.max_send_speed ||
1778 (data->progress.ulspeed < data->set.max_send_speed) )) {
1779 fd_write = conn->writesockfd;
1780 k->keepon &= ~KEEP_WRITE_HOLD;
1782 else {
1783 fd_write = CURL_SOCKET_BAD;
1784 if(k->keepon & KEEP_WRITE)
1785 k->keepon |= KEEP_WRITE_HOLD; /* hold it */
1788 if ((k->keepon & KEEP_READ) &&
1789 (!data->set.max_recv_speed ||
1790 (data->progress.dlspeed < data->set.max_recv_speed)) ) {
1791 fd_read = conn->sockfd;
1792 k->keepon &= ~KEEP_READ_HOLD;
1794 else {
1795 fd_read = CURL_SOCKET_BAD;
1796 if(k->keepon & KEEP_READ)
1797 k->keepon |= KEEP_READ_HOLD; /* hold it */
1800 /* The *_HOLD logic is necessary since even though there might be no
1801 traffic during the select interval, we still call Curl_readwrite() for
1802 the timeout case and if we limit transfer speed we must make sure that
1803 this function doesn't transfer anything while in HOLD status. */
/* wait at most 1000 ms for one of the sockets to become ready */
1805 switch (Curl_select(fd_read, fd_write, 1000)) {
1806 case -1: /* select() error, stop reading */
1807 #ifdef EINTR
1808 /* The EINTR is not serious, and it seems you might get this more
1809 often when using the lib in a multi-threaded environment! */
1810 if(errno == EINTR)
1812 else
1813 #endif
1814 done = TRUE; /* no more read or write */
1815 continue;
1816 case 0: /* timeout */
1817 default: /* readable descriptors */
/* a timeout also reaches Curl_readwrite() so that transfer timeouts and
   progress callbacks keep firing even on an idle connection */
1819 result = Curl_readwrite(conn, &done);
1820 break;
1822 if(result)
1823 return result;
1825 /* "done" signals to us if the transfer(s) are ready */
1828 return CURLE_OK;
1832 * Curl_pretransfer() is called immediately before a transfer starts.
/*
 * Curl_pretransfer() is called immediately before a transfer starts: it
 * validates that a URL is set, primes the SSL session ID cache, resets the
 * per-transfer follow/auth/progress state, loads any pending cookie files
 * and (optionally) tells the process to ignore SIGPIPE for the duration.
 */
1834 CURLcode Curl_pretransfer(struct SessionHandle *data)
1836 CURLcode res;
1837 if(!data->change.url) {
1838 /* we can't do anything without URL */
1839 failf(data, "No URL set!\n");
1840 return CURLE_URL_MALFORMAT;
1843 /* Init the SSL session ID cache here. We do it here since we want to do it
1844 after the *_setopt() calls (that could change the size of the cache) but
1845 before any transfer takes place. */
1846 res = Curl_ssl_initsessions(data, data->set.ssl.numsessions);
1847 if(res)
1848 return res;
1850 data->set.followlocation=0; /* reset the location-follow counter */
1851 data->state.this_is_a_follow = FALSE; /* reset this */
1852 data->state.errorbuf = FALSE; /* no error has occurred */
/* start over with the authentication schemes the application asked for */
1854 data->state.authproblem = FALSE;
1855 data->state.authhost.want = data->set.httpauth;
1856 data->state.authproxy.want = data->set.proxyauth;
1858 /* If there is a list of cookie files to read, do it now! */
1859 if(data->change.cookielist) {
1860 Curl_cookie_loadfiles(data);
1863 /* Allow data->set.use_port to set which port to use. This needs to be
1864 * disabled for example when we follow Location: headers to URLs using
1865 * different ports! */
1866 data->state.allow_port = TRUE;
1868 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1869 /*************************************************************
1870 * Tell signal handler to ignore SIGPIPE
1871 *************************************************************/
1872 if(!data->set.no_signal)
/* the previous handler is saved so Curl_posttransfer() can restore it */
1873 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1874 #endif
1876 Curl_initinfo(data); /* reset session-specific information "variables" */
1877 Curl_pgrsStartNow(data);
1879 return CURLE_OK;
1883 * Curl_posttransfer() is called immediately after a transfer ends
/*
 * Curl_posttransfer() is called immediately after a transfer ends: it
 * restores the SIGPIPE handler saved by Curl_pretransfer() and, when the
 * built-in progress meter was shown, emits a final newline so the shell
 * prompt does not land on the meter line.
 */
1885 CURLcode Curl_posttransfer(struct SessionHandle *data)
1887 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1888 /* restore the signal handler for SIGPIPE before we get back */
1889 if(!data->set.no_signal)
1890 signal(SIGPIPE, data->state.prev_signal);
1891 #else
1892 (void)data; /* unused parameter */
1893 #endif
1895 if(!(data->progress.flags & PGRS_HIDE) &&
1896 !data->progress.callback)
1897 /* only output if we don't use a progress callback and we're not hidden */
1898 fprintf(data->set.err, "\n");
1900 return CURLE_OK;
/*
 * strlen_url() returns the length the given URL would have if its space
 * characters were properly URL encoded: a space left of the first '?' grows
 * into "%20" (3 letters), while a space right of it becomes a single '+'.
 * All other letters count as one. The input is only read, never modified.
 */
static int strlen_url(const char *url)
{
  const char *ptr;
  int newlen = 0;
  int left = 1; /* nonzero while still left of the first '?' */

  for(ptr = url; *ptr; ptr++) {
    if('?' == *ptr)
      left = 0; /* the '?' itself is counted as a plain letter below */

    if(' ' == *ptr)
      newlen += left ? 3 : 1; /* "%20" or '+' */
    else
      newlen++;
  }
  return newlen;
}
/*
 * strcpy_url() copies a url to an output buffer while URL encoding its
 * spaces: "%20" to the left of the first '?' and '+' to the right of it,
 * matching the length computed by strlen_url(). The output buffer must
 * therefore hold at least strlen_url(url) + 1 bytes. The output is always
 * zero terminated; the source string is only read, never modified.
 */
static void strcpy_url(char *output, const char *url)
{
  const char *iptr;
  char *optr = output;
  int left = 1; /* nonzero while still left of the first '?' */

  for(iptr = url; *iptr; iptr++) {
    if('?' == *iptr)
      left = 0; /* the '?' itself is copied verbatim below */

    if(' ' == *iptr) {
      if(left) {
        *optr++ = '%';
        *optr++ = '2';
        *optr++ = '0';
      }
      else
        *optr++ = '+';
    }
    else
      *optr++ = *iptr;
  }
  *optr = 0; /* zero terminate output buffer */
}
1965 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1966 * as given by the remote server and set up the new URL to request.
1968 CURLcode Curl_follow(struct SessionHandle *data,
1969 char *newurl, /* this 'newurl' is the Location: string,
1970 and it must be malloc()ed before passed
1971 here */
1972 bool retry) /* set TRUE if this is a request retry as
1973 opposed to a real redirect following */
1975 /* Location: redirect */
1976 char prot[16]; /* URL protocol string storage */
1977 char letter; /* used for a silly sscanf */
1978 size_t newlen;
1979 char *newest;
1981 if(!retry) {
1982 if ((data->set.maxredirs != -1) &&
1983 (data->set.followlocation >= data->set.maxredirs)) {
1984 failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
1985 return CURLE_TOO_MANY_REDIRECTS;
1988 /* mark the next request as a followed location: */
1989 data->state.this_is_a_follow = TRUE;
1991 data->set.followlocation++; /* count location-followers */
1994 if(data->set.http_auto_referer) {
1995 /* We are asked to automatically set the previous URL as the
1996 referer when we get the next URL. We pick the ->url field,
1997 which may or may not be 100% correct */
1999 if(data->change.referer_alloc)
2000 /* If we already have an allocated referer, free this first */
2001 free(data->change.referer);
2003 data->change.referer = strdup(data->change.url);
2004 data->change.referer_alloc = TRUE; /* yes, free this later */
2007 if(2 != sscanf(newurl, "%15[^?&/:]://%c", prot, &letter)) {
2008 /***
2009 *DANG* this is an RFC 2068 violation. The URL is supposed
2010 to be absolute and this doesn't seem to be that!
2012 Instead, we have to TRY to append this new path to the old URL
2013 to the right of the host part. Oh crap, this is doomed to cause
2014 problems in the future...
2016 char *protsep;
2017 char *pathsep;
2019 char *useurl = newurl;
2020 size_t urllen;
2022 /* we must make our own copy of the URL to play with, as it may
2023 point to read-only data */
2024 char *url_clone=strdup(data->change.url);
2026 if(!url_clone)
2027 return CURLE_OUT_OF_MEMORY; /* skip out of this NOW */
2029 /* protsep points to the start of the host name */
2030 protsep=strstr(url_clone, "//");
2031 if(!protsep)
2032 protsep=url_clone;
2033 else
2034 protsep+=2; /* pass the slashes */
2036 if('/' != newurl[0]) {
2037 int level=0;
2039 /* First we need to find out if there's a ?-letter in the URL,
2040 and cut it and the right-side of that off */
2041 pathsep = strchr(protsep, '?');
2042 if(pathsep)
2043 *pathsep=0;
2045 /* we have a relative path to append to the last slash if
2046 there's one available */
2047 pathsep = strrchr(protsep, '/');
2048 if(pathsep)
2049 *pathsep=0;
2051 /* Check if there's any slash after the host name, and if so,
2052 remember that position instead */
2053 pathsep = strchr(protsep, '/');
2054 if(pathsep)
2055 protsep = pathsep+1;
2056 else
2057 protsep = NULL;
2059 /* now deal with one "./" or any amount of "../" in the newurl
2060 and act accordingly */
2062 if((useurl[0] == '.') && (useurl[1] == '/'))
2063 useurl+=2; /* just skip the "./" */
2065 while((useurl[0] == '.') &&
2066 (useurl[1] == '.') &&
2067 (useurl[2] == '/')) {
2068 level++;
2069 useurl+=3; /* pass the "../" */
2072 if(protsep) {
2073 while(level--) {
2074 /* cut off one more level from the right of the original URL */
2075 pathsep = strrchr(protsep, '/');
2076 if(pathsep)
2077 *pathsep=0;
2078 else {
2079 *protsep=0;
2080 break;
2085 else {
2086 /* We got a new absolute path for this server, cut off from the
2087 first slash */
2088 pathsep = strchr(protsep, '/');
2089 if(pathsep) {
2090 /* When people use badly formatted URLs, such as
2091 "http://www.url.com?dir=/home/daniel" we must not use the first
2092 slash, if there's a ?-letter before it! */
2093 char *sep = strchr(protsep, '?');
2094 if(sep && (sep < pathsep))
2095 pathsep = sep;
2096 *pathsep=0;
2098 else {
2099 /* There was no slash. Now, since we might be operating on a badly
2100 formatted URL, such as "http://www.url.com?id=2380" which doesn't
2101 use a slash separator as it is supposed to, we need to check for a
2102 ?-letter as well! */
2103 pathsep = strchr(protsep, '?');
2104 if(pathsep)
2105 *pathsep=0;
2109 /* If the new part contains a space, this is a mighty stupid redirect
2110 but we still make an effort to do "right". To the left of a '?'
2111 letter we replace each space with %20 while it is replaced with '+'
2112 on the right side of the '?' letter.
2114 newlen = strlen_url(useurl);
2116 urllen = strlen(url_clone);
2118 newest=(char *)malloc( urllen + 1 + /* possible slash */
2119 newlen + 1 /* zero byte */);
2121 if(!newest) {
2122 free(url_clone); /* don't leak this */
2123 return CURLE_OUT_OF_MEMORY; /* go out from this */
2126 /* copy over the root url part */
2127 memcpy(newest, url_clone, urllen);
2129 /* check if we need to append a slash */
2130 if(('/' == useurl[0]) || (protsep && !*protsep))
2132 else
2133 newest[urllen++]='/';
2135 /* then append the new piece on the right side */
2136 strcpy_url(&newest[urllen], useurl);
2138 free(newurl); /* newurl is the allocated pointer */
2139 free(url_clone);
2140 newurl = newest;
2142 else {
2143 /* This is an absolute URL, don't allow the custom port number */
2144 data->state.allow_port = FALSE;
2146 if(strchr(newurl, ' ')) {
2147 /* This new URL contains at least one space, this is a mighty stupid
2148 redirect but we still make an effort to do "right". */
2149 newlen = strlen_url(newurl);
2151 newest = malloc(newlen+1); /* get memory for this */
2152 if(newest) {
2153 strcpy_url(newest, newurl); /* create a space-free URL */
2155 free(newurl); /* that was no good */
2156 newurl = newest; /* use this instead now */
2162 if(data->change.url_alloc)
2163 free(data->change.url);
2164 else
2165 data->change.url_alloc = TRUE; /* the URL is allocated */
2167 data->change.url = newurl;
2168 newurl = NULL; /* don't free! */
2170 infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
2173 * We get here when the HTTP code is 300-399 (and 401). We need to perform
2174 * differently based on exactly what return code there was.
2176 * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
2177 * a HTTP (proxy-) authentication scheme other than Basic.
2179 switch(data->info.httpcode) {
2180 /* 401 - Act on a www-authentication, we keep on moving and do the
2181 Authorization: XXXX header in the HTTP request code snippet */
2182 /* 407 - Act on a proxy-authentication, we keep on moving and do the
2183 Proxy-Authorization: XXXX header in the HTTP request code snippet */
2184 /* 300 - Multiple Choices */
2185 /* 306 - Not used */
2186 /* 307 - Temporary Redirect */
2187 default: /* for all above (and the unknown ones) */
2188 /* Some codes are explicitly mentioned since I've checked RFC2616 and they
2189 * seem to be OK to POST to.
2191 break;
2192 case 301: /* Moved Permanently */
2193 /* (quote from RFC2616, section 10.3.2):
2195 * Note: When automatically redirecting a POST request after receiving a
2196 * 301 status code, some existing HTTP/1.0 user agents will erroneously
2197 * change it into a GET request.
2199 * ----
2201 * Warning: Because most of importants user agents do this obvious RFC2616
2202 * violation, many webservers expect this misbehavior. So these servers
2203 * often answers to a POST request with an error page. To be sure that
2204 * libcurl gets the page that most user agents would get, libcurl has to
2205 * force GET:
2207 if( data->set.httpreq == HTTPREQ_POST
2208 || data->set.httpreq == HTTPREQ_POST_FORM) {
2209 infof(data,
2210 "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
2211 data->set.httpreq = HTTPREQ_GET;
2213 break;
2214 case 302: /* Found */
2215 /* (From 10.3.3)
2217 Note: RFC 1945 and RFC 2068 specify that the client is not allowed
2218 to change the method on the redirected request. However, most
2219 existing user agent implementations treat 302 as if it were a 303
2220 response, performing a GET on the Location field-value regardless
2221 of the original request method. The status codes 303 and 307 have
2222 been added for servers that wish to make unambiguously clear which
2223 kind of reaction is expected of the client.
2225 (From 10.3.4)
2227 Note: Many pre-HTTP/1.1 user agents do not understand the 303
2228 status. When interoperability with such clients is a concern, the
2229 302 status code may be used instead, since most user agents react
2230 to a 302 response as described here for 303.
2232 case 303: /* See Other */
2233 /* Disable both types of POSTs, since doing a second POST when
2234 * following isn't what anyone would want! */
2235 if(data->set.httpreq != HTTPREQ_GET) {
2236 data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
2237 infof(data, "Disables POST, goes with %s\n",
2238 data->set.opt_no_body?"HEAD":"GET");
2240 break;
2241 case 304: /* Not Modified */
2242 /* 304 means we did a conditional request and it was "Not modified".
2243 * We shouldn't get any Location: header in this response!
2245 break;
2246 case 305: /* Use Proxy */
2247 /* (quote from RFC2616, section 10.3.6):
2248 * "The requested resource MUST be accessed through the proxy given
2249 * by the Location field. The Location field gives the URI of the
2250 * proxy. The recipient is expected to repeat this single request
2251 * via the proxy. 305 responses MUST only be generated by origin
2252 * servers."
2254 break;
2256 Curl_pgrsTime(data, TIMER_REDIRECT);
2257 Curl_pgrsResetTimes(data);
2259 return CURLE_OK;
2262 static CURLcode
2263 Curl_connect_host(struct SessionHandle *data,
2264 struct connectdata **conn)
2266 CURLcode res = CURLE_OK;
2267 int urlchanged = FALSE;
2269 do {
2270 bool async;
2271 bool protocol_done=TRUE; /* will be TRUE always since this is only used
2272 within the easy interface */
2273 Curl_pgrsTime(data, TIMER_STARTSINGLE);
2274 data->change.url_changed = FALSE;
2275 res = Curl_connect(data, conn, &async, &protocol_done);
2277 if((CURLE_OK == res) && async) {
2278 /* Now, if async is TRUE here, we need to wait for the name
2279 to resolve */
2280 res = Curl_wait_for_resolv(*conn, NULL);
2281 if(CURLE_OK == res)
2282 /* Resolved, continue with the connection */
2283 res = Curl_async_resolved(*conn, &protocol_done);
2284 else
2285 /* if we can't resolve, we kill this "connection" now */
2286 (void)Curl_disconnect(*conn);
2288 if(res)
2289 break;
2291 /* If a callback (or something) has altered the URL we should use within
2292 the Curl_connect(), we detect it here and act as if we are redirected
2293 to the new URL */
2294 urlchanged = data->change.url_changed;
2295 if ((CURLE_OK == res) && urlchanged) {
2296 res = Curl_done(conn, res, FALSE);
2297 if(CURLE_OK == res) {
2298 char *gotourl = strdup(data->change.url);
2299 res = Curl_follow(data, gotourl, FALSE);
2300 if(res)
2301 free(gotourl);
2304 } while (urlchanged && res == CURLE_OK);
2306 return res;
2309 /* Returns TRUE and sets '*url' if a request retry is wanted */
2310 bool Curl_retry_request(struct connectdata *conn,
2311 char **url)
2313 bool retry = FALSE;
2314 struct SessionHandle *data = conn->data;
2316 if((data->reqdata.keep.bytecount+conn->headerbytecount == 0) &&
2317 conn->bits.reuse &&
2318 !conn->bits.no_body) {
2319 /* We got no data, we attempted to re-use a connection and yet we want a
2320 "body". This might happen if the connection was left alive when we were
2321 done using it before, but that was closed when we wanted to read from
2322 it again. Bad luck. Retry the same request on a fresh connect! */
2323 infof(conn->data, "Connection died, retrying a fresh connect\n");
2324 *url = strdup(conn->data->change.url);
2326 conn->bits.close = TRUE; /* close this connection */
2327 conn->bits.retry = TRUE; /* mark this as a connection we're about
2328 to retry. Marking it this way should
2329 prevent i.e HTTP transfers to return
2330 error just because nothing has been
2331 transfered! */
2332 retry = TRUE;
2335 return retry;
2339 * Curl_perform() is the internal high-level function that gets called by the
2340 * external curl_easy_perform() function. It inits, performs and cleans up a
2341 * single file transfer.
2343 CURLcode Curl_perform(struct SessionHandle *data)
2345 CURLcode res;
2346 CURLcode res2;
2347 struct connectdata *conn=NULL;
2348 char *newurl = NULL; /* possibly a new URL to follow to! */
2349 bool retry = FALSE;
2351 data->state.used_interface = Curl_if_easy;
2353 res = Curl_pretransfer(data);
2354 if(res)
2355 return res;
2358 * It is important that there is NO 'return' from this function at any other
2359 * place than falling down to the end of the function! This is because we
2360 * have cleanup stuff that must be done before we get back, and that is only
2361 * performed after this do-while loop.
2364 do {
2365 res = Curl_connect_host(data, &conn); /* primary connection */
2367 if(res == CURLE_OK) {
2368 bool do_done;
2369 if(data->set.connect_only) {
2370 /* keep connection open for application to use the socket */
2371 conn->bits.close = FALSE;
2372 res = Curl_done(&conn, CURLE_OK, FALSE);
2373 break;
2375 res = Curl_do(&conn, &do_done);
2377 if(res == CURLE_OK) {
2378 res = Transfer(conn); /* now fetch that URL please */
2379 if(res == CURLE_OK) {
2380 retry = Curl_retry_request(conn, &newurl);
2382 if(!retry)
2384 * We must duplicate the new URL here as the connection data may
2385 * be free()ed in the Curl_done() function.
2387 newurl = data->reqdata.newurl?strdup(data->reqdata.newurl):NULL;
2389 else {
2390 /* The transfer phase returned error, we mark the connection to get
2391 * closed to prevent being re-used. This is becasue we can't
2392 * possibly know if the connection is in a good shape or not now. */
2393 conn->bits.close = TRUE;
2395 if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) {
2396 /* if we failed anywhere, we must clean up the secondary socket if
2397 it was used */
2398 sclose(conn->sock[SECONDARYSOCKET]);
2399 conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD;
2403 /* Always run Curl_done(), even if some of the previous calls
2404 failed, but return the previous (original) error code */
2405 res2 = Curl_done(&conn, res, FALSE);
2407 if(CURLE_OK == res)
2408 res = res2;
2410 else
2411 /* Curl_do() failed, clean up left-overs in the done-call */
2412 res2 = Curl_done(&conn, res, FALSE);
2415 * Important: 'conn' cannot be used here, since it may have been closed
2416 * in 'Curl_done' or other functions.
2419 if((res == CURLE_OK) && newurl) {
2420 res = Curl_follow(data, newurl, retry);
2421 if(CURLE_OK == res) {
2422 newurl = NULL;
2423 continue;
2427 break; /* it only reaches here when this shouldn't loop */
2429 } while(1); /* loop if Location: */
2431 if(newurl)
2432 free(newurl);
2434 if(res && !data->state.errorbuf) {
2436 * As an extra precaution: if no error string has been set and there was
2437 * an error, use the strerror() string or if things are so bad that not
2438 * even that is good, set a bad string that mentions the error code.
2440 const char *str = curl_easy_strerror(res);
2441 if(!str)
2442 failf(data, "unspecified error %d", (int)res);
2443 else
2444 failf(data, "%s", str);
2447 /* run post-transfer uncondionally, but don't clobber the return code if
2448 we already have an error code recorder */
2449 res2 = Curl_posttransfer(data);
2450 if(!res && res2)
2451 res = res2;
2453 return res;
2457 * Curl_setup_transfer() is called to setup some basic properties for the
2458 * upcoming transfer.
2460 CURLcode
2461 Curl_setup_transfer(
2462 struct connectdata *c_conn, /* connection data */
2463 int sockindex, /* socket index to read from or -1 */
2464 curl_off_t size, /* -1 if unknown at this point */
2465 bool getheader, /* TRUE if header parsing is wanted */
2466 curl_off_t *bytecountp, /* return number of bytes read or NULL */
2467 int writesockindex, /* socket index to write to, it may very
2468 well be the same we read from. -1
2469 disables */
2470 curl_off_t *writecountp /* return number of bytes written or
2471 NULL */
2474 struct connectdata *conn = (struct connectdata *)c_conn;
2475 struct SessionHandle *data = conn->data;
2477 if(!conn)
2478 return CURLE_BAD_FUNCTION_ARGUMENT;
2480 curlassert((sockindex <= 1) && (sockindex >= -1));
2482 /* now copy all input parameters */
2483 conn->sockfd = sockindex == -1 ?
2484 CURL_SOCKET_BAD : conn->sock[sockindex];
2485 conn->writesockfd = writesockindex == -1 ?
2486 CURL_SOCKET_BAD:conn->sock[writesockindex];
2487 conn->bits.getheader = getheader;
2489 data->reqdata.size = size;
2490 data->reqdata.bytecountp = bytecountp;
2491 data->reqdata.writebytecountp = writecountp;
2493 return CURLE_OK;