 *  Copyright (c) 2006-2011 Pacman Development Team <pacman-dev@archlinux.org>
 *  Copyright (c) 2002-2006 by Judd Vinet <jvinet@zeroflux.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
29 #include <sys/types.h>
34 #include <curl/curl.h>
39 #include "alpm_list.h"
/* Running byte total reported by the previous curl_progress() invocation;
 * used to suppress redundant front-end callbacks. NOTE(review): appears to
 * be reset to 0 before each transfer in curl_download_internal — confirm. */
static double prevprogress; /* last download amount */
/* Return a pointer to the filename component of a URL, i.e. the text
 * following the last '/'.  Returns NULL when the URL contains no '/'.
 * The returned pointer aliases the input string; nothing is allocated. */
static const char *get_filename(const char *url)
{
	char *slash = strrchr(url, '/');
	return slash ? slash + 1 : NULL;
}
59 static char *get_fullpath(const char *path
, const char *filename
,
63 /* len = localpath len + filename len + suffix len + null */
64 size_t len
= strlen(path
) + strlen(filename
) + strlen(suffix
) + 1;
65 CALLOC(filepath
, len
, sizeof(char), RET_ERR(PM_ERR_MEMORY
, NULL
));
66 snprintf(filepath
, len
, "%s%s%s", path
, filename
, suffix
);
/* Bail out of the surrounding function when a SIGINT was caught; relies on
 * local `ret` and a `cleanup:` label at the expansion site. */
#define check_stop() if(dload_interrupted) { ret = -1; goto cleanup; }

/* indices into the sigaction pairs saved around a transfer */
enum sighandlers { OLD = 0, NEW = 1 };

/* set asynchronously by inthandler(); polled by the progress callback */
static int dload_interrupted;

/* SIGINT handler installed for the duration of a download: just raise the
 * interrupted flag so curl_progress() can abort the transfer cleanly. */
static void inthandler(int signum)
{
	(void)signum; /* required handler signature; value unused */
	dload_interrupted = 1;
}
81 static int curl_progress(void *file
, double dltotal
, double dlnow
,
82 double ultotal
, double ulnow
)
84 struct fileinfo
*dlfile
= (struct fileinfo
*)file
;
85 double current_size
, total_size
;
87 /* unused parameters */
91 /* SIGINT sent, abort by alerting curl */
92 if(dload_interrupted
) {
96 /* none of what follows matters if the front end has no callback */
97 if(handle
->dlcb
== NULL
) {
101 current_size
= dlfile
->initial_size
+ dlnow
;
102 total_size
= dlfile
->initial_size
+ dltotal
;
104 if(DOUBLE_EQ(dltotal
, 0) || DOUBLE_EQ(prevprogress
, total_size
)) {
108 /* initialize the progress bar here to avoid displaying it when
109 * a repo is up to date and nothing gets downloaded */
110 if(DOUBLE_EQ(prevprogress
, 0)) {
111 handle
->dlcb(dlfile
->filename
, 0, (long)dltotal
);
114 handle
->dlcb(dlfile
->filename
, (long)current_size
, (long)total_size
);
116 prevprogress
= current_size
;
121 static int curl_gethost(const char *url
, char *buffer
)
126 if(strncmp(url
, "file://", 7) == 0) {
127 strcpy(buffer
, _("disk"));
129 p
= strstr(url
, "//");
133 p
+= 2; /* jump over the found // */
134 hostlen
= strcspn(p
, "/");
136 /* buffer overflow imminent */
137 _alpm_log(PM_LOG_ERROR
, _("buffer overflow detected"));
140 snprintf(buffer
, hostlen
+ 1, "%s", p
);
/* Set both the access and modification time of `path` to `time` seconds
 * since the epoch.  A time of -1 is the "unknown" sentinel (e.g. curl's
 * CURLINFO_FILETIME when the server gave none) and is silently skipped.
 * Returns utimes(2)'s result, or 0 when nothing was done. */
static int utimes_long(const char *path, long time)
{
	if(time == -1) {
		/* no usable timestamp; leave the file's times alone */
		return 0;
	}

	struct timeval tv[2];
	memset(tv, 0, sizeof(tv));
	tv[0].tv_sec = time;
	tv[1].tv_sec = time;
	return utimes(path, tv);
}
158 static int curl_download_internal(const char *url
, const char *localpath
,
159 int force
, int allow_resume
)
163 const char *useragent
;
164 const char *open_mode
= "wb";
165 char *destfile
, *tempfile
;
166 /* RFC1123 states applications should support this length */
168 char error_buffer
[CURL_ERROR_SIZE
];
170 long timecond
, remote_time
;
171 double remote_size
, bytes_dl
;
172 struct sigaction sig_pipe
[2], sig_int
[2];
173 struct fileinfo dlfile
;
175 dlfile
.initial_size
= 0.0;
176 dlfile
.filename
= get_filename(url
);
177 if(!dlfile
.filename
|| curl_gethost(url
, hostname
) != 0) {
178 _alpm_log(PM_LOG_ERROR
, _("url '%s' is invalid\n"), url
);
179 RET_ERR(PM_ERR_SERVER_BAD_URL
, -1);
182 destfile
= get_fullpath(localpath
, dlfile
.filename
, "");
183 tempfile
= get_fullpath(localpath
, dlfile
.filename
, ".part");
184 if(!destfile
|| !tempfile
) {
188 /* the curl_easy handle is initialized with the alpm handle, so we only need
189 * to reset the curl handle set parameters for each time it's used. */
190 curl_easy_reset(handle
->curl
);
191 curl_easy_setopt(handle
->curl
, CURLOPT_URL
, url
);
192 curl_easy_setopt(handle
->curl
, CURLOPT_FAILONERROR
, 1L);
193 curl_easy_setopt(handle
->curl
, CURLOPT_ERRORBUFFER
, error_buffer
);
194 curl_easy_setopt(handle
->curl
, CURLOPT_CONNECTTIMEOUT
, 10L);
195 curl_easy_setopt(handle
->curl
, CURLOPT_FILETIME
, 1L);
196 curl_easy_setopt(handle
->curl
, CURLOPT_NOPROGRESS
, 0L);
197 curl_easy_setopt(handle
->curl
, CURLOPT_FOLLOWLOCATION
, 1L);
198 curl_easy_setopt(handle
->curl
, CURLOPT_PROGRESSFUNCTION
, curl_progress
);
199 curl_easy_setopt(handle
->curl
, CURLOPT_PROGRESSDATA
, (void *)&dlfile
);
201 useragent
= getenv("HTTP_USER_AGENT");
202 if(useragent
!= NULL
) {
203 curl_easy_setopt(handle
->curl
, CURLOPT_USERAGENT
, useragent
);
206 /* TODO: no assuming here. the calling function should tell us what's kosher */
207 if(!force
&& stat(destfile
, &st
) == 0) {
208 /* assume its a sync, so we're starting from scratch. but, only download
209 * our local is out of date. */
210 curl_easy_setopt(handle
->curl
, CURLOPT_TIMECONDITION
, CURL_TIMECOND_IFMODSINCE
);
211 curl_easy_setopt(handle
->curl
, CURLOPT_TIMEVALUE
, (long)st
.st_mtime
);
212 } else if(stat(tempfile
, &st
) == 0 && allow_resume
) {
213 /* a previous partial download exists, resume from end of file. */
215 curl_easy_setopt(handle
->curl
, CURLOPT_RESUME_FROM
, (long)st
.st_size
);
216 _alpm_log(PM_LOG_DEBUG
, "tempfile found, attempting continuation");
217 dlfile
.initial_size
= (double)st
.st_size
;
220 localf
= fopen(tempfile
, open_mode
);
225 curl_easy_setopt(handle
->curl
, CURLOPT_WRITEDATA
, localf
);
227 /* ignore any SIGPIPE signals- these may occur if our FTP socket dies or
228 * something along those lines. Store the old signal handler first. */
229 sig_pipe
[NEW
].sa_handler
= SIG_IGN
;
230 sigemptyset(&sig_pipe
[NEW
].sa_mask
);
231 sig_pipe
[NEW
].sa_flags
= 0;
232 sigaction(SIGPIPE
, NULL
, &sig_pipe
[OLD
]);
233 sigaction(SIGPIPE
, &sig_pipe
[NEW
], NULL
);
235 dload_interrupted
= 0;
236 sig_int
[NEW
].sa_handler
= &inthandler
;
237 sigemptyset(&sig_int
[NEW
].sa_mask
);
238 sig_int
[NEW
].sa_flags
= 0;
239 sigaction(SIGINT
, NULL
, &sig_int
[OLD
]);
240 sigaction(SIGINT
, &sig_int
[NEW
], NULL
);
242 /* Progress 0 - initialize */
245 /* perform transfer */
246 handle
->curlerr
= curl_easy_perform(handle
->curl
);
248 /* was it a success? */
249 if(handle
->curlerr
== CURLE_ABORTED_BY_CALLBACK
) {
251 } else if(handle
->curlerr
!= CURLE_OK
) {
252 pm_errno
= PM_ERR_LIBCURL
;
253 _alpm_log(PM_LOG_ERROR
, _("failed retrieving file '%s' from %s : %s\n"),
254 dlfile
.filename
, hostname
, error_buffer
);
259 /* retrieve info about the state of the transfer */
260 curl_easy_getinfo(handle
->curl
, CURLINFO_FILETIME
, &remote_time
);
261 curl_easy_getinfo(handle
->curl
, CURLINFO_CONTENT_LENGTH_DOWNLOAD
, &remote_size
);
262 curl_easy_getinfo(handle
->curl
, CURLINFO_SIZE_DOWNLOAD
, &bytes_dl
);
263 curl_easy_getinfo(handle
->curl
, CURLINFO_CONDITION_UNMET
, &timecond
);
265 /* time condition was met and we didn't download anything. we need to
266 * clean up the 0 byte .part file that's left behind. */
267 if(timecond
== 1 && DOUBLE_EQ(bytes_dl
, 0)) {
273 /* remote_size isn't necessarily the full size of the file, just what the
274 * server reported as remaining to download. compare it to what curl reported
275 * as actually being transferred during curl_easy_perform() */
276 if(!DOUBLE_EQ(remote_size
, -1) && !DOUBLE_EQ(bytes_dl
, -1) &&
277 !DOUBLE_EQ(bytes_dl
, remote_size
)) {
278 pm_errno
= PM_ERR_RETRIEVE
;
279 _alpm_log(PM_LOG_ERROR
, _("%s appears to be truncated: %jd/%jd bytes\n"),
280 dlfile
.filename
, (intmax_t)bytes_dl
, (intmax_t)remote_size
);
289 utimes_long(tempfile
, remote_time
);
292 /* TODO: A signature download will need to return success here as well before
293 * we're willing to rotate the new file into place. */
295 rename(tempfile
, destfile
);
301 /* restore the old signal handlers */
302 sigaction(SIGINT
, &sig_int
[OLD
], NULL
);
303 sigaction(SIGPIPE
, &sig_pipe
[OLD
], NULL
);
304 /* if we were interrupted, trip the old handler */
305 if(dload_interrupted
) {
313 static int download(const char *url
, const char *localpath
,
316 if(handle
->fetchcb
== NULL
) {
318 return curl_download_internal(url
, localpath
, force
, 1);
320 RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD
, -1);
323 int ret
= handle
->fetchcb(url
, localpath
, force
);
325 RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD
, -1);
332 * Download a single file
333 * - servers must be a list of urls WITHOUT trailing slashes.
335 * RETURN: 0 for successful download
336 * 1 if the files are identical
339 int _alpm_download_single_file(const char *filename
,
340 alpm_list_t
*servers
, const char *localpath
,
346 ASSERT(servers
!= NULL
, RET_ERR(PM_ERR_SERVER_NONE
, -1));
348 for(i
= servers
; i
; i
= i
->next
) {
349 const char *server
= i
->data
;
350 char *fileurl
= NULL
;
353 /* print server + filename into a buffer */
354 len
= strlen(server
) + strlen(filename
) + 2;
355 CALLOC(fileurl
, len
, sizeof(char), RET_ERR(PM_ERR_MEMORY
, -1));
356 snprintf(fileurl
, len
, "%s/%s", server
, filename
);
358 ret
= download(fileurl
, localpath
, force
);
368 int _alpm_download_files(alpm_list_t
*files
,
369 alpm_list_t
*servers
, const char *localpath
)
374 for(lp
= files
; lp
; lp
= lp
->next
) {
375 char *filename
= lp
->data
;
376 if(_alpm_download_single_file(filename
, servers
,
377 localpath
, 0) == -1) {
385 /** Fetch a remote pkg. */
386 char SYMEXPORT
*alpm_fetch_pkgurl(const char *url
)
389 const char *filename
, *cachedir
;
394 filename
= get_filename(url
);
396 /* find a valid cache dir to download to */
397 cachedir
= _alpm_filecache_setup();
399 /* download the file */
400 ret
= download(url
, cachedir
, 0);
402 _alpm_log(PM_LOG_WARNING
, _("failed to download %s\n"), url
);
405 _alpm_log(PM_LOG_DEBUG
, "successfully downloaded %s\n", url
);
407 /* we should be able to find the file the second time around */
408 filepath
= _alpm_filecache_find(filename
);
412 /* vim: set ts=2 sw=2 noet: */