libalpm/dload: add allow_resume and reorder error checks
[pacman-ng.git] / lib / libalpm / dload.c
blob45fabaae2bee8eaf9329fbd9b11b84adc1a3ec9c
1 /*
2 * download.c
4 * Copyright (c) 2006-2011 Pacman Development Team <pacman-dev@archlinux.org>
5 * Copyright (c) 2002-2006 by Judd Vinet <jvinet@zeroflux.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "config.h"
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <errno.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <sys/time.h>
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <signal.h>
33 #ifdef HAVE_LIBCURL
34 #include <curl/curl.h>
35 #endif
37 /* libalpm */
38 #include "dload.h"
39 #include "alpm_list.h"
40 #include "alpm.h"
41 #include "log.h"
42 #include "util.h"
43 #include "handle.h"
#ifdef HAVE_LIBCURL
/* Progress value from the previous curl_progress() callback. A value of 0
 * marks "no progress reported yet" (used to emit the initial 0% event), and
 * comparing against it suppresses duplicate updates to the front end. */
static double prevprogress; /* last download amount */
#endif
/* Extract the filename component of a URL.
 * @param url the URL to examine
 * @return pointer into 'url' just past the final '/', or NULL when the
 *         URL contains no '/' at all (no filename present). Note a URL
 *         ending in '/' yields a pointer to the empty string. */
static const char *get_filename(const char *url)
{
	/* keep const-correctness: strrchr() on a const string returns what is
	 * logically a const pointer, so don't launder it through 'char *' */
	const char *filename = strrchr(url, '/');
	if(filename != NULL) {
		filename++;
	}
	return filename;
}
58 #ifdef HAVE_LIBCURL
59 static char *get_fullpath(const char *path, const char *filename,
60 const char *suffix)
62 char *filepath;
63 /* len = localpath len + filename len + suffix len + null */
64 size_t len = strlen(path) + strlen(filename) + strlen(suffix) + 1;
65 CALLOC(filepath, len, sizeof(char), RET_ERR(PM_ERR_MEMORY, NULL));
66 snprintf(filepath, len, "%s%s%s", path, filename, suffix);
68 return filepath;
/* Bail out of the enclosing function if the user interrupted the download:
 * sets 'ret' to -1 and jumps to the function's 'cleanup' label. Wrapped in
 * do { } while(0) so the macro expands to a single statement and is safe
 * inside unbraced if/else constructs. */
#define check_stop() do { \
	if(dload_interrupted) { \
		ret = -1; \
		goto cleanup; \
	} \
} while(0)

/* indices into the saved-sigaction pairs used in curl_download_internal() */
enum sighandlers { OLD = 0, NEW = 1 };

/* set to 1 by inthandler() when SIGINT arrives during a transfer;
 * curl_progress() polls it to abort the transfer */
static int dload_interrupted;

/* SIGINT handler: just record the interrupt; the transfer machinery
 * notices the flag and shuts down cleanly. */
static void inthandler(int signum)
{
	(void)signum; /* signature required by sigaction; value unused */
	dload_interrupted = 1;
}
81 static int curl_progress(void *file, double dltotal, double dlnow,
82 double ultotal, double ulnow)
84 struct fileinfo *dlfile = (struct fileinfo *)file;
85 double current_size, total_size;
87 /* unused parameters */
88 (void)ultotal;
89 (void)ulnow;
91 /* SIGINT sent, abort by alerting curl */
92 if(dload_interrupted) {
93 return 1;
96 /* none of what follows matters if the front end has no callback */
97 if(handle->dlcb == NULL) {
98 return 0;
101 current_size = dlfile->initial_size + dlnow;
102 total_size = dlfile->initial_size + dltotal;
104 if(DOUBLE_EQ(dltotal, 0) || DOUBLE_EQ(prevprogress, total_size)) {
105 return 0;
108 /* initialize the progress bar here to avoid displaying it when
109 * a repo is up to date and nothing gets downloaded */
110 if(DOUBLE_EQ(prevprogress, 0)) {
111 handle->dlcb(dlfile->filename, 0, (long)dltotal);
114 handle->dlcb(dlfile->filename, (long)current_size, (long)total_size);
116 prevprogress = current_size;
118 return 0;
121 static int curl_gethost(const char *url, char *buffer)
123 size_t hostlen;
124 char *p;
126 if(strncmp(url, "file://", 7) == 0) {
127 strcpy(buffer, _("disk"));
128 } else {
129 p = strstr(url, "//");
130 if(!p) {
131 return 1;
133 p += 2; /* jump over the found // */
134 hostlen = strcspn(p, "/");
135 if(hostlen > 255) {
136 /* buffer overflow imminent */
137 _alpm_log(PM_LOG_ERROR, _("buffer overflow detected"));
138 return 1;
140 snprintf(buffer, hostlen + 1, "%s", p);
143 return 0;
/* Set a file's access and modification times to 'time' (seconds since the
 * epoch). A time of -1 means "unknown" and is treated as a successful no-op.
 * @return 0 on success or no-op, -1 with errno set on failure */
static int utimes_long(const char *path, long time)
{
	struct timeval tv[2];

	if(time == -1) {
		return 0;
	}

	/* zero the microsecond fields; only whole seconds are known */
	memset(tv, 0, sizeof(tv));
	tv[0].tv_sec = tv[1].tv_sec = time;
	return utimes(path, tv);
}
158 static int curl_download_internal(const char *url, const char *localpath,
159 int force, int allow_resume)
161 int ret = -1;
162 FILE *localf = NULL;
163 const char *useragent;
164 const char *open_mode = "wb";
165 char *destfile, *tempfile;
166 /* RFC1123 states applications should support this length */
167 char hostname[256];
168 char error_buffer[CURL_ERROR_SIZE];
169 struct stat st;
170 long timecond, remote_time;
171 double remote_size, bytes_dl;
172 struct sigaction sig_pipe[2], sig_int[2];
173 struct fileinfo dlfile;
175 dlfile.initial_size = 0.0;
176 dlfile.filename = get_filename(url);
177 if(!dlfile.filename || curl_gethost(url, hostname) != 0) {
178 _alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url);
179 RET_ERR(PM_ERR_SERVER_BAD_URL, -1);
182 destfile = get_fullpath(localpath, dlfile.filename, "");
183 tempfile = get_fullpath(localpath, dlfile.filename, ".part");
184 if(!destfile || !tempfile) {
185 goto cleanup;
188 /* the curl_easy handle is initialized with the alpm handle, so we only need
189 * to reset the curl handle set parameters for each time it's used. */
190 curl_easy_reset(handle->curl);
191 curl_easy_setopt(handle->curl, CURLOPT_URL, url);
192 curl_easy_setopt(handle->curl, CURLOPT_FAILONERROR, 1L);
193 curl_easy_setopt(handle->curl, CURLOPT_ERRORBUFFER, error_buffer);
194 curl_easy_setopt(handle->curl, CURLOPT_CONNECTTIMEOUT, 10L);
195 curl_easy_setopt(handle->curl, CURLOPT_FILETIME, 1L);
196 curl_easy_setopt(handle->curl, CURLOPT_NOPROGRESS, 0L);
197 curl_easy_setopt(handle->curl, CURLOPT_FOLLOWLOCATION, 1L);
198 curl_easy_setopt(handle->curl, CURLOPT_PROGRESSFUNCTION, curl_progress);
199 curl_easy_setopt(handle->curl, CURLOPT_PROGRESSDATA, (void *)&dlfile);
201 useragent = getenv("HTTP_USER_AGENT");
202 if(useragent != NULL) {
203 curl_easy_setopt(handle->curl, CURLOPT_USERAGENT, useragent);
206 /* TODO: no assuming here. the calling function should tell us what's kosher */
207 if(!force && stat(destfile, &st) == 0) {
208 /* assume its a sync, so we're starting from scratch. but, only download
209 * our local is out of date. */
210 curl_easy_setopt(handle->curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE);
211 curl_easy_setopt(handle->curl, CURLOPT_TIMEVALUE, (long)st.st_mtime);
212 } else if(stat(tempfile, &st) == 0 && allow_resume) {
213 /* a previous partial download exists, resume from end of file. */
214 open_mode = "ab";
215 curl_easy_setopt(handle->curl, CURLOPT_RESUME_FROM, (long)st.st_size);
216 _alpm_log(PM_LOG_DEBUG, "tempfile found, attempting continuation");
217 dlfile.initial_size = (double)st.st_size;
220 localf = fopen(tempfile, open_mode);
221 if(localf == NULL) {
222 goto cleanup;
225 curl_easy_setopt(handle->curl, CURLOPT_WRITEDATA, localf);
227 /* ignore any SIGPIPE signals- these may occur if our FTP socket dies or
228 * something along those lines. Store the old signal handler first. */
229 sig_pipe[NEW].sa_handler = SIG_IGN;
230 sigemptyset(&sig_pipe[NEW].sa_mask);
231 sig_pipe[NEW].sa_flags = 0;
232 sigaction(SIGPIPE, NULL, &sig_pipe[OLD]);
233 sigaction(SIGPIPE, &sig_pipe[NEW], NULL);
235 dload_interrupted = 0;
236 sig_int[NEW].sa_handler = &inthandler;
237 sigemptyset(&sig_int[NEW].sa_mask);
238 sig_int[NEW].sa_flags = 0;
239 sigaction(SIGINT, NULL, &sig_int[OLD]);
240 sigaction(SIGINT, &sig_int[NEW], NULL);
242 /* Progress 0 - initialize */
243 prevprogress = 0;
245 /* perform transfer */
246 handle->curlerr = curl_easy_perform(handle->curl);
248 /* was it a success? */
249 if(handle->curlerr == CURLE_ABORTED_BY_CALLBACK) {
250 goto cleanup;
251 } else if(handle->curlerr != CURLE_OK) {
252 pm_errno = PM_ERR_LIBCURL;
253 _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"),
254 dlfile.filename, hostname, error_buffer);
255 unlink(tempfile);
256 goto cleanup;
259 /* retrieve info about the state of the transfer */
260 curl_easy_getinfo(handle->curl, CURLINFO_FILETIME, &remote_time);
261 curl_easy_getinfo(handle->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &remote_size);
262 curl_easy_getinfo(handle->curl, CURLINFO_SIZE_DOWNLOAD, &bytes_dl);
263 curl_easy_getinfo(handle->curl, CURLINFO_CONDITION_UNMET, &timecond);
265 /* time condition was met and we didn't download anything. we need to
266 * clean up the 0 byte .part file that's left behind. */
267 if(timecond == 1 && DOUBLE_EQ(bytes_dl, 0)) {
268 ret = 1;
269 unlink(tempfile);
270 goto cleanup;
273 /* remote_size isn't necessarily the full size of the file, just what the
274 * server reported as remaining to download. compare it to what curl reported
275 * as actually being transferred during curl_easy_perform() */
276 if(!DOUBLE_EQ(remote_size, -1) && !DOUBLE_EQ(bytes_dl, -1) &&
277 !DOUBLE_EQ(bytes_dl, remote_size)) {
278 pm_errno = PM_ERR_RETRIEVE;
279 _alpm_log(PM_LOG_ERROR, _("%s appears to be truncated: %jd/%jd bytes\n"),
280 dlfile.filename, (intmax_t)bytes_dl, (intmax_t)remote_size);
281 goto cleanup;
284 ret = 0;
286 cleanup:
287 if(localf != NULL) {
288 fclose(localf);
289 utimes_long(tempfile, remote_time);
292 /* TODO: A signature download will need to return success here as well before
293 * we're willing to rotate the new file into place. */
294 if(ret == 0) {
295 rename(tempfile, destfile);
298 FREE(tempfile);
299 FREE(destfile);
301 /* restore the old signal handlers */
302 sigaction(SIGINT, &sig_int[OLD], NULL);
303 sigaction(SIGPIPE, &sig_pipe[OLD], NULL);
304 /* if we were interrupted, trip the old handler */
305 if(dload_interrupted) {
306 raise(SIGINT);
309 return ret;
311 #endif
313 static int download(const char *url, const char *localpath,
314 int force)
316 if(handle->fetchcb == NULL) {
317 #ifdef HAVE_LIBCURL
318 return curl_download_internal(url, localpath, force, 1);
319 #else
320 RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1);
321 #endif
322 } else {
323 int ret = handle->fetchcb(url, localpath, force);
324 if(ret == -1) {
325 RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1);
327 return ret;
332 * Download a single file
333 * - servers must be a list of urls WITHOUT trailing slashes.
335 * RETURN: 0 for successful download
336 * 1 if the files are identical
337 * -1 on error
339 int _alpm_download_single_file(const char *filename,
340 alpm_list_t *servers, const char *localpath,
341 int force)
343 alpm_list_t *i;
344 int ret = -1;
346 ASSERT(servers != NULL, RET_ERR(PM_ERR_SERVER_NONE, -1));
348 for(i = servers; i; i = i->next) {
349 const char *server = i->data;
350 char *fileurl = NULL;
351 size_t len;
353 /* print server + filename into a buffer */
354 len = strlen(server) + strlen(filename) + 2;
355 CALLOC(fileurl, len, sizeof(char), RET_ERR(PM_ERR_MEMORY, -1));
356 snprintf(fileurl, len, "%s/%s", server, filename);
358 ret = download(fileurl, localpath, force);
359 FREE(fileurl);
360 if(ret != -1) {
361 break;
365 return ret;
368 int _alpm_download_files(alpm_list_t *files,
369 alpm_list_t *servers, const char *localpath)
371 int ret = 0;
372 alpm_list_t *lp;
374 for(lp = files; lp; lp = lp->next) {
375 char *filename = lp->data;
376 if(_alpm_download_single_file(filename, servers,
377 localpath, 0) == -1) {
378 ret++;
382 return ret;
385 /** Fetch a remote pkg. */
386 char SYMEXPORT *alpm_fetch_pkgurl(const char *url)
388 char *filepath;
389 const char *filename, *cachedir;
390 int ret;
392 ALPM_LOG_FUNC;
394 filename = get_filename(url);
396 /* find a valid cache dir to download to */
397 cachedir = _alpm_filecache_setup();
399 /* download the file */
400 ret = download(url, cachedir, 0);
401 if(ret == -1) {
402 _alpm_log(PM_LOG_WARNING, _("failed to download %s\n"), url);
403 return NULL;
405 _alpm_log(PM_LOG_DEBUG, "successfully downloaded %s\n", url);
407 /* we should be able to find the file the second time around */
408 filepath = _alpm_filecache_find(filename);
409 return filepath;
412 /* vim: set ts=2 sw=2 noet: */