From 75bfe825fc4018173b0d7a3c4029094edb52d93a Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Wed, 15 Dec 2010 19:57:31 -0500 Subject: add curl to alpm initialization and teardown routines Signed-off-by: Dave Reisner --- lib/libalpm/alpm.c | 13 +++++++++++++ lib/libalpm/dload.c | 4 ++++ lib/libalpm/error.c | 4 ++++ 3 files changed, 21 insertions(+) (limited to 'lib') diff --git a/lib/libalpm/alpm.c b/lib/libalpm/alpm.c index 4f95832d..e5b47444 100644 --- a/lib/libalpm/alpm.c +++ b/lib/libalpm/alpm.c @@ -23,6 +23,10 @@ #include "config.h" +#ifdef HAVE_LIBCURL +#include +#endif + /* connection caching setup */ #ifdef HAVE_LIBFETCH #include @@ -69,6 +73,11 @@ int SYMEXPORT alpm_initialize(void) fetchConnectionCacheInit(5, 1); #endif +#ifdef HAVE_LIBCURL + curl_global_init(CURL_GLOBAL_SSL); + handle->curl = curl_easy_init(); +#endif + return(0); } @@ -92,6 +101,10 @@ int SYMEXPORT alpm_release(void) fetchConnectionCacheClose(); #endif +#ifdef HAVE_LIBCURL + curl_global_cleanup(); +#endif + return(0); } diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c index 7a98eb12..afe0dd40 100644 --- a/lib/libalpm/dload.c +++ b/lib/libalpm/dload.c @@ -35,6 +35,10 @@ #include /* MAXHOSTNAMELEN */ #endif +#ifdef HAVE_LIBCURL +#include +#endif + #ifdef HAVE_LIBFETCH #include #endif diff --git a/lib/libalpm/error.c b/lib/libalpm/error.c index 21fbb48f..aec97a1f 100644 --- a/lib/libalpm/error.c +++ b/lib/libalpm/error.c @@ -28,6 +28,10 @@ #include /* MAXHOSTNAMELEN */ #endif +#ifdef HAVE_LIBCURL +#include +#endif + #ifdef HAVE_LIBFETCH #include /* fetchLastErrString */ #endif -- cgit v1.2.3-70-g09d2 From 278c84710637a891c3407c3811f61b181359ea75 Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Sat, 15 Jan 2011 13:59:45 -0500 Subject: handle: Add CURL* and CURLcode vars to struct Adding the CURLcode is necessary in order to return an error string from pm_error. Unlike libfetch, curl returns numerical error numbers and does not maintain a staticly allocated string with the last error generated. Adding the curl object itself to the handle is advantageous (and encouraged by curl_easy_perform(3)) because the handle is reusable for successive operations. This cuts back on overhead when downloading multiple files in a single transaction. 
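Before the diff, a minimal standalone sketch of the pattern this commit sets up: a reusable curl_easy handle plus the last CURLcode, which is only translated into a message with curl_easy_strerror() when an error string is actually needed. The struct name, helper function, and URL below are placeholders, not code from the patch.

#include <stdio.h>
#include <curl/curl.h>

struct downloader {
	CURL *curl;       /* reusable curl_easy handle */
	CURLcode curlerr; /* last error produced by curl */
};

static int fetch_one(struct downloader *dl, const char *url)
{
	/* reuse the same handle for successive transfers; only reset the options */
	curl_easy_reset(dl->curl);
	curl_easy_setopt(dl->curl, CURLOPT_URL, url);
	dl->curlerr = curl_easy_perform(dl->curl); /* body goes to stdout by default */
	return (dl->curlerr == CURLE_OK) ? 0 : -1;
}

int main(void)
{
	struct downloader dl;

	curl_global_init(CURL_GLOBAL_SSL);
	dl.curl = curl_easy_init();
	if(dl.curl == NULL) {
		return 1;
	}

	if(fetch_one(&dl, "http://example.com/") != 0) {
		/* unlike libfetch, there is no static last-error string; map the code */
		fprintf(stderr, "download failed: %s\n", curl_easy_strerror(dl.curlerr));
	}

	curl_easy_cleanup(dl.curl);
	curl_global_cleanup();
	return 0;
}

Because the handle survives between calls, successive downloads in one session can reuse the connection curl already has open, which is the overhead saving the message refers to.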
Signed-off-by: Dave Reisner --- lib/libalpm/handle.c | 6 ++++++ lib/libalpm/handle.h | 10 ++++++++++ 2 files changed, 16 insertions(+) (limited to 'lib') diff --git a/lib/libalpm/handle.c b/lib/libalpm/handle.c index d4ebe82a..34893fc6 100644 --- a/lib/libalpm/handle.c +++ b/lib/libalpm/handle.c @@ -71,6 +71,11 @@ void _alpm_handle_free(pmhandle_t *handle) closelog(); } +#ifdef HAVE_LIBCURL + /* release curl handle */ + curl_easy_cleanup(handle->curl); +#endif + /* free memory */ _alpm_trans_free(handle->trans); FREE(handle->root); @@ -85,6 +90,7 @@ void _alpm_handle_free(pmhandle_t *handle) FREELIST(handle->ignorepkg); FREELIST(handle->ignoregrp); FREE(handle); + } alpm_cb_log SYMEXPORT alpm_option_get_logcb() diff --git a/lib/libalpm/handle.h b/lib/libalpm/handle.h index 2d962fe6..de2dfa1a 100644 --- a/lib/libalpm/handle.h +++ b/lib/libalpm/handle.h @@ -29,6 +29,10 @@ #include "alpm.h" #include "trans.h" +#ifdef HAVE_LIBCURL +#include +#endif + typedef struct _pmhandle_t { /* internal usage */ pmdb_t *db_local; /* local db pointer */ @@ -37,6 +41,12 @@ typedef struct _pmhandle_t { FILE *lckstream; /* lock file stream pointer if one exists */ pmtrans_t *trans; +#ifdef HAVE_LIBCURL + /* libcurl handle */ + CURL *curl; /* reusable curl_easy handle */ + CURLcode curlerr; /* last error produced by curl */ +#endif + /* callback functions */ alpm_cb_log logcb; /* Log callback function */ alpm_cb_download dlcb; /* Download callback function */ -- cgit v1.2.3-70-g09d2 From 159e1b06a50c7e80171d88933698f107a7f5773a Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Wed, 12 Jan 2011 09:51:06 -0500 Subject: prefix fetch based functions with fetch_ Do this in preparation for implementing similar curl based functionality. We want the ability to test these side by side. 
Signed-off-by: Dave Reisner --- lib/libalpm/dload.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c index afe0dd40..4109e451 100644 --- a/lib/libalpm/dload.c +++ b/lib/libalpm/dload.c @@ -80,7 +80,7 @@ static char *get_tempfile(const char *path, const char *filename) { return(tempfile); } -static const char *gethost(struct url *fileurl) +static const char *fetch_gethost(struct url *fileurl) { const char *host = _("disk"); if(strcmp(SCHEME_FILE, fileurl->scheme) != 0) { @@ -98,7 +98,7 @@ static void inthandler(int signum) #define check_stop() if(dload_interrupted) { ret = -1; goto cleanup; } enum sighandlers { OLD = 0, NEW = 1 }; -static int download_internal(const char *url, const char *localpath, +static int fetch_download_internal(const char *url, const char *localpath, int force) { FILE *localf = NULL; struct stat st; @@ -191,7 +191,7 @@ static int download_internal(const char *url, const char *localpath, if(fetchStat(fileurl, &ust, "") == -1) { pm_errno = PM_ERR_LIBFETCH; _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"), - filename, gethost(fileurl), fetchLastErrString); + filename, fetch_gethost(fileurl), fetchLastErrString); ret = -1; goto cleanup; } @@ -221,7 +221,7 @@ static int download_internal(const char *url, const char *localpath, if(fetchLastErrCode != 0 || dlf == NULL) { pm_errno = PM_ERR_LIBFETCH; _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"), - filename, gethost(fileurl), fetchLastErrString); + filename, fetch_gethost(fileurl), fetchLastErrString); ret = -1; goto cleanup; } else { @@ -279,7 +279,7 @@ static int download_internal(const char *url, const char *localpath, /* not PM_ERR_LIBFETCH here because libfetch error string might be empty */ pm_errno = PM_ERR_RETRIEVE; _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s\n"), - filename, gethost(fileurl)); + filename, fetch_gethost(fileurl)); ret = -1; goto cleanup; } @@ -352,7 +352,7 @@ static int download(const char *url, const char *localpath, int force) { if(handle->fetchcb == NULL) { #ifdef HAVE_LIBFETCH - return(download_internal(url, localpath, force)); + return(fetch_download_internal(url, localpath, force)); #else RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1); #endif -- cgit v1.2.3-70-g09d2 From a5b6a75787be01fc2a030b68eeaba07bc26db469 Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Fri, 14 Jan 2011 12:17:39 -0500 Subject: share code between libfetch and libcurl no actual code changes here. change preprocessor logic to include get_tempfile, get_destfile, signal handler enum, and the interrupt handler logic when either HAVE_LIBCURL or HAVE_LIBFETCH are defined. 
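The guard layout that results in dload.c, sketched here as an outline rather than the file's actual contents (the curl-only block is filled in by later patches in this series):

#if defined(HAVE_LIBFETCH) || defined(HAVE_LIBCURL)
/* shared helpers: get_destfile(), get_tempfile(), dload_interrupted,
 * inthandler(), check_stop(), enum sighandlers { OLD, NEW } */
#endif

#ifdef HAVE_LIBFETCH
/* libfetch only: fetch_gethost(), fetch_download_internal() */
#endif

#ifdef HAVE_LIBCURL
/* libcurl only: curl_download_internal(), added later in the series */
#endif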
Signed-off-by: Dave Reisner --- lib/libalpm/dload.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'lib') diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c index 4109e451..a08eda16 100644 --- a/lib/libalpm/dload.c +++ b/lib/libalpm/dload.c @@ -59,7 +59,7 @@ static char *get_filename(const char *url) { return(filename); } -#ifdef HAVE_LIBFETCH +#if defined(HAVE_LIBFETCH) || defined(HAVE_LIBCURL) static char *get_destfile(const char *path, const char *filename) { char *destfile; /* len = localpath len + filename len + null */ @@ -80,6 +80,17 @@ static char *get_tempfile(const char *path, const char *filename) { return(tempfile); } +#define check_stop() if(dload_interrupted) { ret = -1; goto cleanup; } +enum sighandlers { OLD = 0, NEW = 1 }; + +int dload_interrupted; +static void inthandler(int signum) +{ + dload_interrupted = 1; +} +#endif + +#ifdef HAVE_LIBFETCH static const char *fetch_gethost(struct url *fileurl) { const char *host = _("disk"); @@ -89,15 +100,6 @@ static const char *fetch_gethost(struct url *fileurl) return(host); } -int dload_interrupted; -static void inthandler(int signum) -{ - dload_interrupted = 1; -} - -#define check_stop() if(dload_interrupted) { ret = -1; goto cleanup; } -enum sighandlers { OLD = 0, NEW = 1 }; - static int fetch_download_internal(const char *url, const char *localpath, int force) { FILE *localf = NULL; -- cgit v1.2.3-70-g09d2 From 8a58648471e3a5311529955cd91c1a082be10056 Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Sat, 15 Jan 2011 15:38:16 -0500 Subject: handle error case for PM_ERR_LIBCURL Add PM_ERR_LIBCURL to error enum and handle case in error.c by returning curl_easy_strerror() based on the error number carried by the gloabl alpm handle. Signed-off-by: Dave Reisner --- lib/libalpm/alpm.h | 1 + lib/libalpm/error.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'lib') diff --git a/lib/libalpm/alpm.h b/lib/libalpm/alpm.h index 0f3b7166..0b80506f 100644 --- a/lib/libalpm/alpm.h +++ b/lib/libalpm/alpm.h @@ -535,6 +535,7 @@ enum _pmerrno_t { /* External library errors */ PM_ERR_LIBARCHIVE, PM_ERR_LIBFETCH, + PM_ERR_LIBCURL, PM_ERR_EXTERNAL_DOWNLOAD }; diff --git a/lib/libalpm/error.c b/lib/libalpm/error.c index aec97a1f..19c7d92c 100644 --- a/lib/libalpm/error.c +++ b/lib/libalpm/error.c @@ -39,6 +39,7 @@ /* libalpm */ #include "util.h" #include "alpm.h" +#include "handle.h" const char SYMEXPORT *alpm_strerrorlast(void) { @@ -157,6 +158,13 @@ const char SYMEXPORT *alpm_strerror(int err) #else /* obviously shouldn't get here... */ return _("download library error"); +#endif + case PM_ERR_LIBCURL: +#ifdef HAVE_LIBCURL + return(curl_easy_strerror(handle->curlerr)); +#else + /* obviously shouldn't get here... */ + return _("download library error"); #endif case PM_ERR_EXTERNAL_DOWNLOAD: return _("error invoking external downloader"); -- cgit v1.2.3-70-g09d2 From 96e458b705eda4ddff7d6ec890cf1daf898e9186 Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Tue, 1 Feb 2011 15:38:37 -0500 Subject: dload.c: add curl_download_internal This is a feature complete re-implementation of the fetch based internal downloader, with a few improvements: * support for SSL * gzip and deflate compression on HTTP connections * reuses a single connection over the entire session for lower resource usage. 
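For reference, here is what those three bullet points look like in plain libcurl terms, as a minimal sketch that is independent of the patch below. The URLs are placeholders and /dev/null stands in for the real .part file; HTTPS support comes from initializing with CURL_GLOBAL_SSL, compression from CURLOPT_ENCODING, and connection reuse from keeping a single easy handle alive and only resetting it between transfers.

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
	const char *urls[] = {
		"https://example.com/core.db.tar.gz",  /* placeholder */
		"https://example.com/extra.db.tar.gz", /* placeholder */
	};
	CURL *curl;
	CURLcode err;
	size_t i;

	curl_global_init(CURL_GLOBAL_SSL); /* pulls in the SSL backend */
	curl = curl_easy_init();
	if(curl == NULL) {
		return 1;
	}

	for(i = 0; i < sizeof(urls) / sizeof(urls[0]); i++) {
		FILE *out = fopen("/dev/null", "wb"); /* stand-in for the .part file */
		if(out == NULL) {
			break;
		}
		curl_easy_reset(curl); /* clears options but keeps cached connections */
		curl_easy_setopt(curl, CURLOPT_URL, urls[i]);
		curl_easy_setopt(curl, CURLOPT_ENCODING, "deflate, gzip"); /* compressed HTTP */
		curl_easy_setopt(curl, CURLOPT_WRITEDATA, out);
		err = curl_easy_perform(curl); /* later iterations reuse the open connection */
		if(err != CURLE_OK) {
			fprintf(stderr, "%s: %s\n", urls[i], curl_easy_strerror(err));
		}
		fclose(out);
	}

	curl_easy_cleanup(curl);
	curl_global_cleanup();
	return 0;
}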
Signed-off-by: Dave Reisner --- lib/libalpm/dload.c | 208 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) (limited to 'lib') diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c index a08eda16..22fee543 100644 --- a/lib/libalpm/dload.c +++ b/lib/libalpm/dload.c @@ -350,6 +350,214 @@ cleanup: } #endif +#ifdef HAVE_LIBCURL +static int curl_progress(void *filename, double dltotal, double dlnow, + double ultotal, double ulnow) { + + /* unused parameters */ + (void)ultotal; + (void)ulnow; + + if(dltotal == 0) { + return(0); + } + + if(dload_interrupted) { + return(1); + } + + handle->dlcb((const char*)filename, (long)dlnow, (long)dltotal); + + return(0); +} + +static int curl_gethost(const char *url, char *buffer) { + int hostlen; + char *p; + + if(strncmp(url, "file://", 7) == 0) { + strcpy(buffer, _("disk")); + } else { + p = strstr(url, "//"); + if(!p) { + return(1); + } + p += 2; /* jump over the found // */ + hostlen = strcspn(p, "/"); + if(hostlen > 255) { + /* buffer overflow imminent */ + _alpm_log(PM_LOG_ERROR, _("buffer overflow detected")); + return(1); + } + snprintf(buffer, hostlen + 1, "%s", p); + } + + return(0); +} + +static int curl_download_internal(const char *url, const char *localpath, + int force) { + int ret = -1; + FILE *localf = NULL; + char *destfile, *filename, *tempfile; + char hostname[256]; /* RFC1123 states applications should support this length */ + struct stat st; + long httpresp, timecond, remote_time, local_time; + double remote_size, bytes_dl; + struct sigaction sig_pipe[2], sig_int[2]; + + filename = get_filename(url); + if(!filename || curl_gethost(url, hostname) != 0) { + _alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url); + RET_ERR(PM_ERR_SERVER_BAD_URL, -1); + } + + destfile = get_destfile(localpath, filename); + tempfile = get_tempfile(localpath, filename); + + /* the curl_easy handle is initialized with the alpm handle, so we only need + * to reset the curl handle set parameters for each time it's used. */ + curl_easy_reset(handle->curl); + curl_easy_setopt(handle->curl, CURLOPT_URL, url); + curl_easy_setopt(handle->curl, CURLOPT_FAILONERROR, 1L); + curl_easy_setopt(handle->curl, CURLOPT_ENCODING, "deflate, gzip"); + curl_easy_setopt(handle->curl, CURLOPT_CONNECTTIMEOUT, 10L); + curl_easy_setopt(handle->curl, CURLOPT_FILETIME, 1L); + curl_easy_setopt(handle->curl, CURLOPT_NOPROGRESS, 0L); + curl_easy_setopt(handle->curl, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(handle->curl, CURLOPT_PROGRESSFUNCTION, curl_progress); + curl_easy_setopt(handle->curl, CURLOPT_PROGRESSDATA, filename); + + if(!force && stat(destfile, &st) == 0) { + /* assume its a sync, so we're starting from scratch. but, only download + * our local is out of date. */ + local_time = (long)st.st_mtime; + curl_easy_setopt(handle->curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE); + curl_easy_setopt(handle->curl, CURLOPT_TIMEVALUE, local_time); + } else if(stat(tempfile, &st) == 0 && st.st_size > 0) { + /* assume its a partial package download. we do not support resuming of + * transfers on partially downloaded sync DBs. */ + localf = fopen(tempfile, "ab"); + curl_easy_setopt(handle->curl, CURLOPT_RESUME_FROM, (long)st.st_size); + _alpm_log(PM_LOG_DEBUG, "tempfile found, attempting continuation"); + } + + /* no destfile and no tempfile. 
start from scratch */ + if(localf == NULL) { + localf = fopen(tempfile, "wb"); + if(localf == NULL) { + goto cleanup; + } + } + + /* this has to be set _after_ figuring out which file we're opening */ + curl_easy_setopt(handle->curl, CURLOPT_WRITEDATA, localf); + + /* print proxy info for debug purposes */ + _alpm_log(PM_LOG_DEBUG, "HTTP_PROXY: %s\n", getenv("HTTP_PROXY")); + _alpm_log(PM_LOG_DEBUG, "http_proxy: %s\n", getenv("http_proxy")); + _alpm_log(PM_LOG_DEBUG, "FTP_PROXY: %s\n", getenv("FTP_PROXY")); + _alpm_log(PM_LOG_DEBUG, "ftp_proxy: %s\n", getenv("ftp_proxy")); + + /* ignore any SIGPIPE signals- these may occur if our FTP socket dies or + * something along those lines. Store the old signal handler first. */ + sig_pipe[NEW].sa_handler = SIG_IGN; + sigemptyset(&sig_pipe[NEW].sa_mask); + sig_pipe[NEW].sa_flags = 0; + sigaction(SIGPIPE, NULL, &sig_pipe[OLD]); + sigaction(SIGPIPE, &sig_pipe[NEW], NULL); + + dload_interrupted = 0; + sig_int[NEW].sa_handler = &inthandler; + sigemptyset(&sig_int[NEW].sa_mask); + sig_int[NEW].sa_flags = 0; + sigaction(SIGINT, NULL, &sig_int[OLD]); + sigaction(SIGINT, &sig_int[NEW], NULL); + + /* Progress 0 - initialize */ + if(handle->dlcb) { + handle->dlcb(filename, 0, 1); + } + + /* perform transfer */ + handle->curlerr = curl_easy_perform(handle->curl); + + /* retrieve info about the state of the transfer */ + curl_easy_getinfo(handle->curl, CURLINFO_HTTP_CODE, &httpresp); + curl_easy_getinfo(handle->curl, CURLINFO_FILETIME, &remote_time); + curl_easy_getinfo(handle->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &remote_size); + curl_easy_getinfo(handle->curl, CURLINFO_SIZE_DOWNLOAD, &bytes_dl); + curl_easy_getinfo(handle->curl, CURLINFO_CONDITION_UNMET, &timecond); + + /* time condition was met and we didn't download anything. we need to + * clean up the 0 byte .part file that's left behind. */ + if(bytes_dl == 0 && timecond == 1) { + ret = 1; + unlink(tempfile); + goto cleanup; + } + + if(handle->curlerr == CURLE_ABORTED_BY_CALLBACK) { + goto cleanup; + } else if(handle->curlerr != CURLE_OK) { + pm_errno = PM_ERR_LIBCURL; + _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"), + filename, hostname, curl_easy_strerror(handle->curlerr)); + unlink(tempfile); + goto cleanup; + } + + /* remote_size isn't necessarily the full size of the file, just what the + * server reported as remaining to download. compare it to what curl reported + * as actually being transferred during curl_easy_perform() */ + if((remote_size != -1 && bytes_dl != -1) && bytes_dl != remote_size) { + pm_errno = PM_ERR_RETRIEVE; + _alpm_log(PM_LOG_ERROR, _("%s appears to be truncated: %jd/%jd bytes\n"), + filename, (intmax_t)bytes_dl, (intmax_t)remote_size); + goto cleanup; + } + + fclose(localf); + localf = NULL; + + /* set the times on the file to the same as that of the remote file */ + if(remote_time != -1) { + struct timeval tv[2]; + memset(&tv, 0, sizeof(tv)); + tv[0].tv_sec = tv[1].tv_sec = remote_time; + utimes(tempfile, tv); + } + rename(tempfile, destfile); + ret = 0; + +cleanup: + FREE(tempfile); + FREE(destfile); + if(localf != NULL) { + /* if we still had a local file open, we got interrupted. set the mtimes on + * the file accordingly. 
*/ + fflush(localf); + if(remote_time != -1) { + struct timeval tv[2]; + memset(&tv, 0, sizeof(tv)); + tv[0].tv_sec = tv[1].tv_sec = remote_time; + futimes(fileno(localf), tv); + } + fclose(localf); + } + + /* restore the old signal handlers */ + sigaction(SIGINT, &sig_int[OLD], NULL); + sigaction(SIGPIPE, &sig_pipe[OLD], NULL); + /* if we were interrupted, trip the old handler */ + if(dload_interrupted) { + raise(SIGINT); + } + + return(ret); +} +#endif + static int download(const char *url, const char *localpath, int force) { if(handle->fetchcb == NULL) { -- cgit v1.2.3-70-g09d2 From 4ad4527d104c915efa912d3e1e3a543fad7aca34 Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Wed, 2 Feb 2011 23:21:43 -0500 Subject: dload: temp patch to allow curl/fetch coexistance this is just some debuggery to allow pacman to operate with both fetch and curl at the same time. use the PACMANDL variable to control which library is used. Signed-off-by: Dave Reisner --- lib/libalpm/dload.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c index 22fee543..51dda5e2 100644 --- a/lib/libalpm/dload.c +++ b/lib/libalpm/dload.c @@ -561,8 +561,22 @@ cleanup: static int download(const char *url, const char *localpath, int force) { if(handle->fetchcb == NULL) { -#ifdef HAVE_LIBFETCH +#if defined(HAVE_LIBFETCH) && defined(HAVE_LIBCURL) + const char *pmdownloader = getenv("PACMANDL"); + if(!pmdownloader || strcmp(pmdownloader, "curl") == 0) { + printf(">> using libcurl as internal downloader\n"); + return(curl_download_internal(url, localpath, force)); + } else if(strcmp(pmdownloader, "fetch") == 0) { + printf(">> using libfetch as internal downloader\n"); + return(fetch_download_internal(url, localpath, force)); + } else { + _alpm_log(PM_LOG_ERROR, "PACMANDL unset or invalid! 
Use `curl' or `fetch'\n"); + return(-1); + } +#elif HAVE_LIBFETCH return(fetch_download_internal(url, localpath, force)); +#elif HAVE_LIBCURL + return(curl_download_internal(url, localpath, force)); #else RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1); #endif -- cgit v1.2.3-70-g09d2 From f2eac18a6ec62db3ec53744064e05416024c1b30 Mon Sep 17 00:00:00 2001 From: Dave Reisner Date: Tue, 8 Feb 2011 21:05:53 -0500 Subject: Remove all traces of libfetch Signed-off-by: Dave Reisner --- configure.ac | 24 ----- lib/libalpm/alpm.c | 13 --- lib/libalpm/dload.c | 284 +--------------------------------------------------- lib/libalpm/dload.h | 2 - lib/libalpm/error.c | 19 ---- 5 files changed, 2 insertions(+), 340 deletions(-) (limited to 'lib') diff --git a/configure.ac b/configure.ac index 0ae9cba8..88dee2f9 100644 --- a/configure.ac +++ b/configure.ac @@ -93,11 +93,6 @@ AC_ARG_WITH(openssl, AS_HELP_STRING([--with-openssl], [use OpenSSL crypto implementations instead of internal routines]), [], [with_openssl=check]) -# Help line for libfetch -AC_ARG_WITH(fetch, - AS_HELP_STRING([--with-fetch], [use libfetch as an internal downloader]), - [], [with_fetch=no]) - # Help line for libcurl AC_ARG_WITH(curl, AS_HELP_STRING([--with-curl], [use libcurl as an internal downloader]), @@ -166,25 +161,6 @@ AS_IF([test "x$with_curl" != "xno"], AC_MSG_RESULT(no)) AM_CONDITIONAL([HAVE_LIBCURL], [test "x$ac_cv_lib_curl_curl_easy_perform" = "xyes"]) -# Enable or disable usage of libfetch -AC_MSG_CHECKING(whether to link with libfetch) -AS_IF([test "x$with_fetch" != "xno"], - [AC_MSG_RESULT(yes) - AC_CHECK_LIB([fetch], [fetchParseURL], , - [if test "x$with_fetch" != "xcheck"; then - AC_MSG_FAILURE([--with-fetch was given, but -lfetch was not found]) - fi], - [-lcrypto -ldl]) - # Check if libfetch supports connnection caching which we use - AS_IF([test "x$ac_cv_lib_fetch_fetchParseURL" = "xyes"], - [AC_CHECK_DECL(fetchConnectionCacheInit, , - AC_MSG_ERROR([libfetch must be version 2.28 or greater]), - [#include ]) - ]) - ], - AC_MSG_RESULT(no)) -AM_CONDITIONAL([HAVE_LIBFETCH], [test "x$ac_cv_lib_fetch_fetchParseURL" = "xyes"]) - # Checks for header files. 
AC_CHECK_HEADERS([fcntl.h glob.h libintl.h locale.h mntent.h string.h \ sys/ioctl.h sys/mount.h sys/param.h sys/statvfs.h \ diff --git a/lib/libalpm/alpm.c b/lib/libalpm/alpm.c index e5b47444..db2a63de 100644 --- a/lib/libalpm/alpm.c +++ b/lib/libalpm/alpm.c @@ -27,11 +27,6 @@ #include #endif -/* connection caching setup */ -#ifdef HAVE_LIBFETCH -#include -#endif - /* libalpm */ #include "alpm.h" #include "alpm_list.h" @@ -69,10 +64,6 @@ int SYMEXPORT alpm_initialize(void) bindtextdomain("libalpm", LOCALEDIR); #endif -#ifdef HAVE_LIBFETCH - fetchConnectionCacheInit(5, 1); -#endif - #ifdef HAVE_LIBCURL curl_global_init(CURL_GLOBAL_SSL); handle->curl = curl_easy_init(); @@ -97,10 +88,6 @@ int SYMEXPORT alpm_release(void) _alpm_handle_free(handle); handle = NULL; -#ifdef HAVE_LIBFETCH - fetchConnectionCacheClose(); -#endif - #ifdef HAVE_LIBCURL curl_global_cleanup(); #endif diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c index 51dda5e2..25bb659b 100644 --- a/lib/libalpm/dload.c +++ b/lib/libalpm/dload.c @@ -39,10 +39,6 @@ #include #endif -#ifdef HAVE_LIBFETCH -#include -#endif - /* libalpm */ #include "dload.h" #include "alpm_list.h" @@ -59,7 +55,7 @@ static char *get_filename(const char *url) { return(filename); } -#if defined(HAVE_LIBFETCH) || defined(HAVE_LIBCURL) +#ifdef HAVE_LIBCURL static char *get_destfile(const char *path, const char *filename) { char *destfile; /* len = localpath len + filename len + null */ @@ -88,269 +84,7 @@ static void inthandler(int signum) { dload_interrupted = 1; } -#endif - -#ifdef HAVE_LIBFETCH -static const char *fetch_gethost(struct url *fileurl) -{ - const char *host = _("disk"); - if(strcmp(SCHEME_FILE, fileurl->scheme) != 0) { - host = fileurl->host; - } - return(host); -} - -static int fetch_download_internal(const char *url, const char *localpath, - int force) { - FILE *localf = NULL; - struct stat st; - int ret = 0; - off_t dl_thisfile = 0; - ssize_t nread = 0; - char *tempfile, *destfile, *filename; - struct sigaction sig_pipe[2], sig_int[2]; - - off_t local_size = 0; - time_t local_time = 0; - - struct url *fileurl; - struct url_stat ust; - fetchIO *dlf = NULL; - - char buffer[PM_DLBUF_LEN]; - - filename = get_filename(url); - if(!filename) { - _alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url); - RET_ERR(PM_ERR_SERVER_BAD_URL, -1); - } - - fileurl = fetchParseURL(url); - if(!fileurl) { - _alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url); - RET_ERR(PM_ERR_LIBFETCH, -1); - } - - destfile = get_destfile(localpath, filename); - tempfile = get_tempfile(localpath, filename); - - if(stat(tempfile, &st) == 0 && S_ISREG(st.st_mode) && st.st_size > 0) { - _alpm_log(PM_LOG_DEBUG, "tempfile found, attempting continuation\n"); - local_time = fileurl->last_modified = st.st_mtime; - local_size = fileurl->offset = (off_t)st.st_size; - dl_thisfile = st.st_size; - localf = fopen(tempfile, "ab"); - } else if(!force && stat(destfile, &st) == 0 && S_ISREG(st.st_mode) && st.st_size > 0) { - _alpm_log(PM_LOG_DEBUG, "destfile found, using mtime only\n"); - local_time = fileurl->last_modified = st.st_mtime; - local_size = /* no fu->off here */ (off_t)st.st_size; - } else { - _alpm_log(PM_LOG_DEBUG, "no file found matching criteria, starting from scratch\n"); - } - - /* pass the raw filename for passing to the callback function */ - _alpm_log(PM_LOG_DEBUG, "using '%s' for download progress\n", filename); - - /* print proxy info for debug purposes */ - _alpm_log(PM_LOG_DEBUG, "HTTP_PROXY: %s\n", getenv("HTTP_PROXY")); - _alpm_log(PM_LOG_DEBUG, 
"http_proxy: %s\n", getenv("http_proxy")); - _alpm_log(PM_LOG_DEBUG, "FTP_PROXY: %s\n", getenv("FTP_PROXY")); - _alpm_log(PM_LOG_DEBUG, "ftp_proxy: %s\n", getenv("ftp_proxy")); - - /* 10s timeout */ - fetchTimeout = 10; - - /* ignore any SIGPIPE signals- these may occur if our FTP socket dies or - * something along those lines. Store the old signal handler first. */ - sig_pipe[NEW].sa_handler = SIG_IGN; - sigemptyset(&sig_pipe[NEW].sa_mask); - sig_pipe[NEW].sa_flags = 0; - sigaction(SIGPIPE, NULL, &sig_pipe[OLD]); - sigaction(SIGPIPE, &sig_pipe[NEW], NULL); - - dload_interrupted = 0; - sig_int[NEW].sa_handler = &inthandler; - sigemptyset(&sig_int[NEW].sa_mask); - sig_int[NEW].sa_flags = 0; - sigaction(SIGINT, NULL, &sig_int[OLD]); - sigaction(SIGINT, &sig_int[NEW], NULL); - - /* NOTE: libfetch does not reset the error code, be sure to do it before - * calls into the library */ - - /* TODO: if we call fetchStat() and get a redirect (disabling automagic - * redirect following), we should repeat the file locator stuff and get a new - * filename rather than only base if off the first URL, and then verify - * get_filename() didn't return ''. Of course, libfetch might not even allow - * us to even get that URL...FS#22645. This would allow us to download things - * without totally puking like - * http://www.archlinux.org/packages/community/x86_64/exim/download/ */ - - /* find out the remote size *and* mtime in one go. there is a lot of - * trouble in trying to do both size and "if-modified-since" logic in a - * non-stat request, so avoid it. */ - fetchLastErrCode = 0; - if(fetchStat(fileurl, &ust, "") == -1) { - pm_errno = PM_ERR_LIBFETCH; - _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"), - filename, fetch_gethost(fileurl), fetchLastErrString); - ret = -1; - goto cleanup; - } - check_stop(); - - _alpm_log(PM_LOG_DEBUG, "ust.mtime: %ld local_time: %ld compare: %ld\n", - ust.mtime, local_time, local_time - ust.mtime); - _alpm_log(PM_LOG_DEBUG, "ust.size: %jd local_size: %jd compare: %jd\n", - (intmax_t)ust.size, (intmax_t)local_size, (intmax_t)(local_size - ust.size)); - if(!force && ust.mtime && ust.mtime == local_time - && ust.size && ust.size == local_size) { - /* the remote time and size values agreed with what we have, so move on - * because there is nothing more to do. */ - _alpm_log(PM_LOG_DEBUG, "files are identical, skipping %s\n", filename); - ret = 1; - goto cleanup; - } - if(!ust.mtime || ust.mtime != local_time) { - _alpm_log(PM_LOG_DEBUG, "mtimes were different or unavailable, downloading %s from beginning\n", filename); - fileurl->offset = 0; - } - - fetchLastErrCode = 0; - dlf = fetchGet(fileurl, ""); - check_stop(); - - if(fetchLastErrCode != 0 || dlf == NULL) { - pm_errno = PM_ERR_LIBFETCH; - _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"), - filename, fetch_gethost(fileurl), fetchLastErrString); - ret = -1; - goto cleanup; - } else { - _alpm_log(PM_LOG_DEBUG, "connected to %s successfully\n", fileurl->host); - } - - if(localf && fileurl->offset == 0) { - _alpm_log(PM_LOG_WARNING, _("resuming download of %s not possible; starting over\n"), filename); - fclose(localf); - localf = NULL; - } else if(fileurl->offset) { - _alpm_log(PM_LOG_DEBUG, "resuming download at position %jd\n", (intmax_t)fileurl->offset); - } - - - if(localf == NULL) { - _alpm_rmrf(tempfile); - fileurl->offset = (off_t)0; - dl_thisfile = 0; - localf = fopen(tempfile, "wb"); - if(localf == NULL) { /* still null? 
*/ - pm_errno = PM_ERR_RETRIEVE; - _alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"), - tempfile, strerror(errno)); - ret = -1; - goto cleanup; - } - } - - /* Progress 0 - initialize */ - if(handle->dlcb) { - handle->dlcb(filename, 0, ust.size); - } - - while((nread = fetchIO_read(dlf, buffer, PM_DLBUF_LEN)) > 0) { - check_stop(); - size_t nwritten = 0; - nwritten = fwrite(buffer, 1, (size_t)nread, localf); - if((nwritten != (size_t)nread) || ferror(localf)) { - pm_errno = PM_ERR_RETRIEVE; - _alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"), - tempfile, strerror(errno)); - ret = -1; - goto cleanup; - } - dl_thisfile += nread; - - if(handle->dlcb) { - handle->dlcb(filename, dl_thisfile, ust.size); - } - } - - /* did the transfer complete normally? */ - if (nread == -1) { - /* not PM_ERR_LIBFETCH here because libfetch error string might be empty */ - pm_errno = PM_ERR_RETRIEVE; - _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s\n"), - filename, fetch_gethost(fileurl)); - ret = -1; - goto cleanup; - } - - if (ust.size != -1 && dl_thisfile < ust.size) { - pm_errno = PM_ERR_RETRIEVE; - _alpm_log(PM_LOG_ERROR, _("%s appears to be truncated: %jd/%jd bytes\n"), - filename, (intmax_t)dl_thisfile, (intmax_t)ust.size); - ret = -1; - goto cleanup; - } - - /* probably safer to close the file descriptors now before renaming the file, - * for example to make sure the buffers are flushed. - */ - fclose(localf); - localf = NULL; - fetchIO_close(dlf); - dlf = NULL; - - /* set the times on the file to the same as that of the remote file */ - if(ust.mtime) { - struct timeval tv[2]; - memset(&tv, 0, sizeof(tv)); - tv[0].tv_sec = ust.atime; - tv[1].tv_sec = ust.mtime; - utimes(tempfile, tv); - } - if(rename(tempfile, destfile)) { - _alpm_log(PM_LOG_ERROR, _("could not rename %s to %s (%s)\n"), - tempfile, destfile, strerror(errno)); - ret = -1; - } - ret = 0; - -cleanup: - FREE(tempfile); - FREE(destfile); - if(localf != NULL) { - /* if we still had a local file open, we got interrupted. set the mtimes on - * the file accordingly. */ - fflush(localf); - if(ust.mtime) { - struct timeval tv[2]; - memset(&tv, 0, sizeof(tv)); - tv[0].tv_sec = ust.atime; - tv[1].tv_sec = ust.mtime; - futimes(fileno(localf), tv); - } - fclose(localf); - } - if(dlf != NULL) { - fetchIO_close(dlf); - } - fetchFreeURL(fileurl); - - /* restore the old signal handlers */ - sigaction(SIGINT, &sig_int[OLD], NULL); - sigaction(SIGPIPE, &sig_pipe[OLD], NULL); - /* if we were interrupted, trip the old handler */ - if(dload_interrupted) { - raise(SIGINT); - } - - return(ret); -} -#endif -#ifdef HAVE_LIBCURL static int curl_progress(void *filename, double dltotal, double dlnow, double ultotal, double ulnow) { @@ -561,21 +295,7 @@ cleanup: static int download(const char *url, const char *localpath, int force) { if(handle->fetchcb == NULL) { -#if defined(HAVE_LIBFETCH) && defined(HAVE_LIBCURL) - const char *pmdownloader = getenv("PACMANDL"); - if(!pmdownloader || strcmp(pmdownloader, "curl") == 0) { - printf(">> using libcurl as internal downloader\n"); - return(curl_download_internal(url, localpath, force)); - } else if(strcmp(pmdownloader, "fetch") == 0) { - printf(">> using libfetch as internal downloader\n"); - return(fetch_download_internal(url, localpath, force)); - } else { - _alpm_log(PM_LOG_ERROR, "PACMANDL unset or invalid! 
Use `curl' or `fetch'\n"); - return(-1); - } -#elif HAVE_LIBFETCH - return(fetch_download_internal(url, localpath, force)); -#elif HAVE_LIBCURL +#ifdef HAVE_LIBCURL return(curl_download_internal(url, localpath, force)); #else RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1); diff --git a/lib/libalpm/dload.h b/lib/libalpm/dload.h index 63266491..e8d99b23 100644 --- a/lib/libalpm/dload.h +++ b/lib/libalpm/dload.h @@ -25,8 +25,6 @@ #include -#define PM_DLBUF_LEN (1024 * 16) - int _alpm_download_single_file(const char *filename, alpm_list_t *servers, const char *localpath, int force); diff --git a/lib/libalpm/error.c b/lib/libalpm/error.c index 19c7d92c..cf98cc75 100644 --- a/lib/libalpm/error.c +++ b/lib/libalpm/error.c @@ -20,22 +20,10 @@ #include "config.h" -/* TODO: needed for the libfetch stuff, unfortunately- we should kill it */ -#include -/* the following two are needed for FreeBSD's libfetch */ -#include /* PATH_MAX */ -#if defined(HAVE_SYS_PARAM_H) -#include /* MAXHOSTNAMELEN */ -#endif - #ifdef HAVE_LIBCURL #include #endif -#ifdef HAVE_LIBFETCH -#include /* fetchLastErrString */ -#endif - /* libalpm */ #include "util.h" #include "alpm.h" @@ -152,13 +140,6 @@ const char SYMEXPORT *alpm_strerror(int err) * requires the archive struct, so we can't. Just use a generic * error string instead. */ return _("libarchive error"); - case PM_ERR_LIBFETCH: -#ifdef HAVE_LIBFETCH - return fetchLastErrString; -#else - /* obviously shouldn't get here... */ - return _("download library error"); -#endif case PM_ERR_LIBCURL: #ifdef HAVE_LIBCURL return(curl_easy_strerror(handle->curlerr)); -- cgit v1.2.3-70-g09d2
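To close the series, a sketch of how a front-end sees the new error path once libfetch is gone. This assumes alpm_fetch_pkgurl() as the call that triggers the internal downloader and uses a placeholder URL; a real caller would also configure root, dbpath, and cache directories before downloading anything.

#include <stdio.h>
#include <stdlib.h>
#include <alpm.h>

int main(void)
{
	char *path;

	if(alpm_initialize() != 0) { /* now also runs curl_global_init()/curl_easy_init() */
		fprintf(stderr, "failed to initialize alpm: %s\n", alpm_strerrorlast());
		return 1;
	}

	path = alpm_fetch_pkgurl("http://example.com/foo-1.0-1-x86_64.pkg.tar.xz");
	if(path == NULL) {
		/* for PM_ERR_LIBCURL this string now comes from
		 * curl_easy_strerror(handle->curlerr) via alpm_strerror() */
		fprintf(stderr, "download failed: %s\n", alpm_strerrorlast());
	} else {
		printf("saved to %s\n", path);
		free(path);
	}

	alpm_release(); /* runs curl_global_cleanup() */
	return 0;
}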