pacman: Archlinux32 fork of pacman
-rw-r--r--  INSTALL                    |   4
-rw-r--r--  configure.ac               |  31
-rw-r--r--  contrib/Makefile.am        |   5
-rw-r--r--  contrib/README             |   3
-rwxr-xr-x  contrib/wget-xdelta.sh.in  |  70
-rw-r--r--  lib/libalpm/alpm.c         |  14
-rw-r--r--  lib/libalpm/alpm.h         |   1
-rw-r--r--  lib/libalpm/dload.c        | 313
-rw-r--r--  lib/libalpm/dload.h        |   2
-rw-r--r--  lib/libalpm/error.c        |  19
-rw-r--r--  lib/libalpm/handle.c       |   6
-rw-r--r--  lib/libalpm/handle.h       |  10
diff --git a/INSTALL b/INSTALL
--- a/INSTALL
+++ b/INSTALL
@@ -16,8 +16,8 @@ properly build pacman.
 libarchive
 http://code.google.com/p/libarchive/
 
-libfetch
-ftp://ftp.netbsd.org/pub/pkgsrc/current/pkgsrc/net/libfetch/README.html
+libcurl
+http://curl.haxx.se/libcurl/
 
 Briefly, the shell commands `./configure; make; make install' should
 configure, build, and install this package.  The following
diff --git a/configure.ac b/configure.ac
index 7498fc15..498e36da 100644
--- a/configure.ac
+++ b/configure.ac
@@ -93,10 +93,10 @@ AC_ARG_WITH(openssl,
 	AS_HELP_STRING([--with-openssl], [use OpenSSL crypto implementations instead of internal routines]),
 	[], [with_openssl=check])
 
-# Help line for libfetch
-AC_ARG_WITH(fetch,
-	AS_HELP_STRING([--with-fetch], [use libfetch as an internal downloader]),
-	[], [with_fetch=check])
+# Help line for libcurl
+AC_ARG_WITH(curl,
+	AS_HELP_STRING([--with-curl], [use libcurl as an internal downloader]),
+	[], [with_curl=check])
 
 # Help line for documentation
 AC_ARG_ENABLE(doc,
@@ -149,24 +149,17 @@ AS_IF([test "x$with_openssl" != "xno"],
 	AC_MSG_RESULT(no))
 AM_CONDITIONAL([HAVE_LIBSSL], [test "x$ac_cv_lib_ssl_MD5_Final" = "xyes"])
 
-# Enable or disable usage of libfetch
-AC_MSG_CHECKING(whether to link with libfetch)
-AS_IF([test "x$with_fetch" != "xno"],
+# Enable or disable usage of libcurl
+AC_MSG_CHECKING(whether to link with libcurl)
+AS_IF([test "x$with_curl" != "xno"],
 	[AC_MSG_RESULT(yes)
-	 AC_CHECK_LIB([fetch], [fetchParseURL], ,
-		[if test "x$with_fetch" != "xcheck"; then
-			AC_MSG_FAILURE([--with-fetch was given, but -lfetch was not found])
+	 AC_CHECK_LIB([curl], [curl_easy_perform], ,
+		[if test "x$with_curl" != "xcheck"; then
+			AC_MSG_FAILURE([--with-curl was given, but -lcurl was not found])
 		 fi],
-		[-lcrypto -ldl])
-	 # Check if libfetch supports connnection caching which we use
-	 AS_IF([test "x$ac_cv_lib_fetch_fetchParseURL" = "xyes"],
-		[AC_CHECK_DECL(fetchConnectionCacheInit, ,
-			AC_MSG_ERROR([libfetch must be version 2.28 or greater]),
-			[#include <fetch.h>])
-		])
-	],
+		[-lcurl])],
 	AC_MSG_RESULT(no))
-AM_CONDITIONAL([HAVE_LIBFETCH], [test "x$ac_cv_lib_fetch_fetchParseURL" = "xyes"])
+AM_CONDITIONAL([HAVE_LIBCURL], [test "x$ac_cv_lib_curl_curl_easy_perform" = "xyes"])
 
 # Checks for header files.
 AC_CHECK_HEADERS([fcntl.h glob.h libintl.h locale.h mntent.h string.h \
diff --git a/contrib/Makefile.am b/contrib/Makefile.am
index 827d9ec0..e9f77940 100644
--- a/contrib/Makefile.am
+++ b/contrib/Makefile.am
@@ -3,8 +3,7 @@ OURSCRIPTS = \
 	pacdiff \
 	paclist \
 	pacscripts \
-	pacsearch \
-	wget-xdelta.sh
+	pacsearch
 
 OURFILES = \
 	bash_completion \
@@ -19,7 +18,6 @@ EXTRA_DIST = \
 	pacscripts.in \
 	pacsearch.in \
 	vimprojects \
-	wget-xdelta.sh.in \
 	zsh_completion.in \
 	README
 
@@ -55,7 +53,6 @@ paclist: $(srcdir)/paclist.in
 pacscripts: $(srcdir)/pacscripts.in
 pacsearch: $(srcdir)/pacsearch.in
 pactree: $(srcdir)/pactree.in
-wget-xdelta.sh: $(srcdir)/wget-xdelta.sh.in
 zsh_completion: $(srcdir)/zsh_completion.in
 
 # vim:set ts=2 sw=2 noet:
diff --git a/contrib/README b/contrib/README
index 4f591012..04b656f3 100644
--- a/contrib/README
+++ b/contrib/README
@@ -28,6 +28,3 @@ database entries. Useful for reuse, or possible config file extension.
 
 vimprojects - a project file for the vim project plugin.
 
-wget-xdelta.sh - A download script for pacman which allows binary deltas
-generated with makepkg to be used instead of downloading full binary packages.
-This should cut download sizes for some package upgrades significantly.
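Note: the AC_CHECK_LIB([curl], [curl_easy_perform]) probe above is what decides whether HAVE_LIBCURL gets defined. Roughly, autoconf compiles a throwaway C program with a dummy prototype and tries to link it against -lcurl; the sketch below is illustrative of that idea, not the literal conftest autoconf emits (which actually calls the function):

/* cc conftest.c -lcurl: if this links, the library exports the symbol */
char curl_easy_perform(void);

int main(void)
{
	/* referencing the symbol forces the linker to resolve it */
	return curl_easy_perform != 0 ? 0 : 1;
}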
diff --git a/contrib/wget-xdelta.sh.in b/contrib/wget-xdelta.sh.in
deleted file mode 100755
index f2ac1c87..00000000
--- a/contrib/wget-xdelta.sh.in
+++ /dev/null
@@ -1,70 +0,0 @@
-#!@BASH_SHELL@
-
-if [ -r "@sysconfdir@/makepkg.conf" ]; then
-	source @sysconfdir@/makepkg.conf
-else
-	echo "wget-xdelta: Unable to find makepkg.conf"
-	exit 1
-fi
-
-if [ -r ~/.makepkg.conf ]; then
-	source ~/.makepkg.conf
-fi
-
-out_file=$(basename $1)
-file_url=$2
-
-if ! [[ "$out_file" =~ "pkg.tar.gz" ]]; then
-	# If it's not a package file download as normal and exit.
-	#wget --passive-ftp -c -O "$out_file" "$file_url"
-	exit $?
-fi
-
-
-# Get the package name and version
-[[ "$out_file" =~ "$CARCH" ]] && arch="-$CARCH" || arch=""
-pkg_data=$(echo $out_file | \
-	sed "s|^\(.*\)-\([[:alnum:]_\.]*-[[:alnum:]_\.]*\)${arch}${PKGEXT}.part|\1 \2|")
-pkgname=$(echo $pkg_data | cut -d ' ' -f 1)
-new_version=$(echo $pkg_data | cut -d ' ' -f 2)
-base_url=${file_url%/*}
-
-# Look for the last version
-for file in $(ls -r @localstatedir@/cache/pacman/pkg/${pkgname}-*-*{,-$CARCH}$PKGEXT 2>/dev/null); do
-	[[ "$file" =~ "$CARCH" ]] && arch="-$CARCH" || arch=""
-	check_version=$(echo $file | \
-		sed "s|^.*/${pkgname}-\([[:alnum:]_\.]*-[[:alnum:]_\.]*\)${arch}$PKGEXT$|\1|" | \
-		grep -v "^@localstatedir@/cache/pacman/pkg")
-
-	[ "$check_version" = "" ] && continue
-
-	vercmp=$(vercmp "$check_version" "$old_version")
-	if [ "$check_version" != "$new_version" -a $vercmp -gt 0 ]; then
-		old_version=$check_version
-		old_file=$file
-	fi
-done
-
-if [ "$old_version" != "" -a "$old_version" != "$new_version" ]; then
-	# Great, we have a cached file, now calculate a patch name from it
-	delta_name="$pkgname-${old_version}_to_${new_version}-${CARCH}.delta"
-
-	echo "wget-xdelta: Attempting to download delta $delta_name..." >&2
-	if wget --passive-ftp -c "$base_url/$delta_name"; then
-		echo "wget-xdelta: Applying delta..."
-		if xdelta patch "$delta_name" "$old_file" "$out_file"; then
-			echo "wget-xdelta: Delta applied successfully!"
-			rm "$delta_name"
-			exit 0
-		else
-			echo "wget-xdelta: Failed to apply delta!"
-			rm $delta_name
-		fi
-	fi
-fi
-
-echo "wget-xdelta: Downloading new package..."
-wget --passive-ftp -c -O "$out_file" "$file_url"
-exit $?
-
-# vim:set ts=4 sw=4 noet:
diff --git a/lib/libalpm/alpm.c b/lib/libalpm/alpm.c
index 4f95832d..db2a63de 100644
--- a/lib/libalpm/alpm.c
+++ b/lib/libalpm/alpm.c
@@ -23,9 +23,8 @@
 
 #include "config.h"
 
-/* connection caching setup */
-#ifdef HAVE_LIBFETCH
-#include <fetch.h>
+#ifdef HAVE_LIBCURL
+#include <curl/curl.h>
 #endif
 
 /* libalpm */
@@ -65,8 +64,9 @@ int SYMEXPORT alpm_initialize(void)
 	bindtextdomain("libalpm", LOCALEDIR);
 #endif
 
-#ifdef HAVE_LIBFETCH
-	fetchConnectionCacheInit(5, 1);
+#ifdef HAVE_LIBCURL
+	curl_global_init(CURL_GLOBAL_SSL);
+	handle->curl = curl_easy_init();
 #endif
 
 	return(0);
@@ -88,8 +88,8 @@ int SYMEXPORT alpm_release(void)
 	_alpm_handle_free(handle);
 	handle = NULL;
 
-#ifdef HAVE_LIBFETCH
-	fetchConnectionCacheClose();
+#ifdef HAVE_LIBCURL
+	curl_global_cleanup();
 #endif
 
 	return(0);
diff --git a/lib/libalpm/alpm.h b/lib/libalpm/alpm.h
index 0f3b7166..0b80506f 100644
--- a/lib/libalpm/alpm.h
+++ b/lib/libalpm/alpm.h
@@ -535,6 +535,7 @@ enum _pmerrno_t {
 	/* External library errors */
 	PM_ERR_LIBARCHIVE,
 	PM_ERR_LIBFETCH,
+	PM_ERR_LIBCURL,
 	PM_ERR_EXTERNAL_DOWNLOAD
 };
 
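Note: the alpm.c hunks above establish the libcurl lifecycle the rest of the patch relies on: one curl_global_init() per process, one long-lived curl_easy handle stored on the alpm handle, and matching cleanup in alpm_release()/_alpm_handle_free(). A minimal standalone sketch of the same pattern (names here are illustrative, not libalpm's):

#include <curl/curl.h>

int main(void)
{
	CURL *curl;

	/* process-wide setup; must precede any other libcurl call */
	curl_global_init(CURL_GLOBAL_SSL);

	/* one reusable easy handle; curl_easy_reset() readies it per transfer */
	curl = curl_easy_init();
	if(curl == NULL) {
		curl_global_cleanup();
		return 1;
	}

	/* ... perform any number of transfers with this handle ... */

	curl_easy_cleanup(curl);
	curl_global_cleanup();
	return 0;
}

Reusing a single easy handle is what lets libcurl keep connections alive between transfers, the feature the removed fetchConnectionCacheInit() call provided under libfetch.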
_alpm_log(PM_LOG_ERROR, _("buffer overflow detected")); + return(1); + } + snprintf(buffer, hostlen + 1, "%s", p); + } + + return(0); +} + +static int curl_download_internal(const char *url, const char *localpath, int force) { + int ret = -1; FILE *localf = NULL; + char *destfile, *filename, *tempfile; + char hostname[256]; /* RFC1123 states applications should support this length */ struct stat st; - int ret = 0; - off_t dl_thisfile = 0; - ssize_t nread = 0; - char *tempfile, *destfile, *filename; + long httpresp, timecond, remote_time, local_time; + double remote_size, bytes_dl; struct sigaction sig_pipe[2], sig_int[2]; - off_t local_size = 0; - time_t local_time = 0; - - struct url *fileurl; - struct url_stat ust; - fetchIO *dlf = NULL; - - char buffer[PM_DLBUF_LEN]; - filename = get_filename(url); - if(!filename) { + if(!filename || curl_gethost(url, hostname) != 0) { _alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url); RET_ERR(PM_ERR_SERVER_BAD_URL, -1); } - fileurl = fetchParseURL(url); - if(!fileurl) { - _alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url); - RET_ERR(PM_ERR_LIBFETCH, -1); - } - destfile = get_destfile(localpath, filename); - tempfile = get_tempfile(localpath, filename); - - if(stat(tempfile, &st) == 0 && S_ISREG(st.st_mode) && st.st_size > 0) { - _alpm_log(PM_LOG_DEBUG, "tempfile found, attempting continuation\n"); - local_time = fileurl->last_modified = st.st_mtime; - local_size = fileurl->offset = (off_t)st.st_size; - dl_thisfile = st.st_size; + tempfile = get_tempfile(localpath, filename); + + /* the curl_easy handle is initialized with the alpm handle, so we only need + * to reset the curl handle set parameters for each time it's used. */ + curl_easy_reset(handle->curl); + curl_easy_setopt(handle->curl, CURLOPT_URL, url); + curl_easy_setopt(handle->curl, CURLOPT_FAILONERROR, 1L); + curl_easy_setopt(handle->curl, CURLOPT_ENCODING, "deflate, gzip"); + curl_easy_setopt(handle->curl, CURLOPT_CONNECTTIMEOUT, 10L); + curl_easy_setopt(handle->curl, CURLOPT_FILETIME, 1L); + curl_easy_setopt(handle->curl, CURLOPT_NOPROGRESS, 0L); + curl_easy_setopt(handle->curl, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(handle->curl, CURLOPT_PROGRESSFUNCTION, curl_progress); + curl_easy_setopt(handle->curl, CURLOPT_PROGRESSDATA, filename); + + if(!force && stat(destfile, &st) == 0) { + /* assume its a sync, so we're starting from scratch. but, only download + * our local is out of date. */ + local_time = (long)st.st_mtime; + curl_easy_setopt(handle->curl, CURLOPT_TIMECONDITION, CURL_TIMECOND_IFMODSINCE); + curl_easy_setopt(handle->curl, CURLOPT_TIMEVALUE, local_time); + } else if(stat(tempfile, &st) == 0 && st.st_size > 0) { + /* assume its a partial package download. we do not support resuming of + * transfers on partially downloaded sync DBs. */ localf = fopen(tempfile, "ab"); - } else if(!force && stat(destfile, &st) == 0 && S_ISREG(st.st_mode) && st.st_size > 0) { - _alpm_log(PM_LOG_DEBUG, "destfile found, using mtime only\n"); - local_time = fileurl->last_modified = st.st_mtime; - local_size = /* no fu->off here */ (off_t)st.st_size; - } else { - _alpm_log(PM_LOG_DEBUG, "no file found matching criteria, starting from scratch\n"); + curl_easy_setopt(handle->curl, CURLOPT_RESUME_FROM, (long)st.st_size); + _alpm_log(PM_LOG_DEBUG, "tempfile found, attempting continuation"); } - /* pass the raw filename for passing to the callback function */ - _alpm_log(PM_LOG_DEBUG, "using '%s' for download progress\n", filename); + /* no destfile and no tempfile. 
+	/* no destfile and no tempfile. start from scratch */
+	if(localf == NULL) {
+		localf = fopen(tempfile, "wb");
+		if(localf == NULL) {
+			goto cleanup;
+		}
+	}
+
+	/* this has to be set _after_ figuring out which file we're opening */
+	curl_easy_setopt(handle->curl, CURLOPT_WRITEDATA, localf);
 
 	/* print proxy info for debug purposes */
 	_alpm_log(PM_LOG_DEBUG, "HTTP_PROXY: %s\n", getenv("HTTP_PROXY"));
@@ -151,9 +192,6 @@ static int download_internal(const char *url, const char *localpath,
 	_alpm_log(PM_LOG_DEBUG, "FTP_PROXY: %s\n", getenv("FTP_PROXY"));
 	_alpm_log(PM_LOG_DEBUG, "ftp_proxy: %s\n", getenv("ftp_proxy"));
 
-	/* 10s timeout */
-	fetchTimeout = 10;
-
 	/* ignore any SIGPIPE signals- these may occur if our FTP socket dies or
 	 * something along those lines. Store the old signal handler first. */
 	sig_pipe[NEW].sa_handler = SIG_IGN;
@@ -169,146 +207,60 @@ static int download_internal(const char *url, const char *localpath,
 	sigaction(SIGINT, NULL, &sig_int[OLD]);
 	sigaction(SIGINT, &sig_int[NEW], NULL);
 
-	/* NOTE: libfetch does not reset the error code, be sure to do it before
-	 * calls into the library */
-
-	/* TODO: if we call fetchStat() and get a redirect (disabling automagic
-	 * redirect following), we should repeat the file locator stuff and get a new
-	 * filename rather than only base if off the first URL, and then verify
-	 * get_filename() didn't return ''. Of course, libfetch might not even allow
-	 * us to even get that URL...FS#22645. This would allow us to download things
-	 * without totally puking like
-	 * http://www.archlinux.org/packages/community/x86_64/exim/download/ */
-
-	/* find out the remote size *and* mtime in one go. there is a lot of
-	 * trouble in trying to do both size and "if-modified-since" logic in a
-	 * non-stat request, so avoid it. */
-	fetchLastErrCode = 0;
-	if(fetchStat(fileurl, &ust, "") == -1) {
-		pm_errno = PM_ERR_LIBFETCH;
-		_alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"),
-				filename, gethost(fileurl), fetchLastErrString);
-		ret = -1;
-		goto cleanup;
-	}
-	check_stop();
-
-	_alpm_log(PM_LOG_DEBUG, "ust.mtime: %ld local_time: %ld compare: %ld\n",
-			ust.mtime, local_time, local_time - ust.mtime);
-	_alpm_log(PM_LOG_DEBUG, "ust.size: %jd local_size: %jd compare: %jd\n",
-			(intmax_t)ust.size, (intmax_t)local_size, (intmax_t)(local_size - ust.size));
-	if(!force && ust.mtime && ust.mtime == local_time
-			&& ust.size && ust.size == local_size) {
-		/* the remote time and size values agreed with what we have, so move on
-		 * because there is nothing more to do. */
*/ - _alpm_log(PM_LOG_DEBUG, "files are identical, skipping %s\n", filename); - ret = 1; - goto cleanup; - } - if(!ust.mtime || ust.mtime != local_time) { - _alpm_log(PM_LOG_DEBUG, "mtimes were different or unavailable, downloading %s from beginning\n", filename); - fileurl->offset = 0; - } - - fetchLastErrCode = 0; - dlf = fetchGet(fileurl, ""); - check_stop(); - - if(fetchLastErrCode != 0 || dlf == NULL) { - pm_errno = PM_ERR_LIBFETCH; - _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"), - filename, gethost(fileurl), fetchLastErrString); - ret = -1; - goto cleanup; - } else { - _alpm_log(PM_LOG_DEBUG, "connected to %s successfully\n", fileurl->host); - } - - if(localf && fileurl->offset == 0) { - _alpm_log(PM_LOG_WARNING, _("resuming download of %s not possible; starting over\n"), filename); - fclose(localf); - localf = NULL; - } else if(fileurl->offset) { - _alpm_log(PM_LOG_DEBUG, "resuming download at position %jd\n", (intmax_t)fileurl->offset); - } - - - if(localf == NULL) { - _alpm_rmrf(tempfile); - fileurl->offset = (off_t)0; - dl_thisfile = 0; - localf = fopen(tempfile, "wb"); - if(localf == NULL) { /* still null? */ - pm_errno = PM_ERR_RETRIEVE; - _alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"), - tempfile, strerror(errno)); - ret = -1; - goto cleanup; - } - } - /* Progress 0 - initialize */ if(handle->dlcb) { - handle->dlcb(filename, 0, ust.size); + handle->dlcb(filename, 0, 1); } - while((nread = fetchIO_read(dlf, buffer, PM_DLBUF_LEN)) > 0) { - check_stop(); - size_t nwritten = 0; - nwritten = fwrite(buffer, 1, (size_t)nread, localf); - if((nwritten != (size_t)nread) || ferror(localf)) { - pm_errno = PM_ERR_RETRIEVE; - _alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"), - tempfile, strerror(errno)); - ret = -1; - goto cleanup; - } - dl_thisfile += nread; + /* perform transfer */ + handle->curlerr = curl_easy_perform(handle->curl); - if(handle->dlcb) { - handle->dlcb(filename, dl_thisfile, ust.size); - } + /* retrieve info about the state of the transfer */ + curl_easy_getinfo(handle->curl, CURLINFO_HTTP_CODE, &httpresp); + curl_easy_getinfo(handle->curl, CURLINFO_FILETIME, &remote_time); + curl_easy_getinfo(handle->curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &remote_size); + curl_easy_getinfo(handle->curl, CURLINFO_SIZE_DOWNLOAD, &bytes_dl); + curl_easy_getinfo(handle->curl, CURLINFO_CONDITION_UNMET, &timecond); + + /* time condition was met and we didn't download anything. we need to + * clean up the 0 byte .part file that's left behind. */ + if(bytes_dl == 0 && timecond == 1) { + ret = 1; + unlink(tempfile); + goto cleanup; } - /* did the transfer complete normally? */ - if (nread == -1) { - /* not PM_ERR_LIBFETCH here because libfetch error string might be empty */ - pm_errno = PM_ERR_RETRIEVE; - _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s\n"), - filename, gethost(fileurl)); - ret = -1; + if(handle->curlerr == CURLE_ABORTED_BY_CALLBACK) { + goto cleanup; + } else if(handle->curlerr != CURLE_OK) { + pm_errno = PM_ERR_LIBCURL; + _alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"), + filename, hostname, curl_easy_strerror(handle->curlerr)); + unlink(tempfile); goto cleanup; } - if (ust.size != -1 && dl_thisfile < ust.size) { + /* remote_size isn't necessarily the full size of the file, just what the + * server reported as remaining to download. 
diff --git a/lib/libalpm/dload.h b/lib/libalpm/dload.h
index 63266491..e8d99b23 100644
--- a/lib/libalpm/dload.h
+++ b/lib/libalpm/dload.h
@@ -25,8 +25,6 @@
 
 #include <time.h>
 
-#define PM_DLBUF_LEN (1024 * 16)
-
 int _alpm_download_single_file(const char *filename,
 		alpm_list_t *servers, const char *localpath,
 		int force);
diff --git a/lib/libalpm/error.c b/lib/libalpm/error.c
index 21fbb48f..cf98cc75 100644
--- a/lib/libalpm/error.c
+++ b/lib/libalpm/error.c
@@ -20,21 +20,14 @@
 
 #include "config.h"
 
-/* TODO: needed for the libfetch stuff, unfortunately- we should kill it */
-#include <stdio.h>
-/* the following two are needed for FreeBSD's libfetch */
-#include <limits.h> /* PATH_MAX */
-#if defined(HAVE_SYS_PARAM_H)
-#include <sys/param.h> /* MAXHOSTNAMELEN */
-#endif
-
-#ifdef HAVE_LIBFETCH
-#include <fetch.h> /* fetchLastErrString */
+#ifdef HAVE_LIBCURL
+#include <curl/curl.h>
 #endif
 
 /* libalpm */
 #include "util.h"
 #include "alpm.h"
+#include "handle.h"
 
 const char SYMEXPORT *alpm_strerrorlast(void)
 {
@@ -147,9 +140,9 @@ const char SYMEXPORT *alpm_strerror(int err)
 			 * requires the archive struct, so we can't. Just use a generic
 			 * error string instead. */
 			return _("libarchive error");
-		case PM_ERR_LIBFETCH:
-#ifdef HAVE_LIBFETCH
-			return fetchLastErrString;
+		case PM_ERR_LIBCURL:
+#ifdef HAVE_LIBCURL
+			return(curl_easy_strerror(handle->curlerr));
 #else
 			/* obviously shouldn't get here... */
 			return _("download library error");
*/ return _("download library error"); diff --git a/lib/libalpm/handle.c b/lib/libalpm/handle.c index d4ebe82a..34893fc6 100644 --- a/lib/libalpm/handle.c +++ b/lib/libalpm/handle.c @@ -71,6 +71,11 @@ void _alpm_handle_free(pmhandle_t *handle) closelog(); } +#ifdef HAVE_LIBCURL + /* release curl handle */ + curl_easy_cleanup(handle->curl); +#endif + /* free memory */ _alpm_trans_free(handle->trans); FREE(handle->root); @@ -85,6 +90,7 @@ void _alpm_handle_free(pmhandle_t *handle) FREELIST(handle->ignorepkg); FREELIST(handle->ignoregrp); FREE(handle); + } alpm_cb_log SYMEXPORT alpm_option_get_logcb() diff --git a/lib/libalpm/handle.h b/lib/libalpm/handle.h index 2d962fe6..de2dfa1a 100644 --- a/lib/libalpm/handle.h +++ b/lib/libalpm/handle.h @@ -29,6 +29,10 @@ #include "alpm.h" #include "trans.h" +#ifdef HAVE_LIBCURL +#include <curl/curl.h> +#endif + typedef struct _pmhandle_t { /* internal usage */ pmdb_t *db_local; /* local db pointer */ @@ -37,6 +41,12 @@ typedef struct _pmhandle_t { FILE *lckstream; /* lock file stream pointer if one exists */ pmtrans_t *trans; +#ifdef HAVE_LIBCURL + /* libcurl handle */ + CURL *curl; /* reusable curl_easy handle */ + CURLcode curlerr; /* last error produced by curl */ +#endif + /* callback functions */ alpm_cb_log logcb; /* Log callback function */ alpm_cb_download dlcb; /* Download callback function */ |