index : pacman (Archlinux32 fork of pacman)
| author | Chantry Xavier <shiningxc@gmail.com> | 2008-02-27 22:36:53 +0100 |
| committer | Dan McGee <dan@archlinux.org> | 2008-03-09 12:03:22 -0500 |
| commit | fc48dc3118318d4b26b63a9453cd23cf2158cba3 (patch) | |
| tree | 0a663da4d43558c90a1f17063f72732b5c74900d /lib | |
| parent | 74c5bd70cf97154f2fa09418bb87a9483a5359ff (diff) | |
Diffstat:

| -rw-r--r-- | lib/libalpm/dload.c | 586 |
diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index f0c1ca90..2e50544b 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -35,22 +35,13 @@
 #include "error.h"
 #include "handle.h"
 
-/* Return a 'struct url' for this server, for downloading 'filename'. */
-static struct url *url_for_file(const char *url, const char *filename)
+/* Build a 'struct url' from an url. */
+static struct url *url_for_string(const char *url)
 {
 	struct url *ret = NULL;
-	char *buf = NULL;
-	int len;
-
-	/* print url + filename into a buffer */
-	len = strlen(url) + strlen(filename) + 2;
-	CALLOC(buf, len, sizeof(char), RET_ERR(PM_ERR_MEMORY, NULL));
-	snprintf(buf, len, "%s/%s", url, filename);
-
-	ret = downloadParseURL(buf);
-	FREE(buf);
+	ret = downloadParseURL(url);
 	if(!ret) {
-		_alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), buf);
+		_alpm_log(PM_LOG_ERROR, _("url '%s' is invalid\n"), url);
 		RET_ERR(PM_ERR_SERVER_BAD_URL, NULL);
 	}
 
@@ -68,302 +59,338 @@ static struct url *url_for_file(const char *url, const char *filename)
 	return(ret);
 }
 
-/* TODO temporary private declaration */
-int _alpm_downloadfiles_forreal(alpm_list_t *servers, const char *localpath,
-		alpm_list_t *files, time_t mtime1, time_t *mtime2);
+static char *get_filename(const char *url) {
+	char *filename = strrchr(url, '/');
+	if(filename != NULL) {
+		filename++;
+	}
+	return(filename);
+}
 
+static char *get_destfile(const char *path, const char *filename) {
+	char *destfile;
+	/* len = localpath len + filename len + null */
+	int len = strlen(path) + strlen(filename) + 1;
+	CALLOC(destfile, len, sizeof(char), RET_ERR(PM_ERR_MEMORY, NULL));
+	snprintf(destfile, len, "%s%s", path, filename);
 
-/* TODO implement these as real functions */
-int _alpm_download_single_file(const char *filename,
-		alpm_list_t *servers, const char *localpath,
-		time_t mtimeold, time_t *mtimenew)
-{
-	alpm_list_t *files = NULL;
-	int ret;
+	return(destfile);
+}
+
+static char *get_tempfile(const char *path, const char *filename) {
+	char *tempfile;
+	/* len = localpath len + filename len + '.part' len + null */
+	int len = strlen(path) + strlen(filename) + 6;
+	CALLOC(tempfile, len, sizeof(char), RET_ERR(PM_ERR_MEMORY, NULL));
+	snprintf(tempfile, len, "%s%s.part", path, filename);
+
+	return(tempfile);
+}
+
+static int download_internal(const char *url, const char *localpath,
+		time_t mtimeold, time_t *mtimenew) {
+	FILE *dlf, *localf = NULL;
+	struct url_stat ust;
+	struct stat st;
+	int chk_resume = 0;
+	int dl_thisfile = 0;
+	char *tempfile, *destfile, *filename;
+	int ret = 0;
+	struct url *fileurl = url_for_string(url);
+
+	if(!fileurl) {
+		return(-1);
+	}
+
+	filename = get_filename(url);
+	if(!filename) {
+		return(-1);
+	}
+	destfile = get_destfile(localpath, filename);
+	tempfile = get_tempfile(localpath, filename);
+
+	/* pass the raw filename for passing to the callback function */
+	_alpm_log(PM_LOG_DEBUG, "using '%s' for download progress\n", filename);
+
+	if(stat(tempfile, &st) == 0 && st.st_size > 0) {
+		_alpm_log(PM_LOG_DEBUG, "existing file found, using it\n");
+		fileurl->offset = (off_t)st.st_size;
+		dl_thisfile = st.st_size;
+		localf = fopen(tempfile, "ab");
+		chk_resume = 1;
+	} else {
+		fileurl->offset = (off_t)0;
+		dl_thisfile = 0;
+	}
+
+	/* libdownload does not reset the error code, reset it in
+	 * the case of previous errors */
+	downloadLastErrCode = 0;
+
+	/* 10s timeout - TODO make a config option */
+	downloadTimeout = 10000;
+
+	dlf = downloadXGet(fileurl, &ust, (handle->nopassiveftp ? "" : "p"));
+
+	if(downloadLastErrCode != 0 || dlf == NULL) {
+		const char *host = _("disk");
+		if(strcmp(SCHEME_FILE, fileurl->scheme) != 0) {
+			host = fileurl->host;
+		}
+		_alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"),
+				filename, host, downloadLastErrString);
+		ret = -1;
+		goto cleanup;
+	} else {
+		_alpm_log(PM_LOG_DEBUG, "connected to %s successfully\n", fileurl->host);
+	}
 
-	/* make a temp one element list */
-	files = alpm_list_add(files, (char*)filename);
+	if(ust.mtime && mtimeold && ust.mtime == mtimeold) {
+		_alpm_log(PM_LOG_DEBUG, "mtimes are identical, skipping %s\n", filename);
+		ret = 1;
+		goto cleanup;
+	}
 
-	ret = _alpm_downloadfiles_forreal(servers, localpath,
-			files, mtimeold, mtimenew);
+	if(ust.mtime && mtimenew) {
+		*mtimenew = ust.mtime;
+	}
 
-	/* free list (data was NOT duplicated) */
-	alpm_list_free(files);
+	if(chk_resume && fileurl->offset == 0) {
+		_alpm_log(PM_LOG_WARNING, _("cannot resume download, starting over\n"));
+		if(localf != NULL) {
+			fclose(localf);
+			localf = NULL;
+		}
+	}
+
+	if(localf == NULL) {
+		_alpm_rmrf(tempfile);
+		fileurl->offset = (off_t)0;
+		dl_thisfile = 0;
+		localf = fopen(tempfile, "wb");
+		if(localf == NULL) { /* still null? */
+			_alpm_log(PM_LOG_ERROR, _("cannot write to file '%s'\n"), tempfile);
+			ret = -1;
+			goto cleanup;
+		}
+	}
+
+	/* Progress 0 - initialize */
+	if(handle->dlcb) {
+		handle->dlcb(filename, 0, ust.size);
+	}
+
+	int nread = 0;
+	char buffer[PM_DLBUF_LEN];
+	while((nread = fread(buffer, 1, PM_DLBUF_LEN, dlf)) > 0) {
+		if(ferror(dlf)) {
+			_alpm_log(PM_LOG_ERROR, _("error downloading '%s': %s\n"),
+					filename, downloadLastErrString);
+			ret = -1;
+			goto cleanup;
+		}
+
+		int nwritten = 0;
+		while(nwritten < nread) {
+			nwritten += fwrite(buffer, 1, (nread - nwritten), localf);
+			if(ferror(localf)) {
+				_alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"),
+						destfile, strerror(errno));
+				ret = -1;
+				goto cleanup;
+			}
+		}
+		dl_thisfile += nread;
+
+		if(handle->dlcb) {
+			handle->dlcb(filename, dl_thisfile, ust.size);
+		}
+	}
+	/* probably safer to close the file descriptors now before renaming the file,
+	 * for example to make sure the buffers are flushed.
+	 */
+	fclose(localf);
+	localf = NULL;
+	fclose(dlf);
+	dlf = NULL;
+
+	rename(tempfile, destfile);
+	ret = 0;
+
+cleanup:
+	FREE(tempfile);
+	FREE(destfile);
+	if(localf != NULL) {
+		fclose(localf);
+	}
+	if(dlf != NULL) {
+		fclose(dlf);
+	}
+	downloadFreeURL(fileurl);
 
 	return(ret);
 }
 
-int _alpm_download_files(alpm_list_t *files,
-		alpm_list_t *servers, const char *localpath)
-{
-	int ret;
+static int download_external(const char *url, const char *localpath,
+		time_t mtimeold, time_t *mtimenew) {
+	int ret = 0;
+	int retval;
+	int usepart = 0;
+	char *ptr1, *ptr2;
+	char origCmd[PATH_MAX];
+	char parsedCmd[PATH_MAX] = "";
+	char cwd[PATH_MAX];
+	char *destfile, *tempfile, *filename;
+
+	filename = get_filename(url);
+	if(!filename) {
+		return(-1);
+	}
+	destfile = get_destfile(localpath, filename);
+	tempfile = get_tempfile(localpath, filename);
+
+	/* replace all occurrences of %o with fn.part */
+	strncpy(origCmd, handle->xfercommand, sizeof(origCmd));
+	ptr1 = origCmd;
+	while((ptr2 = strstr(ptr1, "%o"))) {
+		usepart = 1;
+		ptr2[0] = '\0';
+		strcat(parsedCmd, ptr1);
+		strcat(parsedCmd, tempfile);
+		ptr1 = ptr2 + 2;
+	}
+	strcat(parsedCmd, ptr1);
+	/* replace all occurrences of %u with the download URL */
+	strncpy(origCmd, parsedCmd, sizeof(origCmd));
+	parsedCmd[0] = '\0';
+	ptr1 = origCmd;
+	while((ptr2 = strstr(ptr1, "%u"))) {
+		ptr2[0] = '\0';
+		strcat(parsedCmd, ptr1);
+		strcat(parsedCmd, url);
+		ptr1 = ptr2 + 2;
+	}
+	strcat(parsedCmd, ptr1);
+	/* cwd to the download directory */
+	getcwd(cwd, PATH_MAX);
+	if(chdir(localpath)) {
+		_alpm_log(PM_LOG_WARNING, _("could not chdir to %s\n"), localpath);
+		pm_errno = PM_ERR_CONNECT_FAILED;
+		ret = -1;
+		goto cleanup;
+	}
+	/* execute the parsed command via /bin/sh -c */
+	_alpm_log(PM_LOG_DEBUG, "running command: %s\n", parsedCmd);
+	retval = system(parsedCmd);
+
+	if(retval == -1) {
+		_alpm_log(PM_LOG_WARNING, _("running XferCommand: fork failed!\n"));
+		pm_errno = PM_ERR_FORK_FAILED;
+		ret = -1;
+	} else if(retval != 0) {
+		/* download failed */
+		_alpm_log(PM_LOG_DEBUG, "XferCommand command returned non-zero status "
+				"code (%d)\n", retval);
+		ret = -1;
+	} else {
+		/* download was successful */
+		if(usepart) {
+			rename(tempfile, destfile);
+		}
+		ret = 0;
+	}
 
-	ret = _alpm_downloadfiles_forreal(servers, localpath,
-			files, 0, NULL);
+cleanup:
+	chdir(cwd);
+	if(ret == -1) {
+		/* hack to let an user the time to cancel a download */
+		sleep(2);
+	}
+	FREE(destfile);
+	FREE(tempfile);
 
 	return(ret);
 }
 
+static int download(const char *url, const char *localpath,
+		time_t mtimeold, time_t *mtimenew) {
+	int ret;
+	const char *proto = "file://";
+	int len = strlen(proto);
+	if(strncmp(url, proto, len) == 0) {
+		/* we can simply grab an absolute path from the file:// url by starting
+		 * our path at the char following the proto (the root '/')
+		 */
+		const char *sourcefile = url + len;
+		const char *filename = get_filename(url);
+		const char *destfile = get_destfile(localpath, filename);
+
+		if(_alpm_copyfile(sourcefile, destfile) == 0) {
+			return(0);
+		} else {
+			return(-1);
+		}
+	}
+
+	if(handle->xfercommand == NULL) {
+		ret = download_internal(url, localpath, mtimeold, mtimenew);
+	} else {
+		ret = download_external(url, localpath, mtimeold, mtimenew);
+	}
+	return(ret);
+}
+
 /*
- * This is the real downloadfiles, used directly by sync_synctree() to check
- * modtimes on remote files.
- * - if mtime1 is non-NULL, then only download files if they are different
- *   than mtime1.
- * - if *mtime2 is non-NULL, it will be filled with the mtime of the remote
+ * Download a single file
+ * - if mtimeold is non-NULL, then only download the file if it's different
+ *   than mtimeold.
+ * - if *mtimenew is non-NULL, it will be filled with the mtime of the remote
  *   file.
  *
  * RETURN: 0 for successful download
  *         1 if the mtimes are identical
  *        -1 on error
 */
-int _alpm_downloadfiles_forreal(alpm_list_t *servers, const char *localpath,
-		alpm_list_t *files, time_t mtime1, time_t *mtime2)
+int _alpm_download_single_file(const char *filename,
+		alpm_list_t *servers, const char *localpath,
+		time_t mtimeold, time_t *mtimenew)
 {
-	int dl_thisfile = 0;
-	alpm_list_t *lp;
-	alpm_list_t *complete = NULL;
 	alpm_list_t *i;
 	int ret = -1;
-	char *pkgname = NULL;
-
-	ALPM_LOG_FUNC;
-
-	if(files == NULL) {
-		return(0);
-	}
 
 	for(i = servers; i; i = i->next) {
 		const char *server = i->data;
+		char *fileurl = NULL;
+		int len;
+
+		/* print server + filename into a buffer */
+		len = strlen(server) + strlen(filename) + 2;
+		CALLOC(fileurl, len, sizeof(char), RET_ERR(PM_ERR_MEMORY, -1));
+		snprintf(fileurl, len, "%s/%s", server, filename);
+
+		ret = download(fileurl, localpath, mtimeold, mtimenew);
+		FREE(fileurl);
+		if(ret != -1) {
+			break;
+		}
+	}
 
-		/* get each file in the list */
-		for(lp = files; lp; lp = lp->next) {
-			struct url *fileurl = NULL;
-			char realfile[PATH_MAX];
-			char output[PATH_MAX];
-			char *fn = (char *)lp->data;
-
-			fileurl = url_for_file(server, fn);
-			if(!fileurl) {
-				goto cleanup;
-			}
-
-			/* pass the raw filename for passing to the callback function */
-			FREE(pkgname);
-			STRDUP(pkgname, fn, (void)0);
-			_alpm_log(PM_LOG_DEBUG, "using '%s' for download progress\n", pkgname);
-
-			snprintf(realfile, PATH_MAX, "%s%s", localpath, fn);
-			snprintf(output, PATH_MAX, "%s%s.part", localpath, fn);
-
-			if(alpm_list_find_str(complete, fn)) {
-				continue;
-			}
+	return(ret);
+}
 
-			if(!handle->xfercommand
-					|| !strcmp(fileurl->scheme, "file")) {
-				FILE *dlf, *localf = NULL;
-				struct url_stat ust;
-				struct stat st;
-				int chk_resume = 0;
-
-				if(stat(output, &st) == 0 && st.st_size > 0) {
-					_alpm_log(PM_LOG_DEBUG, "existing file found, using it\n");
-					fileurl->offset = (off_t)st.st_size;
-					dl_thisfile = st.st_size;
-					localf = fopen(output, "a");
-					chk_resume = 1;
-				} else {
-					fileurl->offset = (off_t)0;
-					dl_thisfile = 0;
-				}
-
-				/* libdownload does not reset the error code, reset it in
-				 * the case of previous errors */
-				downloadLastErrCode = 0;
-
-				/* 10s timeout - TODO make a config option */
-				downloadTimeout = 10000;
-
-				dlf = downloadXGet(fileurl, &ust, (handle->nopassiveftp ? "" : "p"));
-
-				if(downloadLastErrCode != 0 || dlf == NULL) {
-					const char *host = _("disk");
-					if(strcmp(SCHEME_FILE, fileurl->scheme) != 0) {
-						host = fileurl->host;
-					}
-					_alpm_log(PM_LOG_ERROR, _("failed retrieving file '%s' from %s : %s\n"),
-							fn, host, downloadLastErrString);
-					if(localf != NULL) {
-						fclose(localf);
-					}
-					/* try the next server */
-					downloadFreeURL(fileurl);
-					continue;
-				} else {
-					_alpm_log(PM_LOG_DEBUG, "connected to %s successfully\n", fileurl->host);
-				}
-
-				if(ust.mtime && mtime1 && ust.mtime == mtime1) {
-					_alpm_log(PM_LOG_DEBUG, "mtimes are identical, skipping %s\n", fn);
-					complete = alpm_list_add(complete, fn);
-					if(localf != NULL) {
-						fclose(localf);
-					}
-					if(dlf != NULL) {
-						fclose(dlf);
-					}
-					downloadFreeURL(fileurl);
-					ret = 1;
-					goto cleanup;
-				}
-
-				if(ust.mtime && mtime2) {
-					*mtime2 = ust.mtime;
-				}
-
-				if(chk_resume && fileurl->offset == 0) {
-					_alpm_log(PM_LOG_WARNING, _("cannot resume download, starting over\n"));
-					if(localf != NULL) {
-						fclose(localf);
-						localf = NULL;
-					}
-				}
-
-				if(localf == NULL) {
-					_alpm_rmrf(output);
-					fileurl->offset = (off_t)0;
-					dl_thisfile = 0;
-					localf = fopen(output, "w");
-					if(localf == NULL) { /* still null? */
-						_alpm_log(PM_LOG_ERROR, _("cannot write to file '%s'\n"), output);
-						if(dlf != NULL) {
-							fclose(dlf);
-						}
-						downloadFreeURL(fileurl);
-						goto cleanup;
-					}
-				}
-
-				/* Progress 0 - initialize */
-				if(handle->dlcb) {
-					handle->dlcb(pkgname, 0, ust.size);
-				}
-
-				int nread = 0;
-				char buffer[PM_DLBUF_LEN];
-				while((nread = fread(buffer, 1, PM_DLBUF_LEN, dlf)) > 0) {
-					if(ferror(dlf)) {
-						_alpm_log(PM_LOG_ERROR, _("error downloading '%s': %s\n"),
-								fn, downloadLastErrString);
-						fclose(localf);
-						fclose(dlf);
-						downloadFreeURL(fileurl);
-						goto cleanup;
-					}
-
-					int nwritten = 0;
-					while(nwritten < nread) {
-						nwritten += fwrite(buffer, 1, (nread - nwritten), localf);
-						if(ferror(localf)) {
-							_alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"),
-									realfile, strerror(errno));
-							fclose(localf);
-							fclose(dlf);
-							downloadFreeURL(fileurl);
-							goto cleanup;
-						}
-					}
-
-					if(nwritten != nread) {
-
-					}
-					dl_thisfile += nread;
-
-					if(handle->dlcb) {
-						handle->dlcb(pkgname, dl_thisfile, ust.size);
-					}
-				}
-
-				downloadFreeURL(fileurl);
-				fclose(localf);
-				fclose(dlf);
-				rename(output, realfile);
-				complete = alpm_list_add(complete, fn);
-			} else {
-				int ret;
-				int usepart = 0;
-				char *ptr1, *ptr2;
-				char origCmd[PATH_MAX];
-				char parsedCmd[PATH_MAX] = "";
-				char url[PATH_MAX];
-				char cwd[PATH_MAX];
-
-				/* build the full download url */
-				snprintf(url, PATH_MAX, "%s://%s%s", fileurl->scheme,
-						fileurl->host, fileurl->doc);
-				/* we don't need this anymore */
-				downloadFreeURL(fileurl);
-
-				/* replace all occurrences of %o with fn.part */
-				strncpy(origCmd, handle->xfercommand, sizeof(origCmd));
-				ptr1 = origCmd;
-				while((ptr2 = strstr(ptr1, "%o"))) {
-					usepart = 1;
-					ptr2[0] = '\0';
-					strcat(parsedCmd, ptr1);
-					strcat(parsedCmd, output);
-					ptr1 = ptr2 + 2;
-				}
-				strcat(parsedCmd, ptr1);
-				/* replace all occurrences of %u with the download URL */
-				strncpy(origCmd, parsedCmd, sizeof(origCmd));
-				parsedCmd[0] = '\0';
-				ptr1 = origCmd;
-				while((ptr2 = strstr(ptr1, "%u"))) {
-					ptr2[0] = '\0';
-					strcat(parsedCmd, ptr1);
-					strcat(parsedCmd, url);
-					ptr1 = ptr2 + 2;
-				}
-				strcat(parsedCmd, ptr1);
-				/* cwd to the download directory */
-				getcwd(cwd, PATH_MAX);
-				if(chdir(localpath)) {
-					_alpm_log(PM_LOG_WARNING, _("could not chdir to %s\n"), localpath);
-					pm_errno = PM_ERR_CONNECT_FAILED;
-					goto cleanup;
-				}
-				/* execute the parsed command via /bin/sh -c */
-				_alpm_log(PM_LOG_DEBUG, "running command: %s\n", parsedCmd);
-				ret = system(parsedCmd);
-				if(ret == -1) {
-					_alpm_log(PM_LOG_WARNING, _("running XferCommand: fork failed!\n"));
-					pm_errno = PM_ERR_FORK_FAILED;
-					goto cleanup;
-				} else if(ret != 0) {
-					/* download failed */
-					_alpm_log(PM_LOG_DEBUG, "XferCommand command returned non-zero status code (%d)\n", ret);
-				} else {
-					/* download was successful */
-					complete = alpm_list_add(complete, fn);
-					if(usepart) {
-						rename(output, realfile);
-					}
-				}
-				chdir(cwd);
-			}
-		}
+int _alpm_download_files(alpm_list_t *files,
+		alpm_list_t *servers, const char *localpath)
+{
+	int ret = 0;
+	alpm_list_t *lp;
 
-		if(alpm_list_count(complete) == alpm_list_count(files)) {
-			ret = 0;
-			goto cleanup;
+	for(lp = files; lp; lp = lp->next) {
+		char *filename = lp->data;
+		if(_alpm_download_single_file(filename, servers,
+					localpath, 0, NULL) == -1) {
+			ret++;
 		}
 	}
 
-cleanup:
-	FREE(pkgname);
-	alpm_list_free(complete);
 	return(ret);
 }
 
@@ -374,19 +401,20 @@ cleanup:
  */
 char SYMEXPORT *alpm_fetch_pkgurl(const char *url)
 {
-	/* TODO this method will not work at all right now */
 	char *filename, *filepath;
 	const char *cachedir;
+	int ret;
 
 	ALPM_LOG_FUNC;
 
-	filename = NULL;
+	filename = get_filename(url);
 
 	/* find a valid cache dir to download to */
 	cachedir = _alpm_filecache_setup();
 
 	/* download the file */
-	if(_alpm_download_single_file(NULL, NULL, cachedir, 0, NULL)) {
+	ret = download(url, cachedir, 0, NULL);
+	if(ret == -1) {
 		_alpm_log(PM_LOG_WARNING, _("failed to download %s\n"), url);
 		return(NULL);
 	}
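The least self-explanatory part of the new code is probably the XferCommand handling in download_external(): the user-configured command template has every %o replaced with the ".part" temporary file and every %u replaced with the download URL before being handed to system(). The standalone sketch below reproduces that two-pass substitution outside of libalpm; the wget command line, URL, file names, and CMD_MAX buffer size are illustrative assumptions, not taken from the commit.

```c
/* Minimal standalone sketch of the %o / %u substitution performed by
 * download_external() above. Everything other than the strstr/strcat loop
 * itself (command line, URL, file names, CMD_MAX) is a made-up example. */
#include <stdio.h>
#include <string.h>

#define CMD_MAX 4096  /* hypothetical bound; the commit uses PATH_MAX buffers */

/* Replace every occurrence of 'token' in 'tmpl' with 'value', writing to 'out'.
 * Mirrors the origCmd/parsedCmd loop in download_external(); like the original,
 * it assumes the result fits in the output buffer. */
static void substitute(char *out, const char *tmpl, const char *token,
		const char *value)
{
	char buf[CMD_MAX];
	char *ptr1, *ptr2;

	strncpy(buf, tmpl, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	out[0] = '\0';

	ptr1 = buf;
	while((ptr2 = strstr(ptr1, token))) {
		ptr2[0] = '\0';            /* cut the template at the token */
		strcat(out, ptr1);         /* copy the part before the token */
		strcat(out, value);        /* splice in the replacement */
		ptr1 = ptr2 + strlen(token);
	}
	strcat(out, ptr1);                 /* copy whatever follows the last token */
}

int main(void)
{
	/* hypothetical XferCommand as it might appear in pacman.conf */
	const char *xfercommand = "/usr/bin/wget -c -O %o %u";
	const char *url = "ftp://ftp.example.org/core/os/i686/pacman-3.1.2-1-i686.pkg.tar.gz";
	const char *tempfile = "pacman-3.1.2-1-i686.pkg.tar.gz.part";
	char pass1[CMD_MAX], pass2[CMD_MAX];

	substitute(pass1, xfercommand, "%o", tempfile);  /* first pass: %o -> .part file */
	substitute(pass2, pass1, "%u", url);             /* second pass: %u -> URL */

	/* download_external() would now run the result via system(), i.e. /bin/sh -c */
	printf("%s\n", pass2);
	return(0);
}
```

Note that the commit keeps the old code's two-pass, fixed-size-buffer substitution (first %o, then %u) rather than introducing a general templating helper; the change is that the logic now lives in its own function and operates on the names built by get_tempfile() and get_destfile(), with the presence of %o deciding whether the ".part" file is renamed to the final destination on success.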