Add breakpoint resume function #92

Open · wants to merge 1 commit into base: master
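This patch adds resume-on-error behaviour to the download path: curl_multi_perform_once() now reports, through a new output parameter, whether the last completed transfer ended with an HTTP or curl error, and transfer_blocking() reacts to that flag by re-adding the easy handle with CURLOPT_RESUME_FROM_LARGE set to the byte offset passed in by the caller. As a rough, self-contained sketch of the underlying libcurl resume pattern (the resume_download helper and the placeholder URL are illustrative only, not part of this patch):

#include <curl/curl.h>

/* Illustrative only: resume a download from `offset` bytes using the easy
 * interface. CURLOPT_RESUME_FROM_LARGE makes libcurl request
 * "Range: bytes=<offset>-", so a capable server replies 206 Partial Content
 * and the transfer continues where it stopped. */
static CURLcode resume_download(const char *url, curl_off_t offset)
{
    CURL *curl = curl_easy_init();
    if (!curl) {
        return CURLE_FAILED_INIT;
    }
    curl_easy_setopt(curl, CURLOPT_URL, url);
    curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, offset);
    CURLcode ret = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
    return ret;
}

In the diff below the handle is already owned by the multi interface, so the patch removes it with curl_multi_remove_handle(), sets CURLOPT_RESUME_FROM_LARGE, and re-adds it with curl_multi_add_handle() before the next perform cycle.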
10 changes: 6 additions & 4 deletions src/link.c
@@ -200,11 +200,13 @@ static void LinkTable_uninitialised_fill(LinkTable *linktbl)
}
/*
* Block until the gaps are filled
* the error flag written to result is not used here
*/
int n = curl_multi_perform_once();
int result = 0;
int n = curl_multi_perform_once(&result);
int i = 0;
int j = 0;
while ((i = curl_multi_perform_once())) {
while ((i = curl_multi_perform_once(&result))) {
if (CONFIG.log_type & debug) {
if (j) {
erase_string(stderr, STATUS_LEN, s);
@@ -846,7 +848,7 @@ TransferStruct Link_download_full(Link *link)
*/
long http_resp = 0;
do {
transfer_blocking(curl);
transfer_blocking(curl, ts.curr_size);
ret = curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_resp);
if (ret) {
lprintf(error, "%s", curl_easy_strerror(ret));
@@ -965,7 +967,7 @@ long Link_download(Link *link, char *output_buf, size_t req_size, off_t offset)

CURL *curl = Link_download_curl_setup(link, req_size, offset, &header, &ts);

transfer_blocking(curl);
transfer_blocking(curl, offset);

curl_off_t recv = Link_download_cleanup(curl, &header);

71 changes: 65 additions & 6 deletions src/network.c
@@ -10,6 +10,7 @@
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>

/*
* ----------------- External variables ----------------------
@@ -110,9 +111,10 @@ curl_callback_unlock(CURL *handle, curl_lock_data data, void *userptr)
* \details Adapted from:
* https://curl.haxx.se/libcurl/c/10-at-a-time.html
*/
static void
static int
curl_process_msgs(CURLMsg *curl_msg, int n_running_curl, int n_mesgs)
{
int result = 0;
(void) n_running_curl;
(void) n_mesgs;
static volatile int slept = 0;
@@ -163,6 +165,7 @@ curl_process_msgs(CURLMsg *curl_msg, int n_running_curl, int n_mesgs)
lprintf(error, "%d - %s <%s>\n",
curl_msg->data.result,
curl_easy_strerror(curl_msg->data.result), url);
result = curl_msg->data.result;
}
curl_multi_remove_handle(curl_multi, curl);
/*
@@ -175,13 +178,50 @@ curl_process_msgs(CURLMsg *curl_msg, int n_running_curl, int n_mesgs)
} else {
lprintf(warning, "curl_msg->msg: %d\n", curl_msg->msg);
}
return result;
}

/* return 1 if the HTTP response code lets us keep trying, 0 on a fatal error */
static int http_error_result(int http_response)
{
switch (http_response) {
case 0: /* e.g. the connection went down before kick-off; worth retrying up to some limit */
case 200: /* OK */
case 206: /* Partial Content */
break;

case 416:
/*
* The requested range cannot be served, either because the server does not
* support ranges, or because the range is invalid (e.g. the file has
* already been downloaded completely).
*/
lprintf(warning, "HTTP 416: the download is already complete, or the server cannot serve the requested range\n");
/* fall through */
default:
return 0; /* give up on any unhandled response code */
}

return 1;
}

/* return 1 if the curl result code lets us keep trying, 0 on a fatal error */
static int curl_error_result(int curl_result)
{
switch (curl_result) {
case CURLE_OK:
case CURLE_COULDNT_CONNECT: /* possibly no network connectivity */
case CURLE_OPERATION_TIMEDOUT: /* e.g. triggered by CURLOPT_LOW_SPEED_TIME */
case CURLE_COULDNT_RESOLVE_HOST: /* host or DNS possibly down */
break; /* keep trying on these */
default: /* see http://curl.haxx.se/libcurl/c/libcurl-errors.html */
return 0;
}
return 1;
}


/**
* \details effectively based on
* https://curl.haxx.se/libcurl/c/multi-double.html
*/
int curl_multi_perform_once(void)
int curl_multi_perform_once(int *result)
{
lprintf(network_lock_debug,
"thread %x: locking transfer_lock;\n", pthread_self());
@@ -207,7 +247,12 @@ int curl_multi_perform_once(void)
int n_mesgs;
CURLMsg *curl_msg;
while ((curl_msg = curl_multi_info_read(curl_multi, &n_mesgs))) {
curl_process_msgs(curl_msg, n_running_curl, n_mesgs);
long http_resp = 0;
curl_easy_getinfo(curl_msg->easy_handle, CURLINFO_RESPONSE_CODE, &http_resp);
int curl_result = curl_process_msgs(curl_msg, n_running_curl, n_mesgs);
/* flag an error for the caller if either the HTTP response or the curl result is fatal */
*result = !http_error_result((int) http_resp) || !curl_error_result(curl_result);
}

lprintf(network_lock_debug,
Expand Down Expand Up @@ -272,7 +317,7 @@ void NetworkSystem_init(void)
crypto_lock_init();
}

void transfer_blocking(CURL *curl)
void transfer_blocking(CURL *curl, size_t start)
{
TransferStruct *ts;
CURLcode ret = curl_easy_getinfo(curl, CURLINFO_PRIVATE, &ts);
@@ -293,8 +338,22 @@ void transfer_blocking(CURL *curl)
"thread %x: unlocking transfer_lock;\n", pthread_self());
PTHREAD_MUTEX_UNLOCK(&transfer_lock);

while (ts->transferring) {
curl_multi_perform_once();
int result = 0;
bool restartDown = false;

while (ts->transferring && !restartDown) {
/*
* If the previous perform cycle reported an error (e.g. the network went
* down mid-download), re-add the handle and resume the transfer from the
* byte offset supplied by the caller.
*/
if (0 != result) {
PTHREAD_MUTEX_LOCK(&transfer_lock);
curl_multi_remove_handle(curl_multi, curl);
curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, (curl_off_t) start);
res = curl_multi_add_handle(curl_multi, curl);
if (res > 0) {
lprintf(error, "%d, %s\n", res, curl_multi_strerror(res));
}
PTHREAD_MUTEX_UNLOCK(&transfer_lock);
/* reset the flag so the handle is not removed and re-added on every cycle */
result = 0;
}
curl_multi_perform_once(&result);
}
}

4 changes: 2 additions & 2 deletions src/network.h
@@ -26,13 +26,13 @@ typedef enum {
extern CURLSH *CURL_SHARE;

/** \brief perform one transfer cycle */
int curl_multi_perform_once(void);
int curl_multi_perform_once(int *result);

/** \brief initialise the network module */
void NetworkSystem_init(void);

/** \brief blocking file transfer */
void transfer_blocking(CURL *curl);
void transfer_blocking(CURL *curl, size_t start);

/** \brief non blocking file transfer */
void transfer_nonblocking(CURL *curl);
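
For reference, a condensed sketch of how callers drive the changed interfaces, modelled on the call sites in src/link.c (the wrapper function names below are hypothetical):

#include <sys/types.h>
#include <curl/curl.h>
#include "network.h"

/* Hypothetical wrapper: block until `curl` finishes, resuming from `offset`
 * whenever a perform cycle reports an error. */
static void blocking_download(CURL *curl, off_t offset)
{
    transfer_blocking(curl, offset);
}

/* Hypothetical wrapper: drain the multi handle. The loop runs until no
 * transfers remain; `result` is set when the last completed transfer ended
 * with an HTTP or curl error. */
static void drain_transfers(void)
{
    int result = 0;
    while (curl_multi_perform_once(&result)) {
        if (result) {
            /* the caller decides whether to retry or give up */
        }
    }
}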