diff --git a/pkg/vere/king.c b/pkg/vere/king.c
index 7446ae7199..74ff7f782d 100644
--- a/pkg/vere/king.c
+++ b/pkg/vere/king.c
@@ -251,6 +251,7 @@ _king_curl_bytes(c3_c* url_c, c3_w* len_w, c3_y** hun_y, c3_t veb_t)
   CURL    *cul_u;
   CURLcode res_i;
   long     cod_i;
+  c3_y     try_y = 0;

   uv_buf_t buf_u = uv_buf_init(c3_malloc(1), 0);
   if ( !(cul_u = curl_easy_init()) ) {
@@ -263,29 +264,34 @@ _king_curl_bytes(c3_c* url_c, c3_w* len_w, c3_y** hun_y, c3_t veb_t)
   curl_easy_setopt(cul_u, CURLOPT_WRITEFUNCTION, _king_curl_alloc);
   curl_easy_setopt(cul_u, CURLOPT_WRITEDATA, (void*)&buf_u);

-  res_i = curl_easy_perform(cul_u);
-  curl_easy_getinfo(cul_u, CURLINFO_RESPONSE_CODE, &cod_i);
+  while ( 5 > try_y ) {
+    sleep(try_y++);
+    res_i = curl_easy_perform(cul_u);
+    curl_easy_getinfo(cul_u, CURLINFO_RESPONSE_CODE, &cod_i);

-  //  XX retry?
-  //
-  if ( CURLE_OK != res_i ) {
-    if ( veb_t ) {
-      u3l_log("curl: failed %s: %s", url_c, curl_easy_strerror(res_i));
+    if ( CURLE_OK != res_i ) {
+      if ( veb_t ) {
+        u3l_log("curl: failed to fetch %s: %s",
+                url_c, curl_easy_strerror(res_i));
+      }
+      ret_i = -1;
     }
-    ret_i = -1;
-  }
-  if ( 300 <= cod_i ) {
-    if ( veb_t ) {
-      u3l_log("curl: error %s: HTTP %ld", url_c, cod_i);
+    else if ( 300 <= cod_i ) {
+      if ( veb_t ) {
+        u3l_log("curl: error fetching %s: HTTP %ld", url_c, cod_i);
+      }
+      ret_i = -2;
+    }
+    else {
+      *len_w = buf_u.len;
+      *hun_y = (c3_y*)buf_u.base;
+      ret_i = 0;
+      break;
     }
-    ret_i = -2;
-  }
+  }

   curl_easy_cleanup(cul_u);

-  *len_w = buf_u.len;
-  *hun_y = (c3_y*)buf_u.base;
-
   return ret_i;
 }

@@ -376,6 +382,7 @@ u3_king_next(c3_c* pac_c, c3_c** out_c)
   }

   //  skip printfs on failed requests (/next is usually not present)
+  //REVIEW new retry logic means this case will take longer. make retries optional?
   //
   if ( _king_curl_bytes(url_c, &len_w, &hun_y, 0) ) {
     c3_free(url_c);
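
For reference, a minimal standalone sketch of the retry pattern this patch introduces, separated from libcurl. The fake_fetch() helper is hypothetical (a stand-in for curl_easy_perform()); the loop makes at most five attempts, sleeping 0, 1, 2, 3, then 4 seconds before each, so a persistently failing URL adds roughly ten seconds of delay on top of the request timeouts themselves, which is the slowdown the REVIEW note on u3_king_next is flagging.

#include <stdio.h>
#include <unistd.h>

/* fake_fetch: hypothetical stand-in for curl_easy_perform();
** here it fails the first three attempts, then succeeds.
*/
static int
fake_fetch(unsigned attempt)
{
  return ( attempt < 3 ) ? -1 : 0;
}

int
main(void)
{
  int      ret_i = -1;
  unsigned try_y = 0;

  while ( 5 > try_y ) {
    sleep(try_y);                    /* back off: 0s, 1s, 2s, 3s, 4s */

    if ( 0 == fake_fetch(try_y++) ) {
      ret_i = 0;                     /* success: keep result, stop retrying */
      break;
    }

    fprintf(stderr, "fetch failed, attempt %u\n", try_y);
  }

  return ret_i;
}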