tool_operate.c
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/
#include "tool_setup.h"

#ifdef HAVE_FCNTL_H
#  include <fcntl.h>
#endif

#ifdef HAVE_LOCALE_H
#  include <locale.h>
#endif

#ifdef HAVE_SYS_SELECT_H
#  include <sys/select.h>
#elif defined(HAVE_UNISTD_H)
#  include <unistd.h>
#endif

#ifdef __VMS
#  include <fabdef.h>
#endif

#ifdef __AMIGA__
#  include <proto/dos.h>
#endif

#ifdef HAVE_NETINET_IN_H
#  include <netinet/in.h>
#endif

#ifdef HAVE_UV_H
/* this is for libuv-enabled debug builds only */
#include <uv.h>
#endif

#include "tool_cfgable.h"
#include "tool_cb_dbg.h"
#include "tool_cb_hdr.h"
#include "tool_cb_prg.h"
#include "tool_cb_rea.h"
#include "tool_cb_see.h"
#include "tool_cb_soc.h"
#include "tool_cb_wrt.h"
#include "tool_dirhie.h"
#include "tool_doswin.h"
#include "tool_easysrc.h"
#include "tool_filetime.h"
#include "tool_getparam.h"
#include "tool_helpers.h"
#include "tool_findfile.h"
#include "tool_libinfo.h"
#include "tool_main.h"
#include "tool_msgs.h"
#include "tool_operate.h"
#include "tool_operhlp.h"
#include "tool_paramhlp.h"
#include "tool_parsecfg.h"
#include "tool_setopt.h"
#include "tool_ssls.h"
#include "tool_urlglob.h"
#include "tool_util.h"
#include "tool_writeout.h"
#include "tool_xattr.h"
#include "tool_vms.h"
#include "tool_help.h"
#include "tool_hugehelp.h"
#include "tool_progress.h"
#include "tool_ipfs.h"
#include "config2setopts.h"

#ifdef DEBUGBUILD
/* libcurl's debug-only curl_easy_perform_ev() */
CURL_EXTERN CURLcode curl_easy_perform_ev(CURL *easy);
#endif

#include "memdebug.h" /* keep this as LAST include */

#ifdef CURL_CA_EMBED
#ifndef CURL_DECLARED_CURL_CA_EMBED
#define CURL_DECLARED_CURL_CA_EMBED
extern const unsigned char curl_ca_embed[];
#endif
#endif

#define CURL_CA_CERT_ERRORMSG \
  "More details here: https://curl.se/docs/sslcerts.html\n\n" \
  "curl failed to verify the legitimacy of the server and therefore " \
  "could not\nestablish a secure connection to it. To learn more about " \
  "this situation and\nhow to fix it, please visit the webpage mentioned " \
  "above.\n"

static CURLcode single_transfer(struct OperationConfig *config,
                                CURLSH *share,
                                bool *added,
                                bool *skipped);
static CURLcode create_transfer(struct GlobalConfig *global,
                                CURLSH *share,
                                bool *added,
                                bool *skipped);

static bool is_fatal_error(CURLcode code)
{
  switch(code) {
  case CURLE_FAILED_INIT:
  case CURLE_OUT_OF_MEMORY:
  case CURLE_UNKNOWN_OPTION:
  case CURLE_BAD_FUNCTION_ARGUMENT:
    /* critical error */
    return TRUE;
  default:
    break;
  }

  /* no error or not critical */
  return FALSE;
}

/*
 * Check if a given string is a PKCS#11 URI
 */
static bool is_pkcs11_uri(const char *string)
{
  if(curl_strnequal(string, "pkcs11:", 7)) {
    return TRUE;
  }
  else {
    return FALSE;
  }
}

#ifdef __VMS
/*
 * get_vms_file_size does what it takes to get the real size of the file
 *
 * For fixed files, find out the size of the EOF block and adjust.
 *
 * For all others, have to read the entire file in, discarding the contents.
 * Most posted text files will be small, and binary files like zlib archives
 * and CD/DVD images should be either a STREAM_LF format or a fixed format.
 *
 */
static curl_off_t vms_realfilesize(const char *name,
                                   const struct_stat *stat_buf)
{
  char buffer[8192];
  curl_off_t count;
  int ret_stat;
  FILE * file;

  /* !checksrc! disable FOPENMODE 1 */
  file = fopen(name, "r"); /* VMS */
  if(!file) {
    return 0;
  }
  count = 0;
  ret_stat = 1;
  while(ret_stat > 0) {
    ret_stat = fread(buffer, 1, sizeof(buffer), file);
    if(ret_stat)
      count += ret_stat;
  }
  fclose(file);

  return count;
}

/*
 *
 * VmsSpecialSize checks to see if the stat st_size can be trusted and
 * if not to call a routine to get the correct size.
 *
 */
static curl_off_t VmsSpecialSize(const char *name,
                                 const struct_stat *stat_buf)
{
  switch(stat_buf->st_fab_rfm) {
  case FAB$C_VAR:
  case FAB$C_VFC:
    return vms_realfilesize(name, stat_buf);
    break;
  default:
    return stat_buf->st_size;
  }
}
#endif /* __VMS */

struct per_transfer *transfers; /* first node */
static struct per_transfer *transfersl; /* last node */
static curl_off_t all_pers;

/* add_per_transfer creates a new 'per_transfer' node in the linked
   list of transfers */
static CURLcode add_per_transfer(struct per_transfer **per)
{
  struct per_transfer *p;
  p = calloc(1, sizeof(struct per_transfer));
  if(!p)
    return CURLE_OUT_OF_MEMORY;
  if(!transfers)
    /* first entry */
    transfersl = transfers = p;
  else {
    /* make the last node point to the new node */
    transfersl->next = p;
    /* make the new node point back to the formerly last node */
    p->prev = transfersl;
    /* move the last node pointer to the new entry */
    transfersl = p;
  }
  *per = p;
  all_xfers++; /* count total number of transfers added */
  all_pers++;

  return CURLE_OK;
}

/* Remove the specified transfer from the list (and free it), return the next
   in line */
static struct per_transfer *del_per_transfer(struct per_transfer *per)
{
  struct per_transfer *n;
  struct per_transfer *p;
  DEBUGASSERT(transfers);
  DEBUGASSERT(transfersl);
  DEBUGASSERT(per);

  n = per->next;
  p = per->prev;

  if(p)
    p->next = n;
  else
    transfers = n;

  if(n)
    n->prev = p;
  else
    transfersl = p;

  free(per);
  all_pers--;

  return n;
}

static CURLcode pre_transfer(struct GlobalConfig *global,
                             struct per_transfer *per)
{
  curl_off_t uploadfilesize = -1;
  struct_stat fileinfo;
  CURLcode result = CURLE_OK;
#ifdef CURL_DISABLE_LIBCURL_OPTION
  (void)global; /* otherwise used in the my_setopt macros */
#else
  struct OperationConfig *config = global->current;
#endif

  if(per->uploadfile && !stdin_upload(per->uploadfile)) {
    /* VMS Note:
     *
     * Reading binary from files can be a problem... Only FIXED, VAR
     * etc WITHOUT implied CC will work. Others need a \n appended to
     * a line
     *
     * - Stat gives a size but this is UNRELIABLE in VMS. E.g.
     *   a fixed file with implied CC needs to have a byte added for every
     *   record processed, this can be derived from Filesize & recordsize
     *   for VARiable record files the records need to be counted! for
     *   every record add 1 for linefeed and subtract 2 for the record
     *   header for VARIABLE header files only the bare record data needs
     *   to be considered with one appended if implied CC
     */
#ifdef __VMS
    /* Calculate the real upload size for VMS */
    per->infd = -1;
    if(stat(per->uploadfile, &fileinfo) == 0) {
      fileinfo.st_size = VmsSpecialSize(per->uploadfile, &fileinfo);
      switch(fileinfo.st_fab_rfm) {
      case FAB$C_VAR:
      case FAB$C_VFC:
      case FAB$C_STMCR:
        per->infd = open(per->uploadfile, O_RDONLY | CURL_O_BINARY);
        break;
      default:
        per->infd = open(per->uploadfile, O_RDONLY | CURL_O_BINARY,
                         "rfm=stmlf", "ctx=stm");
      }
    }
    if(per->infd == -1)
#else
    per->infd = open(per->uploadfile, O_RDONLY | CURL_O_BINARY);
    if((per->infd == -1) || fstat(per->infd, &fileinfo))
#endif
    {
      helpf(tool_stderr, "cannot open '%s'", per->uploadfile);
      if(per->infd != -1) {
        close(per->infd);
        per->infd = STDIN_FILENO;
      }
      return CURLE_READ_ERROR;
    }
    per->infdopen = TRUE;

    /* we ignore file size for char/block devices, sockets, etc. */
    if(S_ISREG(fileinfo.st_mode))
      uploadfilesize = fileinfo.st_size;

#ifdef DEBUGBUILD
    /* allow dedicated test cases to override */
    {
      char *ev = getenv("CURL_UPLOAD_SIZE");
      if(ev) {
        int sz = atoi(ev);
        uploadfilesize = (curl_off_t)sz;
      }
    }
#endif

    if(uploadfilesize != -1)
      my_setopt_offt(per->curl, CURLOPT_INFILESIZE_LARGE, uploadfilesize);
  }
  per->uploadfilesize = uploadfilesize;
  per->start = curlx_now();
  return result;
}

void single_transfer_cleanup(struct OperationConfig *config)
{
  if(config) {
    struct State *state = &config->state;
    /* Free list of remaining URLs */
    glob_cleanup(&state->urls);
    tool_safefree(state->outfiles);
    tool_safefree(state->uploadfile);
    /* Free list of globbed upload files */
    glob_cleanup(&state->inglob);
  }
}

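/*
 * retrycheck is called after a transfer has ended to decide whether it
 * should be retried. Timeouts and resolve failures always qualify,
 * ECONNREFUSED only with --retry-connrefused, transient HTTP responses
 * (408, 429, 500, 502, 503, 504) and FTP 4xx responses qualify for their
 * protocols and --retry-all-errors retries on any error. When a retry is
 * due, it truncates partially written regular output files, honors a
 * Retry-After: header and otherwise doubles the sleep time up to
 * RETRY_SLEEP_MAX unless a fixed --retry-delay was given.
 */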
static CURLcode retrycheck(struct OperationConfig *config,
                           struct per_transfer *per,
                           CURLcode result,
                           bool *retryp,
                           long *delayms)
{
  CURL *curl = per->curl;
  struct OutStruct *outs = &per->outs;
  enum {
    RETRY_NO,
    RETRY_ALL_ERRORS,
    RETRY_TIMEOUT,
    RETRY_CONNREFUSED,
    RETRY_HTTP,
    RETRY_FTP,
    RETRY_LAST /* not used */
  } retry = RETRY_NO;
  long response = 0;
  if((CURLE_OPERATION_TIMEDOUT == result) ||
     (CURLE_COULDNT_RESOLVE_HOST == result) ||
     (CURLE_COULDNT_RESOLVE_PROXY == result) ||
     (CURLE_FTP_ACCEPT_TIMEOUT == result))
    /* retry timeout always */
    retry = RETRY_TIMEOUT;
  else if(config->retry_connrefused &&
          (CURLE_COULDNT_CONNECT == result)) {
    long oserrno = 0;
    curl_easy_getinfo(curl, CURLINFO_OS_ERRNO, &oserrno);
    if(SOCKECONNREFUSED == oserrno)
      retry = RETRY_CONNREFUSED;
  }
  else if((CURLE_OK == result) ||
          ((config->failonerror || config->failwithbody) &&
           (CURLE_HTTP_RETURNED_ERROR == result))) {
    /* If it returned OK. _or_ failonerror was enabled and it
       returned due to such an error, check for HTTP transient
       errors to retry on. */
    const char *scheme;
    curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme);
    scheme = proto_token(scheme);
    if(scheme == proto_http || scheme == proto_https) {
      /* This was HTTP(S) */
      curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);

      switch(response) {
      case 408: /* Request Timeout */
      case 429: /* Too Many Requests (RFC6585) */
      case 500: /* Internal Server Error */
      case 502: /* Bad Gateway */
      case 503: /* Service Unavailable */
      case 504: /* Gateway Timeout */
        retry = RETRY_HTTP;
        /*
         * At this point, we have already written data to the output
         * file (or terminal). If we write to a file, we must rewind
         * or close/re-open the file so that the next attempt starts
         * over from the beginning.
         *
         * For the upload case, we might need to start over reading from a
         * previous point if we have uploaded something when this was
         * returned.
         */
        break;
      }
    }
  } /* if CURLE_OK */
  else if(result) {
    const char *scheme;

    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
    curl_easy_getinfo(curl, CURLINFO_SCHEME, &scheme);
    scheme = proto_token(scheme);

    if((scheme == proto_ftp || scheme == proto_ftps) && response / 100 == 4)
      /*
       * This is typically when the FTP server only allows a certain
       * amount of users and we are not one of them. All 4xx codes
       * are transient.
       */
      retry = RETRY_FTP;
  }

  if(result && !retry && config->retry_all_errors)
    retry = RETRY_ALL_ERRORS;

  if(retry) {
    long sleeptime = 0;
    curl_off_t retry_after = 0;
    static const char * const m[]={
      NULL,
      "(retrying all errors)",
      ": timeout",
      ": connection refused",
      ": HTTP error",
      ": FTP error"
    };

    sleeptime = per->retry_sleep;
    if(RETRY_HTTP == retry) {
      curl_easy_getinfo(curl, CURLINFO_RETRY_AFTER, &retry_after);
      if(retry_after) {
        /* store in a 'long', make sure it does not overflow */
        if(retry_after > LONG_MAX/1000)
          sleeptime = LONG_MAX;
        else if((retry_after * 1000) > sleeptime)
          sleeptime = (long)retry_after * 1000; /* milliseconds */

        /* if adding retry_after seconds to the process would exceed the
           maximum time allowed for retrying, then exit the retries right
           away */
        if(config->retry_maxtime) {
          curl_off_t seconds = curlx_timediff(curlx_now(),
                                              per->retrystart)/1000;

          if((CURL_OFF_T_MAX - retry_after < seconds) ||
             (seconds + retry_after > config->retry_maxtime)) {
            warnf(config->global, "The Retry-After: time would "
                  "make this command line exceed the maximum allowed time "
                  "for retries.");
            *retryp = FALSE;
            return CURLE_OK; /* no retry */
          }
        }
      }
    }
    warnf(config->global, "Problem %s. "
          "Will retry in %ld second%s. "
          "%ld retr%s left.",
          m[retry], sleeptime/1000L,
          (sleeptime/1000L == 1 ? "" : "s"),
          per->retry_remaining,
          (per->retry_remaining > 1 ? "ies" : "y"));

    per->retry_remaining--;
    if(!config->retry_delay) {
      per->retry_sleep *= 2;
      if(per->retry_sleep > RETRY_SLEEP_MAX)
        per->retry_sleep = RETRY_SLEEP_MAX;
    }

    if(outs->bytes && outs->filename && outs->stream) {
#ifndef __MINGW32CE__
      struct_stat fileinfo;

      /* The output can be a named pipe or a character device etc that
         cannot be truncated. Only truncate regular files. */
      if(!fstat(fileno(outs->stream), &fileinfo) &&
         S_ISREG(fileinfo.st_mode))
#else
      /* Windows CE's fileno() is bad so just skip the check */
#endif
      {
        int rc;
        /* We have written data to an output file, we truncate file */
        fflush(outs->stream);
        notef(config->global,
              "Throwing away %" CURL_FORMAT_CURL_OFF_T " bytes",
              outs->bytes);
        /* truncate file at the position where we started appending */
#if defined(HAVE_FTRUNCATE) && !defined(__DJGPP__) && !defined(__AMIGA__) && \
  !defined(__MINGW32CE__)
        if(ftruncate(fileno(outs->stream), outs->init)) {
          /* when truncate fails, we cannot just append as then we will
             create something strange, bail out */
          errorf(config->global, "Failed to truncate file");
          return CURLE_WRITE_ERROR;
        }
        /* now seek to the end of the file, the position where we
           just truncated the file in a large file-safe way */
        rc = fseek(outs->stream, 0, SEEK_END);
#else
        /* ftruncate is not available, so just reposition the file
           to the location we would have truncated it. This will not
           work properly with large files on 32-bit systems, but
           most of those will have ftruncate. */
        rc = fseek(outs->stream, (long)outs->init, SEEK_SET);
#endif
        if(rc) {
          errorf(config->global, "Failed seeking to end of file");
          return CURLE_WRITE_ERROR;
        }
        outs->bytes = 0; /* clear for next round */
      }
    }
    *retryp = TRUE;
    per->num_retries++;
    *delayms = sleeptime;
    result = CURLE_OK;
  }
  return result;
}


/*
 * Call this after a transfer has completed.
 */
static CURLcode post_per_transfer(struct GlobalConfig *global,
                                  struct per_transfer *per,
                                  CURLcode result,
                                  bool *retryp,
                                  long *delay) /* milliseconds! */
{
  struct OutStruct *outs = &per->outs;
  CURL *curl = per->curl;
  struct OperationConfig *config = per->config;
  int rc;

  *retryp = FALSE;
  *delay = 0; /* for no retry, keep it zero */

  if(!curl || !config)
    return result;

  if(per->uploadfile) {
    if(!strcmp(per->uploadfile, ".") && per->infd > 0) {
#if defined(_WIN32) && !defined(CURL_WINDOWS_UWP) && !defined(UNDER_CE)
      sclose(per->infd);
#else
      warnf(per->config->global, "Closing per->infd != 0: FD == "
            "%d. This behavior is only supported on desktop "
            " Windows", per->infd);
#endif
    }
  }
  else {
    if(per->infdopen) {
      close(per->infd);
    }
  }

  if(per->skip)
    goto skip;

#ifdef __VMS
  if(is_vms_shell()) {
    /* VMS DCL shell behavior */
    if(global->silent && !global->showerror)
      vms_show = VMSSTS_HIDE;
  }
  else
#endif
  if(!config->synthetic_error && result &&
     (!global->silent || global->showerror)) {
    const char *msg = per->errorbuffer;
    fprintf(tool_stderr, "curl: (%d) %s\n", result,
            (msg && msg[0]) ? msg : curl_easy_strerror(result));
    if(result == CURLE_PEER_FAILED_VERIFICATION)
      fputs(CURL_CA_CERT_ERRORMSG, tool_stderr);
  }
  else if(config->failwithbody) {
    /* if HTTP response >= 400, return error */
    long code = 0;
    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
    if(code >= 400) {
      if(!global->silent || global->showerror)
        fprintf(tool_stderr,
                "curl: (%d) The requested URL returned error: %ld\n",
                CURLE_HTTP_RETURNED_ERROR, code);
      result = CURLE_HTTP_RETURNED_ERROR;
    }
  }
  /* Set file extended attributes */
  if(!result && config->xattr && outs->fopened && outs->stream) {
    rc = fwrite_xattr(curl, per->url, fileno(outs->stream));
    if(rc)
      warnf(config->global, "Error setting extended attributes on '%s': %s",
            outs->filename, strerror(errno));
  }

  if(!result && !outs->stream && !outs->bytes) {
    /* we have received no data despite the transfer was successful
       ==> force creation of an empty output file (if an output file
       was specified) */
    long cond_unmet = 0L;
    /* do not create (or even overwrite) the file in case we get no
       data because of unmet condition */
    curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &cond_unmet);
    if(!cond_unmet && !tool_create_output_file(outs, config))
      result = CURLE_WRITE_ERROR;
  }

  if(!outs->s_isreg && outs->stream) {
    /* Dump standard stream buffered data */
    rc = fflush(outs->stream);
    if(!result && rc) {
      /* something went wrong in the writing process */
      result = CURLE_WRITE_ERROR;
      errorf(global, "Failed writing body");
    }
  }

#ifdef _WIN32
  /* Discard incomplete UTF-8 sequence buffered from body */
  if(outs->utf8seq[0])
    memset(outs->utf8seq, 0, sizeof(outs->utf8seq));
#endif

  /* if retry-max-time is non-zero, make sure we have not exceeded the
     time */
  if(per->retry_remaining &&
     (!config->retry_maxtime ||
      (curlx_timediff(curlx_now(), per->retrystart) <
       config->retry_maxtime*1000L)) ) {
    result = retrycheck(config, per, result, retryp, delay);
    if(!result && *retryp)
      return CURLE_OK; /* retry! */
  }

  if((global->progressmode == CURL_PROGRESS_BAR) &&
     per->progressbar.calls)
    /* if the custom progress bar has been displayed, we output a
       newline here */
    fputs("\n", per->progressbar.out);

  /* Close the outs file */
  if(outs->fopened && outs->stream) {
    rc = fclose(outs->stream);
    if(!result && rc) {
      /* something went wrong in the writing process */
      result = CURLE_WRITE_ERROR;
      errorf(config->global, "curl: (%d) Failed writing body", result);
    }
    if(result && config->rm_partial) {
      struct_stat st;
      if(!stat(outs->filename, &st) &&
         S_ISREG(st.st_mode)) {
        if(!unlink(outs->filename))
          notef(global, "Removed output file: %s", outs->filename);
        else
          warnf(global, "Failed removing: %s", outs->filename);
      }
      else
        warnf(global, "Skipping removal; not a regular file: %s",
              outs->filename);
    }
  }

  /* File time can only be set _after_ the file has been closed */
  if(!result && config->remote_time && outs->s_isreg && outs->filename) {
    /* Ask libcurl if we got a remote file time */
    curl_off_t filetime = -1;
    curl_easy_getinfo(curl, CURLINFO_FILETIME_T, &filetime);
    setfiletime(filetime, outs->filename, global);
  }
skip:
  /* Write the --write-out data before cleanup but after result is final */
  if(config->writeout)
    ourWriteOut(config, per, result);

  /* Close function-local opened file descriptors */
  if(per->heads.fopened && per->heads.stream)
    fclose(per->heads.stream);

  if(per->heads.alloc_filename)
    tool_safefree(per->heads.filename);

  if(per->etag_save.fopened && per->etag_save.stream)
    fclose(per->etag_save.stream);

  if(per->etag_save.alloc_filename)
    tool_safefree(per->etag_save.filename);

  curl_easy_cleanup(per->curl);
  if(outs->alloc_filename)
    free(outs->filename);
  free(per->url);
  free(per->outfile);
  free(per->uploadfile);
  if(global->parallel)
    free(per->errorbuffer);
  curl_slist_free_all(per->hdrcbdata.headlist);
  per->hdrcbdata.headlist = NULL;
  return result;
}

static CURLcode set_cert_types(struct OperationConfig *config)
{
  if(feature_ssl) {
    /* Check if config->cert is a PKCS#11 URI and set the config->cert_type if
     * necessary */
    if(config->cert && !config->cert_type && is_pkcs11_uri(config->cert)) {
      config->cert_type = strdup("ENG");
      if(!config->cert_type)
        return CURLE_OUT_OF_MEMORY;
    }

    /* Check if config->key is a PKCS#11 URI and set the config->key_type if
     * necessary */
    if(config->key && !config->key_type && is_pkcs11_uri(config->key)) {
      config->key_type = strdup("ENG");
      if(!config->key_type)
        return CURLE_OUT_OF_MEMORY;
    }

    /* Check if config->proxy_cert is a PKCS#11 URI and set the
     * config->proxy_type if necessary */
    if(config->proxy_cert && !config->proxy_cert_type &&
       is_pkcs11_uri(config->proxy_cert)) {
      config->proxy_cert_type = strdup("ENG");
      if(!config->proxy_cert_type)
        return CURLE_OUT_OF_MEMORY;
    }

    /* Check if config->proxy_key is a PKCS#11 URI and set the
     * config->proxy_key_type if necessary */
    if(config->proxy_key && !config->proxy_key_type &&
       is_pkcs11_uri(config->proxy_key)) {
      config->proxy_key_type = strdup("ENG");
      if(!config->proxy_key_type)
        return CURLE_OUT_OF_MEMORY;
    }
  }
  return CURLE_OK;
}

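/* append2query parses the transfer's URL, appends 'q' to its query part
   using CURLU_APPENDQUERY and replaces per->url with the updated URL. Used
   for --get request data and --url-query additions. */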
static CURLcode append2query(struct OperationConfig *config,
                             struct per_transfer *per,
                             const char *q)
{
  CURLcode result = CURLE_OK;
  CURLU *uh = curl_url();
  if(uh) {
    CURLUcode uerr;
    uerr = curl_url_set(uh, CURLUPART_URL, per->url,
                        CURLU_GUESS_SCHEME);
    if(uerr) {
      result = urlerr_cvt(uerr);
      errorf(config->global, "(%d) Could not parse the URL, "
             "failed to set query", result);
      config->synthetic_error = TRUE;
    }
    else {
      char *updated = NULL;
      uerr = curl_url_set(uh, CURLUPART_QUERY, q, CURLU_APPENDQUERY);
      if(!uerr)
        uerr = curl_url_get(uh, CURLUPART_URL, &updated,
                            CURLU_GUESS_SCHEME);
      if(uerr)
        result = urlerr_cvt(uerr);
      else {
        free(per->url); /* free previous URL */
        per->url = updated; /* use our new URL instead! */
      }
    }
    curl_url_cleanup(uh);
  }
  return result;
}

static CURLcode etag_compare(struct OperationConfig *config)
{
  CURLcode result = CURLE_OK;
  char *etag_from_file = NULL;
  char *header = NULL;
  ParameterError pe;

  /* open file for reading: */
  FILE *file = fopen(config->etag_compare_file, FOPEN_READTEXT);
  if(!file)
    warnf(config->global, "Failed to open %s: %s", config->etag_compare_file,
          strerror(errno));

  if((PARAM_OK == file2string(&etag_from_file, file)) &&
     etag_from_file) {
    header = aprintf("If-None-Match: %s", etag_from_file);
    tool_safefree(etag_from_file);
  }
  else
    header = aprintf("If-None-Match: \"\"");

  if(!header) {
    if(file)
      fclose(file);
    errorf(config->global,
           "Failed to allocate memory for custom etag header");
    return CURLE_OUT_OF_MEMORY;
  }

  /* add Etag from file to list of custom headers */
  pe = add2list(&config->headers, header);
  tool_safefree(header);

  if(file)
    fclose(file);
  if(pe != PARAM_OK)
    result = CURLE_OUT_OF_MEMORY;
  return result;
}

static CURLcode etag_store(struct OperationConfig *config,
                           struct OutStruct *etag_save,
                           bool *skip)
{
  if(config->create_dirs) {
    CURLcode result = create_dir_hierarchy(config->etag_save_file,
                                           config->global);
    if(result)
      return result;
  }

  /* open file for output: */
  if(strcmp(config->etag_save_file, "-")) {
    FILE *newfile = fopen(config->etag_save_file, "ab");
    if(!newfile) {
      struct State *state = &config->state;
      warnf(config->global, "Failed creating file for saving etags: \"%s\". "
            "Skip this transfer", config->etag_save_file);
      tool_safefree(state->outfiles);
      glob_cleanup(&state->urls);
      *skip = TRUE;
      return CURLE_OK;
    }
    else {
      etag_save->filename = config->etag_save_file;
      etag_save->s_isreg = TRUE;
      etag_save->fopened = TRUE;
      etag_save->stream = newfile;
    }
  }
  else {
    /* always use binary mode for protocol header output */
    CURLX_SET_BINMODE(etag_save->stream);
  }
  return CURLE_OK;
}

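/* setup_headerfile prepares the --dump-header output. "%" means stderr and
   "-" means stdout; otherwise the file is truncated by the first transfer
   using this config and then opened in append mode, since parallel
   transfers may finish in any order. */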
static CURLcode setup_headerfile(struct OperationConfig *config,
                                 struct per_transfer *per,
                                 struct OutStruct *heads)
{
  /* open file for output: */
  if(!strcmp(config->headerfile, "%")) {
    heads->stream = stderr;
    /* use binary mode for protocol header output */
    CURLX_SET_BINMODE(heads->stream);
  }
  else if(strcmp(config->headerfile, "-")) {
    FILE *newfile;

    /*
     * Since every transfer has its own file handle for dumping
     * the headers, we need to open it in append mode, since transfers
     * might finish in any order.
     * The first transfer just clears the file.
     *
     * Consider placing the file handle inside the OperationConfig, so
     * that it does not need to be opened/closed for every transfer.
     */
    if(config->create_dirs) {
      CURLcode result = create_dir_hierarchy(config->headerfile,
                                             config->global);
      /* create_dir_hierarchy shows error upon CURLE_WRITE_ERROR */
      if(result)
        return result;
    }
    if(!per->prev || per->prev->config != config) {
      newfile = fopen(config->headerfile, "wb");
      if(newfile)
        fclose(newfile);
    }
    newfile = fopen(config->headerfile, "ab");

    if(!newfile) {
      errorf(config->global, "Failed to open %s", config->headerfile);
      return CURLE_WRITE_ERROR;
    }
    else {
      heads->filename = config->headerfile;
      heads->s_isreg = TRUE;
      heads->fopened = TRUE;
      heads->stream = newfile;
    }
  }
  else {
    /* always use binary mode for protocol header output */
    CURLX_SET_BINMODE(heads->stream);
  }
  return CURLE_OK;
}

static CURLcode setup_outfile(struct OperationConfig *config,
                              struct per_transfer *per,
                              struct OutStruct *outs,
                              bool *skipped)
{
  /*
   * We have specified a filename to store the result in, or we have
   * decided we want to use the remote filename.
   */
  struct State *state = &config->state;
  struct GlobalConfig *global = config->global;

  if(!per->outfile) {
    /* extract the filename from the URL */
    CURLcode result = get_url_file_name(global, &per->outfile, per->url);
    if(result) {
      errorf(global, "Failed to extract a filename"
             " from the URL to use for storage");
      return result;
    }
  }
  else if(state->urls) {
    /* fill '#1' ... '#9' terms from URL pattern */
    char *storefile = per->outfile;
    CURLcode result = glob_match_url(&per->outfile, storefile, state->urls);
    tool_safefree(storefile);
    if(result) {
      /* bad globbing */
      warnf(global, "bad output glob");
      return result;
    }
    if(!*per->outfile) {
      warnf(global, "output glob produces empty string");
      return CURLE_WRITE_ERROR;
    }
  }
  DEBUGASSERT(per->outfile);

  if(config->output_dir && *config->output_dir) {
    char *d = aprintf("%s/%s", config->output_dir, per->outfile);
    if(!d)
      return CURLE_WRITE_ERROR;
    free(per->outfile);
    per->outfile = d;
  }
  /* Create the directory hierarchy, if not pre-existent to a multiple
     file output call */

  if(config->create_dirs) {
    CURLcode result = create_dir_hierarchy(per->outfile, global);
    /* create_dir_hierarchy shows error upon CURLE_WRITE_ERROR */
    if(result)
      return result;
  }

  if(config->skip_existing) {
    struct_stat fileinfo;
    if(!stat(per->outfile, &fileinfo)) {
      /* file is present */
      notef(global, "skips transfer, \"%s\" exists locally",
            per->outfile);
      per->skip = TRUE;
      *skipped = TRUE;
    }
  }

  if(config->resume_from_current) {
    /* We are told to continue from where we are now. Get the size
       of the file as it is now and open it for append instead */
    struct_stat fileinfo;
    /* VMS -- Danger, the filesize is only valid for stream files */
    if(0 == stat(per->outfile, &fileinfo))
      /* set offset to current file size: */
      config->resume_from = fileinfo.st_size;
    else
      /* let offset be 0 */
      config->resume_from = 0;
  }

  if(config->resume_from) {
#ifdef __VMS
    /* open file for output, forcing VMS output format into stream
       mode which is needed for stat() call above to always work. */
    FILE *file = fopen(per->outfile, "ab",
                       "ctx=stm", "rfm=stmlf", "rat=cr", "mrs=0");
#else
    /* open file for output: */
    FILE *file = fopen(per->outfile, "ab");
#endif
    if(!file) {
      errorf(global, "cannot open '%s'", per->outfile);
      return CURLE_WRITE_ERROR;
    }
    outs->fopened = TRUE;
    outs->stream = file;
    outs->init = config->resume_from;
  }
  else {
    outs->stream = NULL; /* open when needed */
  }
  outs->filename = per->outfile;
  outs->s_isreg = TRUE;
  return CURLE_OK;
}

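/* check_stdin_upload handles uploads read from stdin ("-" or "."). It warns
   when --anyauth or --proxy-anyauth is combined with a stdin upload,
   switches stdin to binary mode and, for ".", makes the descriptor
   non-blocking (on Windows by spawning a stdin reader thread). */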
static void check_stdin_upload(struct OperationConfig *config,
                               struct per_transfer *per)
{
  struct GlobalConfig *global = config->global;
  /* count to see if there are more than one auth bit set
     in the authtype field */
  int authbits = 0;
  int bitcheck = 0;
  while(bitcheck < 32) {
    if(config->authtype & (1UL << bitcheck++)) {
      authbits++;
      if(authbits > 1) {
        /* more than one, we are done! */
        break;
      }
    }
  }

  /*
   * If the user has also selected --anyauth or --proxy-anyauth
   * we should warn them.
   */
  if(config->proxyanyauth || (authbits > 1)) {
    warnf(global,
          "Using --anyauth or --proxy-anyauth with upload from stdin"
          " involves a big risk of it not working. Use a temporary"
          " file or a fixed auth type instead");
  }

  DEBUGASSERT(per->infdopen == FALSE);
  DEBUGASSERT(per->infd == STDIN_FILENO);

  CURLX_SET_BINMODE(stdin);
  if(!strcmp(per->uploadfile, ".")) {
#if defined(_WIN32) && !defined(CURL_WINDOWS_UWP) && !defined(UNDER_CE)
    /* non-blocking stdin behavior on Windows is challenging
       Spawn a new thread that will read from stdin and write
       out to a socket */
    curl_socket_t f = win32_stdin_read_thread(global);

    if(f == CURL_SOCKET_BAD)
      warnf(global, "win32_stdin_read_thread returned INVALID_SOCKET "
            "falling back to blocking mode");
    else if(f > INT_MAX) {
      warnf(global, "win32_stdin_read_thread returned identifier "
            "larger than INT_MAX. This should not happen unless "
            "the upper 32 bits of a Windows socket have started "
            "being used for something... falling back to blocking "
            "mode");
      sclose(f);
    }
    else
      per->infd = (int)f;
#endif
    if(curlx_nonblock((curl_socket_t)per->infd, TRUE) < 0)
      warnf(global,
            "fcntl failed on fd=%d: %s", per->infd, strerror(errno));
  }
}

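/*
 * single_transfer steps through the URL nodes of the given config and sets
 * up at most one new transfer per call: it expands URL and upload globs,
 * prepares the output, header and etag files and hands the new easy handle
 * to config2setopts(). It sets *added when a transfer was created and
 * *skipped when the transfer is skipped (e.g. with --skip-existing).
 */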
/* create the next (singular) transfer */
static CURLcode single_transfer(struct OperationConfig *config,
                                CURLSH *share,
                                bool *added,
                                bool *skipped)
{
  CURLcode result = CURLE_OK;
  struct getout *urlnode;
  struct GlobalConfig *global = config->global;
  bool orig_noprogress = global->noprogress;
  bool orig_isatty = global->isatty;
  struct State *state = &config->state;
  char *httpgetfields = state->httpgetfields;

  *skipped = *added = FALSE; /* not yet */

  if(config->postfields) {
    if(config->use_httpget) {
      if(!httpgetfields) {
        /* Use the postfields data for an HTTP get */
        httpgetfields = state->httpgetfields = config->postfields;
        config->postfields = NULL;
        if(SetHTTPrequest(config, (config->no_body ? TOOL_HTTPREQ_HEAD :
                                   TOOL_HTTPREQ_GET), &config->httpreq)) {
          result = CURLE_FAILED_INIT;
        }
      }
    }
    else {
      if(SetHTTPrequest(config, TOOL_HTTPREQ_SIMPLEPOST, &config->httpreq))
        result = CURLE_FAILED_INIT;
    }
    if(result)
      goto fail;
  }
  if(!state->urlnode) {
    /* first time caller, setup things */
    state->urlnode = config->url_list;
    state->infilenum = 1;
  }

  result = set_cert_types(config);
  if(result)
    goto fail;

  for(; state->urlnode; state->urlnode = urlnode->next) {
    static bool warn_more_options = FALSE;
    curl_off_t urlnum;

    urlnode = state->urlnode;
    /* urlnode->url is the full URL or NULL */
    if(!urlnode->url) {
      /* This node has no URL. Free node data without destroying the
         node itself nor modifying next pointer and continue to next */
      urlnode->outset = urlnode->urlset = urlnode->useremote =
        urlnode->uploadset = urlnode->noupload = urlnode->noglob = FALSE;
      state->up = 0;
      if(!warn_more_options) {
        /* only show this once */
        warnf(config->global, "Got more output options than URLs");
        warn_more_options = TRUE;
      }
      continue; /* next URL please */
    }

    /* save outfile pattern before expansion */
    if(urlnode->outfile && !state->outfiles) {
      state->outfiles = strdup(urlnode->outfile);
      if(!state->outfiles) {
        errorf(global, "out of memory");
        result = CURLE_OUT_OF_MEMORY;
        break;
      }
    }

    if(!config->globoff && urlnode->infile && !state->inglob) {
      /* Unless explicitly shut off */
      result = glob_url(&state->inglob, urlnode->infile, &state->infilenum,
                        (!global->silent || global->showerror) ?
                        tool_stderr : NULL);
      if(result)
        break;
    }


    if(state->up || urlnode->infile) {
      if(!state->uploadfile) {
        if(state->inglob) {
          result = glob_next_url(&state->uploadfile, state->inglob);
          if(result == CURLE_OUT_OF_MEMORY)
            errorf(global, "out of memory");
        }
        else if(!state->up) {
          /* copy the allocated string */
          state->uploadfile = urlnode->infile;
          urlnode->infile = NULL;
        }
      }
      if(result)
        break;
    }

    if(!state->urlnum) {
      if(!config->globoff && !urlnode->noglob) {
        /* Unless explicitly shut off, we expand '{...}' and '[...]'
           expressions and return total number of URLs in pattern set */
        result = glob_url(&state->urls, urlnode->url, &state->urlnum,
                          (!global->silent || global->showerror) ?
                          tool_stderr : NULL);
        if(result)
          break;
        urlnum = state->urlnum;
      }
      else
        urlnum = 1; /* without globbing, this is a single URL */
    }
    else
      urlnum = state->urlnum;

    if(state->up < state->infilenum) {
      struct per_transfer *per = NULL;
      struct OutStruct *outs;
      struct OutStruct *heads;
      struct OutStruct *etag_save;
      struct HdrCbData *hdrcbdata = NULL;
      struct OutStruct etag_first;
      CURL *curl;

      /* --etag-save */
      memset(&etag_first, 0, sizeof(etag_first));
      etag_save = &etag_first;
      etag_save->stream = stdout;

      /* --etag-compare */
      if(config->etag_compare_file) {
        result = etag_compare(config);
        if(result)
          break;
      }

      if(config->etag_save_file) {
        bool badetag = FALSE;
        result = etag_store(config, etag_save, &badetag);
        if(result || badetag)
          break;
      }

      curl = curl_easy_init();
      if(curl)
        result = add_per_transfer(&per);
      else
        result = CURLE_OUT_OF_MEMORY;
      if(result) {
        curl_easy_cleanup(curl);
        if(etag_save->fopened)
          fclose(etag_save->stream);
        break;
      }
      per->etag_save = etag_first; /* copy the whole struct */
      if(state->uploadfile) {
        per->uploadfile = strdup(state->uploadfile);
        if(!per->uploadfile) {
          curl_easy_cleanup(curl);
          result = CURLE_OUT_OF_MEMORY;
          break;
        }
        if(SetHTTPrequest(config, TOOL_HTTPREQ_PUT, &config->httpreq)) {
          tool_safefree(per->uploadfile);
          curl_easy_cleanup(curl);
          result = CURLE_FAILED_INIT;
          break;
        }
      }
      *added = TRUE;
      per->config = config;
      per->curl = curl;
      per->urlnum = (unsigned int)urlnode->num;

      /* default headers output stream is stdout */
      heads = &per->heads;
      heads->stream = stdout;

      /* Single header file for all URLs */
      if(config->headerfile) {
        result = setup_headerfile(config, per, heads);
        if(result)
          break;
      }
      hdrcbdata = &per->hdrcbdata;

      outs = &per->outs;

      per->outfile = NULL;
      per->infdopen = FALSE;
      per->infd = STDIN_FILENO;

      /* default output stream is stdout */
      outs->stream = stdout;

      if(state->urls) {
        result = glob_next_url(&per->url, state->urls);
        if(result)
          break;
      }
      else if(!state->li) {
        per->url = strdup(urlnode->url);
        if(!per->url) {
          result = CURLE_OUT_OF_MEMORY;
          break;
        }
      }
      else
        per->url = NULL;
      if(!per->url)
        break;

      if(state->outfiles) {
        per->outfile = strdup(state->outfiles);
        if(!per->outfile) {
          result = CURLE_OUT_OF_MEMORY;
          break;
        }
      }

      if((urlnode->useremote ||
          (per->outfile && strcmp("-", per->outfile)))) {
        result = setup_outfile(config, per, outs, skipped);
        if(result)
          break;
      }

      if(per->uploadfile) {

        if(stdin_upload(per->uploadfile))
          check_stdin_upload(config, per);
        else {
          /*
           * We have specified a file to upload and it is not "-".
           */
          result = add_file_name_to_url(per->curl, &per->url,
                                        per->uploadfile);
          if(result)
            break;
        }
      }

      if(per->uploadfile && config->resume_from_current)
        config->resume_from = -1; /* -1 will then force get-it-yourself */

      if(output_expected(per->url, per->uploadfile) && outs->stream &&
         isatty(fileno(outs->stream)))
        /* we send the output to a tty, therefore we switch off the progress
           meter */
        per->noprogress = global->noprogress = global->isatty = TRUE;
      else {
        /* progress meter is per download, so restore config
           values */
        per->noprogress = global->noprogress = orig_noprogress;
        global->isatty = orig_isatty;
      }

      if(httpgetfields || config->query) {
        result = append2query(config, per,
                              httpgetfields ? httpgetfields : config->query);
        if(result)
          break;
      }

      if((!per->outfile || !strcmp(per->outfile, "-")) &&
         !config->use_ascii) {
        /* We get the output to stdout and we have not got the ASCII/text
           flag, then set stdout to be binary */
        CURLX_SET_BINMODE(stdout);
      }

      /* explicitly passed to stdout means okaying binary gunk */
      config->terminal_binary_ok =
        (per->outfile && !strcmp(per->outfile, "-"));

      if(config->content_disposition && urlnode->useremote)
        hdrcbdata->honor_cd_filename = TRUE;
      else
        hdrcbdata->honor_cd_filename = FALSE;

      hdrcbdata->outs = outs;
      hdrcbdata->heads = heads;
      hdrcbdata->etag_save = etag_save;
      hdrcbdata->global = global;
      hdrcbdata->config = config;

      result = config2setopts(config, per, curl, share);
      if(result)
        break;

      /* initialize retry vars for loop below */
      per->retry_sleep_default = (config->retry_delay) ?
        config->retry_delay*1000L : RETRY_SLEEP_DEFAULT; /* ms */
      per->retry_remaining = config->req_retry;
      per->retry_sleep = per->retry_sleep_default; /* ms */
      per->retrystart = curlx_now();

      state->li++;
      /* Here's looping around each globbed URL */
      if(state->li >= urlnum) {
        state->li = 0;
        state->urlnum = 0; /* forced reglob of URLs */
        glob_cleanup(&state->urls);
        state->up++;
        tool_safefree(state->uploadfile); /* clear it to get the next */
      }
    }
    else {
      /* Free this URL node data without destroying the
         node itself nor modifying next pointer. */
      urlnode->outset = urlnode->urlset = urlnode->useremote =
        urlnode->uploadset = urlnode->noupload = urlnode->noglob = FALSE;
      glob_cleanup(&state->urls);
      state->urlnum = 0;

      tool_safefree(state->outfiles);
      tool_safefree(state->uploadfile);
      /* Free list of globbed upload files */
      glob_cleanup(&state->inglob);
      state->up = 0;
      continue;
    }
    break;
  }
  tool_safefree(state->outfiles);
fail:
  if(!*added || result) {
    *added = FALSE;
    single_transfer_cleanup(config);
  }
  return result;
}

static long all_added; /* number of easy handles currently added */

/*
 * add_parallel_transfers() sets 'morep' to TRUE if there are more transfers
 * to add even after this call returns. sets 'addedp' to TRUE if one or more
 * transfers were added.
 */
static CURLcode add_parallel_transfers(struct GlobalConfig *global,
                                       CURLM *multi,
                                       CURLSH *share,
                                       bool *morep,
                                       bool *addedp)
{
  struct per_transfer *per;
  CURLcode result = CURLE_OK;
  CURLMcode mcode;
  bool sleeping = FALSE;
  char *errorbuf;
  *addedp = FALSE;
  *morep = FALSE;
  if(all_pers < (global->parallel_max*2)) {
    bool skipped = FALSE;
    do {
      result = create_transfer(global, share, addedp, &skipped);
      if(result)
        return result;
    } while(skipped);
  }
  for(per = transfers; per && (all_added < global->parallel_max);
      per = per->next) {
    if(per->added || per->skip)
      /* already added or to be skipped */
      continue;
    if(per->startat && (time(NULL) < per->startat)) {
      /* this is still delaying */
      sleeping = TRUE;
      continue;
    }
    per->added = TRUE;

    result = pre_transfer(global, per);
    if(result)
      return result;

    errorbuf = malloc(CURL_ERROR_SIZE);
    if(!errorbuf)
      return CURLE_OUT_OF_MEMORY;

    /* parallel connect means that we do not set PIPEWAIT since pipewait
       will make libcurl prefer multiplexing */
    (void)curl_easy_setopt(per->curl, CURLOPT_PIPEWAIT,
                           global->parallel_connect ? 0L : 1L);
    (void)curl_easy_setopt(per->curl, CURLOPT_PRIVATE, per);
    /* curl does not use signals, switching this on saves some system calls */
    (void)curl_easy_setopt(per->curl, CURLOPT_NOSIGNAL, 1L);
    (void)curl_easy_setopt(per->curl, CURLOPT_XFERINFOFUNCTION, xferinfo_cb);
    (void)curl_easy_setopt(per->curl, CURLOPT_XFERINFODATA, per);
    (void)curl_easy_setopt(per->curl, CURLOPT_NOPROGRESS, 0L);
#ifdef DEBUGBUILD
    if(getenv("CURL_FORBID_REUSE"))
      (void)curl_easy_setopt(per->curl, CURLOPT_FORBID_REUSE, 1L);
#endif

    mcode = curl_multi_add_handle(multi, per->curl);
    if(mcode) {
      DEBUGASSERT(mcode == CURLM_OUT_OF_MEMORY);
      result = CURLE_OUT_OF_MEMORY;
    }

    if(!result) {
      bool getadded = FALSE;
      bool skipped = FALSE;
      do {
        result = create_transfer(global, share, &getadded, &skipped);
        if(result)
          break;
      } while(skipped);
    }
    if(result) {
      free(errorbuf);
      return result;
    }
    errorbuf[0] = 0;
    (void)curl_easy_setopt(per->curl, CURLOPT_ERRORBUFFER, errorbuf);
    per->errorbuffer = errorbuf;
    per->added = TRUE;
    all_added++;
    *addedp = TRUE;
  }
  *morep = (per || sleeping);
  return CURLE_OK;
}

struct parastate {
  struct GlobalConfig *global;
  CURLM *multi;
  CURLSH *share;
  CURLMcode mcode;
  CURLcode result;
  int still_running;
  struct curltime start;
  bool more_transfers;
  bool added_transfers;
  /* wrapitup is set TRUE after a critical error occurs to end all transfers */
  bool wrapitup;
  /* wrapitup_processed is set TRUE after the per transfer abort flag is set */
  bool wrapitup_processed;
  time_t tick;
};

#if defined(DEBUGBUILD) && defined(USE_LIBUV)

#define DEBUG_UV 0

/* object to pass to the callbacks */
struct datauv {
  uv_timer_t timeout;
  uv_loop_t *loop;
  struct parastate *s;
};

struct contextuv {
  uv_poll_t poll_handle;
  curl_socket_t sockfd;
  struct datauv *uv;
};

static CURLcode check_finished(struct parastate *s);

static void check_multi_info(struct datauv *uv)
{
  CURLcode result;

  result = check_finished(uv->s);
  if(result && !uv->s->result)
    uv->s->result = result;

  if(uv->s->more_transfers) {
    result = add_parallel_transfers(uv->s->global, uv->s->multi,
                                    uv->s->share,
                                    &uv->s->more_transfers,
                                    &uv->s->added_transfers);
    if(result && !uv->s->result)
      uv->s->result = result;
    if(result)
      uv_stop(uv->loop);
  }
}

/* callback from libuv on socket activity */
static void on_uv_socket(uv_poll_t *req, int status, int events)
{
  int flags = 0;
  struct contextuv *c = (struct contextuv *) req->data;
  (void)status;
  if(events & UV_READABLE)
    flags |= CURL_CSELECT_IN;
  if(events & UV_WRITABLE)
    flags |= CURL_CSELECT_OUT;

  curl_multi_socket_action(c->uv->s->multi, c->sockfd, flags,
                           &c->uv->s->still_running);
}

/* callback from libuv when timeout expires */
static void on_uv_timeout(uv_timer_t *req)
{
  struct datauv *uv = (struct datauv *) req->data;
#if DEBUG_UV
  fprintf(tool_stderr, "parallel_event: on_uv_timeout\n");
#endif
  if(uv && uv->s) {
    curl_multi_socket_action(uv->s->multi, CURL_SOCKET_TIMEOUT, 0,
                             &uv->s->still_running);
    check_multi_info(uv);
  }
}

/* callback from libcurl to update the timeout expiry */
static int cb_timeout(CURLM *multi, long timeout_ms,
                      struct datauv *uv)
{
  (void)multi;
#if DEBUG_UV
  fprintf(tool_stderr, "parallel_event: cb_timeout=%ld\n", timeout_ms);
#endif
  if(timeout_ms < 0)
    uv_timer_stop(&uv->timeout);
  else {
    if(timeout_ms == 0)
      timeout_ms = 1; /* 0 means call curl_multi_socket_action asap but NOT
                         within the callback itself */
    uv_timer_start(&uv->timeout, on_uv_timeout, timeout_ms,
                   0); /* do not repeat */
  }
  return 0;
}

static struct contextuv *create_context(curl_socket_t sockfd,
                                        struct datauv *uv)
{
  struct contextuv *c;

  c = (struct contextuv *) malloc(sizeof(*c));

  c->sockfd = sockfd;
  c->uv = uv;

  uv_poll_init_socket(uv->loop, &c->poll_handle, sockfd);
  c->poll_handle.data = c;

  return c;
}

static void close_cb(uv_handle_t *handle)
{
  struct contextuv *c = (struct contextuv *) handle->data;
  free(c);
}

static void destroy_context(struct contextuv *c)
{
  uv_close((uv_handle_t *) &c->poll_handle, close_cb);
}

/* callback from libcurl to update socket activity to wait for */
static int cb_socket(CURL *easy, curl_socket_t s, int action,
                     struct datauv *uv,
                     void *socketp)
{
  struct contextuv *c;
  int events = 0;
  (void)easy;

#if DEBUG_UV
  fprintf(tool_stderr, "parallel_event: cb_socket, fd=%d, action=%x, p=%p\n",
          (int)s, action, socketp);
#endif
  switch(action) {
  case CURL_POLL_IN:
  case CURL_POLL_OUT:
  case CURL_POLL_INOUT:
    c = socketp ?
      (struct contextuv *) socketp : create_context(s, uv);

    curl_multi_assign(uv->s->multi, s, c);

    if(action != CURL_POLL_IN)
      events |= UV_WRITABLE;
    if(action != CURL_POLL_OUT)
      events |= UV_READABLE;

    uv_poll_start(&c->poll_handle, events, on_uv_socket);
    break;
  case CURL_POLL_REMOVE:
    if(socketp) {
      c = (struct contextuv *)socketp;
      uv_poll_stop(&c->poll_handle);
      destroy_context(c);
      curl_multi_assign(uv->s->multi, s, NULL);
      /* check if we can do more now */
      check_multi_info(uv);
    }
    break;
  default:
    abort();
  }

  return 0;
}

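/* parallel_event drives the parallel transfers with libuv using the multi
   socket API (curl_multi_socket_action) instead of curl_multi_poll. Only
   built in DEBUGBUILD with USE_LIBUV and used when event-based operation
   is tested. */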
static CURLcode parallel_event(struct parastate *s)
{
  CURLcode result = CURLE_OK;
  struct datauv uv = { 0 };

  s->result = CURLE_OK;
  uv.s = s;
  uv.loop = uv_default_loop();
  uv_timer_init(uv.loop, &uv.timeout);
  uv.timeout.data = &uv;

  /* setup event callbacks */
  curl_multi_setopt(s->multi, CURLMOPT_SOCKETFUNCTION, cb_socket);
  curl_multi_setopt(s->multi, CURLMOPT_SOCKETDATA, &uv);
  curl_multi_setopt(s->multi, CURLMOPT_TIMERFUNCTION, cb_timeout);
  curl_multi_setopt(s->multi, CURLMOPT_TIMERDATA, &uv);

  /* kickstart the thing */
  curl_multi_socket_action(s->multi, CURL_SOCKET_TIMEOUT, 0,
                           &s->still_running);

  while(!s->mcode && (s->still_running || s->more_transfers)) {
#if DEBUG_UV
    fprintf(tool_stderr, "parallel_event: uv_run(), mcode=%d, %d running, "
            "%d more\n", s->mcode, uv.s->still_running, s->more_transfers);
#endif
    uv_run(uv.loop, UV_RUN_DEFAULT);
#if DEBUG_UV
    fprintf(tool_stderr, "parallel_event: uv_run() returned\n");
#endif

    result = check_finished(s);
    if(result && !s->result)
      s->result = result;

    /* early exit called */
    if(s->wrapitup) {
      if(s->still_running && !s->wrapitup_processed) {
        struct per_transfer *per;
        for(per = transfers; per; per = per->next) {
          if(per->added)
            per->abort = TRUE;
        }
        s->wrapitup_processed = TRUE;
      }
      break;
    }

    if(s->more_transfers) {
      result = add_parallel_transfers(s->global, s->multi, s->share,
                                      &s->more_transfers,
                                      &s->added_transfers);
      if(result && !s->result)
        s->result = result;
    }
  }

  result = s->result;

  /* Make sure to return some kind of error if there was a multi problem */
  if(s->mcode) {
    result = (s->mcode == CURLM_OUT_OF_MEMORY) ? CURLE_OUT_OF_MEMORY :
      /* The other multi errors should never happen, so return
         something suitably generic */
      CURLE_BAD_FUNCTION_ARGUMENT;
  }

  /* We need to cleanup the multi here, since the uv context lives on the
   * stack and will be gone. multi_cleanup can trigger events! */
  curl_multi_cleanup(s->multi);

#if DEBUG_UV
  fprintf(tool_stderr, "DONE parallel_event -> %d, mcode=%d, %d running, "
          "%d more\n",
          result, s->mcode, uv.s->still_running, s->more_transfers);
#endif
  return result;
}

#endif

static CURLcode check_finished(struct parastate *s)
{
  CURLcode result = CURLE_OK;
  int rc;
  CURLMsg *msg;
  bool checkmore = FALSE;
  struct GlobalConfig *global = s->global;
  progress_meter(global, &s->start, FALSE);
  do {
    msg = curl_multi_info_read(s->multi, &rc);
    if(msg) {
      bool retry;
      long delay;
      struct per_transfer *ended;
      CURL *easy = msg->easy_handle;
      CURLcode tres = msg->data.result;
      curl_easy_getinfo(easy, CURLINFO_PRIVATE, (void *)&ended);
      curl_multi_remove_handle(s->multi, easy);

      if(ended->abort && (tres == CURLE_ABORTED_BY_CALLBACK) &&
         ended->errorbuffer) {
        msnprintf(ended->errorbuffer, CURL_ERROR_SIZE,
                  "Transfer aborted due to critical error "
                  "in another transfer");
      }
      tres = post_per_transfer(global, ended, tres, &retry, &delay);
      progress_finalize(ended); /* before it goes away */
      all_added--; /* one fewer added */
      checkmore = TRUE;
      if(retry) {
        ended->added = FALSE; /* add it again */
        /* we delay retries in full integer seconds only */
        ended->startat = delay ? time(NULL) + delay/1000 : 0;
      }
      else {
        /* result receives this transfer's error unless the transfer was
           marked for abort due to a critical error in another transfer */
        if(tres && (!ended->abort || !result))
          result = tres;
        if(is_fatal_error(result) || (result && global->fail_early))
          s->wrapitup = TRUE;
        (void)del_per_transfer(ended);
      }
    }
  } while(msg);
  if(!s->wrapitup) {
    if(!checkmore) {
      time_t tock = time(NULL);
      if(s->tick != tock) {
        checkmore = TRUE;
        s->tick = tock;
      }
    }
    if(checkmore) {
      /* one or more transfers completed, add more! */
      CURLcode tres = add_parallel_transfers(global,
                                             s->multi, s->share,
                                             &s->more_transfers,
                                             &s->added_transfers);
      if(tres)
        result = tres;
      if(s->added_transfers)
        /* we added new ones, make sure the loop does not exit yet */
        s->still_running = 1;
    }
    if(is_fatal_error(result) || (result && global->fail_early))
      s->wrapitup = TRUE;
  }
  return result;
}

static CURLcode parallel_transfers(struct GlobalConfig *global,
                                   CURLSH *share)
{
  CURLcode result;
  struct parastate p;
  struct parastate *s = &p;
  s->share = share;
  s->mcode = CURLM_OK;
  s->result = CURLE_OK;
  s->still_running = 1;
  s->start = curlx_now();
  s->wrapitup = FALSE;
  s->wrapitup_processed = FALSE;
  s->tick = time(NULL);
  s->global = global;
  s->multi = curl_multi_init();
  if(!s->multi)
    return CURLE_OUT_OF_MEMORY;

  result = add_parallel_transfers(global, s->multi, s->share,
                                  &s->more_transfers, &s->added_transfers);
  if(result) {
    curl_multi_cleanup(s->multi);
    return result;
  }

#ifdef DEBUGBUILD
  if(global->test_event_based)
#ifdef USE_LIBUV
    return parallel_event(s);
#else
    errorf(global, "Testing --parallel event-based requires libuv");
#endif
  else
#endif

  if(all_added) {
    while(!s->mcode && (s->still_running || s->more_transfers)) {
      /* If stopping prematurely (eg due to a --fail-early condition) then
         signal that any transfers in the multi should abort (via progress
         callback). */
      if(s->wrapitup) {
        if(!s->still_running)
          break;
        if(!s->wrapitup_processed) {
          struct per_transfer *per;
          for(per = transfers; per; per = per->next) {
            if(per->added)
              per->abort = TRUE;
          }
          s->wrapitup_processed = TRUE;
        }
      }

      s->mcode = curl_multi_poll(s->multi, NULL, 0, 1000, NULL);
      if(!s->mcode)
        s->mcode = curl_multi_perform(s->multi, &s->still_running);
      if(!s->mcode)
        result = check_finished(s);
    }

    (void)progress_meter(global, &s->start, TRUE);
  }

  /* Make sure to return some kind of error if there was a multi problem */
  if(s->mcode) {
    result = (s->mcode == CURLM_OUT_OF_MEMORY) ? CURLE_OUT_OF_MEMORY :
      /* The other multi errors should never happen, so return
         something suitably generic */
      CURLE_BAD_FUNCTION_ARGUMENT;
  }

  curl_multi_cleanup(s->multi);

  return result;
}

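/* serial_transfers runs the transfers one at a time: create one, perform it
   with curl_easy_perform, handle retries and --fail-early, then create the
   next. Used when --parallel is not in effect; also enforces --rate via
   global->ms_per_transfer. */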
static CURLcode serial_transfers(struct GlobalConfig *global,
                                 CURLSH *share)
{
  CURLcode returncode = CURLE_OK;
  CURLcode result = CURLE_OK;
  struct per_transfer *per;
  bool added = FALSE;
  bool skipped = FALSE;

  result = create_transfer(global, share, &added, &skipped);
  if(result)
    return result;
  if(!added) {
    errorf(global, "no transfer performed");
    return CURLE_READ_ERROR;
  }
  for(per = transfers; per;) {
    bool retry;
    long delay_ms;
    bool bailout = FALSE;
    struct curltime start;

    start = curlx_now();
    if(!per->skip) {
      result = pre_transfer(global, per);
      if(result)
        break;

      if(global->libcurl) {
        result = easysrc_perform();
        if(result)
          break;
      }

#ifdef DEBUGBUILD
      if(getenv("CURL_FORBID_REUSE"))
        (void)curl_easy_setopt(per->curl, CURLOPT_FORBID_REUSE, 1L);

      if(global->test_duphandle) {
        CURL *dup = curl_easy_duphandle(per->curl);
        curl_easy_cleanup(per->curl);
        per->curl = dup;
        if(!dup) {
          result = CURLE_OUT_OF_MEMORY;
          break;
        }
        /* a duplicate needs the share re-added */
        (void)curl_easy_setopt(per->curl, CURLOPT_SHARE, share);
      }
      if(global->test_event_based)
        result = curl_easy_perform_ev(per->curl);
      else
#endif
        result = curl_easy_perform(per->curl);
    }

    returncode = post_per_transfer(global, per, result, &retry, &delay_ms);
    if(retry) {
      curlx_wait_ms(delay_ms);
      continue;
    }

    /* Bail out upon critical errors or --fail-early */
    if(is_fatal_error(returncode) || (returncode && global->fail_early))
      bailout = TRUE;
    else {
      do {
        /* setup the next one just before we delete this */
        result = create_transfer(global, share, &added, &skipped);
        if(result) {
          returncode = result;
          bailout = TRUE;
          break;
        }
      } while(skipped);
    }

    per = del_per_transfer(per);

    if(bailout)
      break;

    if(per && global->ms_per_transfer) {
      /* how long time did the most recent transfer take in number of
         milliseconds */
      timediff_t milli = curlx_timediff(curlx_now(), start);
      if(milli < global->ms_per_transfer) {
        notef(global, "Transfer took %" CURL_FORMAT_CURL_OFF_T " ms, "
              "waits %ldms as set by --rate",
              milli, (long)(global->ms_per_transfer - milli));
        /* The transfer took less time than wanted. Wait a little. */
        curlx_wait_ms((long)(global->ms_per_transfer - milli));
      }
    }
  }
  if(returncode)
    /* returncode errors have priority */
    result = returncode;

  if(result)
    single_transfer_cleanup(global->current);

  return result;
}

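/* is_using_schannel determines, once and then cached, whether the active
   TLS backend is Schannel by creating a temporary easy handle and querying
   CURLINFO_TLS_SSL_PTR. */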
*/ 2033 curlx_wait_ms((long)(global->ms_per_transfer - milli)); 2034 } 2035 } 2036 } 2037 if(returncode) 2038 /* returncode errors have priority */ 2039 result = returncode; 2040 2041 if(result) 2042 single_transfer_cleanup(global->current); 2043 2044 return result; 2045 } 2046 2047 static CURLcode is_using_schannel(int *using) 2048 { 2049 CURLcode result = CURLE_OK; 2050 static int using_schannel = -1; /* -1 = not checked 2051 0 = nope 2052 1 = yes */ 2053 if(using_schannel == -1) { 2054 CURL *curltls = curl_easy_init(); 2055 /* The TLS backend remains, so keep the info */ 2056 struct curl_tlssessioninfo *tls_backend_info = NULL; 2057 2058 if(!curltls) 2059 result = CURLE_OUT_OF_MEMORY; 2060 else { 2061 result = curl_easy_getinfo(curltls, CURLINFO_TLS_SSL_PTR, 2062 &tls_backend_info); 2063 if(!result) 2064 using_schannel = 2065 (tls_backend_info->backend == CURLSSLBACKEND_SCHANNEL); 2066 } 2067 curl_easy_cleanup(curltls); 2068 if(result) 2069 return result; 2070 } 2071 *using = using_schannel; 2072 return result; 2073 } 2074 2075 /* Set the CA cert locations specified in the environment. For Windows if no 2076 * environment-specified filename is found then check for CA bundle default 2077 * filename curl-ca-bundle.crt in the user's PATH. 2078 * 2079 * If Schannel is the selected SSL backend then these locations are ignored. 2080 * We allow setting CA location for Schannel only when explicitly specified by 2081 * the user via CURLOPT_CAINFO / --cacert. 2082 */ 2083 2084 static CURLcode cacertpaths(struct OperationConfig *config) 2085 { 2086 CURLcode result = CURLE_OUT_OF_MEMORY; 2087 char *env = curl_getenv("CURL_CA_BUNDLE"); 2088 if(env) { 2089 config->cacert = strdup(env); 2090 curl_free(env); 2091 if(!config->cacert) 2092 goto fail; 2093 } 2094 else { 2095 env = curl_getenv("SSL_CERT_DIR"); 2096 if(env) { 2097 config->capath = strdup(env); 2098 curl_free(env); 2099 if(!config->capath) 2100 goto fail; 2101 } 2102 env = curl_getenv("SSL_CERT_FILE"); 2103 if(env) { 2104 config->cacert = strdup(env); 2105 curl_free(env); 2106 if(!config->cacert) 2107 goto fail; 2108 } 2109 } 2110 2111 #ifdef _WIN32 2112 if(!env) { 2113 #if defined(CURL_CA_SEARCH_SAFE) 2114 char *cacert = NULL; 2115 FILE *cafile = tool_execpath("curl-ca-bundle.crt", &cacert); 2116 if(cafile) { 2117 fclose(cafile); 2118 config->cacert = strdup(cacert); 2119 } 2120 #elif !defined(CURL_WINDOWS_UWP) && !defined(UNDER_CE) && \ 2121 !defined(CURL_DISABLE_CA_SEARCH) 2122 result = FindWin32CACert(config, TEXT("curl-ca-bundle.crt")); 2123 if(result) 2124 goto fail; 2125 #endif 2126 } 2127 #endif 2128 return CURLE_OK; 2129 fail: 2130 free(config->capath); 2131 return result; 2132 } 2133 2134 /* setup a transfer for the given config */ 2135 static CURLcode transfer_per_config(struct OperationConfig *config, 2136 CURLSH *share, 2137 bool *added, 2138 bool *skipped) 2139 { 2140 CURLcode result = CURLE_OK; 2141 *added = FALSE; 2142 2143 /* Check we have a url */ 2144 if(!config->url_list || !config->url_list->url) { 2145 helpf(tool_stderr, "(%d) no URL specified", CURLE_FAILED_INIT); 2146 return CURLE_FAILED_INIT; 2147 } 2148 2149 /* On Windows we cannot set the path to curl-ca-bundle.crt at compile time. 
2150 * We look for the file in two ways: 2151 * 1: look at the environment variable CURL_CA_BUNDLE for a path 2152 * 2: if #1 is not found, use the Windows API function SearchPath() 2153 * to find it along the app's path (includes app's dir and CWD) 2154 * 2155 * We support the environment variable thing for non-Windows platforms 2156 * too. Just for the sake of it. 2157 */ 2158 if(feature_ssl && 2159 !config->cacert && 2160 !config->capath && 2161 (!config->insecure_ok || (config->doh_url && !config->doh_insecure_ok))) { 2162 int using_schannel = -1; 2163 2164 result = is_using_schannel(&using_schannel); 2165 2166 /* With the addition of CAINFO support for Schannel, this search could 2167 * find a certificate bundle that was previously ignored. To maintain 2168 * backward compatibility, only perform this search if not using Schannel. 2169 */ 2170 if(!result && !using_schannel) 2171 result = cacertpaths(config); 2172 } 2173 2174 if(!result) 2175 result = single_transfer(config, share, added, skipped); 2176 2177 return result; 2178 } 2179 2180 /* 2181 * 'create_transfer' gets the details and sets up a new transfer if 'added' 2182 * returns TRUE. 2183 */ 2184 static CURLcode create_transfer(struct GlobalConfig *global, 2185 CURLSH *share, 2186 bool *added, 2187 bool *skipped) 2188 { 2189 CURLcode result = CURLE_OK; 2190 *added = FALSE; 2191 while(global->current) { 2192 result = transfer_per_config(global->current, share, added, skipped); 2193 if(!result && !*added) { 2194 /* when one set is drained, continue to next */ 2195 global->current = global->current->next; 2196 continue; 2197 } 2198 break; 2199 } 2200 return result; 2201 } 2202 2203 static CURLcode run_all_transfers(struct GlobalConfig *global, 2204 CURLSH *share, 2205 CURLcode result) 2206 { 2207 /* Save the values of noprogress and isatty to restore them later on */ 2208 bool orig_noprogress = global->noprogress; 2209 bool orig_isatty = global->isatty; 2210 struct per_transfer *per; 2211 2212 /* Time to actually do the transfers */ 2213 if(!result) { 2214 if(global->parallel) 2215 result = parallel_transfers(global, share); 2216 else 2217 result = serial_transfers(global, share); 2218 } 2219 2220 /* cleanup if there are any left */ 2221 for(per = transfers; per;) { 2222 bool retry; 2223 long delay; 2224 CURLcode result2 = post_per_transfer(global, per, result, &retry, &delay); 2225 if(!result) 2226 /* do not overwrite the original error */ 2227 result = result2; 2228 2229 /* Free list of given URLs */ 2230 clean_getout(per->config); 2231 2232 per = del_per_transfer(per); 2233 } 2234 2235 /* Reset the global config variables */ 2236 global->noprogress = orig_noprogress; 2237 global->isatty = orig_isatty; 2238 2239 2240 return result; 2241 } 2242 2243 CURLcode operate(struct GlobalConfig *global, int argc, argv_item_t argv[]) 2244 { 2245 CURLcode result = CURLE_OK; 2246 const char *first_arg; 2247 #ifdef UNDER_CE 2248 first_arg = argc > 1 ? strdup(argv[1]) : NULL; 2249 #else 2250 first_arg = argc > 1 ? 
convert_tchar_to_UTF8(argv[1]) : NULL; 2251 #endif 2252 2253 #ifdef HAVE_SETLOCALE 2254 /* Override locale for number parsing (only) */ 2255 setlocale(LC_ALL, ""); 2256 setlocale(LC_NUMERIC, "C"); 2257 #endif 2258 2259 /* Parse .curlrc if necessary */ 2260 if((argc == 1) || 2261 (first_arg && strncmp(first_arg, "-q", 2) && 2262 strcmp(first_arg, "--disable"))) { 2263 parseconfig(NULL, global); /* ignore possible failure */ 2264 2265 /* If we had no arguments then make sure a url was specified in .curlrc */ 2266 if((argc < 2) && (!global->first->url_list)) { 2267 helpf(tool_stderr, NULL); 2268 result = CURLE_FAILED_INIT; 2269 } 2270 } 2271 2272 unicodefree(first_arg); 2273 2274 if(!result) { 2275 /* Parse the command line arguments */ 2276 ParameterError res = parse_args(global, argc, argv); 2277 if(res) { 2278 result = CURLE_OK; 2279 2280 /* Check if we were asked for the help */ 2281 if(res == PARAM_HELP_REQUESTED) 2282 ; /* already done */ 2283 /* Check if we were asked for the manual */ 2284 else if(res == PARAM_MANUAL_REQUESTED) { 2285 #ifdef USE_MANUAL 2286 hugehelp(); 2287 #else 2288 warnf(global, 2289 "built-in manual was disabled at build-time"); 2290 #endif 2291 } 2292 /* Check if we were asked for the version information */ 2293 else if(res == PARAM_VERSION_INFO_REQUESTED) 2294 tool_version_info(); 2295 /* Check if we were asked to list the SSL engines */ 2296 else if(res == PARAM_ENGINES_REQUESTED) 2297 tool_list_engines(); 2298 /* Check if we were asked to dump the embedded CA bundle */ 2299 else if(res == PARAM_CA_EMBED_REQUESTED) { 2300 #ifdef CURL_CA_EMBED 2301 printf("%s", curl_ca_embed); 2302 #endif 2303 } 2304 else if(res == PARAM_LIBCURL_UNSUPPORTED_PROTOCOL) 2305 result = CURLE_UNSUPPORTED_PROTOCOL; 2306 else if(res == PARAM_READ_ERROR) 2307 result = CURLE_READ_ERROR; 2308 else 2309 result = CURLE_FAILED_INIT; 2310 } 2311 else { 2312 if(global->libcurl) { 2313 /* Initialise the libcurl source output */ 2314 result = easysrc_init(); 2315 } 2316 2317 /* Perform the main operations */ 2318 if(!result) { 2319 size_t count = 0; 2320 struct OperationConfig *operation = global->first; 2321 CURLSH *share = curl_share_init(); 2322 if(!share) { 2323 if(global->libcurl) { 2324 /* Cleanup the libcurl source output */ 2325 easysrc_cleanup(); 2326 } 2327 result = CURLE_OUT_OF_MEMORY; 2328 } 2329 2330 if(!result) { 2331 curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE); 2332 curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS); 2333 curl_share_setopt(share, CURLSHOPT_SHARE, 2334 CURL_LOCK_DATA_SSL_SESSION); 2335 /* Running parallel, use the multi connection cache */ 2336 if(!global->parallel) 2337 curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_CONNECT); 2338 curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_PSL); 2339 curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_HSTS); 2340 2341 if(global->ssl_sessions && feature_ssls_export) 2342 result = tool_ssls_load(global->first, share, 2343 global->ssl_sessions); 2344 2345 if(!result) { 2346 /* Get the required arguments for each operation */ 2347 do { 2348 result = get_args(operation, count++); 2349 2350 operation = operation->next; 2351 } while(!result && operation); 2352 2353 /* Set the current operation pointer */ 2354 global->current = global->first; 2355 2356 /* now run! 
*/ 2357 result = run_all_transfers(global, share, result); 2358 2359 if(global->ssl_sessions && feature_ssls_export) { 2360 CURLcode r2 = tool_ssls_save(global->first, share, 2361 global->ssl_sessions); 2362 if(r2 && !result) 2363 result = r2; 2364 } 2365 } 2366 2367 curl_share_cleanup(share); 2368 if(global->libcurl) { 2369 /* Cleanup the libcurl source output */ 2370 easysrc_cleanup(); 2371 2372 /* Dump the libcurl code if previously enabled */ 2373 dumpeasysrc(global); 2374 } 2375 } 2376 } 2377 else 2378 errorf(global, "out of memory"); 2379 } 2380 } 2381 2382 varcleanup(global); 2383 curl_free(global->knownhosts); 2384 2385 return result; 2386 }
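
/*
 * Editor's note - illustrative sketch, not part of tool_operate.c. The
 * parallel_transfers() loop above alternates curl_multi_perform() and
 * curl_multi_poll() on one multi handle and reaps finished transfers via
 * check_finished(). The hypothetical helper below shows the same pattern
 * using only public libcurl calls, assuming <curl/curl.h> is included and
 * curl_global_init() has already run; the URLs are placeholders and the
 * retry and --fail-early handling is left out.
 */
CURLcode example_parallel_loop(void)
{
  static const char *const urls[2] = { "https://example.com/",
                                       "https://example.org/" };
  CURL *easy[2] = { NULL, NULL };
  CURLM *multi = curl_multi_init();
  CURLMcode mc = CURLM_OK;
  CURLcode result = CURLE_OK;
  int still_running = 1;
  size_t i;

  if(!multi)
    return CURLE_OUT_OF_MEMORY;

  /* one easy handle per URL, as add_parallel_transfers() does */
  for(i = 0; i < 2; i++) {
    easy[i] = curl_easy_init();
    if(!easy[i]) {
      result = CURLE_OUT_OF_MEMORY;
      break;
    }
    curl_easy_setopt(easy[i], CURLOPT_URL, urls[i]);
    curl_multi_add_handle(multi, easy[i]);
  }

  /* drive all transfers until none is still running */
  while(!result && !mc && still_running) {
    mc = curl_multi_perform(multi, &still_running);
    if(!mc && still_running)
      /* wait for socket activity, but never longer than 1000 ms */
      mc = curl_multi_poll(multi, NULL, 0, 1000, NULL);
    if(!mc) {
      int msgs_left;
      CURLMsg *msg;
      while((msg = curl_multi_info_read(multi, &msgs_left))) {
        /* remember the first failure; for brevity this sketch then stops,
           while curl itself keeps the other transfers running unless
           --fail-early was used */
        if((msg->msg == CURLMSG_DONE) && msg->data.result && !result)
          result = msg->data.result;
      }
    }
  }
  if(mc && !result)
    result = (mc == CURLM_OUT_OF_MEMORY) ?
      CURLE_OUT_OF_MEMORY : CURLE_BAD_FUNCTION_ARGUMENT;

  for(i = 0; i < 2; i++) {
    if(easy[i]) {
      curl_multi_remove_handle(multi, easy[i]);
      curl_easy_cleanup(easy[i]);
    }
  }
  curl_multi_cleanup(multi);
  return result;
}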
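
/*
 * Editor's note - illustrative sketch, not part of tool_operate.c. In the
 * non-parallel case, serial_transfers() above performs one blocking
 * transfer at a time and, with --rate, waits before starting the next one.
 * The hypothetical helper below shows that shape with placeholder URLs; it
 * stops at the first error for brevity, whereas curl itself continues
 * unless the error is fatal or --fail-early is in effect, and the actual
 * waiting (which is platform-specific) is omitted. CURLINFO_TOTAL_TIME_T
 * stands in for the tool's internal timing helpers.
 */
CURLcode example_serial_loop(void)
{
  static const char *const urls[2] = { "https://example.com/",
                                       "https://example.org/" };
  CURLcode result = CURLE_OK;
  size_t i;

  for(i = 0; !result && (i < 2); i++) {
    CURL *easy = curl_easy_init();
    curl_off_t spent_us = 0;

    if(!easy)
      return CURLE_OUT_OF_MEMORY;
    curl_easy_setopt(easy, CURLOPT_URL, urls[i]);

    /* one blocking transfer, like curl_easy_perform() in the loop above */
    result = curl_easy_perform(easy);

    /* how long the transfer took, in microseconds; a --rate style pacer
       would wait out the remainder of its per-transfer budget here */
    curl_easy_getinfo(easy, CURLINFO_TOTAL_TIME_T, &spent_us);
    (void)spent_us;

    curl_easy_cleanup(easy);
  }
  return result;
}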
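
/*
 * Editor's note - illustrative sketch, not part of tool_operate.c.
 * cacertpaths() above picks a CA bundle or directory from the environment
 * variables CURL_CA_BUNDLE, SSL_CERT_DIR and SSL_CERT_FILE when the user
 * gave neither --cacert nor --capath. The hypothetical helper below applies
 * the same variables, in the same order of precedence, directly to an easy
 * handle; CURL_CA_BUNDLE wins when set.
 */
void example_apply_ca_env(CURL *easy)
{
  char *env = curl_getenv("CURL_CA_BUNDLE");
  if(env) {
    curl_easy_setopt(easy, CURLOPT_CAINFO, env);
    curl_free(env); /* setopt copies the string, so this is safe */
  }
  else {
    env = curl_getenv("SSL_CERT_DIR");
    if(env) {
      curl_easy_setopt(easy, CURLOPT_CAPATH, env);
      curl_free(env);
    }
    env = curl_getenv("SSL_CERT_FILE");
    if(env) {
      curl_easy_setopt(easy, CURLOPT_CAINFO, env);
      curl_free(env);
    }
  }
}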
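
/*
 * Editor's note - illustrative sketch, not part of tool_operate.c.
 * operate() above creates a single share object so that every transfer in
 * the run reuses cookies, DNS entries and TLS sessions (plus connections
 * for serial runs, and PSL/HSTS data). The hypothetical helper below sets
 * up the same kind of sharing for one easy handle performing two requests;
 * the URLs are placeholders.
 */
CURLcode example_share_setup(void)
{
  CURLcode result = CURLE_OK;
  CURLSH *share = curl_share_init();
  CURL *easy;

  if(!share)
    return CURLE_OUT_OF_MEMORY;

  /* share the same caches that operate() shares above; connection sharing
     is what the tool enables only for non-parallel runs */
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);
  curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_CONNECT);

  easy = curl_easy_init();
  if(easy) {
    curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(easy, CURLOPT_SHARE, share);
    result = curl_easy_perform(easy);

    /* the second request can reuse the shared DNS/TLS/connection state */
    curl_easy_setopt(easy, CURLOPT_URL, "https://example.com/again");
    if(!result)
      result = curl_easy_perform(easy);
    curl_easy_cleanup(easy);
  }
  else
    result = CURLE_OUT_OF_MEMORY;

  /* the share must outlive every handle that uses it */
  curl_share_cleanup(share);
  return result;
}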