gnunet-svn
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[GNUnet-SVN] [gnurl] 05/220: curl: support parallel transfers


From: gnunet
Subject: [GNUnet-SVN] [gnurl] 05/220: curl: support parallel transfers
Date: Thu, 12 Sep 2019 17:26:05 +0200

This is an automated email from the git hooks/post-receive script.

ng0 pushed a commit to branch master
in repository gnurl.

commit b88940850002a3f1c25bc6488b95ad30eb80d696
Author: Daniel Stenberg <address@hidden>
AuthorDate: Sat Jul 20 19:14:00 2019 +0200

    curl: support parallel transfers
    
    This is done by making sure each individual transfer is first added to a
    linked list as then they can be performed serially, or at will, in
    parallel.
    
    Closes #3804
---
 CMakeLists.txt                       |    1 +
 configure.ac                         |    1 +
 docs/cmdline-opts/Makefile.inc       |    5 +-
 docs/cmdline-opts/parallel-max.d     |    9 +
 docs/cmdline-opts/parallel.d         |    7 +
 src/Makefile.inc                     |    2 +
 src/tool_cb_hdr.c                    |   12 +-
 src/tool_cb_wrt.c                    |    8 +-
 src/tool_cfgable.h                   |   10 +-
 src/tool_getparam.c                  |   57 +-
 src/tool_help.c                      |    8 +-
 src/tool_help.h                      |    4 +-
 src/tool_main.c                      |   20 +-
 src/tool_main.h                      |    5 +-
 src/tool_metalink.c                  |   15 +-
 src/tool_metalink.h                  |    3 +-
 src/tool_operate.c                   | 1607 +++++++++++++++++++---------------
 src/tool_operate.h                   |   48 +-
 src/tool_operhlp.c                   |    8 +-
 src/tool_operhlp.h                   |    4 +-
 src/tool_parsecfg.c                  |    3 -
 src/tool_progress.c                  |  314 +++++++
 src/{tool_main.h => tool_progress.h} |   32 +-
 tests/data/test1002                  |    8 +
 tests/data/test1291                  |   10 +-
 tests/data/test1406                  |    2 +-
 tests/data/test1412                  |   18 +
 tests/data/test1418                  |   13 +
 tests/data/test153                   |   24 +-
 tests/data/test2006                  |    4 -
 tests/data/test2007                  |    4 -
 tests/data/test2008                  |    4 -
 tests/data/test2009                  |    4 -
 tests/data/test2010                  |    4 -
 34 files changed, 1476 insertions(+), 802 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 29cb5f598..af367e522 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -860,6 +860,7 @@ check_symbol_exists(strlcat       "${CURL_INCLUDES}" 
HAVE_STRLCAT)
 check_symbol_exists(getpwuid      "${CURL_INCLUDES}" HAVE_GETPWUID)
 check_symbol_exists(getpwuid_r    "${CURL_INCLUDES}" HAVE_GETPWUID_R)
 check_symbol_exists(geteuid       "${CURL_INCLUDES}" HAVE_GETEUID)
+check_symbol_exists(usleep        "${CURL_INCLUDES}" HAVE_USLEEP)
 check_symbol_exists(utime         "${CURL_INCLUDES}" HAVE_UTIME)
 check_symbol_exists(gmtime_r      "${CURL_INCLUDES}" HAVE_GMTIME_R)
 check_symbol_exists(localtime_r   "${CURL_INCLUDES}" HAVE_LOCALTIME_R)
diff --git a/configure.ac b/configure.ac
index cbf038080..7dd148085 100755
--- a/configure.ac
+++ b/configure.ac
@@ -3671,6 +3671,7 @@ AC_CHECK_FUNCS([fnmatch \
   setlocale \
   setmode \
   setrlimit \
+  usleep \
   utime \
   utimes
 ],[
diff --git a/docs/cmdline-opts/Makefile.inc b/docs/cmdline-opts/Makefile.inc
index 7a8af6f9e..886fa6caf 100644
--- a/docs/cmdline-opts/Makefile.inc
+++ b/docs/cmdline-opts/Makefile.inc
@@ -100,7 +100,10 @@ DPAGES =                                   \
   noproxy.d                                    \
   ntlm.d ntlm-wb.d                             \
   oauth2-bearer.d                              \
-  output.d pass.d                              \
+  output.d                                      \
+  pass.d                                       \
+  parallel.d                                    \
+  parallel-max.d                                \
   path-as-is.d                                 \
   pinnedpubkey.d                               \
   post301.d                                    \
diff --git a/docs/cmdline-opts/parallel-max.d b/docs/cmdline-opts/parallel-max.d
new file mode 100644
index 000000000..a8c79c743
--- /dev/null
+++ b/docs/cmdline-opts/parallel-max.d
@@ -0,0 +1,9 @@
+Long: parallel-max
+Help: Maximum concurrency for parallel transfers
+Added: 7.66.0
+See-also: parallel
+---
+When asked to do parallel transfers, using --parallel, this option controls
+the maximum amount of transfers to do simultaneously.
+
+The default is 50.
diff --git a/docs/cmdline-opts/parallel.d b/docs/cmdline-opts/parallel.d
new file mode 100644
index 000000000..fac84e624
--- /dev/null
+++ b/docs/cmdline-opts/parallel.d
@@ -0,0 +1,7 @@
+Short: Z
+Long: parallel
+Help: Perform transfers in parallel
+Added: 7.66.0
+---
+Makes curl perform its transfers in parallel as compared to the regular serial
+manner.
diff --git a/src/Makefile.inc b/src/Makefile.inc
index e1e8306bd..dd6b9d336 100644
--- a/src/Makefile.inc
+++ b/src/Makefile.inc
@@ -54,6 +54,7 @@ CURL_CFILES = \
   tool_panykey.c \
   tool_paramhlp.c \
   tool_parsecfg.c \
+  tool_progress.c \
   tool_strdup.c \
   tool_setopt.c \
   tool_sleep.c \
@@ -95,6 +96,7 @@ CURL_HFILES = \
   tool_panykey.h \
   tool_paramhlp.h \
   tool_parsecfg.h \
+  tool_progress.h \
   tool_sdecls.h \
   tool_setopt.h \
   tool_setup.h \
diff --git a/src/tool_cb_hdr.c b/src/tool_cb_hdr.c
index 3844904c9..b0880f186 100644
--- a/src/tool_cb_hdr.c
+++ b/src/tool_cb_hdr.c
@@ -5,7 +5,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -32,6 +32,7 @@
 #include "tool_msgs.h"
 #include "tool_cb_hdr.h"
 #include "tool_cb_wrt.h"
+#include "tool_operate.h"
 
 #include "memdebug.h" /* keep this as LAST include */
 
@@ -54,9 +55,10 @@ static char *parse_filename(const char *ptr, size_t len);
 
 size_t tool_header_cb(char *ptr, size_t size, size_t nmemb, void *userdata)
 {
-  struct HdrCbData *hdrcbdata = userdata;
-  struct OutStruct *outs = hdrcbdata->outs;
-  struct OutStruct *heads = hdrcbdata->heads;
+  struct per_transfer *per = userdata;
+  struct HdrCbData *hdrcbdata = &per->hdrcbdata;
+  struct OutStruct *outs = &per->outs;
+  struct OutStruct *heads = &per->heads;
   const char *str = ptr;
   const size_t cb = size * nmemb;
   const char *end = (char *)ptr + cb;
@@ -100,7 +102,7 @@ size_t tool_header_cb(char *ptr, size_t size, size_t nmemb, 
void *userdata)
    * Content-Disposition header specifying a filename property.
    */
 
-  curl_easy_getinfo(outs->config->easy, CURLINFO_PROTOCOL, &protocol);
+  curl_easy_getinfo(per->curl, CURLINFO_PROTOCOL, &protocol);
   if(hdrcbdata->honor_cd_filename &&
      (cb > 20) && checkprefix("Content-disposition:", str) &&
      (protocol & (CURLPROTO_HTTPS|CURLPROTO_HTTP))) {
diff --git a/src/tool_cb_wrt.c b/src/tool_cb_wrt.c
index 2f699f326..0f47b4d0f 100644
--- a/src/tool_cb_wrt.c
+++ b/src/tool_cb_wrt.c
@@ -5,7 +5,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -28,6 +28,7 @@
 #include "tool_cfgable.h"
 #include "tool_msgs.h"
 #include "tool_cb_wrt.h"
+#include "tool_operate.h"
 
 #include "memdebug.h" /* keep this as LAST include */
 
@@ -75,7 +76,8 @@ bool tool_create_output_file(struct OutStruct *outs)
 size_t tool_write_cb(char *buffer, size_t sz, size_t nmemb, void *userdata)
 {
   size_t rc;
-  struct OutStruct *outs = userdata;
+  struct per_transfer *per = userdata;
+  struct OutStruct *outs = &per->outs;
   struct OperationConfig *config = outs->config;
   size_t bytes = sz * nmemb;
   bool is_tty = config->global->isatty;
@@ -202,7 +204,7 @@ size_t tool_write_cb(char *buffer, size_t sz, size_t nmemb, 
void *userdata)
 
   if(config->readbusy) {
     config->readbusy = FALSE;
-    curl_easy_pause(config->easy, CURLPAUSE_CONT);
+    curl_easy_pause(per->curl, CURLPAUSE_CONT);
   }
 
   if(config->nobuffer) {
diff --git a/src/tool_cfgable.h b/src/tool_cfgable.h
index e374a7f0e..848123e7c 100644
--- a/src/tool_cfgable.h
+++ b/src/tool_cfgable.h
@@ -38,7 +38,6 @@ typedef enum {
 struct GlobalConfig;
 
 struct OperationConfig {
-  CURL *easy;               /* A copy of the handle from GlobalConfig */
   bool remote_time;
   char *random_file;
   char *egd_file;
@@ -242,9 +241,6 @@ struct OperationConfig {
   bool use_metalink;        /* process given URLs as metalink XML file */
   metalinkfile *metalinkfile_list; /* point to the first node */
   metalinkfile *metalinkfile_last; /* point to the last/current node */
-#ifdef CURLDEBUG
-  bool test_event_based;
-#endif
   char *oauth_bearer;             /* OAuth 2.0 bearer token */
   bool nonpn;                     /* enable/disable TLS NPN extension */
   bool noalpn;                    /* enable/disable TLS ALPN extension */
@@ -268,7 +264,6 @@ struct OperationConfig {
 };
 
 struct GlobalConfig {
-  CURL *easy;                     /* Once we have one, we keep it here */
   int showerror;                  /* -1 == unset, default => show errors
                                       0 => -s is used to NOT show errors
                                       1 => -S has been used to show errors */
@@ -286,6 +281,11 @@ struct GlobalConfig {
   char *libcurl;                  /* Output libcurl code to this file name */
   bool fail_early;                /* exit on first transfer error */
   bool styled_output;             /* enable fancy output style detection */
+#ifdef CURLDEBUG
+  bool test_event_based;
+#endif
+  bool parallel;
+  long parallel_max;
   struct OperationConfig *first;
   struct OperationConfig *current;
   struct OperationConfig *last;   /* Always last in the struct */
diff --git a/src/tool_getparam.c b/src/tool_getparam.c
index b347121f8..ae0902613 100644
--- a/src/tool_getparam.c
+++ b/src/tool_getparam.c
@@ -40,6 +40,7 @@
 #include "tool_msgs.h"
 #include "tool_paramhlp.h"
 #include "tool_parsecfg.h"
+#include "tool_main.h"
 
 #include "memdebug.h" /* keep this as LAST include */
 
@@ -316,6 +317,8 @@ static const struct LongShort aliases[]= {
   {"Y",  "speed-limit",              ARG_STRING},
   {"y",  "speed-time",               ARG_STRING},
   {"z",  "time-cond",                ARG_STRING},
+  {"Z",  "parallel",                 ARG_BOOL},
+  {"Zb", "parallel-max",             ARG_STRING},
   {"#",  "progress-bar",             ARG_BOOL},
   {":",  "next",                     ARG_NONE},
 };
@@ -1104,7 +1107,7 @@ ParameterError getparameter(const char *flag, /* f or 
-long-flag */
         break;
       case 'L': /* --test-event */
 #ifdef CURLDEBUG
-        config->test_event_based = toggle;
+        global->test_event_based = toggle;
 #else
         warnf(global, "--test-event is ignored unless a debug build!\n");
 #endif
@@ -1356,7 +1359,7 @@ ParameterError getparameter(const char *flag, /* f or 
-long-flag */
           size = 0;
         }
         else {
-          char *enc = curl_easy_escape(config->easy, postdata, (int)size);
+          char *enc = curl_easy_escape(NULL, postdata, (int)size);
           Curl_safefree(postdata); /* no matter if it worked or not */
           if(enc) {
             /* now make a string with the name from above and append the
@@ -2127,6 +2130,21 @@ ParameterError getparameter(const char *flag, /* f or 
-long-flag */
       if(!config->low_speed_time)
         config->low_speed_time = 30;
       break;
+    case 'Z':
+      switch(subletter) {
+      case '\0':  /* --parallel */
+        global->parallel = toggle;
+        break;
+      case 'b':   /* --parallel-max */
+        err = str2unum(&global->parallel_max, nextarg);
+        if(err)
+          return err;
+        if((global->parallel_max > MAX_PARALLEL) ||
+           (global->parallel_max < 1))
+          global->parallel_max = PARALLEL_DEFAULT;
+        break;
+      }
+      break;
     case 'z': /* time condition coming up */
       switch(*nextarg) {
       case '+':
@@ -2176,14 +2194,14 @@ ParameterError getparameter(const char *flag, /* f or 
-long-flag */
   return PARAM_OK;
 }
 
-ParameterError parse_args(struct GlobalConfig *config, int argc,
+ParameterError parse_args(struct GlobalConfig *global, int argc,
                           argv_item_t argv[])
 {
   int i;
   bool stillflags;
   char *orig_opt = NULL;
   ParameterError result = PARAM_OK;
-  struct OperationConfig *operation = config->first;
+  struct OperationConfig *config = global->first;
 
   for(i = 1, stillflags = TRUE; i < argc && !result; i++) {
     orig_opt = argv[i];
@@ -2199,31 +2217,28 @@ ParameterError parse_args(struct GlobalConfig *config, 
int argc,
       else {
         char *nextarg = (i < (argc - 1)) ? argv[i + 1] : NULL;
 
-        result = getparameter(flag, nextarg, &passarg, config, operation);
+        result = getparameter(flag, nextarg, &passarg, global, config);
         if(result == PARAM_NEXT_OPERATION) {
           /* Reset result as PARAM_NEXT_OPERATION is only used here and not
              returned from this function */
           result = PARAM_OK;
 
-          if(operation->url_list && operation->url_list->url) {
+          if(config->url_list && config->url_list->url) {
             /* Allocate the next config */
-            operation->next = malloc(sizeof(struct OperationConfig));
-            if(operation->next) {
+            config->next = malloc(sizeof(struct OperationConfig));
+            if(config->next) {
               /* Initialise the newly created config */
-              config_init(operation->next);
-
-              /* Copy the easy handle */
-              operation->next->easy = config->easy;
+              config_init(config->next);
 
               /* Set the global config pointer */
-              operation->next->global = config;
+              config->next->global = global;
 
-              /* Update the last operation pointer */
-              config->last = operation->next;
+              /* Update the last config pointer */
+              global->last = config->next;
 
               /* Move onto the new config */
-              operation->next->prev = operation;
-              operation = operation->next;
+              config->next->prev = config;
+              config = config->next;
             }
             else
               result = PARAM_NO_MEM;
@@ -2237,8 +2252,8 @@ ParameterError parse_args(struct GlobalConfig *config, 
int argc,
       bool used;
 
       /* Just add the URL please */
-      result = getparameter((char *)"--url", argv[i], &used, config,
-                            operation);
+      result = getparameter((char *)"--url", argv[i], &used, global,
+                            config);
     }
   }
 
@@ -2249,9 +2264,9 @@ ParameterError parse_args(struct GlobalConfig *config, 
int argc,
     const char *reason = param2text(result);
 
     if(orig_opt && strcmp(":", orig_opt))
-      helpf(config->errors, "option %s: %s\n", orig_opt, reason);
+      helpf(global->errors, "option %s: %s\n", orig_opt, reason);
     else
-      helpf(config->errors, "%s\n", reason);
+      helpf(global->errors, "%s\n", reason);
   }
 
   return result;
diff --git a/src/tool_help.c b/src/tool_help.c
index 9209a13dd..a8f285a00 100644
--- a/src/tool_help.c
+++ b/src/tool_help.c
@@ -273,6 +273,10 @@ static const struct helptxt helptext[] = {
    "OAuth 2 Bearer Token"},
   {"-o, --output <file>",
    "Write to file instead of stdout"},
+  {"-Z, --parallel",
+   "Perform transfers in parallel"},
+  {"    --parallel-max",
+   "Maximum concurrency for parallel transfers"},
   {"    --pass <phrase>",
    "Pass phrase for the private key"},
   {"    --path-as-is",
@@ -602,8 +606,9 @@ void tool_version_info(void)
   }
 }
 
-void tool_list_engines(CURL *curl)
+void tool_list_engines(void)
 {
+  CURL *curl = curl_easy_init();
   struct curl_slist *engines = NULL;
 
   /* Get the list of engines */
@@ -620,4 +625,5 @@ void tool_list_engines(CURL *curl)
 
   /* Cleanup the list of engines */
   curl_slist_free_all(engines);
+  curl_easy_cleanup(curl);
 }
diff --git a/src/tool_help.h b/src/tool_help.h
index 0289f3015..bfb5dcdf3 100644
--- a/src/tool_help.h
+++ b/src/tool_help.h
@@ -7,7 +7,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2014, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -24,7 +24,7 @@
 #include "tool_setup.h"
 
 void tool_help(void);
-void tool_list_engines(CURL *curl);
+void tool_list_engines(void);
 void tool_version_info(void);
 
 #endif /* HEADER_CURL_TOOL_HELP_H */
diff --git a/src/tool_main.c b/src/tool_main.c
index 7d1e62b79..d41e96536 100644
--- a/src/tool_main.c
+++ b/src/tool_main.c
@@ -149,6 +149,7 @@ static CURLcode main_init(struct GlobalConfig *config)
   config->showerror = -1;             /* Will show errors */
   config->errors = stderr;            /* Default errors to stderr */
   config->styled_output = TRUE;       /* enable detection */
+  config->parallel_max = PARALLEL_DEFAULT;
 
   /* Allocate the initial operate config */
   config->first = config->last = malloc(sizeof(struct OperationConfig));
@@ -160,19 +161,9 @@ static CURLcode main_init(struct GlobalConfig *config)
       result = get_libcurl_info();
 
       if(!result) {
-        /* Get a curl handle to use for all forthcoming curl transfers */
-        config->easy = curl_easy_init();
-        if(config->easy) {
-          /* Initialise the config */
-          config_init(config->first);
-          config->first->easy = config->easy;
-          config->first->global = config;
-        }
-        else {
-          helpf(stderr, "error initializing curl easy handle\n");
-          result = CURLE_FAILED_INIT;
-          free(config->first);
-        }
+        /* Initialise the config */
+        config_init(config->first);
+        config->first->global = config;
       }
       else {
         helpf(stderr, "error retrieving curl library information\n");
@@ -214,9 +205,6 @@ static void free_globalconfig(struct GlobalConfig *config)
 static void main_free(struct GlobalConfig *config)
 {
   /* Cleanup the easy handle */
-  curl_easy_cleanup(config->easy);
-  config->easy = NULL;
-
   /* Main cleanup */
   curl_global_cleanup();
   convert_cleanup();
diff --git a/src/tool_main.h b/src/tool_main.h
index 868818816..a68287ec8 100644
--- a/src/tool_main.h
+++ b/src/tool_main.h
@@ -7,7 +7,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2012, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -28,6 +28,9 @@
 #define RETRY_SLEEP_DEFAULT 1000L   /* ms */
 #define RETRY_SLEEP_MAX     600000L /* ms == 10 minutes */
 
+#define MAX_PARALLEL 300 /* conservative */
+#define PARALLEL_DEFAULT 50
+
 #ifndef STDIN_FILENO
 #  define STDIN_FILENO  fileno(stdin)
 #endif
diff --git a/src/tool_metalink.c b/src/tool_metalink.c
index 28aa71707..0740407f9 100644
--- a/src/tool_metalink.c
+++ b/src/tool_metalink.c
@@ -5,7 +5,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -104,6 +104,7 @@ struct win32_crypto_hash {
 #include "tool_paramhlp.h"
 #include "tool_cfgable.h"
 #include "tool_metalink.h"
+#include "tool_operate.h"
 #include "tool_msgs.h"
 
 #include "memdebug.h" /* keep this as LAST include */
@@ -674,8 +675,9 @@ int metalink_check_hash(struct GlobalConfig *config,
   return rv;
 }
 
-static metalink_checksum *new_metalink_checksum_from_hex_digest
-(const metalink_digest_def *digest_def, const char *hex_digest)
+static metalink_checksum *
+checksum_from_hex_digest(const metalink_digest_def *digest_def,
+                         const char *hex_digest)
 {
   metalink_checksum *chksum;
   unsigned char *digest;
@@ -754,8 +756,8 @@ static metalinkfile *new_metalinkfile(metalink_file_t 
*fileinfo)
         if(curl_strequal(digest_alias->alias_name, (*p)->type) &&
            check_hex_digest((*p)->hash, digest_alias->digest_def)) {
           f->checksum =
-            new_metalink_checksum_from_hex_digest(digest_alias->digest_def,
-                                                  (*p)->hash);
+            checksum_from_hex_digest(digest_alias->digest_def,
+                                     (*p)->hash);
           break;
         }
       }
@@ -891,7 +893,8 @@ int parse_metalink(struct OperationConfig *config, struct 
OutStruct *outs,
 size_t metalink_write_cb(void *buffer, size_t sz, size_t nmemb,
                          void *userdata)
 {
-  struct OutStruct *outs = userdata;
+  struct per_transfer *per = userdata;
+  struct OutStruct *outs = &per->outs;
   struct OperationConfig *config = outs->config;
   int rv;
 
diff --git a/src/tool_metalink.h b/src/tool_metalink.h
index 7ee2736a6..1e367033c 100644
--- a/src/tool_metalink.h
+++ b/src/tool_metalink.h
@@ -7,7 +7,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2014, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2014, 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -22,6 +22,7 @@
  *
  ***************************************************************************/
 #include "tool_setup.h"
+#include "tool_sdecls.h"
 
 struct GlobalConfig;
 struct OperationConfig;
diff --git a/src/tool_operate.c b/src/tool_operate.c
index bf9a9b8d8..1b3bec9cf 100644
--- a/src/tool_operate.c
+++ b/src/tool_operate.c
@@ -75,6 +75,7 @@
 #include "tool_vms.h"
 #include "tool_help.h"
 #include "tool_hugehelp.h"
+#include "tool_progress.h"
 
 #include "memdebug.h" /* keep this as LAST include */
 
@@ -98,6 +99,11 @@ CURLcode curl_easy_perform_ev(CURL *easy);
   "this situation and\nhow to fix it, please visit the web page mentioned " \
   "above.\n"
 
+static CURLcode create_transfers(struct GlobalConfig *global,
+                                 struct OperationConfig *config,
+                                 CURLSH *share,
+                                 bool capath_from_env);
+
 static bool is_fatal_error(CURLcode code)
 {
   switch(code) {
@@ -187,135 +193,462 @@ static curl_off_t VmsSpecialSize(const char *name,
 
 #define BUFFER_SIZE (100*1024)
 
-static CURLcode operate_do(struct GlobalConfig *global,
-                           struct OperationConfig *config)
+struct per_transfer *transfers; /* first node */
+static struct per_transfer *transfersl; /* last node */
+
+static CURLcode add_transfer(struct per_transfer **per)
 {
-  char errorbuffer[CURL_ERROR_SIZE];
-  struct ProgressData progressbar;
-  struct getout *urlnode;
+  struct per_transfer *p;
+  p = calloc(sizeof(struct per_transfer), 1);
+  if(!p)
+    return CURLE_OUT_OF_MEMORY;
+  if(!transfers)
+    /* first entry */
+    transfersl = transfers = p;
+  else {
+    /* make the last node point to the new node */
+    transfersl->next = p;
+    /* make the new node point back to the formerly last node */
+    p->prev = transfersl;
+    /* move the last node pointer to the new entry */
+    transfersl = p;
+  }
+  *per = p;
+  all_xfers++; /* count total number of transfers added */
+  return CURLE_OK;
+}
 
-  struct HdrCbData hdrcbdata;
-  struct OutStruct heads;
+/* Remove the specified transfer from the list (and free it), return the next
+   in line */
+static struct per_transfer *del_transfer(struct per_transfer *per)
+{
+  struct per_transfer *n;
+  struct per_transfer *p;
+  DEBUGASSERT(transfers);
+  DEBUGASSERT(transfersl);
+  DEBUGASSERT(per);
 
-  metalinkfile *mlfile_last = NULL;
+  n = per->next;
+  p = per->prev;
 
-  CURL *curl = config->easy;
-  char *httpgetfields = NULL;
+  if(p)
+    p->next = n;
+  else
+    transfers = n;
+
+  if(n)
+    n->prev = p;
+  else
+    transfersl = p;
+
+  free(per);
 
+  return n;
+}
+
+static CURLcode pre_transfer(struct GlobalConfig *global,
+                             struct per_transfer *per)
+{
+  curl_off_t uploadfilesize = -1;
+  struct_stat fileinfo;
   CURLcode result = CURLE_OK;
-  unsigned long li;
-  bool capath_from_env;
 
-  /* Save the values of noprogress and isatty to restore them later on */
-  bool orig_noprogress = global->noprogress;
-  bool orig_isatty = global->isatty;
+  if(per->separator_err)
+    fprintf(global->errors, "%s\n", per->separator_err);
+  if(per->separator)
+    printf("%s\n", per->separator);
 
-  errorbuffer[0] = '\0';
+  if(per->uploadfile && !stdin_upload(per->uploadfile)) {
+    /* VMS Note:
+     *
+     * Reading binary from files can be a problem...  Only FIXED, VAR
+     * etc WITHOUT implied CC will work Others need a \n appended to a
+     * line
+     *
+     * - Stat gives a size but this is UNRELIABLE in VMS As a f.e. a
+     * fixed file with implied CC needs to have a byte added for every
+     * record processed, this can by derived from Filesize & recordsize
+     * for VARiable record files the records need to be counted!  for
+     * every record add 1 for linefeed and subtract 2 for the record
+     * header for VARIABLE header files only the bare record data needs
+     * to be considered with one appended if implied CC
+     */
+#ifdef __VMS
+    /* Calculate the real upload size for VMS */
+    per->infd = -1;
+    if(stat(per->uploadfile, &fileinfo) == 0) {
+      fileinfo.st_size = VmsSpecialSize(uploadfile, &fileinfo);
+      switch(fileinfo.st_fab_rfm) {
+      case FAB$C_VAR:
+      case FAB$C_VFC:
+      case FAB$C_STMCR:
+        per->infd = open(per->uploadfile, O_RDONLY | O_BINARY);
+        break;
+      default:
+        per->infd = open(per->uploadfile, O_RDONLY | O_BINARY,
+                        "rfm=stmlf", "ctx=stm");
+      }
+    }
+    if(per->infd == -1)
+#else
+      per->infd = open(per->uploadfile, O_RDONLY | O_BINARY);
+    if((per->infd == -1) || fstat(per->infd, &fileinfo))
+#endif
+    {
+      helpf(global->errors, "Can't open '%s'!\n", per->uploadfile);
+      if(per->infd != -1) {
+        close(per->infd);
+        per->infd = STDIN_FILENO;
+      }
+      return CURLE_READ_ERROR;
+    }
+    per->infdopen = TRUE;
 
-  /* default headers output stream is stdout */
-  memset(&hdrcbdata, 0, sizeof(struct HdrCbData));
-  memset(&heads, 0, sizeof(struct OutStruct));
-  heads.stream = stdout;
-  heads.config = config;
+    /* we ignore file size for char/block devices, sockets, etc. */
+    if(S_ISREG(fileinfo.st_mode))
+      uploadfilesize = fileinfo.st_size;
 
-  /*
-  ** Beyond this point no return'ing from this function allowed.
-  ** Jump to label 'quit_curl' in order to abandon this function
-  ** from outside of nested loops further down below.
-  */
+    if(uploadfilesize != -1)
+      my_setopt(per->curl, CURLOPT_INFILESIZE_LARGE, uploadfilesize);
+    per->input.fd = per->infd;
+  }
+  show_error:
+  return result;
+}
 
-  /* Check we have a url */
-  if(!config->url_list || !config->url_list->url) {
-    helpf(global->errors, "no URL specified!\n");
-    result = CURLE_FAILED_INIT;
-    goto quit_curl;
+/*
+ * Call this after a transfer has completed.
+ */
+static CURLcode post_transfer(struct GlobalConfig *global,
+                              CURLSH *share,
+                              struct per_transfer *per,
+                              CURLcode result,
+                              bool *retryp)
+{
+  struct OutStruct *outs = &per->outs;
+  CURL *curl = per->curl;
+  struct OperationConfig *config = per->config;
+
+  *retryp = FALSE;
+
+  if(per->infdopen)
+    close(per->infd);
+
+#ifdef __VMS
+  if(is_vms_shell()) {
+    /* VMS DCL shell behavior */
+    if(!global->showerror)
+      vms_show = VMSSTS_HIDE;
   }
+  else
+#endif
+    if(config->synthetic_error) {
+      ;
+    }
+    else if(result && global->showerror) {
+      fprintf(global->errors, "curl: (%d) %s\n", result,
+              (per->errorbuffer[0]) ? per->errorbuffer :
+              curl_easy_strerror(result));
+      if(result == CURLE_PEER_FAILED_VERIFICATION)
+        fputs(CURL_CA_CERT_ERRORMSG, global->errors);
+    }
 
-  /* On WIN32 we can't set the path to curl-ca-bundle.crt
-   * at compile time. So we look here for the file in two ways:
-   * 1: look at the environment variable CURL_CA_BUNDLE for a path
-   * 2: if #1 isn't found, use the windows API function SearchPath()
-   *    to find it along the app's path (includes app's dir and CWD)
-   *
-   * We support the environment variable thing for non-Windows platforms
-   * too. Just for the sake of it.
-   */
-  capath_from_env = false;
-  if(!config->cacert &&
-     !config->capath &&
-     !config->insecure_ok) {
-    struct curl_tlssessioninfo *tls_backend_info = NULL;
+  /* Set file extended attributes */
+  if(!result && config->xattr && outs->fopened && outs->stream) {
+    int rc = fwrite_xattr(curl, fileno(outs->stream));
+    if(rc)
+      warnf(config->global, "Error setting extended attributes: %s\n",
+            strerror(errno));
+  }
 
-    /* With the addition of CAINFO support for Schannel, this search could find
-     * a certificate bundle that was previously ignored. To maintain backward
-     * compatibility, only perform this search if not using Schannel.
-     */
-    result = curl_easy_getinfo(config->easy,
-                               CURLINFO_TLS_SSL_PTR,
-                               &tls_backend_info);
-    if(result) {
-      goto quit_curl;
+  if(!result && !outs->stream && !outs->bytes) {
+    /* we have received no data despite the transfer was successful
+       ==> force creation of an empty output file (if an output file
+       was specified) */
+    long cond_unmet = 0L;
+    /* do not create (or even overwrite) the file in case we get no
+       data because of unmet condition */
+    curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &cond_unmet);
+    if(!cond_unmet && !tool_create_output_file(outs))
+      result = CURLE_WRITE_ERROR;
+  }
+
+  if(!outs->s_isreg && outs->stream) {
+    /* Dump standard stream buffered data */
+    int rc = fflush(outs->stream);
+    if(!result && rc) {
+      /* something went wrong in the writing process */
+      result = CURLE_WRITE_ERROR;
+      fprintf(global->errors, "(%d) Failed writing body\n", result);
     }
+  }
 
-    /* Set the CA cert locations specified in the environment. For Windows if
-     * no environment-specified filename is found then check for CA bundle
-     * default filename curl-ca-bundle.crt in the user's PATH.
-     *
-     * If Schannel is the selected SSL backend then these locations are
-     * ignored. We allow setting CA location for schannel only when explicitly
-     * specified by the user via CURLOPT_CAINFO / --cacert.
-     */
-    if(tls_backend_info->backend != CURLSSLBACKEND_SCHANNEL) {
-      char *env;
-      env = curlx_getenv("CURL_CA_BUNDLE");
-      if(env) {
-        config->cacert = strdup(env);
-        if(!config->cacert) {
-          curl_free(env);
-          helpf(global->errors, "out of memory\n");
-          result = CURLE_OUT_OF_MEMORY;
-          goto quit_curl;
+#ifdef USE_METALINK
+  if(per->metalink && !per->metalink_next_res)
+    fprintf(global->errors, "Metalink: fetching (%s) from (%s) OK\n",
+            per->mlfile->filename, per->this_url);
+
+  if(!per->metalink && config->use_metalink && result == CURLE_OK) {
+    int rv = parse_metalink(config, outs, per->this_url);
+    if(!rv) {
+      fprintf(config->global->errors, "Metalink: parsing (%s) OK\n",
+              per->this_url);
+    }
+    else if(rv == -1)
+      fprintf(config->global->errors, "Metalink: parsing (%s) FAILED\n",
+              per->this_url);
+    result = create_transfers(global, config, share, FALSE);
+  }
+  else if(per->metalink && result == CURLE_OK && !per->metalink_next_res) {
+    int rv;
+    (void)fflush(outs->stream);
+    rv = metalink_check_hash(global, per->mlfile, outs->filename);
+    if(!rv)
+      per->metalink_next_res = 1;
+  }
+#else
+  (void)share;
+#endif /* USE_METALINK */
+
+#ifdef USE_METALINK
+  if(outs->metalink_parser)
+    metalink_parser_context_delete(outs->metalink_parser);
+#endif /* USE_METALINK */
+
+  if(outs->is_cd_filename && outs->stream && !global->mute &&
+     outs->filename)
+    printf("curl: Saved to filename '%s'\n", outs->filename);
+
+  /* if retry-max-time is non-zero, make sure we haven't exceeded the
+     time */
+  if(per->retry_numretries &&
+     (!config->retry_maxtime ||
+      (tvdiff(tvnow(), per->retrystart) <
+       config->retry_maxtime*1000L)) ) {
+    enum {
+      RETRY_NO,
+      RETRY_TIMEOUT,
+      RETRY_CONNREFUSED,
+      RETRY_HTTP,
+      RETRY_FTP,
+      RETRY_LAST /* not used */
+    } retry = RETRY_NO;
+    long response;
+    if((CURLE_OPERATION_TIMEDOUT == result) ||
+       (CURLE_COULDNT_RESOLVE_HOST == result) ||
+       (CURLE_COULDNT_RESOLVE_PROXY == result) ||
+       (CURLE_FTP_ACCEPT_TIMEOUT == result))
+      /* retry timeout always */
+      retry = RETRY_TIMEOUT;
+    else if(config->retry_connrefused &&
+            (CURLE_COULDNT_CONNECT == result)) {
+      long oserrno;
+      curl_easy_getinfo(curl, CURLINFO_OS_ERRNO, &oserrno);
+      if(ECONNREFUSED == oserrno)
+        retry = RETRY_CONNREFUSED;
+    }
+    else if((CURLE_OK == result) ||
+            (config->failonerror &&
+             (CURLE_HTTP_RETURNED_ERROR == result))) {
+      /* If it returned OK. _or_ failonerror was enabled and it
+         returned due to such an error, check for HTTP transient
+         errors to retry on. */
+      char *effective_url = NULL;
+      curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &effective_url);
+      if(effective_url &&
+         checkprefix("http", effective_url)) {
+        /* This was HTTP(S) */
+        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
+
+        switch(response) {
+        case 500: /* Internal Server Error */
+        case 502: /* Bad Gateway */
+        case 503: /* Service Unavailable */
+        case 504: /* Gateway Timeout */
+          retry = RETRY_HTTP;
+          /*
+           * At this point, we have already written data to the output
+           * file (or terminal). If we write to a file, we must rewind
+           * or close/re-open the file so that the next attempt starts
+           * over from the beginning.
+           *
+           * TODO: similar action for the upload case. We might need
+           * to start over reading from a previous point if we have
+           * uploaded something when this was returned.
+           */
+          break;
         }
       }
-      else {
-        env = curlx_getenv("SSL_CERT_DIR");
-        if(env) {
-          config->capath = strdup(env);
-          if(!config->capath) {
-            curl_free(env);
-            helpf(global->errors, "out of memory\n");
-            result = CURLE_OUT_OF_MEMORY;
-            goto quit_curl;
-          }
-          capath_from_env = true;
+    } /* if CURLE_OK */
+    else if(result) {
+      long protocol;
+
+      curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
+      curl_easy_getinfo(curl, CURLINFO_PROTOCOL, &protocol);
+
+      if((protocol == CURLPROTO_FTP || protocol == CURLPROTO_FTPS) &&
+         response / 100 == 4)
+        /*
+         * This is typically when the FTP server only allows a certain
+         * amount of users and we are not one of them.  All 4xx codes
+         * are transient.
+         */
+        retry = RETRY_FTP;
+    }
+
+    if(retry) {
+      static const char * const m[]={
+        NULL,
+        "timeout",
+        "connection refused",
+        "HTTP error",
+        "FTP error"
+      };
+
+      warnf(config->global, "Transient problem: %s "
+            "Will retry in %ld seconds. "
+            "%ld retries left.\n",
+            m[retry], per->retry_sleep/1000L, per->retry_numretries);
+
+      tool_go_sleep(per->retry_sleep);
+      per->retry_numretries--;
+      if(!config->retry_delay) {
+        per->retry_sleep *= 2;
+        if(per->retry_sleep > RETRY_SLEEP_MAX)
+          per->retry_sleep = RETRY_SLEEP_MAX;
+      }
+      if(outs->bytes && outs->filename && outs->stream) {
+        int rc;
+        /* We have written data to an output file; truncate it
+         */
+        if(!global->mute)
+          fprintf(global->errors, "Throwing away %"
+                  CURL_FORMAT_CURL_OFF_T " bytes\n",
+                  outs->bytes);
+        fflush(outs->stream);
+        /* truncate file at the position where we started appending */
+#ifdef HAVE_FTRUNCATE
+        if(ftruncate(fileno(outs->stream), outs->init)) {
+          /* when truncate fails, we can't just append as then we'll
+             create something strange, bail out */
+          if(!global->mute)
+            fprintf(global->errors,
+                    "failed to truncate, exiting\n");
+          return CURLE_WRITE_ERROR;
         }
-        else {
-          env = curlx_getenv("SSL_CERT_FILE");
-          if(env) {
-            config->cacert = strdup(env);
-            if(!config->cacert) {
-              curl_free(env);
-              helpf(global->errors, "out of memory\n");
-              result = CURLE_OUT_OF_MEMORY;
-              goto quit_curl;
-            }
-          }
+        /* now seek to the end of the file, the position where we
+           just truncated the file in a large file-safe way */
+        rc = fseek(outs->stream, 0, SEEK_END);
+#else
+        /* ftruncate is not available, so just reposition the file
+           to the location we would have truncated it. This won't
+           work properly with large files on 32-bit systems, but
+           most of those will have ftruncate. */
+        rc = fseek(outs->stream, (long)outs->init, SEEK_SET);
+#endif
+        if(rc) {
+          if(!global->mute)
+            fprintf(global->errors,
+                    "failed seeking to end of file, exiting\n");
+          return CURLE_WRITE_ERROR;
         }
+        outs->bytes = 0; /* clear for next round */
       }
-
-      if(env)
-        curl_free(env);
-#ifdef WIN32
-      else {
-        result = FindWin32CACert(config, tls_backend_info->backend,
-                                 "curl-ca-bundle.crt");
-        if(result)
-          goto quit_curl;
+      *retryp = TRUE; /* curl_easy_perform loop */
+      return CURLE_OK;
+    }
+  } /* if retry_numretries */
+  else if(per->metalink) {
+    /* Metalink: Decide to try the next resource or not. Try the next resource
+       if download was not successful. */
+    long response;
+    if(CURLE_OK == result) {
+      /* TODO We want to try next resource when download was
+         not successful. How to know that? */
+      char *effective_url = NULL;
+      curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &effective_url);
+      if(effective_url &&
+         curl_strnequal(effective_url, "http", 4)) {
+        /* This was HTTP(S) */
+        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
+        if(response != 200 && response != 206) {
+          per->metalink_next_res = 1;
+          fprintf(global->errors,
+                  "Metalink: fetching (%s) from (%s) FAILED "
+                  "(HTTP status code %ld)\n",
+                  per->mlfile->filename, per->this_url, response);
+        }
       }
-#endif
+    }
+    else {
+      per->metalink_next_res = 1;
+      fprintf(global->errors,
+              "Metalink: fetching (%s) from (%s) FAILED (%s)\n",
+              per->mlfile->filename, per->this_url,
+              curl_easy_strerror(result));
     }
   }
 
+  if((global->progressmode == CURL_PROGRESS_BAR) &&
+     per->progressbar.calls)
+    /* if the custom progress bar has been displayed, we output a
+       newline here */
+    fputs("\n", per->progressbar.out);
+
+  if(config->writeout)
+    ourWriteOut(per->curl, &per->outs, config->writeout);
+
+  /* Close the outs file */
+  if(outs->fopened && outs->stream) {
+    int rc = fclose(outs->stream);
+    if(!result && rc) {
+      /* something went wrong in the writing process */
+      result = CURLE_WRITE_ERROR;
+      fprintf(global->errors, "(%d) Failed writing body\n", result);
+    }
+  }
+
+  /* File time can only be set _after_ the file has been closed */
+  if(!result && config->remote_time && outs->s_isreg && outs->filename) {
+    /* Ask libcurl if we got a remote file time */
+    curl_off_t filetime = -1;
+    curl_easy_getinfo(curl, CURLINFO_FILETIME_T, &filetime);
+    setfiletime(filetime, outs->filename, config->global->errors);
+  }
+
+  /* Close function-local opened file descriptors */
+  if(per->heads.fopened && per->heads.stream)
+    fclose(per->heads.stream);
+
+  if(per->heads.alloc_filename)
+    Curl_safefree(per->heads.filename);
+
+  curl_easy_cleanup(per->curl);
+  if(outs->alloc_filename)
+    free(outs->filename);
+  free(per->this_url);
+  free(per->separator_err);
+  free(per->separator);
+  free(per->outfile);
+  free(per->uploadfile);
+
+  return CURLE_OK;
+}
+
+/* go through the list of URLs and configs and add transfers */
+
+static CURLcode create_transfers(struct GlobalConfig *global,
+                                 struct OperationConfig *config,
+                                 CURLSH *share,
+                                 bool capath_from_env)
+{
+  CURLcode result = CURLE_OK;
+  struct getout *urlnode;
+  metalinkfile *mlfile_last = NULL;
+  bool orig_noprogress = global->noprogress;
+  bool orig_isatty = global->isatty;
+  char *httpgetfields = NULL;
+
   if(config->postfields) {
     if(config->use_httpget) {
       /* Use the postfields data for a http get */
@@ -341,44 +674,14 @@ static CURLcode operate_do(struct GlobalConfig *global,
     }
   }
 
-  /* Single header file for all URLs */
-  if(config->headerfile) {
-    /* open file for output: */
-    if(strcmp(config->headerfile, "-")) {
-      FILE *newfile = fopen(config->headerfile, "wb");
-      if(!newfile) {
-        warnf(config->global, "Failed to open %s\n", config->headerfile);
-        result = CURLE_WRITE_ERROR;
-        goto quit_curl;
-      }
-      else {
-        heads.filename = config->headerfile;
-        heads.s_isreg = TRUE;
-        heads.fopened = TRUE;
-        heads.stream = newfile;
-      }
-    }
-    else {
-      /* always use binary mode for protocol header output */
-      set_binmode(heads.stream);
-    }
-  }
-
-  /*
-  ** Nested loops start here.
-  */
-
-  /* loop through the list of given URLs */
-
   for(urlnode = config->url_list; urlnode; urlnode = urlnode->next) {
-
+    unsigned long li;
     unsigned long up; /* upload file counter within a single upload glob */
     char *infiles; /* might be a glob pattern */
     char *outfiles;
     unsigned long infilenum;
     URLGlob *inglob;
-
-    int metalink = 0; /* nonzero for metalink download. */
+    bool metalink = FALSE; /* metalink download? */
     metalinkfile *mlfile;
     metalink_resource *mlres;
 
@@ -461,8 +764,6 @@ static CURLcode operate_do(struct GlobalConfig *global,
             result = CURLE_OUT_OF_MEMORY;
           }
         }
-        else
-          uploadfile = NULL;
         if(!uploadfile)
           break;
       }
@@ -490,65 +791,102 @@ static CURLcode operate_do(struct GlobalConfig *global,
 
       /* Here's looping around each globbed URL */
       for(li = 0 ; li < urlnum; li++) {
+        struct per_transfer *per;
+        struct OutStruct *outs;
+        struct InStruct *input;
+        struct OutStruct *heads;
+        struct HdrCbData *hdrcbdata = NULL;
+        CURL *curl = curl_easy_init();
+
+        result = add_transfer(&per);
+        if(result || !curl) {
+          free(uploadfile);
+          curl_easy_cleanup(curl);
+          result = CURLE_OUT_OF_MEMORY;
+          goto show_error;
+        }
+        per->config = config;
+        per->curl = curl;
+        per->uploadfile = uploadfile;
+
+        /* default headers output stream is stdout */
+        heads = &per->heads;
+        heads->stream = stdout;
+        heads->config = config;
+
+        /* Single header file for all URLs */
+        if(config->headerfile) {
+          /* open file for output: */
+          if(strcmp(config->headerfile, "-")) {
+            FILE *newfile = fopen(config->headerfile, "wb");
+            if(!newfile) {
+              warnf(config->global, "Failed to open %s\n", config->headerfile);
+              result = CURLE_WRITE_ERROR;
+              goto quit_curl;
+            }
+            else {
+              heads->filename = config->headerfile;
+              heads->s_isreg = TRUE;
+              heads->fopened = TRUE;
+              heads->stream = newfile;
+            }
+          }
+          else {
+            /* always use binary mode for protocol header output */
+            set_binmode(heads->stream);
+          }
+        }
+
+
+        hdrcbdata = &per->hdrcbdata;
+
+        outs = &per->outs;
+        input = &per->input;
 
-        int infd;
-        bool infdopen;
-        char *outfile;
-        struct OutStruct outs;
-        struct InStruct input;
-        struct timeval retrystart;
-        curl_off_t uploadfilesize;
-        long retry_numretries;
-        long retry_sleep_default;
-        long retry_sleep;
-        char *this_url = NULL;
-        int metalink_next_res = 0;
-
-        outfile = NULL;
-        infdopen = FALSE;
-        infd = STDIN_FILENO;
-        uploadfilesize = -1; /* -1 means unknown */
+        per->outfile = NULL;
+        per->infdopen = FALSE;
+        per->infd = STDIN_FILENO;
 
         /* default output stream is stdout */
-        memset(&outs, 0, sizeof(struct OutStruct));
-        outs.stream = stdout;
-        outs.config = config;
+        outs->stream = stdout;
+        outs->config = config;
 
         if(metalink) {
           /* For Metalink download, use name in Metalink file as
              filename. */
-          outfile = strdup(mlfile->filename);
-          if(!outfile) {
+          per->outfile = strdup(mlfile->filename);
+          if(!per->outfile) {
             result = CURLE_OUT_OF_MEMORY;
             goto show_error;
           }
-          this_url = strdup(mlres->url);
-          if(!this_url) {
+          per->this_url = strdup(mlres->url);
+          if(!per->this_url) {
             result = CURLE_OUT_OF_MEMORY;
             goto show_error;
           }
+          per->mlfile = mlfile;
         }
         else {
           if(urls) {
-            result = glob_next_url(&this_url, urls);
+            result = glob_next_url(&per->this_url, urls);
             if(result)
               goto show_error;
           }
           else if(!li) {
-            this_url = strdup(urlnode->url);
-            if(!this_url) {
+            per->this_url = strdup(urlnode->url);
+            if(!per->this_url) {
               result = CURLE_OUT_OF_MEMORY;
               goto show_error;
             }
           }
           else
-            this_url = NULL;
-          if(!this_url)
+            per->this_url = NULL;
+          if(!per->this_url)
             break;
 
           if(outfiles) {
-            outfile = strdup(outfiles);
-            if(!outfile) {
+            per->outfile = strdup(outfiles);
+            if(!per->outfile) {
               result = CURLE_OUT_OF_MEMORY;
               goto show_error;
             }
@@ -556,7 +894,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
         }
 
         if(((urlnode->flags&GETOUT_USEREMOTE) ||
-            (outfile && strcmp("-", outfile))) &&
+            (per->outfile && strcmp("-", per->outfile))) &&
            (metalink || !config->use_metalink)) {
 
           /*
@@ -564,12 +902,12 @@ static CURLcode operate_do(struct GlobalConfig *global,
            * decided we want to use the remote file name.
            */
 
-          if(!outfile) {
+          if(!per->outfile) {
             /* extract the file name from the URL */
-            result = get_url_file_name(&outfile, this_url);
+            result = get_url_file_name(&per->outfile, per->this_url);
             if(result)
               goto show_error;
-            if(!*outfile && !config->content_disposition) {
+            if(!*per->outfile && !config->content_disposition) {
               helpf(global->errors, "Remote file name has no length!\n");
               result = CURLE_WRITE_ERROR;
               goto quit_urls;
@@ -577,8 +915,8 @@ static CURLcode operate_do(struct GlobalConfig *global,
           }
           else if(urls) {
             /* fill '#1' ... '#9' terms from URL pattern */
-            char *storefile = outfile;
-            result = glob_match_url(&outfile, storefile, urls);
+            char *storefile = per->outfile;
+            result = glob_match_url(&per->outfile, storefile, urls);
             Curl_safefree(storefile);
             if(result) {
               /* bad globbing */
@@ -591,7 +929,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
              file output call */
 
           if(config->create_dirs || metalink) {
-            result = create_dir_hierarchy(outfile, global->errors);
+            result = create_dir_hierarchy(per->outfile, global->errors);
             /* create_dir_hierarchy shows error upon CURLE_WRITE_ERROR */
             if(result == CURLE_WRITE_ERROR)
               goto quit_urls;
@@ -603,7 +941,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
           if((urlnode->flags & GETOUT_USEREMOTE)
              && config->content_disposition) {
             /* Our header callback MIGHT set the filename */
-            DEBUGASSERT(!outs.filename);
+            DEBUGASSERT(!outs->filename);
           }
 
           if(config->resume_from_current) {
@@ -611,7 +949,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
                of the file as it is now and open it for append instead */
             struct_stat fileinfo;
             /* VMS -- Danger, the filesize is only valid for stream files */
-            if(0 == stat(outfile, &fileinfo))
+            if(0 == stat(per->outfile, &fileinfo))
               /* set offset to current file size: */
               config->resume_from = fileinfo.st_size;
             else
@@ -627,87 +965,36 @@ static CURLcode operate_do(struct GlobalConfig *global,
                                "ctx=stm", "rfm=stmlf", "rat=cr", "mrs=0");
 #else
             /* open file for output: */
-            FILE *file = fopen(outfile, config->resume_from?"ab":"wb");
+            FILE *file = fopen(per->outfile, config->resume_from?"ab":"wb");
 #endif
             if(!file) {
-              helpf(global->errors, "Can't open '%s'!\n", outfile);
+              helpf(global->errors, "Can't open '%s'!\n", per->outfile);
               result = CURLE_WRITE_ERROR;
               goto quit_urls;
             }
-            outs.fopened = TRUE;
-            outs.stream = file;
-            outs.init = config->resume_from;
+            outs->fopened = TRUE;
+            outs->stream = file;
+            outs->init = config->resume_from;
           }
           else {
-            outs.stream = NULL; /* open when needed */
+            outs->stream = NULL; /* open when needed */
           }
-          outs.filename = outfile;
-          outs.s_isreg = TRUE;
+          outs->filename = per->outfile;
+          outs->s_isreg = TRUE;
         }
 
-        if(uploadfile && !stdin_upload(uploadfile)) {
+        if(per->uploadfile && !stdin_upload(per->uploadfile)) {
           /*
            * We have specified a file to upload and it isn't "-".
            */
-          struct_stat fileinfo;
-
-          this_url = add_file_name_to_url(curl, this_url, uploadfile);
-          if(!this_url) {
+          char *nurl = add_file_name_to_url(per->this_url, per->uploadfile);
+          if(!nurl) {
             result = CURLE_OUT_OF_MEMORY;
             goto show_error;
           }
-          /* VMS Note:
-           *
-           * Reading binary from files can be a problem...  Only FIXED, VAR
-           * etc WITHOUT implied CC will work Others need a \n appended to a
-           * line
-           *
-           * - Stat gives a size but this is UNRELIABLE in VMS As a f.e. a
-           * fixed file with implied CC needs to have a byte added for every
-           * record processed, this can by derived from Filesize & recordsize
-           * for VARiable record files the records need to be counted!  for
-           * every record add 1 for linefeed and subtract 2 for the record
-           * header for VARIABLE header files only the bare record data needs
-           * to be considered with one appended if implied CC
-           */
-#ifdef __VMS
-          /* Calculate the real upload size for VMS */
-          infd = -1;
-          if(stat(uploadfile, &fileinfo) == 0) {
-            fileinfo.st_size = VmsSpecialSize(uploadfile, &fileinfo);
-            switch(fileinfo.st_fab_rfm) {
-            case FAB$C_VAR:
-            case FAB$C_VFC:
-            case FAB$C_STMCR:
-              infd = open(uploadfile, O_RDONLY | O_BINARY);
-              break;
-            default:
-              infd = open(uploadfile, O_RDONLY | O_BINARY,
-                          "rfm=stmlf", "ctx=stm");
-            }
-          }
-          if(infd == -1)
-#else
-          infd = open(uploadfile, O_RDONLY | O_BINARY);
-          if((infd == -1) || fstat(infd, &fileinfo))
-#endif
-          {
-            helpf(global->errors, "Can't open '%s'!\n", uploadfile);
-            if(infd != -1) {
-              close(infd);
-              infd = STDIN_FILENO;
-            }
-            result = CURLE_READ_ERROR;
-            goto quit_urls;
-          }
-          infdopen = TRUE;
-
-          /* we ignore file size for char/block devices, sockets, etc. */
-          if(S_ISREG(fileinfo.st_mode))
-            uploadfilesize = fileinfo.st_size;
-
+          per->this_url = nurl;
         }
-        else if(uploadfile && stdin_upload(uploadfile)) {
+        else if(per->uploadfile && stdin_upload(per->uploadfile)) {
           /* count to see if there are more than one auth bit set
              in the authtype field */
           int authbits = 0;
@@ -733,22 +1020,22 @@ static CURLcode operate_do(struct GlobalConfig *global,
                   " file or a fixed auth type instead!\n");
           }
 
-          DEBUGASSERT(infdopen == FALSE);
-          DEBUGASSERT(infd == STDIN_FILENO);
+          DEBUGASSERT(per->infdopen == FALSE);
+          DEBUGASSERT(per->infd == STDIN_FILENO);
 
           set_binmode(stdin);
-          if(!strcmp(uploadfile, ".")) {
-            if(curlx_nonblock((curl_socket_t)infd, TRUE) < 0)
+          if(!strcmp(per->uploadfile, ".")) {
+            if(curlx_nonblock((curl_socket_t)per->infd, TRUE) < 0)
               warnf(config->global,
-                    "fcntl failed on fd=%d: %s\n", infd, strerror(errno));
+                    "fcntl failed on fd=%d: %s\n", per->infd, strerror(errno));
           }
         }
 
-        if(uploadfile && config->resume_from_current)
+        if(per->uploadfile && config->resume_from_current)
           config->resume_from = -1; /* -1 will then force get-it-yourself */
 
-        if(output_expected(this_url, uploadfile) && outs.stream &&
-           isatty(fileno(outs.stream)))
+        if(output_expected(per->this_url, per->uploadfile) && outs->stream &&
+           isatty(fileno(outs->stream)))
           /* we send the output to a tty, therefore we switch off the progress
              meter */
           global->noprogress = global->isatty = TRUE;
@@ -760,20 +1047,22 @@ static CURLcode operate_do(struct GlobalConfig *global,
         }
 
         if(urlnum > 1 && !global->mute) {
-          fprintf(global->errors, "\n[%lu/%lu]: %s --> %s\n",
-                  li + 1, urlnum, this_url, outfile ? outfile : "<stdout>");
+          per->separator_err =
+            aprintf("\n[%lu/%lu]: %s --> %s",
+                    li + 1, urlnum, per->this_url,
+                    per->outfile ? per->outfile : "<stdout>");
           if(separator)
-            printf("%s%s\n", CURLseparator, this_url);
+            per->separator = aprintf("%s%s", CURLseparator, per->this_url);
         }
         if(httpgetfields) {
           char *urlbuffer;
           /* Find out whether the url contains a file name */
-          const char *pc = strstr(this_url, "://");
+          const char *pc = strstr(per->this_url, "://");
           char sep = '?';
           if(pc)
             pc += 3;
           else
-            pc = this_url;
+            pc = per->this_url;
 
           pc = strrchr(pc, '/'); /* check for a slash */
 
@@ -789,33 +1078,39 @@ static CURLcode operate_do(struct GlobalConfig *global,
            * Then append ? followed by the get fields to the url.
            */
           if(pc)
-            urlbuffer = aprintf("%s%c%s", this_url, sep, httpgetfields);
+            urlbuffer = aprintf("%s%c%s", per->this_url, sep, httpgetfields);
           else
             /* Append  / before the ? to create a well-formed url
                if the url contains a hostname only
             */
-            urlbuffer = aprintf("%s/?%s", this_url, httpgetfields);
+            urlbuffer = aprintf("%s/?%s", per->this_url, httpgetfields);
 
           if(!urlbuffer) {
             result = CURLE_OUT_OF_MEMORY;
             goto show_error;
           }
 
-          Curl_safefree(this_url); /* free previous URL */
-          this_url = urlbuffer; /* use our new URL instead! */
+          Curl_safefree(per->this_url); /* free previous URL */
+          per->this_url = urlbuffer; /* use our new URL instead! */
         }
 
         if(!global->errors)
           global->errors = stderr;
 
-        if((!outfile || !strcmp(outfile, "-")) && !config->use_ascii) {
+        if((!per->outfile || !strcmp(per->outfile, "-")) &&
+           !config->use_ascii) {
           /* We get the output to stdout and we have not got the ASCII/text
              flag, then set stdout to be binary */
           set_binmode(stdout);
         }
 
         /* explicitly passed to stdout means okaying binary gunk */
-        config->terminal_binary_ok = (outfile && !strcmp(outfile, "-"));
+        config->terminal_binary_ok =
+          (per->outfile && !strcmp(per->outfile, "-"));
+
+        /* avoid having this setopt added to the --libcurl source
+           output */
+        curl_easy_setopt(curl, CURLOPT_SHARE, share);
 
         if(!config->tcp_nodelay)
           my_setopt(curl, CURLOPT_TCP_NODELAY, 0L);
@@ -824,8 +1119,8 @@ static CURLcode operate_do(struct GlobalConfig *global,
           my_setopt(curl, CURLOPT_TCP_FASTOPEN, 1L);
 
         /* where to store */
-        my_setopt(curl, CURLOPT_WRITEDATA, &outs);
-        my_setopt(curl, CURLOPT_INTERLEAVEDATA, &outs);
+        my_setopt(curl, CURLOPT_WRITEDATA, per);
+        my_setopt(curl, CURLOPT_INTERLEAVEDATA, per);
 
         if(metalink || !config->use_metalink)
           /* what call to write */
@@ -838,8 +1133,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
 #endif /* USE_METALINK */
 
         /* for uploads */
-        input.fd = infd;
-        input.config = config;
+        input->config = config;
         /* Note that if CURLOPT_READFUNCTION is fread (the default), then
          * lib/telnet.c will Curl_poll() on the input file descriptor
          * rather then calling the READFUNCTION at regular intervals.
@@ -847,13 +1141,13 @@ static CURLcode operate_do(struct GlobalConfig *global,
          * behaviour, by omitting to set the READFUNCTION & READDATA options,
          * have not been determined.
          */
-        my_setopt(curl, CURLOPT_READDATA, &input);
+        my_setopt(curl, CURLOPT_READDATA, input);
         /* what call to read */
         my_setopt(curl, CURLOPT_READFUNCTION, tool_read_cb);
 
         /* in 7.18.0, the CURLOPT_SEEKFUNCTION/DATA pair is taking over what
            CURLOPT_IOCTLFUNCTION/DATA pair previously provided for seeking */
-        my_setopt(curl, CURLOPT_SEEKDATA, &input);
+        my_setopt(curl, CURLOPT_SEEKDATA, input);
         my_setopt(curl, CURLOPT_SEEKFUNCTION, tool_seek_cb);
 
         if(config->recvpersecond &&
@@ -863,10 +1157,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
         else
           my_setopt(curl, CURLOPT_BUFFERSIZE, (long)BUFFER_SIZE);
 
-        /* size of uploaded file: */
-        if(uploadfilesize != -1)
-          my_setopt(curl, CURLOPT_INFILESIZE_LARGE, uploadfilesize);
-        my_setopt_str(curl, CURLOPT_URL, this_url);     /* what to fetch */
+        my_setopt_str(curl, CURLOPT_URL, per->this_url);
         my_setopt(curl, CURLOPT_NOPROGRESS, global->noprogress?1L:0L);
         if(config->no_body)
           my_setopt(curl, CURLOPT_NOBODY, 1L);
@@ -915,7 +1206,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
 
         my_setopt(curl, CURLOPT_FAILONERROR, config->failonerror?1L:0L);
         my_setopt(curl, CURLOPT_REQUEST_TARGET, config->request_target);
-        my_setopt(curl, CURLOPT_UPLOAD, uploadfile?1L:0L);
+        my_setopt(curl, CURLOPT_UPLOAD, per->uploadfile?1L:0L);
         my_setopt(curl, CURLOPT_DIRLISTONLY, config->dirlistonly?1L:0L);
         my_setopt(curl, CURLOPT_APPEND, config->ftp_append?1L:0L);
 
@@ -934,7 +1225,7 @@ static CURLcode operate_do(struct GlobalConfig *global,
           my_setopt_str(curl, CURLOPT_LOGIN_OPTIONS, config->login_options);
         my_setopt_str(curl, CURLOPT_USERPWD, config->userpwd);
         my_setopt_str(curl, CURLOPT_RANGE, config->range);
-        my_setopt(curl, CURLOPT_ERRORBUFFER, errorbuffer);
+        my_setopt(curl, CURLOPT_ERRORBUFFER, per->errorbuffer);
         my_setopt(curl, CURLOPT_TIMEOUT_MS, (long)(config->timeout * 1000));
 
         switch(config->httpreq) {
@@ -1230,14 +1521,14 @@ static CURLcode operate_do(struct GlobalConfig *global,
         /* three new ones in libcurl 7.3: */
         my_setopt_str(curl, CURLOPT_INTERFACE, config->iface);
         my_setopt_str(curl, CURLOPT_KRBLEVEL, config->krblevel);
+        progressbarinit(&per->progressbar, config);
 
-        progressbarinit(&progressbar, config);
         if((global->progressmode == CURL_PROGRESS_BAR) &&
            !global->noprogress && !global->mute) {
           /* we want the alternative style, then we have to implement it
              ourselves! */
           my_setopt(curl, CURLOPT_XFERINFOFUNCTION, tool_progress_cb);
-          my_setopt(curl, CURLOPT_XFERINFODATA, &progressbar);
+          my_setopt(curl, CURLOPT_XFERINFODATA, &per->progressbar);
         }
 
         /* new in libcurl 7.24.0: */
@@ -1420,17 +1711,17 @@ static CURLcode operate_do(struct GlobalConfig *global,
 
         if(config->content_disposition
            && (urlnode->flags & GETOUT_USEREMOTE))
-          hdrcbdata.honor_cd_filename = TRUE;
+          hdrcbdata->honor_cd_filename = TRUE;
         else
-          hdrcbdata.honor_cd_filename = FALSE;
+          hdrcbdata->honor_cd_filename = FALSE;
 
-        hdrcbdata.outs = &outs;
-        hdrcbdata.heads = &heads;
-        hdrcbdata.global = global;
-        hdrcbdata.config = config;
+        hdrcbdata->outs = outs;
+        hdrcbdata->heads = heads;
+        hdrcbdata->global = global;
+        hdrcbdata->config = config;
 
         my_setopt(curl, CURLOPT_HEADERFUNCTION, tool_header_cb);
-        my_setopt(curl, CURLOPT_HEADERDATA, &hdrcbdata);
+        my_setopt(curl, CURLOPT_HEADERDATA, per);
 
         if(config->resolve)
           /* new in 7.21.3 */
@@ -1536,397 +1827,40 @@ static CURLcode operate_do(struct GlobalConfig *global,
           my_setopt_str(curl, CURLOPT_ALTSVC, config->altsvc);
 #endif
 
-        /* initialize retry vars for loop below */
-        retry_sleep_default = (config->retry_delay) ?
-          config->retry_delay*1000L : RETRY_SLEEP_DEFAULT; /* ms */
-
-        retry_numretries = config->req_retry;
-        retry_sleep = retry_sleep_default; /* ms */
-        retrystart = tvnow();
-
-#ifndef CURL_DISABLE_LIBCURL_OPTION
-        if(global->libcurl) {
-          result = easysrc_perform();
-          if(result)
-            goto show_error;
-        }
-#endif
-
         for(;;) {
 #ifdef USE_METALINK
           if(!metalink && config->use_metalink) {
-            /* If outs.metalink_parser is non-NULL, delete it first. */
-            if(outs.metalink_parser)
-              metalink_parser_context_delete(outs.metalink_parser);
-            outs.metalink_parser = metalink_parser_context_new();
-            if(outs.metalink_parser == NULL) {
+            outs->metalink_parser = metalink_parser_context_new();
+            if(outs->metalink_parser == NULL) {
               result = CURLE_OUT_OF_MEMORY;
               goto show_error;
             }
             fprintf(config->global->errors,
-                    "Metalink: parsing (%s) metalink/XML...\n", this_url);
+                    "Metalink: parsing (%s) metalink/XML...\n", per->this_url);
           }
           else if(metalink)
             fprintf(config->global->errors,
                     "Metalink: fetching (%s) from (%s)...\n",
-                    mlfile->filename, this_url);
+                    mlfile->filename, per->this_url);
 #endif /* USE_METALINK */
 
-#ifdef CURLDEBUG
-          if(config->test_event_based)
-            result = curl_easy_perform_ev(curl);
-          else
-#endif
-          result = curl_easy_perform(curl);
-
-          if(!result && !outs.stream && !outs.bytes) {
-            /* we have received no data despite the transfer was successful
-               ==> force cration of an empty output file (if an output file
-               was specified) */
-            long cond_unmet = 0L;
-            /* do not create (or even overwrite) the file in case we get no
-               data because of unmet condition */
-            curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &cond_unmet);
-            if(!cond_unmet && !tool_create_output_file(&outs))
-              result = CURLE_WRITE_ERROR;
-          }
+          per->metalink = metalink;
+          /* initialize retry vars for loop below */
+          per->retry_sleep_default = (config->retry_delay) ?
+            config->retry_delay*1000L : RETRY_SLEEP_DEFAULT; /* ms */
+          per->retry_numretries = config->req_retry;
+          per->retry_sleep = per->retry_sleep_default; /* ms */
+          per->retrystart = tvnow();
 
-          if(outs.is_cd_filename && outs.stream && !global->mute &&
-             outs.filename)
-            printf("curl: Saved to filename '%s'\n", outs.filename);
-
-          /* if retry-max-time is non-zero, make sure we haven't exceeded the
-             time */
-          if(retry_numretries &&
-             (!config->retry_maxtime ||
-              (tvdiff(tvnow(), retrystart) <
-               config->retry_maxtime*1000L)) ) {
-            enum {
-              RETRY_NO,
-              RETRY_TIMEOUT,
-              RETRY_CONNREFUSED,
-              RETRY_HTTP,
-              RETRY_FTP,
-              RETRY_LAST /* not used */
-            } retry = RETRY_NO;
-            long response;
-            if((CURLE_OPERATION_TIMEDOUT == result) ||
-               (CURLE_COULDNT_RESOLVE_HOST == result) ||
-               (CURLE_COULDNT_RESOLVE_PROXY == result) ||
-               (CURLE_FTP_ACCEPT_TIMEOUT == result))
-              /* retry timeout always */
-              retry = RETRY_TIMEOUT;
-            else if(config->retry_connrefused &&
-                    (CURLE_COULDNT_CONNECT == result)) {
-              long oserrno;
-              curl_easy_getinfo(curl, CURLINFO_OS_ERRNO, &oserrno);
-              if(ECONNREFUSED == oserrno)
-                retry = RETRY_CONNREFUSED;
-            }
-            else if((CURLE_OK == result) ||
-                    (config->failonerror &&
-                     (CURLE_HTTP_RETURNED_ERROR == result))) {
-              /* If it returned OK. _or_ failonerror was enabled and it
-                 returned due to such an error, check for HTTP transient
-                 errors to retry on. */
-              char *effective_url = NULL;
-              curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &effective_url);
-              if(effective_url &&
-                 checkprefix("http", effective_url)) {
-                /* This was HTTP(S) */
-                curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
-
-                switch(response) {
-                case 408: /* Request Timeout */
-                case 500: /* Internal Server Error */
-                case 502: /* Bad Gateway */
-                case 503: /* Service Unavailable */
-                case 504: /* Gateway Timeout */
-                  retry = RETRY_HTTP;
-                  /*
-                   * At this point, we have already written data to the output
-                   * file (or terminal). If we write to a file, we must rewind
-                   * or close/re-open the file so that the next attempt starts
-                   * over from the beginning.
-                   */
-                  break;
-                }
-              }
-            } /* if CURLE_OK */
-            else if(result) {
-              long protocol;
-
-              curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
-              curl_easy_getinfo(curl, CURLINFO_PROTOCOL, &protocol);
-
-              if((protocol == CURLPROTO_FTP || protocol == CURLPROTO_FTPS) &&
-                 response / 100 == 4)
-                /*
-                 * This is typically when the FTP server only allows a certain
-                 * amount of users and we are not one of them.  All 4xx codes
-                 * are transient.
-                 */
-                retry = RETRY_FTP;
-            }
-
-            if(retry) {
-              static const char * const m[]={
-                NULL,
-                "timeout",
-                "connection refused",
-                "HTTP error",
-                "FTP error"
-              };
-
-              warnf(config->global, "Transient problem: %s "
-                    "Will retry in %ld seconds. "
-                    "%ld retries left.\n",
-                    m[retry], retry_sleep/1000L, retry_numretries);
-
-              tool_go_sleep(retry_sleep);
-              retry_numretries--;
-              if(!config->retry_delay) {
-                retry_sleep *= 2;
-                if(retry_sleep > RETRY_SLEEP_MAX)
-                  retry_sleep = RETRY_SLEEP_MAX;
-              }
-              if(outs.bytes && outs.filename && outs.stream) {
-                int rc;
-                /* We have written data to a output file, we truncate file
-                 */
-                if(!global->mute)
-                  fprintf(global->errors, "Throwing away %"
-                          CURL_FORMAT_CURL_OFF_T " bytes\n",
-                          outs.bytes);
-                fflush(outs.stream);
-                /* truncate file at the position where we started appending */
-#ifdef HAVE_FTRUNCATE
-                if(ftruncate(fileno(outs.stream), outs.init)) {
-                  /* when truncate fails, we can't just append as then we'll
-                     create something strange, bail out */
-                  if(!global->mute)
-                    fprintf(global->errors,
-                            "failed to truncate, exiting\n");
-                  result = CURLE_WRITE_ERROR;
-                  goto quit_urls;
-                }
-                /* now seek to the end of the file, the position where we
-                   just truncated the file in a large file-safe way */
-                rc = fseek(outs.stream, 0, SEEK_END);
-#else
-                /* ftruncate is not available, so just reposition the file
-                   to the location we would have truncated it. This won't
-                   work properly with large files on 32-bit systems, but
-                   most of those will have ftruncate. */
-                rc = fseek(outs.stream, (long)outs.init, SEEK_SET);
-#endif
-                if(rc) {
-                  if(!global->mute)
-                    fprintf(global->errors,
-                            "failed seeking to end of file, exiting\n");
-                  result = CURLE_WRITE_ERROR;
-                  goto quit_urls;
-                }
-                outs.bytes = 0; /* clear for next round */
-              }
-              continue; /* curl_easy_perform loop */
-            }
-          } /* if retry_numretries */
-          else if(metalink) {
-            /* Metalink: Decide to try the next resource or
-               not. Basically, we want to try the next resource if
-               download was not successful. */
-            long response;
-            if(CURLE_OK == result) {
-              char *effective_url = NULL;
-              curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &effective_url);
-              if(effective_url &&
-                 curl_strnequal(effective_url, "http", 4)) {
-                /* This was HTTP(S) */
-                curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response);
-                if(response != 200 && response != 206) {
-                  metalink_next_res = 1;
-                  fprintf(global->errors,
-                          "Metalink: fetching (%s) from (%s) FAILED "
-                          "(HTTP status code %ld)\n",
-                          mlfile->filename, this_url, response);
-                }
-              }
-            }
-            else {
-              metalink_next_res = 1;
-              fprintf(global->errors,
-                      "Metalink: fetching (%s) from (%s) FAILED (%s)\n",
-                      mlfile->filename, this_url,
-                      (errorbuffer[0]) ?
-                      errorbuffer : curl_easy_strerror(result));
-            }
-          }
-          if(metalink && !metalink_next_res)
-            fprintf(global->errors, "Metalink: fetching (%s) from (%s) OK\n",
-                    mlfile->filename, this_url);
 
           /* In all ordinary cases, just break out of loop here */
           break; /* curl_easy_perform loop */
 
         }
-
-        if((global->progressmode == CURL_PROGRESS_BAR) &&
-           progressbar.calls)
-          /* if the custom progress bar has been displayed, we output a
-             newline here */
-          fputs("\n", progressbar.out);
-
-        if(config->writeout)
-          ourWriteOut(curl, &outs, config->writeout);
-
-        /*
-        ** Code within this loop may jump directly here to label 'show_error'
-        ** in order to display an error message for CURLcode stored in 'res'
-        ** variable and exit loop once that necessary writing and cleanup
-        ** in label 'quit_urls' has been done.
-        */
-
-        show_error:
-
-#ifdef __VMS
-        if(is_vms_shell()) {
-          /* VMS DCL shell behavior */
-          if(!global->showerror)
-            vms_show = VMSSTS_HIDE;
-        }
-        else
-#endif
-        if(config->synthetic_error) {
-          ;
-        }
-        else if(result && global->showerror) {
-          fprintf(global->errors, "curl: (%d) %s\n", result, (errorbuffer[0]) ?
-                  errorbuffer : curl_easy_strerror(result));
-          if(result == CURLE_PEER_FAILED_VERIFICATION)
-            fputs(CURL_CA_CERT_ERRORMSG, global->errors);
-        }
-
-        /* Fall through comment to 'quit_urls' label */
-
-        /*
-        ** Upon error condition and always that a message has already been
-        ** displayed, code within this loop may jump directly here to label
-        ** 'quit_urls' otherwise it should jump to 'show_error' label above.
-        **
-        ** When 'res' variable is _not_ CURLE_OK loop will exit once that
-        ** all code following 'quit_urls' has been executed. Otherwise it
-        ** will loop to the beginning from where it may exit if there are
-        ** no more urls left.
-        */
-
-        quit_urls:
-
-        /* Set file extended attributes */
-        if(!result && config->xattr && outs.fopened && outs.stream) {
-          int rc = fwrite_xattr(curl, fileno(outs.stream));
-          if(rc)
-            warnf(config->global, "Error setting extended attributes: %s\n",
-                  strerror(errno));
-        }
-
-        /* Close the file */
-        if(outs.fopened && outs.stream) {
-          int rc = fclose(outs.stream);
-          if(!result && rc) {
-            /* something went wrong in the writing process */
-            result = CURLE_WRITE_ERROR;
-            fprintf(global->errors, "(%d) Failed writing body\n", result);
-          }
-        }
-        else if(!outs.s_isreg && outs.stream) {
-          /* Dump standard stream buffered data */
-          int rc = fflush(outs.stream);
-          if(!result && rc) {
-            /* something went wrong in the writing process */
-            result = CURLE_WRITE_ERROR;
-            fprintf(global->errors, "(%d) Failed writing body\n", result);
-          }
-        }
-
-#ifdef __AMIGA__
-        if(!result && outs.s_isreg && outs.filename) {
-          /* Set the url (up to 80 chars) as comment for the file */
-          if(strlen(urlnode->url) > 78)
-            urlnode->url[79] = '\0';
-          SetComment(outs.filename, urlnode->url);
-        }
-#endif
-
-        /* File time can only be set _after_ the file has been closed */
-        if(!result && config->remote_time && outs.s_isreg && outs.filename) {
-          /* Ask libcurl if we got a remote file time */
-          curl_off_t filetime = -1;
-          curl_easy_getinfo(curl, CURLINFO_FILETIME_T, &filetime);
-          setfiletime(filetime, outs.filename, config->global->errors);
-        }
-
-#ifdef USE_METALINK
-        if(!metalink && config->use_metalink && result == CURLE_OK) {
-          int rv = parse_metalink(config, &outs, this_url);
-          if(rv == 0)
-            fprintf(config->global->errors, "Metalink: parsing (%s) OK\n",
-                    this_url);
-          else if(rv == -1)
-            fprintf(config->global->errors, "Metalink: parsing (%s) FAILED\n",
-                    this_url);
-        }
-        else if(metalink && result == CURLE_OK && !metalink_next_res) {
-          int rv = metalink_check_hash(global, mlfile, outs.filename);
-          if(rv == 0) {
-            metalink_next_res = 1;
-          }
-        }
-#endif /* USE_METALINK */
-
-        /* No more business with this output struct */
-        if(outs.alloc_filename)
-          Curl_safefree(outs.filename);
-#ifdef USE_METALINK
-        if(outs.metalink_parser)
-          metalink_parser_context_delete(outs.metalink_parser);
-#endif /* USE_METALINK */
-        memset(&outs, 0, sizeof(struct OutStruct));
-        hdrcbdata.outs = NULL;
-
-        /* Free loop-local allocated memory and close loop-local opened fd */
-
-        Curl_safefree(outfile);
-        Curl_safefree(this_url);
-
-        if(infdopen)
-          close(infd);
-
-        if(metalink) {
-          /* Should exit if error is fatal. */
-          if(is_fatal_error(result)) {
-            break;
-          }
-          if(!metalink_next_res)
-            break;
-          mlres = mlres->next;
-          if(mlres == NULL)
-            break;
-        }
-        else if(urlnum > 1) {
-          /* when url globbing, exit loop upon critical error */
-          if(is_fatal_error(result))
-            break;
-        }
-        else if(result)
-          /* when not url globbing, exit loop upon any error */
-          break;
-
       } /* loop to the next URL */
 
-      /* Free loop-local allocated memory */
-
-      Curl_safefree(uploadfile);
+      show_error:
+      quit_urls:
 
       if(urls) {
         /* Free list of remaining URLs */
@@ -1962,41 +1896,334 @@ static CURLcode operate_do(struct GlobalConfig *global,
     Curl_safefree(urlnode->infile);
     urlnode->flags = 0;
 
-    /*
-    ** Bail out upon critical errors or --fail-early
-    */
-    if(is_fatal_error(result) || (result && global->fail_early))
-      goto quit_curl;
-
   } /* for-loop through all URLs */
+  quit_curl:
+
+  /* Free function-local referenced allocated memory */
+  Curl_safefree(httpgetfields);
+
+  return result;
+}
+
+/* portable millisecond sleep */
+static void wait_ms(int ms)
+{
+#if defined(MSDOS)
+  delay(ms);
+#elif defined(WIN32)
+  Sleep(ms);
+#elif defined(HAVE_USLEEP)
+  usleep(1000 * ms);
+#else
+  struct timeval pending_tv;
+  pending_tv.tv_sec = ms / 1000;
+  pending_tv.tv_usec = (ms % 1000) * 1000;
+  (void)select(0, NULL, NULL, NULL, &pending_tv);
+#endif
+}
+
+static long all_added; /* number of easy handles currently added */
+
+static int add_parallel_transfers(struct GlobalConfig *global,
+                                  CURLM *multi)
+{
+  struct per_transfer *per;
+  CURLcode result;
+  CURLMcode mcode;
+  for(per = transfers; per && (all_added < global->parallel_max);
+      per = per->next) {
+    if(per->added)
+      /* already added */
+      continue;
+
+    result = pre_transfer(global, per);
+    if(result)
+      break;
+
+    (void)curl_easy_setopt(per->curl, CURLOPT_PRIVATE, per);
+    (void)curl_easy_setopt(per->curl, CURLOPT_XFERINFOFUNCTION, xferinfo_cb);
+    (void)curl_easy_setopt(per->curl, CURLOPT_XFERINFODATA, per);
+
+    mcode = curl_multi_add_handle(multi, per->curl);
+    if(mcode)
+      return CURLE_OUT_OF_MEMORY;
+    per->added = TRUE;
+    all_added++;
+  }
+  return CURLE_OK;
+}
+
+static CURLcode parallel_transfers(struct GlobalConfig *global,
+                                   CURLSH *share)
+{
+  CURLM *multi;
+  bool done = FALSE;
+  CURLMcode mcode = CURLM_OK;
+  CURLcode result = CURLE_OK;
+  int still_running = 1;
+  struct timeval start = tvnow();
+
+  multi = curl_multi_init();
+  if(!multi)
+    return CURLE_OUT_OF_MEMORY;
+
+  result = add_parallel_transfers(global, multi);
+  if(result)
+    return result;
+
+  while(!done && !mcode && still_running) {
+    int numfds;
+
+    mcode = curl_multi_wait(multi, NULL, 0, 1000, &numfds);
+
+    if(!mcode) {
+      if(!numfds) {
+        long sleep_ms;
+
+        /* If it returns without any filedescriptor instantly, we need to
+           avoid busy-looping during periods where it has nothing particular
+           to wait for */
+        curl_multi_timeout(multi, &sleep_ms);
+        if(sleep_ms) {
+          if(sleep_ms > 1000)
+            sleep_ms = 1000;
+          wait_ms((int)sleep_ms);
+        }
+      }
+
+      mcode = curl_multi_perform(multi, &still_running);
+    }
+
+    progress_meter(global, &start, FALSE);
+
+    if(!mcode) {
+      int rc;
+      CURLMsg *msg;
+      bool removed = FALSE;
+      do {
+        msg = curl_multi_info_read(multi, &rc);
+        if(msg) {
+          bool retry;
+          struct per_transfer *ended;
+          CURL *easy = msg->easy_handle;
+          result = msg->data.result;
+          curl_easy_getinfo(easy, CURLINFO_PRIVATE, (void *)&ended);
+          curl_multi_remove_handle(multi, easy);
+
+          result = post_transfer(global, share, ended, result, &retry);
+          if(retry)
+            continue;
+          progress_finalize(ended); /* before it goes away */
+          all_added--; /* one fewer added */
+          removed = TRUE;
+          (void)del_transfer(ended);
+        }
+      } while(msg);
+      if(removed)
+        /* one or more transfers completed, add more! */
+        (void)add_parallel_transfers(global, multi);
+    }
+  }
+
+  (void)progress_meter(global, &start, TRUE);
+
+  /* Make sure to return some kind of error if there was a multi problem */
+  if(mcode) {
+    result = (mcode == CURLM_OUT_OF_MEMORY) ? CURLE_OUT_OF_MEMORY :
+      /* The other multi errors should never happen, so return
+         something suitably generic */
+      CURLE_BAD_FUNCTION_ARGUMENT;
+  }
+
+  curl_multi_cleanup(multi);
+
+  return result;
+}
+
+static CURLcode serial_transfers(struct GlobalConfig *global,
+                                 CURLSH *share)
+{
+  CURLcode returncode = CURLE_OK;
+  CURLcode result = CURLE_OK;
+  struct per_transfer *per;
+  for(per = transfers; per;) {
+    bool retry;
+    result = pre_transfer(global, per);
+    if(result)
+      break;
+
+#ifndef CURL_DISABLE_LIBCURL_OPTION
+    if(global->libcurl) {
+      result = easysrc_perform();
+      if(result)
+        break;
+    }
+#endif
+#ifdef CURLDEBUG
+    if(global->test_event_based)
+      result = curl_easy_perform_ev(per->curl);
+    else
+#endif
+      result = curl_easy_perform(per->curl);
+
+    /* store the result of the actual transfer */
+    returncode = result;
+
+    result = post_transfer(global, share, per, result, &retry);
+    if(retry)
+      continue;
+    per = del_transfer(per);
+
+    /* Bail out upon critical errors or --fail-early */
+    if(result || is_fatal_error(returncode) ||
+       (returncode && global->fail_early))
+      break;
+  }
+  if(returncode)
+    /* returncode errors have priority */
+    result = returncode;
+  return result;
+}
+
+static CURLcode operate_do(struct GlobalConfig *global,
+                           struct OperationConfig *config,
+                           CURLSH *share)
+{
+  CURLcode result = CURLE_OK;
+  bool capath_from_env;
 
   /*
-  ** Nested loops end here.
+  ** Beyond this point no return'ing from this function allowed.
+  ** Jump to label 'quit_curl' in order to abandon this function
+  ** from outside of nested loops further down below.
   */
 
-  quit_curl:
+  /* Check we have a url */
+  if(!config->url_list || !config->url_list->url) {
+    helpf(global->errors, "no URL specified!\n");
+    return CURLE_FAILED_INIT;
+  }
 
-  /* Reset the global config variables */
-  global->noprogress = orig_noprogress;
-  global->isatty = orig_isatty;
+  /* On WIN32 we can't set the path to curl-ca-bundle.crt
+   * at compile time. So we look here for the file in two ways:
+   * 1: look at the environment variable CURL_CA_BUNDLE for a path
+   * 2: if #1 isn't found, use the windows API function SearchPath()
+   *    to find it along the app's path (includes app's dir and CWD)
+   *
+   * We support the environment variable thing for non-Windows platforms
+   * too. Just for the sake of it.
+   */
+  capath_from_env = false;
+  if(!config->cacert &&
+     !config->capath &&
+     !config->insecure_ok) {
+    CURL *curltls = curl_easy_init();
+    struct curl_tlssessioninfo *tls_backend_info = NULL;
 
-  /* Free function-local referenced allocated memory */
-  Curl_safefree(httpgetfields);
+    /* With the addition of CAINFO support for Schannel, this search could find
+     * a certificate bundle that was previously ignored. To maintain backward
+     * compatibility, only perform this search if not using Schannel.
+     */
+    result = curl_easy_getinfo(curltls, CURLINFO_TLS_SSL_PTR,
+                               &tls_backend_info);
+    if(result)
+      return result;
 
-  /* Free list of given URLs */
-  clean_getout(config);
+    /* Set the CA cert locations specified in the environment. For Windows if
+     * no environment-specified filename is found then check for CA bundle
+     * default filename curl-ca-bundle.crt in the user's PATH.
+     *
+     * If Schannel is the selected SSL backend then these locations are
+     * ignored. We allow setting CA location for schannel only when explicitly
+     * specified by the user via CURLOPT_CAINFO / --cacert.
+     */
+    if(tls_backend_info->backend != CURLSSLBACKEND_SCHANNEL) {
+      char *env;
+      env = curlx_getenv("CURL_CA_BUNDLE");
+      if(env) {
+        config->cacert = strdup(env);
+        if(!config->cacert) {
+          curl_free(env);
+          helpf(global->errors, "out of memory\n");
+          return CURLE_OUT_OF_MEMORY;
+        }
+      }
+      else {
+        env = curlx_getenv("SSL_CERT_DIR");
+        if(env) {
+          config->capath = strdup(env);
+          if(!config->capath) {
+            curl_free(env);
+            helpf(global->errors, "out of memory\n");
+            return CURLE_OUT_OF_MEMORY;
+          }
+          capath_from_env = true;
+        }
+        else {
+          env = curlx_getenv("SSL_CERT_FILE");
+          if(env) {
+            config->cacert = strdup(env);
+            if(!config->cacert) {
+              curl_free(env);
+              helpf(global->errors, "out of memory\n");
+              return CURLE_OUT_OF_MEMORY;
+            }
+          }
+        }
+      }
+
+      if(env)
+        curl_free(env);
+#ifdef WIN32
+      else {
+        result = FindWin32CACert(config, tls_backend_info->backend,
+                                 "curl-ca-bundle.crt");
+      }
+#endif
+    }
+    curl_easy_cleanup(curltls);
+  }
 
-  hdrcbdata.heads = NULL;
+  if(!result)
+    /* loop through the list of given URLs */
+    result = create_transfers(global, config, share, capath_from_env);
 
-  /* Close function-local opened file descriptors */
-  if(heads.fopened && heads.stream)
-    fclose(heads.stream);
+  return result;
+}
+
+static CURLcode operate_transfers(struct GlobalConfig *global,
+                                  CURLSH *share,
+                                  CURLcode result)
+{
+  /* Save the values of noprogress and isatty to restore them later on */
+  bool orig_noprogress = global->noprogress;
+  bool orig_isatty = global->isatty;
+  struct per_transfer *per;
+
+  /* Time to actually do the transfers */
+  if(!result) {
+    if(global->parallel)
+      result = parallel_transfers(global, share);
+    else
+      result = serial_transfers(global, share);
+  }
 
-  if(heads.alloc_filename)
-    Curl_safefree(heads.filename);
+  /* cleanup if there are any left */
+  for(per = transfers; per;) {
+    bool retry;
+    (void)post_transfer(global, share, per, result, &retry);
+    /* Free list of given URLs */
+    clean_getout(per->config);
+
+    /* Release metalink related resources here */
+    clean_metalink(per->config);
+    per = del_transfer(per);
+  }
+
+  /* Reset the global config variables */
+  global->noprogress = orig_noprogress;
+  global->isatty = orig_isatty;
 
-  /* Release metalink related resources here */
-  clean_metalink(config);
 
   return result;
 }
@@ -2040,7 +2267,7 @@ CURLcode operate(struct GlobalConfig *config, int argc, 
argv_item_t argv[])
         tool_version_info();
       /* Check if we were asked to list the SSL engines */
       else if(res == PARAM_ENGINES_REQUESTED)
-        tool_list_engines(config->easy);
+        tool_list_engines();
       else if(res == PARAM_LIBCURL_UNSUPPORTED_PROTOCOL)
         result = CURLE_UNSUPPORTED_PROTOCOL;
       else
@@ -2058,27 +2285,39 @@ CURLcode operate(struct GlobalConfig *config, int argc, 
argv_item_t argv[])
       if(!result) {
         size_t count = 0;
         struct OperationConfig *operation = config->first;
+        CURLSH *share = curl_share_init();
+        if(!share) {
+          /* Cleanup the libcurl source output */
+          easysrc_cleanup();
+          return CURLE_OUT_OF_MEMORY;
+        }
+
+        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_COOKIE);
+        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
+        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);
+        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_CONNECT);
+        curl_share_setopt(share, CURLSHOPT_SHARE, CURL_LOCK_DATA_PSL);
 
         /* Get the required arguments for each operation */
-        while(!result && operation) {
+        do {
           result = get_args(operation, count++);
 
           operation = operation->next;
-        }
+        } while(!result && operation);
 
         /* Set the current operation pointer */
         config->current = config->first;
 
-        /* Perform each operation */
+        /* Setup all transfers */
         while(!result && config->current) {
-          result = operate_do(config, config->current);
-
+          result = operate_do(config, config->current, share);
           config->current = config->current->next;
-
-          if(config->current && config->current->easy)
-            curl_easy_reset(config->current->easy);
         }
 
+        /* now run! */
+        result = operate_transfers(config, share, result);
+
+        curl_share_cleanup(share);
 #ifndef CURL_DISABLE_LIBCURL_OPTION
         if(config->libcurl) {
           /* Cleanup the libcurl source output */
diff --git a/src/tool_operate.h b/src/tool_operate.h
index b84388bc5..60257fc60 100644
--- a/src/tool_operate.h
+++ b/src/tool_operate.h
@@ -7,7 +7,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2014, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -22,7 +22,53 @@
  *
  ***************************************************************************/
 #include "tool_setup.h"
+#include "tool_cb_hdr.h"
+#include "tool_cb_prg.h"
+#include "tool_sdecls.h"
+
+struct per_transfer {
+  /* double linked */
+  struct per_transfer *next;
+  struct per_transfer *prev;
+  struct OperationConfig *config; /* for this transfer */
+  CURL *curl;
+  long retry_numretries;
+  long retry_sleep_default;
+  long retry_sleep;
+  struct timeval retrystart;
+  bool metalink; /* nonzero for metalink download. */
+  bool metalink_next_res;
+  metalinkfile *mlfile;
+  metalink_resource *mlres;
+  char *this_url;
+  char *outfile;
+  bool infdopen; /* TRUE if infd needs closing */
+  int infd;
+  struct ProgressData progressbar;
+  struct OutStruct outs;
+  struct OutStruct heads;
+  struct InStruct input;
+  struct HdrCbData hdrcbdata;
+  char errorbuffer[CURL_ERROR_SIZE];
+
+  bool added; /* set TRUE when added to the multi handle */
+
+  /* for parallel progress bar */
+  curl_off_t dltotal;
+  curl_off_t dlnow;
+  curl_off_t ultotal;
+  curl_off_t ulnow;
+  bool dltotal_added; /* if the total has been added from this */
+  bool ultotal_added;
+
+  /* NULL or malloced */
+  char *separator_err;
+  char *separator;
+  char *uploadfile;
+};
 
 CURLcode operate(struct GlobalConfig *config, int argc, argv_item_t argv[]);
 
+extern struct per_transfer *transfers; /* first node */
+
 #endif /* HEADER_CURL_TOOL_OPERATE_H */
diff --git a/src/tool_operhlp.c b/src/tool_operhlp.c
index c3a826278..f3fcc699f 100644
--- a/src/tool_operhlp.c
+++ b/src/tool_operhlp.c
@@ -5,7 +5,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2018, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -71,10 +71,13 @@ bool stdin_upload(const char *uploadfile)
  * Adds the file name to the URL if it doesn't already have one.
  * url will be freed before return if the returned pointer is different
  */
-char *add_file_name_to_url(CURL *curl, char *url, const char *filename)
+char *add_file_name_to_url(char *url, const char *filename)
 {
   /* If no file name part is given in the URL, we add this file name */
   char *ptr = strstr(url, "://");
+  CURL *curl = curl_easy_init(); /* for url escaping */
+  if(!curl)
+    return NULL; /* error! */
   if(ptr)
     ptr += 3;
   else
@@ -120,6 +123,7 @@ char *add_file_name_to_url(CURL *curl, char *url, const 
char *filename)
     else
       Curl_safefree(url);
   }
+  curl_easy_cleanup(curl);
   return url;
 }
 
diff --git a/src/tool_operhlp.h b/src/tool_operhlp.h
index 90c854929..1e2f02741 100644
--- a/src/tool_operhlp.h
+++ b/src/tool_operhlp.h
@@ -7,7 +7,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2014, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -31,7 +31,7 @@ bool output_expected(const char *url, const char *uploadfile);
 
 bool stdin_upload(const char *uploadfile);
 
-char *add_file_name_to_url(CURL *curl, char *url, const char *filename);
+char *add_file_name_to_url(char *url, const char *filename);
 
 CURLcode get_url_file_name(char **filename, const char *url);
 
diff --git a/src/tool_parsecfg.c b/src/tool_parsecfg.c
index 36c7bccf0..8647cafed 100644
--- a/src/tool_parsecfg.c
+++ b/src/tool_parsecfg.c
@@ -230,9 +230,6 @@ int parseconfig(const char *filename, struct GlobalConfig 
*global)
             /* Initialise the newly created config */
             config_init(operation->next);
 
-            /* Copy the easy handle */
-            operation->next->easy = global->easy;
-
             /* Set the global config pointer */
             operation->next->global = global;
 
diff --git a/src/tool_progress.c b/src/tool_progress.c
new file mode 100644
index 000000000..a2667f38e
--- /dev/null
+++ b/src/tool_progress.c
@@ -0,0 +1,314 @@
+/***************************************************************************
+ *                                  _   _ ____  _
+ *  Project                     ___| | | |  _ \| |
+ *                             / __| | | | |_) | |
+ *                            | (__| |_| |  _ <| |___
+ *                             \___|\___/|_| \_\_____|
+ *
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
+ *
+ * This software is licensed as described in the file COPYING, which
+ * you should have received as part of this distribution. The terms
+ * are also available at https://curl.haxx.se/docs/copyright.html.
+ *
+ * You may opt to use, copy, modify, merge, publish, distribute and/or sell
+ * copies of the Software, and permit persons to whom the Software is
+ * furnished to do so, under the terms of the COPYING file.
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+ * KIND, either express or implied.
+ *
+ ***************************************************************************/
+#include "tool_setup.h"
+#include "tool_operate.h"
+#include "tool_progress.h"
+#include "tool_util.h"
+
+#define ENABLE_CURLX_PRINTF
+/* use our own printf() functions */
+#include "curlx.h"
+
+/* The point of this function would be to return a string of the input data,
+   but never longer than 5 columns (+ one zero byte).
+   Add suffix k, M, G when suitable... */
+/* 'max5' must point to at least 6 bytes of storage (5 columns plus the
+   zero byte -- msnprintf is given size 6 below). Returns 'max5'. */
+static char *max5data(curl_off_t bytes, char *max5)
+{
+#define ONE_KILOBYTE  CURL_OFF_T_C(1024)
+#define ONE_MEGABYTE (CURL_OFF_T_C(1024) * ONE_KILOBYTE)
+#define ONE_GIGABYTE (CURL_OFF_T_C(1024) * ONE_MEGABYTE)
+#define ONE_TERABYTE (CURL_OFF_T_C(1024) * ONE_GIGABYTE)
+#define ONE_PETABYTE (CURL_OFF_T_C(1024) * ONE_TERABYTE)
+
+  if(bytes < CURL_OFF_T_C(100000))
+    msnprintf(max5, 6, "%5" CURL_FORMAT_CURL_OFF_T, bytes);
+
+  else if(bytes < CURL_OFF_T_C(10000) * ONE_KILOBYTE)
+    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "k", bytes/ONE_KILOBYTE);
+
+  else if(bytes < CURL_OFF_T_C(100) * ONE_MEGABYTE)
+    /* 'XX.XM' is good as long as we're less than 100 megs */
+    msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
+              CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE,
+              (bytes%ONE_MEGABYTE) / (ONE_MEGABYTE/CURL_OFF_T_C(10)) );
+
+#if (CURL_SIZEOF_CURL_OFF_T > 4)
+
+  else if(bytes < CURL_OFF_T_C(10000) * ONE_MEGABYTE)
+    /* 'XXXXM' is good until we're at 10000MB or above */
+    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE);
+
+  else if(bytes < CURL_OFF_T_C(100) * ONE_GIGABYTE)
+    /* 10000 MB - 100 GB, we show it as XX.XG */
+    msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
+              CURL_FORMAT_CURL_OFF_T "G", bytes/ONE_GIGABYTE,
+              (bytes%ONE_GIGABYTE) / (ONE_GIGABYTE/CURL_OFF_T_C(10)) );
+
+  else if(bytes < CURL_OFF_T_C(10000) * ONE_GIGABYTE)
+    /* up to 10000GB, display without decimal: XXXXG */
+    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "G", bytes/ONE_GIGABYTE);
+
+  else if(bytes < CURL_OFF_T_C(10000) * ONE_TERABYTE)
+    /* up to 10000TB, display without decimal: XXXXT */
+    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "T", bytes/ONE_TERABYTE);
+
+  else
+    /* up to 10000PB, display without decimal: XXXXP */
+    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "P", bytes/ONE_PETABYTE);
+
+    /* 16384 petabytes (16 exabytes) is the maximum a 64 bit unsigned number
+       can hold, but our data type is signed so 8192PB will be the maximum. */
+
+#else
+
+  else
+    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE);
+
+#endif
+
+  return max5;
+}
+
+/* Transfer progress callback. The signature matches curl's
+   CURLOPT_XFERINFOFUNCTION prototype -- presumably installed as such;
+   confirm at the setopt call site. 'clientp' is the per_transfer this
+   transfer belongs to; the callback only stores the latest byte
+   counters, which the shared progress meter reads and renders later.
+   Always returns 0 so the transfer keeps going. */
+int xferinfo_cb(void *clientp,
+                curl_off_t dltotal,
+                curl_off_t dlnow,
+                curl_off_t ultotal,
+                curl_off_t ulnow)
+{
+  struct per_transfer *per = clientp;
+  per->dltotal = dltotal;
+  per->dlnow = dlnow;
+  per->ultotal = ultotal;
+  per->ulnow = ulnow;
+  return 0;
+}
+
+/* Provide a string that is 2 + 1 + 2 + 1 + 2 = 8 letters long (plus the zero
+   byte) */
+/* 'r' must point to at least 9 bytes of storage (msnprintf is given
+   size 9 below). A non-positive 'seconds' renders as the unknown-time
+   marker "--:--:--". */
+static void time2str(char *r, curl_off_t seconds)
+{
+  curl_off_t h;
+  if(seconds <= 0) {
+    strcpy(r, "--:--:--");
+    return;
+  }
+  h = seconds / CURL_OFF_T_C(3600);
+  if(h <= CURL_OFF_T_C(99)) {
+    /* HH:MM:SS as long as it fits in two hour digits */
+    curl_off_t m = (seconds - (h*CURL_OFF_T_C(3600))) / CURL_OFF_T_C(60);
+    curl_off_t s = (seconds - (h*CURL_OFF_T_C(3600))) - (m*CURL_OFF_T_C(60));
+    msnprintf(r, 9, "%2" CURL_FORMAT_CURL_OFF_T ":%02" CURL_FORMAT_CURL_OFF_T
+              ":%02" CURL_FORMAT_CURL_OFF_T, h, m, s);
+  }
+  else {
+    /* this equals to more than 99 hours, switch to a more suitable output
+       format to fit within the limits. */
+    curl_off_t d = seconds / CURL_OFF_T_C(86400);
+    h = (seconds - (d*CURL_OFF_T_C(86400))) / CURL_OFF_T_C(3600);
+    if(d <= CURL_OFF_T_C(999))
+      msnprintf(r, 9, "%3" CURL_FORMAT_CURL_OFF_T
+                "d %02" CURL_FORMAT_CURL_OFF_T "h", d, h);
+    else
+      msnprintf(r, 9, "%7" CURL_FORMAT_CURL_OFF_T "d", d);
+  }
+}
+
+/* Expected byte totals aggregated across all transfers; each transfer's
+   total is folded in only once, guarded by the per_transfer
+   dltotal_added/ultotal_added flags in progress_meter(). */
+static curl_off_t all_dltotal = 0;
+static curl_off_t all_ultotal = 0;
+/* bytes already moved by transfers that have completed; bumped by
+   progress_finalize() before a per_transfer goes away */
+static curl_off_t all_dlalready = 0;
+static curl_off_t all_ulalready = 0;
+
+curl_off_t all_xfers = 0;   /* current total */
+
+/* one download/upload byte-count sample with its timestamp */
+struct speedcount {
+  curl_off_t dl;
+  curl_off_t ul;
+  struct timeval stamp;
+};
+#define SPEEDCNT 10
+/* ring buffer holding the most recent SPEEDCNT samples; used by
+   progress_meter() to compute a moving-average transfer speed */
+static unsigned int speedindex;
+static bool indexwrapped;
+static struct speedcount speedstore[SPEEDCNT];
+
+/*
+  |DL% UL%  Dled  Uled  Xfers  Live   Qd Total     Current  Left    Speed
+  |  6 --   9.9G     0     2     2     0  0:00:40  0:00:02  0:00:37 4087M
+*/
+/* Render one line of the combined (all-transfers) progress meter to
+   global->errors. Updates are throttled to one per 500 ms unless
+   'final' is TRUE. Returns TRUE when a line was printed, FALSE when it
+   was skipped (throttled or --silent). NOTE: uses function-level
+   statics ('stamp', 'header') plus the file-scope accumulators, so the
+   meter is single-instance and not thread-safe. */
+bool progress_meter(struct GlobalConfig *global,
+                    struct timeval *start,
+                    bool final)
+{
+  static struct timeval stamp;
+  static bool header = FALSE;
+  struct timeval now;
+  long diff;
+
+  if(global->noprogress)
+    return FALSE;
+
+  now = tvnow();
+  diff = tvdiff(now, stamp);
+
+  /* print the column header once, the first time through */
+  if(!header) {
+    header = TRUE;
+    fputs("DL% UL%  Dled  Uled  Xfers  Live   Qd "
+          "Total     Current  Left    Speed\n",
+          global->errors);
+  }
+  if(final || (diff > 500)) {
+    char time_left[10];
+    char time_total[10];
+    char time_spent[10];
+    char buffer[3][6];
+    curl_off_t spent = tvdiff(now, *start)/1000;
+    char dlpercen[4]="--";
+    char ulpercen[4]="--";
+    struct per_transfer *per;
+    curl_off_t all_dlnow = 0;
+    curl_off_t all_ulnow = 0;
+    bool dlknown = TRUE;
+    bool ulknown = TRUE;
+    curl_off_t all_running = 0; /* in progress */
+    curl_off_t all_queued = 0;  /* pending */
+    curl_off_t speed = 0;
+    unsigned int i;
+    stamp = now;
+
+    /* first add the amounts of the already completed transfers */
+    all_dlnow += all_dlalready;
+    all_ulnow += all_ulalready;
+
+    /* walk the list of live transfers: sum their current counters, fold
+       in each transfer's expected total exactly once, and count how
+       many are running vs still queued */
+    for(per = transfers; per; per = per->next) {
+      all_dlnow += per->dlnow;
+      all_ulnow += per->ulnow;
+      if(!per->dltotal)
+        dlknown = FALSE;
+      else if(!per->dltotal_added) {
+        /* only add this amount once */
+        all_dltotal += per->dltotal;
+        per->dltotal_added = TRUE;
+      }
+      if(!per->ultotal)
+        ulknown = FALSE;
+      else if(!per->ultotal_added) {
+        /* only add this amount once */
+        all_ultotal += per->ultotal;
+        per->ultotal_added = TRUE;
+      }
+      if(!per->added)
+        all_queued++;
+      else
+        all_running++;
+    }
+    /* NOTE(review): "%3d" is handed a curl_off_t operand
+       (all_dlnow * 100 / all_dltotal); where curl_off_t is wider than
+       int this is a format/argument mismatch (undefined behavior in
+       varargs) -- cast the expression to int or use
+       CURL_FORMAT_CURL_OFF_T. Same applies to the upload percentage
+       just below. */
+    if(dlknown && all_dltotal)
+      /* TODO: handle integer overflow */
+      msnprintf(dlpercen, sizeof(dlpercen), "%3d",
+                all_dlnow * 100 / all_dltotal);
+    if(ulknown && all_ultotal)
+      /* TODO: handle integer overflow */
+      msnprintf(ulpercen, sizeof(ulpercen), "%3d",
+                all_ulnow * 100 / all_ultotal);
+
+    /* get the transfer speed, the higher of the two */
+
+    /* store the current sample in the ring buffer */
+    i = speedindex;
+    speedstore[i].dl = all_dlnow;
+    speedstore[i].ul = all_ulnow;
+    speedstore[i].stamp = now;
+    if(++speedindex >= SPEEDCNT) {
+      indexwrapped = TRUE;
+      speedindex = 0;
+    }
+
+    {
+      long deltams;
+      curl_off_t dl;
+      curl_off_t ul;
+      curl_off_t dls;
+      curl_off_t uls;
+      if(indexwrapped) {
+        /* 'speedindex' is the oldest stored data */
+        deltams = tvdiff(now, speedstore[speedindex].stamp);
+        dl = all_dlnow - speedstore[speedindex].dl;
+        ul = all_ulnow - speedstore[speedindex].ul;
+      }
+      else {
+        /* since the beginning */
+        deltams = tvdiff(now, *start);
+        dl = all_dlnow;
+        ul = all_ulnow;
+      }
+      /* NOTE(review): if deltams is 0 (final call immediately after
+         start) this divides by 0.0 -- presumably benign in practice,
+         but worth guarding. */
+      dls = (curl_off_t)((double)dl / ((double)deltams/1000.0));
+      uls = (curl_off_t)((double)ul / ((double)deltams/1000.0));
+      speed = dls > uls ? dls : uls;
+    }
+
+
+    /* estimated total and remaining time, only when the download total
+       and a nonzero speed are known */
+    if(dlknown && speed) {
+      curl_off_t est = all_dltotal / speed;
+      curl_off_t left = (all_dltotal - all_dlnow) / speed;
+      time2str(time_left, left);
+      time2str(time_total, est);
+    }
+    else {
+      time2str(time_left, 0);
+      time2str(time_total, 0);
+    }
+    time2str(time_spent, spent);
+
+    /* the trailing "%5s" prints a newline on the final update and
+       otherwise five spaces -- presumably to wipe residue left from a
+       previously longer line */
+    fprintf(global->errors,
+            "\r"
+            "%-3s " /* percent downloaded */
+            "%-3s " /* percent uploaded */
+            "%s " /* Dled */
+            "%s " /* Uled */
+            "%5" CURL_FORMAT_CURL_OFF_T " " /* Xfers */
+            "%5" CURL_FORMAT_CURL_OFF_T " " /* Live */
+            "%5" CURL_FORMAT_CURL_OFF_T " " /* Queued */
+            "%s "  /* Total time */
+            "%s "  /* Current time */
+            "%s "  /* Time left */
+            "%s "  /* Speed */
+            "%5s" /* final newline */,
+
+            dlpercen,  /* 3 letters */
+            ulpercen,  /* 3 letters */
+            max5data(all_dlnow, buffer[0]),
+            max5data(all_ulnow, buffer[1]),
+            all_xfers,
+            all_running,
+            all_queued,
+            time_total,
+            time_spent,
+            time_left,
+            max5data(speed, buffer[2]), /* speed */
+            final ? "\n" :"");
+    return TRUE;
+  }
+  return FALSE;
+}
+
+/* Fold a finished transfer's final byte counters into the
+   "already completed" accumulators so the meter keeps counting them
+   after this per_transfer is gone. Call before freeing 'per'. */
+void progress_finalize(struct per_transfer *per)
+{
+  /* get the numbers before this transfer goes away */
+  all_dlalready += per->dlnow;
+  all_ulalready += per->ulnow;
+}
diff --git a/src/tool_main.h b/src/tool_progress.h
similarity index 63%
copy from src/tool_main.h
copy to src/tool_progress.h
index 868818816..34b609816 100644
--- a/src/tool_main.h
+++ b/src/tool_progress.h
@@ -1,5 +1,5 @@
-#ifndef HEADER_CURL_TOOL_MAIN_H
-#define HEADER_CURL_TOOL_MAIN_H
+#ifndef HEADER_CURL_TOOL_PROGRESS_H
+#define HEADER_CURL_TOOL_PROGRESS_H
 /***************************************************************************
  *                                  _   _ ____  _
  *  Project                     ___| | | |  _ \| |
@@ -7,7 +7,7 @@
  *                            | (__| |_| |  _ <| |___
  *                             \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2012, Daniel Stenberg, <address@hidden>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <address@hidden>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -23,21 +23,17 @@
  ***************************************************************************/
 #include "tool_setup.h"
 
-#define DEFAULT_MAXREDIRS  50L
+int xferinfo_cb(void *clientp,
+                curl_off_t dltotal,
+                curl_off_t dlnow,
+                curl_off_t ultotal,
+                curl_off_t ulnow);
 
-#define RETRY_SLEEP_DEFAULT 1000L   /* ms */
-#define RETRY_SLEEP_MAX     600000L /* ms == 10 minutes */
+bool progress_meter(struct GlobalConfig *global,
+                    struct timeval *start,
+                    bool final);
+void progress_finalize(struct per_transfer *per);
 
-#ifndef STDIN_FILENO
-#  define STDIN_FILENO  fileno(stdin)
-#endif
+extern curl_off_t all_xfers;   /* total number */
 
-#ifndef STDOUT_FILENO
-#  define STDOUT_FILENO  fileno(stdout)
-#endif
-
-#ifndef STDERR_FILENO
-#  define STDERR_FILENO  fileno(stderr)
-#endif
-
-#endif /* HEADER_CURL_TOOL_MAIN_H */
+#endif /* HEADER_CURL_TOOL_PROGRESS_H */
diff --git a/tests/data/test1002 b/tests/data/test1002
index d12046e5e..c20995d90 100644
--- a/tests/data/test1002
+++ b/tests/data/test1002
@@ -103,6 +103,14 @@ Expect: 100-continue
 st
 GET http://%HOSTIP:%HTTPPORT/1002.upload2 HTTP/1.1
 Host: %HOSTIP:%HTTPPORT
+Content-Range: bytes 2-4/5
+User-Agent: curl/7.16.1
+Accept: */*
+Proxy-Connection: Keep-Alive
+Content-Length: 0
+
+GET http://%HOSTIP:%HTTPPORT/1002.upload2 HTTP/1.1
+Host: %HOSTIP:%HTTPPORT
 Authorization: Digest username="auser", realm="testrealm", nonce="1053604144", 
uri="/1002.upload2", response="d711f0d2042786d930de635ba0d1a1d0"
 Content-Range: bytes 2-4/5
 User-Agent: curl/7.16.1
diff --git a/tests/data/test1291 b/tests/data/test1291
index 3f1575184..a2e505fc9 100644
--- a/tests/data/test1291
+++ b/tests/data/test1291
@@ -21,7 +21,7 @@ HTTP PUT
 none
 </server>
 <name>
-Attempt to upload 100K files but fail immediately
+Attempt to upload 1000 files but fail immediately
 </name>
 <command>
 -K log/cmd1291 --fail-early
@@ -31,7 +31,7 @@ XXXXXXXx
 </file>
 # generate the config file
 <precheck>
-perl -e 'for(1 .. 100000) { 
printf("upload-file=log/upload-this\nurl=htttttp://non-existing-host.haxx.se/upload/1291\n",
 $_);}' > log/cmd1291;
+perl -e 'for(1 .. 1000) { 
printf("upload-file=log/upload-this\nurl=htttttp://non-existing-host.haxx.se/upload/1291\n",
 $_);}' > log/cmd1291;
 </precheck>
 </client>
 
@@ -40,11 +40,5 @@ perl -e 'for(1 .. 100000) { 
printf("upload-file=log/upload-this\nurl=htttttp://n
 <errorcode>
 1
 </errorcode>
-
-# we disable valgrind here since it takes 40+ seconds even on a fairly snappy
-# machine
-<valgrind>
-disable
-</valgrind>
 </verify>
 </testcase>
diff --git a/tests/data/test1406 b/tests/data/test1406
index ab835d3cb..8803c846e 100644
--- a/tests/data/test1406
+++ b/tests/data/test1406
@@ -76,13 +76,13 @@ int main(int argc, char *argv[])
 
   hnd = curl_easy_init();
   curl_easy_setopt(hnd, CURLOPT_BUFFERSIZE, 102400L);
-  curl_easy_setopt(hnd, CURLOPT_INFILESIZE_LARGE, (curl_off_t)38);
   curl_easy_setopt(hnd, CURLOPT_URL, "smtp://%HOSTIP:%SMTPPORT/1406");
   curl_easy_setopt(hnd, CURLOPT_UPLOAD, 1L);
   curl_easy_setopt(hnd, CURLOPT_VERBOSE, 1L);
   curl_easy_setopt(hnd, CURLOPT_TCP_KEEPALIVE, 1L);
   curl_easy_setopt(hnd, CURLOPT_MAIL_FROM, "address@hidden");
   curl_easy_setopt(hnd, CURLOPT_MAIL_RCPT, slist1);
+  curl_easy_setopt(hnd, CURLOPT_INFILESIZE_LARGE, (curl_off_t)38);
 
   /* Here is a list of options the curl code used that cannot get generated
      as source easily. You may select to either not use them or implement
diff --git a/tests/data/test1412 b/tests/data/test1412
index ae63290e9..36d3d1d93 100644
--- a/tests/data/test1412
+++ b/tests/data/test1412
@@ -25,6 +25,19 @@ Connection: close
 This is not the real page
 </data>
 
+# The second URL will get this response
+<data1>
+HTTP/1.1 401 Authorization Required swsclose
+Server: Apache/1.3.27 (Darwin) PHP/4.1.2
+WWW-Authenticate: Blackmagic realm="gimme all yer s3cr3ts"
+WWW-Authenticate: Basic realm="gimme all yer s3cr3ts"
+WWW-Authenticate: Digest realm="gimme all yer s3cr3ts", nonce="11223344"
+Content-Type: text/html; charset=iso-8859-1
+Connection: close
+
+This is not the real page
+</data1>
+
 # This is supposed to be returned when the server gets a
 # Authorization: Digest line passed-in from the client
 <data1000>
@@ -109,6 +122,11 @@ Accept: */*
 
 GET /14120001 HTTP/1.1
 Host: %HOSTIP:%HTTPPORT
+User-Agent: curl/7.10.5 (i686-pc-linux-gnu) libcurl/7.10.5 OpenSSL/0.9.7a ipv6 
zlib/1.1.3
+Accept: */*
+
+GET /14120001 HTTP/1.1
+Host: %HOSTIP:%HTTPPORT
 Authorization: Digest username="testuser", realm="gimme all yer s3cr3ts", 
nonce="11223344", uri="/14120001", response="0085df91870374c8bf4e94415e7fbf8e"
 User-Agent: curl/7.10.5 (i686-pc-linux-gnu) libcurl/7.10.5 OpenSSL/0.9.7a ipv6 
zlib/1.1.3
 Accept: */*
diff --git a/tests/data/test1418 b/tests/data/test1418
index b3a2f23b2..c137b1c59 100644
--- a/tests/data/test1418
+++ b/tests/data/test1418
@@ -22,6 +22,15 @@ WWW-Authenticate: Basic
 Please auth with me
 </data>
 
+<data3>
+HTTP/1.1 401 Authentication please!
+Content-Length: 20
+WWW-Authenticate: Digest realm="loonie", nonce="314156592"
+WWW-Authenticate: Basic
+
+Please auth with me
+</data3>
+
 # This is supposed to be returned when the server gets the second
 # Authorization: NTLM line passed-in from the client
 <data1000>
@@ -99,6 +108,10 @@ Accept: */*
 
 GET /14180003 HTTP/1.1
 Host: %HOSTIP:%HTTPPORT
+Accept: */*
+
+GET /14180003 HTTP/1.1
+Host: %HOSTIP:%HTTPPORT
 Authorization: Digest username="testuser", realm="loonie", nonce="314156592", 
uri="/14180003", response="1c6390a67bac3283a9b023402f3b3540"
 Accept: */*
 
diff --git a/tests/data/test153 b/tests/data/test153
index f679de4ea..77f7adb01 100644
--- a/tests/data/test153
+++ b/tests/data/test153
@@ -9,7 +9,7 @@ HTTP Digest auth
 
 # Server-side
 <reply>
-# reply back and ask for Digest auth
+# First reply back and ask for Digest auth
 <data1>
 HTTP/1.1 401 Authorization Required swsclose
 Server: Apache/1.3.27 (Darwin) PHP/4.1.2
@@ -20,6 +20,17 @@ Content-Length: 26
 This is not the real page
 </data1>
 
+# second reply back
+<data2>
+HTTP/1.1 401 Authorization Required swsclose
+Server: Apache/1.3.27 (Darwin) PHP/4.1.2
+WWW-Authenticate: Digest realm="testrealm", nonce="1053604145"
+Content-Type: text/html; charset=iso-8859-1
+Content-Length: 26
+
+This is not the real page
+</data2>
+
 # This is supposed to be returned when the server gets a
 # Authorization: Digest line passed-in from the client
 <data1001>
@@ -93,6 +104,11 @@ Accept: */*
 
 GET /1530002 HTTP/1.1
 Host: %HOSTIP:%HTTPPORT
+User-Agent: curl/7.11.0-CVS (i686-pc-linux-gnu) libcurl/7.11.0-CVS 
OpenSSL/0.9.6b ipv6 zlib/1.1.4 GSS
+Accept: */*
+
+GET /1530002 HTTP/1.1
+Host: %HOSTIP:%HTTPPORT
 Authorization: Digest username="testuser", realm="testrealm", 
nonce="1053604145", uri="/1530002", response="f84511b014fdd0ba6494f42871079c32"
 User-Agent: curl/7.11.0-CVS (i686-pc-linux-gnu) libcurl/7.11.0-CVS 
OpenSSL/0.9.6b ipv6 zlib/1.1.4 GSS
 Accept: */*
@@ -117,6 +133,12 @@ Content-Type: text/html; charset=iso-8859-1
 Content-Length: 23
 
 This IS the real page!
+HTTP/1.1 401 Authorization Required swsclose
+Server: Apache/1.3.27 (Darwin) PHP/4.1.2
+WWW-Authenticate: Digest realm="testrealm", nonce="1053604145"
+Content-Type: text/html; charset=iso-8859-1
+Content-Length: 26
+
 HTTP/1.1 401 Authorization re-negotiation please swsbounce
 Server: Apache/1.3.27 (Darwin) PHP/4.1.2
 WWW-Authenticate: Digest realm="testrealm", algorithm=MD5, nonce="999999", 
stale=true, qop="auth"
diff --git a/tests/data/test2006 b/tests/data/test2006
index 3acbdaee2..4d08e0aad 100644
--- a/tests/data/test2006
+++ b/tests/data/test2006
@@ -86,10 +86,6 @@ Accept: */*
 Some data delivered from an HTTP resource
 </file1>
 <file2 name="log/heads2006">
-Content-Length: 496
-Accept-ranges: bytes
-
-
 HTTP/1.1 200 OK
 Date: Thu, 21 Jun 2012 14:49:01 GMT
 Server: test-server/fake
diff --git a/tests/data/test2007 b/tests/data/test2007
index b169c4906..bb4d5cde9 100644
--- a/tests/data/test2007
+++ b/tests/data/test2007
@@ -90,10 +90,6 @@ Something delivered from an HTTP resource
 s/Last-Modified:.*//
 </stripfile2>
 <file2 name="log/heads2007">
-Content-Length: 496
-Accept-ranges: bytes
-
-
 HTTP/1.1 200 OK
 Date: Thu, 21 Jun 2012 14:50:02 GMT
 Server: test-server/fake
diff --git a/tests/data/test2008 b/tests/data/test2008
index 012f221c4..d6bbf6b4b 100644
--- a/tests/data/test2008
+++ b/tests/data/test2008
@@ -82,10 +82,6 @@ Some stuff delivered from an HTTP resource
 s/Last-Modified:.*//
 </stripfile2>
 <file2 name="log/heads2008">
-Content-Length: 496
-Accept-ranges: bytes
-
-
 HTTP/1.1 200 OK
 Date: Thu, 21 Jun 2012 15:23:48 GMT
 Server: test-server/fake
diff --git a/tests/data/test2009 b/tests/data/test2009
index b0e5c6c66..1a9335851 100644
--- a/tests/data/test2009
+++ b/tests/data/test2009
@@ -83,10 +83,6 @@ Some contents delivered from an HTTP resource
 s/Last-Modified:.*//
 </stripfile2>
 <file2 name="log/heads2009">
-Content-Length: 496
-Accept-ranges: bytes
-
-
 HTTP/1.1 200 OK
 Date: Thu, 21 Jun 2012 16:27:17 GMT
 Server: test-server/fake
diff --git a/tests/data/test2010 b/tests/data/test2010
index 33bb309eb..1f5320fe9 100644
--- a/tests/data/test2010
+++ b/tests/data/test2010
@@ -82,10 +82,6 @@ Contents delivered from an HTTP resource
 s/Last-Modified:.*//
 </stripfile2>
 <file2 name="log/heads2010">
-Content-Length: 496
-Accept-ranges: bytes
-
-
 HTTP/1.1 200 OK
 Date: Thu, 21 Jun 2012 17:37:27 GMT
 Server: test-server/fake

-- 
To stop receiving notification emails like this one, please contact
address@hidden.



reply via email to

[Prev in Thread] Current Thread [Next in Thread]