multi curl using http2 failed when limiting max host connections #2416

Closed
terrywh opened this Issue Mar 22, 2018 · 6 comments

Comments

Projects
None yet
3 participants
@terrywh

terrywh commented Mar 22, 2018

We encountered the following issue:

By setting "http2_max_requests 1;" in nginx and using libcurl (multi handle + libuv), we get the error "Failure when receiving data from the peer" almost every time (roughly one success per 10–15 requests).

(nginx v1.12.2 with http2 module | curl v7.58.0/7.59.0 | nghttp2 v1.29).

With the same versions, using the curl command-line tool, everything is fine.

By capturing TCP packets we see that the data has finished transferring, followed by an "Encrypted Alert" and a FIN from the server.

So I guess it has something to do with connection reuse inside the multi handle.

I managed to reproduce this problem by slightly modifying the multi-uv.c example, as shown below:

#include <stdio.h>
#include <stdlib.h>
#include <uv.h>
#include <curl/curl.h>

uv_loop_t *loop;       /* libuv event loop shared by all callbacks */
CURLM *curl_handle;    /* the single curl multi handle driving all transfers */
uv_timer_t timeout;    /* timer armed by curl via CURLMOPT_TIMERFUNCTION */

/* Per-socket state: ties a libuv poll watcher to the curl socket it watches. */
typedef struct curl_context_s {
  uv_poll_t poll_handle;
  curl_socket_t sockfd;
} curl_context_t;

/* Allocate a per-socket context and register a libuv poll watcher for the
   given curl socket. Ownership: the context is freed by curl_close_cb once
   the poll handle has been closed (see destroy_curl_context). */
static curl_context_t* create_curl_context(curl_socket_t sockfd)
{
  curl_context_t *context;

  /* No cast on malloc in C; `sizeof *context` keeps type and size in sync. */
  context = malloc(sizeof *context);
  if(!context) {
    /* Original code dereferenced NULL on OOM; fail loudly instead. */
    fprintf(stderr, "Out of memory allocating curl context\n");
    abort();
  }

  context->sockfd = sockfd;

  uv_poll_init_socket(loop, &context->poll_handle, sockfd);
  context->poll_handle.data = context;

  return context;
}

/* libuv close callback: the poll handle's data pointer owns the context,
   so release it now that libuv has finished closing the handle. */
static void curl_close_cb(uv_handle_t *handle)
{
  free(handle->data);
}

/* Begin asynchronous teardown of a socket context; the memory itself is
   released in curl_close_cb once the close completes. */
static void destroy_curl_context(curl_context_t *context)
{
  uv_handle_t *h = (uv_handle_t *) &context->poll_handle;
  uv_close(h, curl_close_cb);
}

/* Create an easy handle downloading `url` into "<num>.download" and add it
   to the multi handle. The FILE* is stashed in CURLOPT_PRIVATE so
   check_multi_info can close it when the transfer finishes. */
static void add_download(const char *url, int num)
{
  char filename[50];
  FILE *file;
  CURL *handle;

  snprintf(filename, sizeof filename, "%d.download", num);

  file = fopen(filename, "wb");
  if(!file) {
    fprintf(stderr, "Error opening %s\n", filename);
    return;
  }

  handle = curl_easy_init();
  if(!handle) {
    /* don't leak the just-opened file on init failure */
    fprintf(stderr, "curl_easy_init failed\n");
    fclose(file);
    return;
  }
  curl_easy_setopt(handle, CURLOPT_WRITEDATA, file);
  curl_easy_setopt(handle, CURLOPT_PRIVATE, file);
  curl_easy_setopt(handle, CURLOPT_URL, url);
  curl_easy_setopt(handle, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
  /* curl_easy_setopt is variadic: options documented as taking `long`
     must be passed a long, not an int (mismatch is UB where
     sizeof(long) != sizeof(int), e.g. LP64). */
  curl_easy_setopt(handle, CURLOPT_SSL_VERIFYHOST, 0L);
  curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L);
  curl_easy_setopt(handle, CURLOPT_TIMEOUT_MS, 5000L);
  curl_multi_add_handle(curl_handle, handle);
  fprintf(stderr, "Added download %s -> %s\n", url, filename);
}

/* Drain all completed-transfer messages from the multi handle, detaching,
   cleaning up, and closing the output file for each finished easy handle. */
static void check_multi_info(void)
{
  CURLMsg *msg;
  int msgs_left;

  while((msg = curl_multi_info_read(curl_handle, &msgs_left)) != NULL) {
    if(msg->msg == CURLMSG_DONE) {
      CURL *easy = msg->easy_handle;
      char *url = NULL;
      FILE *out = NULL;

      curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &url);
      curl_easy_getinfo(easy, CURLINFO_PRIVATE, &out);
      printf("%s DONE\n", url);

      /* remove before cleanup; msg->easy_handle is invalid afterwards */
      curl_multi_remove_handle(curl_handle, easy);
      curl_easy_cleanup(easy);
      if(out) {
        fclose(out);
      }
    }
    else {
      fprintf(stderr, "CURLMSG default\n");
    }
  }
}

/* libuv poll callback: translate readiness events into curl select bits,
   let curl act on the socket, then reap any finished transfers. */
static void curl_perform(uv_poll_t *req, int status, int events)
{
  curl_context_t *ctx = (curl_context_t *) req->data;
  int action = 0;
  int still_running;

  if(events & UV_READABLE)
    action |= CURL_CSELECT_IN;
  if(events & UV_WRITABLE)
    action |= CURL_CSELECT_OUT;

  curl_multi_socket_action(curl_handle, ctx->sockfd, action, &still_running);

  check_multi_info();
}

/* Timer callback: notify curl that its requested timeout expired.
   CURL_SOCKET_TIMEOUT means "no specific socket". */
static void on_timeout(uv_timer_t *req)
{
  int still_running;

  curl_multi_socket_action(curl_handle, CURL_SOCKET_TIMEOUT, 0,
                           &still_running);
  check_multi_info();
}

/* CURLMOPT_TIMERFUNCTION callback: curl tells us when it next needs a
   timeout tick. Negative means "cancel"; zero is bumped to 1ms so the
   event loop gets a chance to fire the timer. */
static int start_timeout(CURLM *multi, long timeout_ms, void *userp)
{
  if(timeout_ms < 0)
    uv_timer_stop(&timeout);
  else
    uv_timer_start(&timeout, on_timeout, timeout_ms ? timeout_ms : 1, 0);
  return 0;
}

/* CURLMOPT_SOCKETFUNCTION callback: keep libuv's interest in each curl
   socket in sync with what curl asks for. `socketp` is the per-socket
   pointer previously stored via curl_multi_assign (NULL for new sockets). */
static int handle_socket(CURL *easy, curl_socket_t s, int action, void *userp,
                  void *socketp)
{
  curl_context_t *ctx;
  int events;

  switch(action) {
  case CURL_POLL_IN:
  case CURL_POLL_OUT:
  case CURL_POLL_INOUT:
    /* Reuse the existing per-socket context, or create one on first sight. */
    ctx = socketp ? (curl_context_t *) socketp : create_curl_context(s);
    curl_multi_assign(curl_handle, s, (void *) ctx);

    events = 0;
    if(action != CURL_POLL_IN)
      events |= UV_WRITABLE;
    if(action != CURL_POLL_OUT)
      events |= UV_READABLE;

    uv_poll_start(&ctx->poll_handle, events, curl_perform);
    break;
  case CURL_POLL_REMOVE:
    if(socketp) {
      ctx = (curl_context_t *) socketp;
      uv_poll_stop(&ctx->poll_handle);
      destroy_curl_context(ctx);
      curl_multi_assign(curl_handle, s, NULL);
    }
    break;
  default:
    abort();
  }

  return 0;
}

/* Entry point: one download is queued per command-line URL; libuv drives
   the curl multi handle through the socket/timer callbacks above. */
int main(int argc, char **argv)
{
  loop = uv_default_loop();

  if(argc <= 1)
    return 0;

  if(curl_global_init(CURL_GLOBAL_ALL)) {
    fprintf(stderr, "Could not init curl\n");
    return 1;
  }

  uv_timer_init(loop, &timeout);

  curl_handle = curl_multi_init();
  if(!curl_handle) {
    fprintf(stderr, "Could not create curl multi handle\n");
    curl_global_cleanup();
    return 1;
  }
  curl_multi_setopt(curl_handle, CURLMOPT_SOCKETFUNCTION, handle_socket);
  curl_multi_setopt(curl_handle, CURLMOPT_TIMERFUNCTION, start_timeout);
  /* curl_multi_setopt is variadic: `long`-typed options must be passed
     long values, not int literals. */
  curl_multi_setopt(curl_handle, CURLMOPT_PIPELINING,
                    (long)(CURLPIPE_HTTP1 | CURLPIPE_MULTIPLEX));
  /* This option seems to trigger the problem being reported. */
  curl_multi_setopt(curl_handle, CURLMOPT_MAX_HOST_CONNECTIONS, 2L);
  curl_multi_setopt(curl_handle, CURLMOPT_MAX_PIPELINE_LENGTH, 4L);

  while(argc-- > 1) {
    add_download(argv[argc], argc);
  }

  uv_run(loop, UV_RUN_DEFAULT);
  curl_multi_cleanup(curl_handle);
  curl_global_cleanup();  /* pair with curl_global_init */

  return 0;
}

compile:

 gcc -o multi-uv -I./deps/curl/include -I./deps/libuv/include multi-uv.c ./deps/curl/lib/.libs/libcurl.a ./deps/libuv/.libs/libuv.a ./deps/nghttp2/bin/lib/libnghttp2.a ./deps/c-ares/bin/lib/libcares.a -lpthread -lssl -lcrypto -lz

run:

./multi-uv https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/  https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/ https://10.110.32.193/

when doing this i set http2_max_requests 10; in nginx config files.

and it produces some files with size 0:

image

@bagder

This comment has been minimized.

Show comment Hide comment
@bagder

bagder Mar 26, 2018

Member

How many "max concurrent streams" does the server advertise?

Member

bagder commented Mar 26, 2018

How many "max concurrent streams" does the server advertise?

@terrywh

This comment has been minimized.

Show comment Hide comment
@terrywh

terrywh Mar 27, 2018

these http2 configs are used (in a vhost):

http2_max_concurrent_streams 128;
http2_idle_timeout  15m;
http2_max_requests  10;
http2_recv_timeout 60s;

terrywh commented Mar 27, 2018

these http2 configs are used (in a vhost):

http2_max_concurrent_streams 128;
http2_idle_timeout  15m;
http2_max_requests  10;
http2_recv_timeout 60s;
@bagder

This comment has been minimized.

Show comment Hide comment
@bagder

bagder Mar 27, 2018

Member

Thanks for the added details. I've installed an nginx myself with a config using similar options. I can reproduce this problem. My slightly amended version of your code: issue-2416.c. It uses a single fixed URL and lets the user specify how many to fetch in parallel.

Member

bagder commented Mar 27, 2018

Thanks for the added details. I've installed an nginx myself with a config using similar options. I can reproduce this problem. My slightly amended version of your code: issue-2416.c. It uses a single fixed URL and lets the user specify how many to fetch in parallel.

@bagder

This comment has been minimized.

Show comment Hide comment
@bagder

bagder Mar 28, 2018

Member

Status update:

There's at least two problems in libcurl that this test case shows and suffers from. (pro tip: setting SSLKEYLOGFILE and running wireshark in parallel to this test case is awesome)

  1. libcurl doesn't properly react on the GOAWAY that the server sends after N requests. A feedback for @tatsuhiro-t would be that it's not really clearly documented exactly what we get back from nghttp2 when we get that frame over the wire. Right now, it leads to a REFUSED_STREAM error that we don't handle properly. It should lead to libcurl trying the request again on another or a fresh connection. GOAWAY should lead to this connection getting closed and not used for any further requests.

  2. Due to the CURLMOPT_MAX_HOST_CONNECTIONS setting, there's a "queue" of transfers that want to use the connection that gets the GOAWAY. That queue is then wrongly treated as a reason for the code to not close the connection - since it thinks there are pending transfers to get sent over it! Of course, pending transfers like this should not be associated with a fixed connection.

By setting CURLMOPT_MAX_HOST_CONNECTIONS to 1 in the test application and http2_max_requests to 5 in the nginx config, this problem can be triggered already at 6-7 transfers (I use 4KB body sizes).

I'm working on fixes for these issues.

Member

bagder commented Mar 28, 2018

Status update:

There's at least two problems in libcurl that this test case shows and suffers from. (pro tip: setting SSLKEYLOGFILE and running wireshark in parallel to this test case is awesome)

  1. libcurl doesn't properly react on the GOAWAY that the server sends after N requests. A feedback for @tatsuhiro-t would be that it's not really clearly documented exactly what we get back from nghttp2 when we get that frame over the wire. Right now, it leads to a REFUSED_STREAM error that we don't handle properly. It should lead to libcurl trying the request again on another or a fresh connection. GOAWAY should lead to this connection getting closed and not used for any further requests.

  2. Due to the CURLMOPT_MAX_HOST_CONNECTIONS setting, there's a "queue" of transfers that want to use the connection that gets the GOAWAY. That queue is then wrongly treated as a reason for the code to not close the connection - since it thinks there are pending transfers to get sent over it! Of course, pending transfers like this should not be associated with a fixed connection.

By setting CURLMOPT_MAX_HOST_CONNECTIONS to 1 in the test application and http2_max_requests to 5 in the nginx config, this problem can be triggered already at 6-7 transfers (I use 4KB body sizes).

I'm working on fixes for these issues.

@tatsuhiro-t

This comment has been minimized.

Show comment Hide comment
@tatsuhiro-t

tatsuhiro-t Mar 29, 2018

Contributor

The error code which indicates that stream was closed because of incoming GOAWAY is REFUSED_STREAM, and it is passed to on_stream_close_callback. I admit that I haven't documented it in nghttp2 manual.

Contributor

tatsuhiro-t commented Mar 29, 2018

The error code which indicates that stream was closed because of incoming GOAWAY is REFUSED_STREAM, and it is passed to on_stream_close_callback. I admit that I haven't documented it in nghttp2 manual.

@bagder

This comment has been minimized.

Show comment Hide comment
@bagder

bagder Mar 29, 2018

Member

Thanks for confirming this @tatsuhiro-t, I figured it was like that!

Member

bagder commented Mar 29, 2018

Thanks for confirming this @tatsuhiro-t, I figured it was like that!

bagder added a commit that referenced this issue Apr 19, 2018

http2: handle GOAWAY properly
When receiving REFUSED_STREAM, mark the connection for close and retry
streams accordingly on another/fresh connection.

Reported-by: Terry Wu
Fixes #2416

bagder added a commit that referenced this issue Apr 19, 2018

http2: handle GOAWAY properly
When receiving REFUSED_STREAM, mark the connection for close and retry
streams accordingly on another/fresh connection.

Reported-by: Terry Wu
Fixes #2416

@bagder bagder closed this in d122df5 Apr 20, 2018

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment