From ce9f7ef97faae3ae910013e74d4b7d0c7d869aee Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Mon, 9 Mar 2020 12:01:15 -0700 Subject: [PATCH 01/35] revamp encoder API (wip) --- include/aws/http/private/h2_connection.h | 4 +- include/aws/http/private/h2_frames.h | 250 +++++------ source/h2_connection.c | 66 ++- source/h2_frames.c | 537 ++++++++++++----------- source/h2_stream.c | 61 +-- 5 files changed, 439 insertions(+), 479 deletions(-) diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index f18b42273..856c3dd27 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -52,7 +52,7 @@ struct aws_h2_connection { * Any stream in this list is also in the active_streams_map. */ struct aws_linked_list outgoing_streams_list; - /* List using aws_h2_frame_base.node. + /* List using aws_h2_frame.node. * Queues all frames (except DATA frames) for connection to send. * When queue is empty, then we send DATA frames from the outgoing_streams_list */ struct aws_linked_list outgoing_frames_queue; @@ -101,6 +101,6 @@ AWS_EXTERN_C_END * Frames are sent into FIFO order. * Do not enqueue DATA frames, these are sent by other means when the frame queue is empty. 
*/ -void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame_base *frame); +void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame); #endif /* AWS_HTTP_H2_CONNECTION_H */ diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index 65641dccf..bb4a6579d 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -74,6 +74,11 @@ enum aws_h2_settings { AWS_H2_SETTINGS_END_RANGE, /* End of known values */ }; +/* Payload must fit in 3 bytes */ +#define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) + +#define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) + /* This magic string must be the very first thing a client sends to the server. * See RFC-7540 3.5 - HTTP/2 Connection Preface */ extern const struct aws_byte_cursor aws_h2_connection_preface_client_string; @@ -99,28 +104,21 @@ struct aws_h2_frame_header_block { struct aws_array_list header_fields; }; -/* Present in every h2 frame */ -struct aws_h2_frame_base { - uint8_t type; /* aws_h2_frame_type */ +/** + * A frame to be encoded. 
+ * (in the case of HEADERS and PUSH_PROMISE, it might turn into multiple frames due to CONTINUATION) + */ +struct aws_h2_frame { + const struct aws_h2_frame_vtable *vtable; + struct aws_allocator *alloc; + enum aws_h2_frame_type type; uint32_t stream_id; struct aws_linked_list_node node; }; -/* Represents a DATA frame */ -struct aws_h2_frame_data { - struct aws_h2_frame_base base; - - /* Flags */ - bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */ - - /* Payload */ - uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ - struct aws_byte_cursor data; -}; - /* Represents a HEADERS frame */ struct aws_h2_frame_headers { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Flags */ bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */ @@ -135,7 +133,7 @@ struct aws_h2_frame_headers { /* Represents a PRIORITY frame */ struct aws_h2_frame_priority { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Payload */ struct aws_h2_frame_priority_settings priority; @@ -143,7 +141,7 @@ struct aws_h2_frame_priority { /* Represents a RST_STREAM frame */ struct aws_h2_frame_rst_stream { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Payload */ enum aws_h2_error_codes error_code; @@ -157,7 +155,7 @@ struct aws_h2_frame_setting { /* Represents a SETTINGS frame */ struct aws_h2_frame_settings { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Flags */ bool ack; /* AWS_H2_FRAME_F_ACK */ @@ -169,7 +167,7 @@ struct aws_h2_frame_settings { /* Represents a PUSH_PROMISE frame */ struct aws_h2_frame_push_promise { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Flags */ bool end_headers; /* AWS_H2_FRAME_F_END_HEADERS */ @@ -184,7 +182,7 @@ struct aws_h2_frame_push_promise { /* Represents a PING frame */ struct aws_h2_frame_ping { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Flags */ bool ack; /* AWS_H2_FRAME_F_ACK */ @@ -195,7 +193,7 @@ struct aws_h2_frame_ping { /* Represents a GOAWAY frame 
*/ struct aws_h2_frame_goaway { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Payload */ uint32_t last_stream_id; @@ -205,55 +203,38 @@ struct aws_h2_frame_goaway { /* Represents a WINDOW_UPDATE frame */ struct aws_h2_frame_window_update { - struct aws_h2_frame_base base; + struct aws_h2_frame base; /* Payload */ uint32_t window_size_increment; }; -/* Represents a CONTINUATION frame */ -struct aws_h2_frame_continuation { - struct aws_h2_frame_base base; - - /* Flags */ - bool end_headers; /* AWS_H2_FRAME_F_END_HEADERS */ - - /* Payload */ - struct aws_h2_frame_header_block header_block; -}; - /* Used to encode a frame */ struct aws_h2_frame_encoder { /* Larger state */ struct aws_allocator *allocator; struct aws_hpack_context *hpack; - bool use_huffman; + struct aws_h2_frame *current_frame; + bool has_errored; + bool use_huffman; /* #TODO on string-by-string basis, use huffman if it makes the string smaller */ }; -AWS_EXTERN_C_BEGIN +typedef void aws_h2_frame_destroy_fn(struct aws_h2_frame *frame_base); +typedef int aws_h2_frame_encode_fn( + struct aws_h2_frame *frame_base, + struct aws_h2_frame_encoder *encoder, + struct aws_byte_buf *output, + bool *complete); -/* #TODO: remove each frame type's specific clean_up() function from API */ -AWS_HTTP_API -void aws_h2_frame_clean_up(struct aws_h2_frame_base *frame); +struct aws_h2_frame_vtable { + aws_h2_frame_destroy_fn *destroy; + aws_h2_frame_encode_fn *encode; +}; -AWS_HTTP_API -const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type); +AWS_EXTERN_C_BEGIN -/* Internal methods exposed for testing purposes only */ -AWS_HTTP_API -int aws_h2_frame_header_block_init(struct aws_h2_frame_header_block *header_block, struct aws_allocator *allocator); AWS_HTTP_API -void aws_h2_frame_header_block_clean_up(struct aws_h2_frame_header_block *header_block); -AWS_HTTP_API -int aws_h2_frame_header_block_get_encoded_length( - const struct aws_h2_frame_header_block *header_block, - const struct 
aws_h2_frame_encoder *encoder, - size_t *length); -AWS_HTTP_API -int aws_h2_frame_header_block_encode( - const struct aws_h2_frame_header_block *header_block, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type); /** * The process of encoding a frame looks like: @@ -265,113 +246,106 @@ int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_a AWS_HTTP_API void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder); -/* #TODO: remove each frame type's specific encode() function from API */ +/** + * Attempt to encode frame into output buffer. + * AWS_OP_ERR is returned if encoder encounters an unrecoverable error. + * frame_complete will be set true if the frame finished encoding. + * + * If frame_complete is false then we MUST call aws_h2_encode_frame() again + * with all the same inputs, when we have a fresh buffer (it would be illegal + * to encode a different frame). + */ AWS_HTTP_API int aws_h2_encode_frame( struct aws_h2_frame_encoder *encoder, - struct aws_h2_frame_base *frame, - struct aws_byte_buf *output); - -AWS_HTTP_API -int aws_h2_frame_data_init(struct aws_h2_frame_data *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_data_clean_up(struct aws_h2_frame_data *frame); + struct aws_h2_frame *frame, + struct aws_byte_buf *output, + bool *frame_complete); +/** + * Attempt to encode a DATA frame into the output buffer. + * AWS_OP_ERR is returned if encoder encounters an unrecoverable error. + * body_complete will be set true if encoder reaches the end of the body_stream. + * + * Each call to this function encodes a complete DATA frame, or nothing at all, + * so it's always safe to encode a different frame type or the body of a different stream + * after calling this. 
+ */ AWS_HTTP_API -int aws_h2_frame_data_encode( - struct aws_h2_frame_data *frame, +int aws_h2_encode_data_frame( struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); + uint32_t stream_id, + struct aws_input_stream *body_stream, + bool end_stream, + uint8_t padding, + struct aws_byte_buf *output, + bool *body_complete); AWS_HTTP_API -int aws_h2_frame_headers_init(struct aws_h2_frame_headers *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_headers_clean_up(struct aws_h2_frame_headers *frame); -AWS_HTTP_API -int aws_h2_frame_headers_encode( - struct aws_h2_frame_headers *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +void aws_h2_frame_destroy(struct aws_h2_frame *frame); +/** + * This frame type may actually end up encoding multiple frames + * (HEADERS followed by 0 or more CONTINUATION frames). + */ AWS_HTTP_API -int aws_h2_frame_priority_init(struct aws_h2_frame_priority *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_priority_clean_up(struct aws_h2_frame_priority *frame); -AWS_HTTP_API -int aws_h2_frame_priority_encode( - struct aws_h2_frame_priority *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_headers( + struct aws_allocator *allocator, + uint32_t stream_id, + const struct aws_http_headers *headers, + bool end_stream, + uint8_t padding, + const struct aws_h2_frame_priority_settings *optional_priority); AWS_HTTP_API -int aws_h2_frame_rst_stream_init(struct aws_h2_frame_rst_stream *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_rst_stream_clean_up(struct aws_h2_frame_rst_stream *frame); -AWS_HTTP_API -int aws_h2_frame_rst_stream_encode( - struct aws_h2_frame_rst_stream *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_priority( + struct aws_allocator *allocator, + uint32_t stream_id, + const 
struct aws_h2_frame_priority_settings *priority); AWS_HTTP_API -int aws_h2_frame_settings_init(struct aws_h2_frame_settings *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_settings_clean_up(struct aws_h2_frame_settings *frame); -AWS_HTTP_API -int aws_h2_frame_settings_encode( - struct aws_h2_frame_settings *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_rst_stream( + struct aws_allocator *allocator, + uint32_t stream_id, + enum aws_h2_error_codes error_code); AWS_HTTP_API -int aws_h2_frame_push_promise_init(struct aws_h2_frame_push_promise *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_push_promise_clean_up(struct aws_h2_frame_push_promise *frame); -AWS_HTTP_API -int aws_h2_frame_push_promise_encode( - struct aws_h2_frame_push_promise *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_settings( + struct aws_allocator *allocator, + const struct aws_h2_frame_setting *settings_array, + size_t num_settings, + bool ack); +/** + * This frame type may actually end up encoding multiple frames + * (PUSH_PROMISE followed 0 or more CONTINUATION frames). 
+ */ AWS_HTTP_API -int aws_h2_frame_ping_init(struct aws_h2_frame_ping *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_ping_clean_up(struct aws_h2_frame_ping *frame); -AWS_HTTP_API -int aws_h2_frame_ping_encode( - struct aws_h2_frame_ping *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_push_promise( + struct aws_allocator *allocator, + uint32_t stream_id, + uint32_t promised_stream_id, + const struct aws_http_headers *headers, + uint8_t padding); AWS_HTTP_API -int aws_h2_frame_goaway_init(struct aws_h2_frame_goaway *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_goaway_clean_up(struct aws_h2_frame_goaway *frame); -AWS_HTTP_API -int aws_h2_frame_goaway_encode( - struct aws_h2_frame_goaway *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_ping( + struct aws_allocator *allocator, + bool ack, + const uint8_t opaque_data[AWS_H2_PING_DATA_SIZE]); AWS_HTTP_API -int aws_h2_frame_window_update_init(struct aws_h2_frame_window_update *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_window_update_clean_up(struct aws_h2_frame_window_update *frame); -AWS_HTTP_API -int aws_h2_frame_window_update_encode( - struct aws_h2_frame_window_update *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_goaway( + struct aws_allocator *allocator, + uint32_t last_stream_id, + enum aws_h2_error_codes error_code, + struct aws_byte_cursor debug_data); AWS_HTTP_API -int aws_h2_frame_continuation_init(struct aws_h2_frame_continuation *frame, struct aws_allocator *allocator); -AWS_HTTP_API -void aws_h2_frame_continuation_clean_up(struct aws_h2_frame_continuation *frame); -AWS_HTTP_API -int aws_h2_frame_continuation_encode( - struct aws_h2_frame_continuation *frame, - struct aws_h2_frame_encoder *encoder, - struct 
aws_byte_buf *output); +struct aws_h2_frame *aws_h2_frame_new_window_update( + struct aws_allocator *allocator, + uint32_t stream_id, + uint32_t window_size_increment); AWS_EXTERN_C_END diff --git a/source/h2_connection.c b/source/h2_connection.c index 86a19d5da..427a310c7 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -279,9 +279,8 @@ static void s_handler_destroy(struct aws_channel_handler *handler) { struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue; while (!aws_linked_list_empty(outgoing_frames_queue)) { struct aws_linked_list_node *node = aws_linked_list_pop_front(outgoing_frames_queue); - struct aws_h2_frame_base *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame_base, node); - aws_h2_frame_clean_up(frame); - aws_mem_release(connection->base.alloc, frame); + struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node); + aws_h2_frame_destroy(frame); } aws_h2_decoder_destroy(connection->thread_data.decoder); @@ -291,7 +290,7 @@ static void s_handler_destroy(struct aws_channel_handler *handler) { aws_mem_release(connection->base.alloc, connection); } -void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame_base *frame) { +void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame) { AWS_PRECONDITION(frame->type != AWS_H2_FRAME_T_DATA); AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); @@ -370,22 +369,31 @@ static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enu /* Write as many frames from outgoing_frames_queue as possible. 
*/ while (!aws_linked_list_empty(outgoing_frames_queue)) { struct aws_linked_list_node *frame_node = aws_linked_list_front(outgoing_frames_queue); - struct aws_h2_frame_base *frame = AWS_CONTAINER_OF(frame_node, struct aws_h2_frame_base, node); + struct aws_h2_frame *frame = AWS_CONTAINER_OF(frame_node, struct aws_h2_frame, node); - /* #TODO actual functionality to query min required space for a frame */ - const size_t min_required_bytes = 1024; - const size_t available_bytes = msg->message_data.capacity - msg->message_data.len; - if (available_bytes < min_required_bytes) { + bool frame_complete; + if (aws_h2_encode_frame(&connection->thread_data.encoder, frame, &msg->message_data, &frame_complete)) { + CONNECTION_LOGF( + ERROR, + connection, + "Error encoding frame: type=%s stream=%" PRIu32 " error=%s", + aws_h2_frame_type_to_str(frame->type), + frame->stream_id, + aws_error_name(aws_last_error())); + goto error; + } + + if (!frame_complete) { if (msg->message_data.len == 0) { /* We're in trouble if an empty message isn't big enough for this frame to do any work with */ CONNECTION_LOGF( ERROR, connection, - "Cannot encode %s frame requiring %zu bytes, max available space is %zu", + "Message is too small for encoder. frame-type=%s stream=%" PRIu32 " available-space=%zu", aws_h2_frame_type_to_str(frame->type), - min_required_bytes, - available_bytes); - aws_raise_error(AWS_ERROR_INVALID_STATE); + frame->stream_id, + msg->message_data.capacity); + aws_raise_error(AWS_ERROR_SHORT_BUFFER); goto error; } @@ -393,22 +401,9 @@ static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enu goto done_encoding; } - /* #TODO some way for frame to say it's not done yet. 
- * Necessary for HEADERS that will split across CONTINUATION frames */ - if (aws_h2_encode_frame(&connection->thread_data.encoder, frame, &msg->message_data)) { - CONNECTION_LOGF( - ERROR, - connection, - "Error encoding frame of type %s: %s", - aws_h2_frame_type_to_str(frame->type), - aws_error_name(aws_last_error())); - goto error; - } - /* Done encoding frame, pop from queue and cleanup*/ aws_linked_list_remove(frame_node); - aws_h2_frame_clean_up(frame); - aws_mem_release(connection->base.alloc, frame); + aws_h2_frame_destroy(frame); num_frames_encoded++; } @@ -525,27 +520,18 @@ static int s_send_connection_preface_client_string(struct aws_h2_connection *con return AWS_OP_ERR; } -/* #TODO actually fill with settings */ /* #TODO track which SETTINGS frames have been ACK'd */ static int s_enqueue_settings_frame(struct aws_h2_connection *connection) { struct aws_allocator *alloc = connection->base.alloc; - struct aws_h2_frame_settings *settings_frame = aws_mem_calloc(alloc, 1, sizeof(struct aws_h2_frame_settings)); + /* #TODO actually fill with settings */ + struct aws_h2_frame *settings_frame = aws_h2_frame_new_settings(alloc, NULL, 0, false /*ack*/); if (!settings_frame) { - goto error_alloc; - } - - if (aws_h2_frame_settings_init(settings_frame, alloc)) { - goto error_init; + return AWS_OP_ERR; } - aws_h2_connection_enqueue_outgoing_frame(connection, &settings_frame->base); + aws_h2_connection_enqueue_outgoing_frame(connection, settings_frame); return AWS_OP_SUCCESS; - -error_init: - aws_mem_release(alloc, settings_frame); -error_alloc: - return AWS_OP_ERR; } static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) { diff --git a/source/h2_frames.c b/source/h2_frames.c index cc5574211..e2063f940 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -23,10 +23,17 @@ #include /* #TODO: Don't raise AWS_H2_ERR_* enums, raise AWS_ERROR_* . 
+ * Actually, maybe do NOT raise H2-specific errors, because those are for *receiving* bad data, + * and errors from the encoder are user error??? * Also, if encoder raises error corresponding to AWS_H2_ERR, should * we send that code in the GOAWAY, or always treat encoder errors as AWS_H2_ERR_INTERNAL? * Like, you're only supposed to inform peer of errors that were their fault, right? */ +/* #TODO: when is the right time to validate every possible input? + * while encoding? while making new frame? in actual user-facing API? */ + +/* #TODO: use add_checked and mul_checked */ + const struct aws_byte_cursor aws_h2_connection_preface_client_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"); @@ -38,6 +45,14 @@ static const uint8_t s_indexed_header_field_mask = 1 << 7; static const uint8_t s_literal_save_field_mask = 1 << 6; static const uint8_t s_literal_no_forward_save_mask = 1 << 4; +#define DEFINE_FRAME_VTABLE(NAME) \ + static aws_h2_frame_destroy_fn s_frame_##NAME##_destroy; \ + static aws_h2_frame_encode_fn s_frame_##NAME##_encode; \ + static const struct aws_h2_frame_vtable s_frame_##NAME##_vtable = { \ + .destroy = s_frame_##NAME##_destroy, \ + .encode = s_frame_##NAME##_encode, \ + } + const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type) { switch (type) { case AWS_H2_FRAME_T_DATA: @@ -94,6 +109,7 @@ static int s_frame_priority_settings_encode( return AWS_OP_SUCCESS; } +#if 0 /*********************************************************************************************************************** * Header Block **********************************************************************************************************************/ @@ -260,17 +276,33 @@ int aws_h2_frame_header_block_encode( return AWS_OP_SUCCESS; } - +#endif // 0 /*********************************************************************************************************************** - * Common Header + * Common Frame Prefix 
**********************************************************************************************************************/ +static const size_t s_frame_prefix_length = 24; + +static void s_init_frame_base( + struct aws_h2_frame *frame_base, + struct aws_allocator *alloc, + enum aws_h2_frame_type type, + const struct aws_h2_frame_vtable *vtable, + uint32_t stream_id) { + + frame_base->vtable = vtable; + frame_base->alloc = alloc; + frame_base->type = type; + frame_base->stream_id = stream_id; +} + static int s_frame_prefix_encode( - struct aws_h2_frame_base *frame_base, + struct aws_h2_frame *frame_base, size_t length, uint8_t flags, struct aws_byte_buf *output) { AWS_PRECONDITION(frame_base); AWS_PRECONDITION(output); + AWS_PRECONDITION(!(frame_base->stream_id & s_u32_top_bit_mask), "Invalid stream ID"); AWS_LOGF( AWS_LL_TRACE, @@ -282,8 +314,9 @@ static int s_frame_prefix_encode( flags); /* Length must fit in 24 bits */ - if (length > 0x00FFFFFF) { - return aws_raise_error(AWS_H2_ERR_FRAME_SIZE_ERROR); + /* #TODO Check against SETTINGS_MAX_FRAME_SIZE */ + if (length > AWS_H2_PAYLOAD_MAX) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } /* Write length */ @@ -305,7 +338,7 @@ static int s_frame_prefix_encode( return AWS_OP_SUCCESS; } - +#if 0 /*********************************************************************************************************************** * Encoder **********************************************************************************************************************/ @@ -369,7 +402,7 @@ int aws_h2_frame_data_encode( length += 1 + frame->pad_length; } - /* Write the header data */ + /* Write the frame prefix */ if (s_frame_prefix_encode(&frame->base, length, flags, output)) { goto write_error; } @@ -447,7 +480,7 @@ int aws_h2_frame_headers_encode( length += s_frame_priority_settings_size; } - /* Write the header data */ + /* Write the frame prefix */ if (s_frame_prefix_encode(&frame->base, length, flags, output)) { goto write_error; } @@ 
-485,145 +518,188 @@ int aws_h2_frame_headers_encode( output->len = output_init_len; return aws_raise_error(AWS_ERROR_HTTP_COMPRESSION); } - +#endif // 0 /*********************************************************************************************************************** * PRIORITY **********************************************************************************************************************/ +DEFINE_FRAME_VTABLE(priority); static const size_t s_frame_priority_length = 5; -int aws_h2_frame_priority_init(struct aws_h2_frame_priority *frame, struct aws_allocator *allocator) { - (void)allocator; +struct aws_h2_frame *aws_h2_frame_new_priority( + struct aws_allocator *allocator, + uint32_t stream_id, + const struct aws_h2_frame_priority_settings *priority) { - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_PRIORITY; + struct aws_h2_frame_priority *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_priority)); + if (!frame) { + return NULL; + } - return AWS_OP_SUCCESS; + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_PRIORITY, &s_frame_priority_vtable, stream_id); + frame->priority = *priority; + + return &frame->base; } -void aws_h2_frame_priority_clean_up(struct aws_h2_frame_priority *frame) { - AWS_PRECONDITION(frame); - (void)frame; +static void s_frame_priority_destroy(struct aws_h2_frame *frame_base) { + aws_mem_release(frame_base->alloc, frame_base); } -int aws_h2_frame_priority_encode( - struct aws_h2_frame_priority *frame, +static int s_frame_priority_encode( + struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); + struct aws_byte_buf *output, + bool *complete) { (void)encoder; + struct aws_h2_frame_priority *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_priority, base); - const size_t output_init_len = output->len; + const size_t total_len = s_frame_prefix_length + 
s_frame_priority_length; + const size_t space_available = output->capacity - output->len; - /* Write the header data */ + /* If we can't encode the whole frame at once, try again later */ + if (total_len < space_available) { + *complete = false; + return AWS_OP_SUCCESS; + } + + /* Write the frame prefix */ if (s_frame_prefix_encode(&frame->base, s_frame_priority_length, 0, output)) { - goto write_error; + return AWS_OP_ERR; } /* Write the priority settings */ if (s_frame_priority_settings_encode(&frame->priority, output)) { - goto write_error; + return AWS_OP_ERR; } + *complete = true; return AWS_OP_SUCCESS; - -write_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /*********************************************************************************************************************** * RST_STREAM **********************************************************************************************************************/ +DEFINE_FRAME_VTABLE(rst_stream); static const size_t s_frame_rst_stream_length = 4; -int aws_h2_frame_rst_stream_init(struct aws_h2_frame_rst_stream *frame, struct aws_allocator *allocator) { - (void)allocator; +struct aws_h2_frame *aws_h2_frame_new_rst_stream( + struct aws_allocator *allocator, + uint32_t stream_id, + enum aws_h2_error_codes error_code) { - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_RST_STREAM; + struct aws_h2_frame_rst_stream *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_rst_stream)); + if (!frame) { + return NULL; + } + + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_RST_STREAM, &s_frame_rst_stream_vtable, stream_id); + frame->error_code = error_code; return AWS_OP_SUCCESS; } -void aws_h2_frame_rst_stream_clean_up(struct aws_h2_frame_rst_stream *frame) { - AWS_PRECONDITION(frame); - (void)frame; +static void s_frame_rst_stream_destroy(struct aws_h2_frame *frame_base) { + aws_mem_release(frame_base->alloc, frame_base); } -int 
aws_h2_frame_rst_stream_encode( - struct aws_h2_frame_rst_stream *frame, +static int s_frame_rst_stream_encode( + struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); + struct aws_byte_buf *output, + bool *complete) { (void)encoder; + struct aws_h2_frame_rst_stream *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_rst_stream, base); - const size_t output_init_len = output->len; + const size_t total_len = s_frame_prefix_length + s_frame_rst_stream_length; + const size_t space_available = output->capacity - output->len; + + /* If we can't encode the whole frame at once, try again later */ + if (total_len < space_available) { + *complete = false; + return AWS_OP_SUCCESS; + } - /* Write the header data */ + /* Write the frame prefix */ if (s_frame_prefix_encode(&frame->base, s_frame_rst_stream_length, 0, output)) { - goto write_error; + return AWS_OP_ERR; } /* Write the error_code */ if (!aws_byte_buf_write_be32(output, frame->error_code)) { - goto write_error; + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } + *complete = true; return AWS_OP_SUCCESS; - -write_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /*********************************************************************************************************************** * SETTINGS **********************************************************************************************************************/ -int aws_h2_frame_settings_init(struct aws_h2_frame_settings *frame, struct aws_allocator *allocator) { - (void)allocator; +DEFINE_FRAME_VTABLE(settings); +static const size_t s_frame_setting_length = 6; + +struct aws_h2_frame *aws_h2_frame_new_settings( + struct aws_allocator *allocator, + const struct aws_h2_frame_setting *settings_array, + size_t num_settings, + bool ack) { + + AWS_PRECONDITION(!ack || num_settings == 0, 
"Settings ACK must be empty"); + AWS_PRECONDITION(settings_array || num_settings == 0); + + struct aws_h2_frame_settings *frame; + struct aws_h2_frame_setting *array_alloc; + const size_t sizeof_settings_array = sizeof(struct aws_h2_frame_setting) * num_settings; + if (!aws_mem_acquire_many( + allocator, 2, &frame, sizeof(struct aws_h2_frame_settings), &array_alloc, sizeof_settings_array)) { + return NULL; + } AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_SETTINGS; + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_SETTINGS, &s_frame_settings_vtable, 0); + frame->ack = ack; + frame->settings_count = num_settings; + if (num_settings) { + frame->settings_array = memcpy(array_alloc, settings_array, sizeof_settings_array); + } - return AWS_OP_SUCCESS; + return &frame->base; } -void aws_h2_frame_settings_clean_up(struct aws_h2_frame_settings *frame) { - AWS_PRECONDITION(frame); - (void)frame; +static void s_frame_settings_destroy(struct aws_h2_frame *frame_base) { + aws_mem_release(frame_base->alloc, frame_base); } -int aws_h2_frame_settings_encode( - struct aws_h2_frame_settings *frame, +static int s_frame_settings_encode( + struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); - AWS_PRECONDITION(!frame->settings_count || frame->settings_array); + struct aws_byte_buf *output, + bool *complete) { (void)encoder; + struct aws_h2_frame_settings *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_settings, base); - const size_t output_init_len = output->len; + const size_t payload_len = frame->settings_count * s_frame_setting_length; + const size_t total_len = s_frame_prefix_length + payload_len; + const size_t space_available = output->capacity - output->len; + + /* If we can't encode the whole frame at once, try again later */ + if (total_len < space_available) { + *complete = false; + return AWS_OP_SUCCESS; + 
} - /* Write the header data */ + /* Write the frame prefix */ uint8_t flags = 0; if (frame->ack) { - if (frame->settings_count != 0) { - aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); - goto write_error; - } flags |= AWS_H2_FRAME_F_ACK; } - if (s_frame_prefix_encode(&frame->base, frame->settings_count * 6, flags, output)) { - goto write_error; + if (s_frame_prefix_encode(&frame->base, payload_len, flags, output)) { + return AWS_OP_ERR; } /* Write the payload */ @@ -631,18 +707,15 @@ int aws_h2_frame_settings_encode( if (!aws_byte_buf_write_be16(output, frame->settings_array[i].id) || !aws_byte_buf_write_be32(output, frame->settings_array[i].value)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto write_error; + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } } + *complete = true; return AWS_OP_SUCCESS; - -write_error: - output->len = output_init_len; - return AWS_OP_ERR; } +#if 0 /*********************************************************************************************************************** * PUSH_PROMISE **********************************************************************************************************************/ @@ -723,171 +796,198 @@ int aws_h2_frame_push_promise_encode( output->len = output_init_len; return aws_raise_error(AWS_ERROR_HTTP_COMPRESSION); } - +#endif // 0 /*********************************************************************************************************************** * PING **********************************************************************************************************************/ -int aws_h2_frame_ping_init(struct aws_h2_frame_ping *frame, struct aws_allocator *allocator) { - (void)allocator; +DEFINE_FRAME_VTABLE(ping); - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_PING; +struct aws_h2_frame *aws_h2_frame_new_ping( + struct aws_allocator *allocator, + bool ack, + const uint8_t opaque_data[AWS_H2_PING_DATA_SIZE]) { - return AWS_OP_SUCCESS; + struct aws_h2_frame_ping *frame = 
aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_ping)); + if (!frame) { + return NULL; + } + + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_PING, &s_frame_ping_vtable, 0); + frame->ack = ack; + memcpy(frame->opaque_data, opaque_data, AWS_H2_PING_DATA_SIZE); + + return &frame->base; } -void aws_h2_frame_ping_clean_up(struct aws_h2_frame_ping *frame) { - AWS_PRECONDITION(frame); - (void)frame; +static void s_frame_ping_destroy(struct aws_h2_frame *frame_base) { + aws_mem_release(frame_base->alloc, frame_base); } -int aws_h2_frame_ping_encode( - struct aws_h2_frame_ping *frame, +static int s_frame_ping_encode( + struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); + struct aws_byte_buf *output, + bool *complete) { (void)encoder; + struct aws_h2_frame_ping *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_ping, base); - if (frame->base.stream_id != 0) { - return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); - } + const size_t total_len = s_frame_prefix_length + AWS_H2_PING_DATA_SIZE; + const size_t space_available = output->capacity - output->len; - const size_t output_init_len = output->len; + /* If we can't encode the whole frame at once, try again later */ + if (total_len < space_available) { + *complete = false; + return AWS_OP_SUCCESS; + } - /* Write the header data */ + /* Write the frame prefix */ uint8_t flags = 0; if (frame->ack) { flags |= AWS_H2_FRAME_F_ACK; } if (s_frame_prefix_encode(&frame->base, AWS_H2_PING_DATA_SIZE, flags, output)) { - goto write_error; + return AWS_OP_ERR; } /* Write the opaque_data */ if (!aws_byte_buf_write(output, frame->opaque_data, AWS_H2_PING_DATA_SIZE)) { - goto write_error; + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } + *complete = true; return AWS_OP_SUCCESS; - -write_error: - output->len = output_init_len; - return 
aws_raise_error(AWS_H2_ERR_FRAME_SIZE_ERROR); } /*********************************************************************************************************************** * GOAWAY **********************************************************************************************************************/ -int aws_h2_frame_goaway_init(struct aws_h2_frame_goaway *frame, struct aws_allocator *allocator) { - (void)allocator; +DEFINE_FRAME_VTABLE(goaway); - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_GOAWAY; +struct aws_h2_frame *aws_h2_frame_new_goaway( + struct aws_allocator *allocator, + uint32_t last_stream_id, + enum aws_h2_error_codes error_code, + struct aws_byte_cursor debug_data) { - return AWS_OP_SUCCESS; + struct aws_h2_frame_goaway *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_goaway)); + if (!frame) { + return NULL; + } + + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_GOAWAY, &s_frame_goaway_vtable, 0); + frame->last_stream_id = last_stream_id; + frame->error_code = error_code, frame->debug_data = debug_data; + + return &frame->base; } -void aws_h2_frame_goaway_clean_up(struct aws_h2_frame_goaway *frame) { - AWS_PRECONDITION(frame); - (void)frame; +static void s_frame_goaway_destroy(struct aws_h2_frame *frame_base) { + aws_mem_release(frame_base->alloc, frame_base); } -int aws_h2_frame_goaway_encode( - struct aws_h2_frame_goaway *frame, +static int s_frame_goaway_encode( + struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); + struct aws_byte_buf *output, + bool *complete) { (void)encoder; + struct aws_h2_frame_goaway *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_goaway, base); - if (frame->base.stream_id != 0) { - return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); - } + /* # TODO: handle max payload len. simply truncate debug data? 
*/ + const size_t payload_len = 8 + frame->debug_data.len; + const size_t total_len = s_frame_prefix_length + payload_len; + const size_t space_available = output->capacity - output->len; - const size_t output_init_len = output->len; + /* If we can't encode the whole frame at once, try again later */ + if (total_len < space_available) { + *complete = false; + return AWS_OP_SUCCESS; + } - /* Write the header data */ - size_t length = 8 + frame->debug_data.len; - if (s_frame_prefix_encode(&frame->base, length, 0, output)) { - goto write_error; + /* Write the frame prefix */ + if (s_frame_prefix_encode(&frame->base, payload_len, 0, output)) { + return AWS_OP_ERR; } /* Write the payload */ - if (!aws_byte_buf_write_be32(output, frame->last_stream_id & s_31_bit_mask)) { - goto write_error; - } - if (!aws_byte_buf_write_be32(output, frame->error_code)) { - goto write_error; - } - if (!aws_byte_buf_write_from_whole_cursor(output, frame->debug_data)) { - goto write_error; + if (!aws_byte_buf_write_be32(output, frame->last_stream_id & s_31_bit_mask) || + !aws_byte_buf_write_be32(output, frame->error_code) || + !aws_byte_buf_write_from_whole_cursor(output, frame->debug_data)) { + + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } + *complete = true; return AWS_OP_SUCCESS; - -write_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /*********************************************************************************************************************** * WINDOW_UPDATE **********************************************************************************************************************/ +DEFINE_FRAME_VTABLE(window_update); static const size_t s_frame_window_update_length = 4; -int aws_h2_frame_window_update_init(struct aws_h2_frame_window_update *frame, struct aws_allocator *allocator) { - (void)allocator; +struct aws_h2_frame *aws_h2_frame_new_window_update( + struct aws_allocator *allocator, + uint32_t stream_id, + uint32_t 
window_size_increment) { - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_WINDOW_UPDATE; + if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) { + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } - return AWS_OP_SUCCESS; + struct aws_h2_frame_window_update *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_window_update)); + if (!frame) { + return NULL; + } + + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_WINDOW_UPDATE, &s_frame_window_update_vtable, stream_id); + frame->window_size_increment = window_size_increment; + + return &frame->base; } -void aws_h2_frame_window_update_clean_up(struct aws_h2_frame_window_update *frame) { - AWS_PRECONDITION(frame); - (void)frame; +static void s_frame_window_update_destroy(struct aws_h2_frame *frame_base) { + aws_mem_release(frame_base->alloc, frame_base); } -int aws_h2_frame_window_update_encode( - struct aws_h2_frame_window_update *frame, +static int s_frame_window_update_encode( + struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); + struct aws_byte_buf *output, + bool *complete) { (void)encoder; + struct aws_h2_frame_window_update *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_window_update, base); - const size_t output_init_len = output->len; + const size_t total_len = s_frame_prefix_length + s_frame_window_update_length; + const size_t space_available = output->capacity - output->len; - if (frame->window_size_increment & s_u32_top_bit_mask) { - return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); + /* If we can't encode the whole frame at once, try again later */ + if (total_len < space_available) { + *complete = false; + return AWS_OP_SUCCESS; } - /* Write the header data */ + /* Write the frame prefix */ if (s_frame_prefix_encode(&frame->base, s_frame_window_update_length, 0, output)) { - goto write_error; + return 
AWS_OP_ERR; } /* Write the error_code */ if (!aws_byte_buf_write_be32(output, frame->window_size_increment)) { - goto write_error; + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } + *complete = true; return AWS_OP_SUCCESS; - -write_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } +#if 0 /*********************************************************************************************************************** * CONTINUATION **********************************************************************************************************************/ @@ -926,7 +1026,7 @@ int aws_h2_frame_continuation_encode( flags |= AWS_H2_FRAME_F_END_HEADERS; } - /* Write the header data */ + /* Write the frame prefix */ if (s_frame_prefix_encode(&frame->base, length, flags, output)) { goto write_error; } @@ -946,100 +1046,33 @@ int aws_h2_frame_continuation_encode( output->len = output_init_len; return aws_raise_error(AWS_ERROR_HTTP_COMPRESSION); } - -void aws_h2_frame_clean_up(struct aws_h2_frame_base *frame) { - switch (frame->type) { - case AWS_H2_FRAME_T_DATA: - aws_h2_frame_data_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_data, base)); - break; - - case AWS_H2_FRAME_T_HEADERS: - aws_h2_frame_headers_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_headers, base)); - break; - - case AWS_H2_FRAME_T_PRIORITY: - aws_h2_frame_priority_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_priority, base)); - break; - - case AWS_H2_FRAME_T_RST_STREAM: - aws_h2_frame_rst_stream_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_rst_stream, base)); - break; - - case AWS_H2_FRAME_T_SETTINGS: - aws_h2_frame_settings_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_settings, base)); - break; - - case AWS_H2_FRAME_T_PUSH_PROMISE: - aws_h2_frame_push_promise_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_push_promise, base)); - break; - - case AWS_H2_FRAME_T_PING: - aws_h2_frame_ping_clean_up(AWS_CONTAINER_OF(frame, struct 
aws_h2_frame_ping, base)); - break; - - case AWS_H2_FRAME_T_GOAWAY: - aws_h2_frame_goaway_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_goaway, base)); - break; - - case AWS_H2_FRAME_T_WINDOW_UPDATE: - aws_h2_frame_window_update_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_window_update, base)); - break; - - case AWS_H2_FRAME_T_CONTINUATION: - aws_h2_frame_continuation_clean_up(AWS_CONTAINER_OF(frame, struct aws_h2_frame_continuation, base)); - break; - - default: - AWS_ASSERT(0); +#endif +void aws_h2_frame_destroy(struct aws_h2_frame *frame) { + if (frame) { + frame->vtable->destroy(frame); } } int aws_h2_encode_frame( struct aws_h2_frame_encoder *encoder, - struct aws_h2_frame_base *frame, - struct aws_byte_buf *output) { + struct aws_h2_frame *frame, + struct aws_byte_buf *output, + bool *frame_complete) { - switch (frame->type) { - case AWS_H2_FRAME_T_DATA: - return aws_h2_frame_data_encode(AWS_CONTAINER_OF(frame, struct aws_h2_frame_data, base), encoder, output); - - case AWS_H2_FRAME_T_HEADERS: - return aws_h2_frame_headers_encode( - AWS_CONTAINER_OF(frame, struct aws_h2_frame_headers, base), encoder, output); - - case AWS_H2_FRAME_T_PRIORITY: - return aws_h2_frame_priority_encode( - AWS_CONTAINER_OF(frame, struct aws_h2_frame_priority, base), encoder, output); - - case AWS_H2_FRAME_T_RST_STREAM: - return aws_h2_frame_rst_stream_encode( - AWS_CONTAINER_OF(frame, struct aws_h2_frame_rst_stream, base), encoder, output); - - case AWS_H2_FRAME_T_SETTINGS: - return aws_h2_frame_settings_encode( - AWS_CONTAINER_OF(frame, struct aws_h2_frame_settings, base), encoder, output); - - case AWS_H2_FRAME_T_PUSH_PROMISE: - return aws_h2_frame_push_promise_encode( - AWS_CONTAINER_OF(frame, struct aws_h2_frame_push_promise, base), encoder, output); - - case AWS_H2_FRAME_T_PING: - return aws_h2_frame_ping_encode(AWS_CONTAINER_OF(frame, struct aws_h2_frame_ping, base), encoder, output); - - case AWS_H2_FRAME_T_GOAWAY: - return aws_h2_frame_goaway_encode( - 
AWS_CONTAINER_OF(frame, struct aws_h2_frame_goaway, base), encoder, output); - - case AWS_H2_FRAME_T_WINDOW_UPDATE: - return aws_h2_frame_window_update_encode( - AWS_CONTAINER_OF(frame, struct aws_h2_frame_window_update, base), encoder, output); + AWS_PRECONDITION(encoder); + AWS_PRECONDITION(frame); + AWS_PRECONDITION(output); + AWS_PRECONDITION(frame_complete); + AWS_PRECONDITION(!encoder->has_errored && "Cannot encode after error"); + AWS_PRECONDITION(!encoder->current_frame || (encoder->current_frame == frame) && "Must resume current frame"); - case AWS_H2_FRAME_T_CONTINUATION: - return aws_h2_frame_continuation_encode( - AWS_CONTAINER_OF(frame, struct aws_h2_frame_continuation, base), encoder, output); + *frame_complete = false; - default: - AWS_ASSERT(0); - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + if (frame->vtable->encode(frame, encoder, output, frame_complete)) { + encoder->has_errored = true; + return AWS_OP_ERR; } + + encoder->current_frame = *frame_complete ? NULL : frame; + return AWS_OP_SUCCESS; } diff --git a/source/h2_stream.c b/source/h2_stream.c index 65376dc1d..0b9c3fee5 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -112,71 +112,38 @@ enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *str return stream->thread_data.state; } -static struct aws_h2_frame_headers *s_new_headers_frame( - struct aws_allocator *alloc, - const struct aws_http_message *message) { - - struct aws_h2_frame_headers *headers_frame = aws_mem_calloc(alloc, 1, sizeof(struct aws_h2_frame_headers)); - if (!headers_frame) { - goto error_alloc; - } - - if (aws_h2_frame_headers_init(headers_frame, alloc)) { - goto error_init; - } - - /* #TODO headers frame needs to respect max frame size, and use CONTINUATION */ - const size_t num_headers = aws_http_message_get_header_count(message); - for (size_t i = 0; i < num_headers; ++i) { - struct aws_http_header header_field; - - aws_http_message_get_header(message, &header_field, i); - if 
(aws_array_list_push_back(&headers_frame->header_block.header_fields, &header_field)) { - goto error_push_back; - } - } - - headers_frame->end_headers = true; - - if (!aws_http_message_get_body_stream(message)) { - headers_frame->end_stream = true; - } - - return headers_frame; - -error_push_back: - aws_h2_frame_clean_up(&headers_frame->base); -error_init: - aws_mem_release(alloc, headers_frame); -error_alloc: - return NULL; -} - int aws_h2_stream_on_activated(struct aws_h2_stream *stream, bool *out_has_outgoing_data) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); struct aws_h2_connection *connection = s_get_h2_connection(stream); /* Create HEADERS frame */ - struct aws_h2_frame_headers *headers_frame = - s_new_headers_frame(stream->base.alloc, stream->thread_data.outgoing_message); + const struct aws_http_message *msg = stream->thread_data.outgoing_message; + bool has_body_stream = aws_http_message_get_body_stream(msg) != NULL; + struct aws_h2_frame *headers_frame = aws_h2_frame_new_headers( + stream->base.alloc, + stream->base.id, + aws_http_message_get_const_headers(msg), + !has_body_stream /* end_stream */, + 0 /* padding - not currently configurable via public API */, + NULL /* priority - not currently configurable via public API */); + if (!headers_frame) { AWS_H2_STREAM_LOGF(ERROR, stream, "Failed to create HEADERS frame: %s", aws_error_name(aws_last_error())); goto error; } - if (aws_http_message_get_body_stream(stream->thread_data.outgoing_message)) { + if (has_body_stream) { /* If stream has DATA to send, put it in the outgoing_streams_list, and we'll send data later */ stream->thread_data.state = AWS_H2_STREAM_STATE_OPEN; - *out_has_outgoing_data = true; + *out_has_outgoing_data = has_body_stream; } else { /* If stream has no body, then HEADERS frame marks the end of outgoing data */ - headers_frame->end_stream = true; stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL; - *out_has_outgoing_data = false; + *out_has_outgoing_data = 
has_body_stream; } - aws_h2_connection_enqueue_outgoing_frame(connection, &headers_frame->base); + aws_h2_connection_enqueue_outgoing_frame(connection, headers_frame); return AWS_OP_SUCCESS; error: From 525a0b75af0caf87163a2144b68f1c7c57f2303a Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Tue, 10 Mar 2020 12:49:07 -0700 Subject: [PATCH 02/35] hpack pre-encode cmd. Do encoding in 2 passes, so we know exactly how much buffer space we'll need. I will probably revert this. I thought this was clever, but seeing it implemented it's just so much more complicated than writing everything to a dynamic buffer, then copying it into the aws_io_mesage. --- include/aws/http/private/hpack.h | 95 ++++++++- source/h2_frames.c | 2 + source/hpack.c | 335 +++++++++++++++++++++++++++---- 3 files changed, 386 insertions(+), 46 deletions(-) diff --git a/include/aws/http/private/hpack.h b/include/aws/http/private/hpack.h index df91d69d5..3ccf22769 100644 --- a/include/aws/http/private/hpack.h +++ b/include/aws/http/private/hpack.h @@ -43,6 +43,57 @@ struct aws_hpack_decode_result { } data; }; +enum aws_hpack_entry_type { + AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD, /* RFC-7541 6.1 */ + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME, /* RFC-7541 6.2.1 */ + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME, + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME, /* RFC-7541 6.2.2 */ + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME, + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME, /* RFC-7541 6.2.3 */ + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME, + AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE, /* RFC-7541 6.3 */ + AWS_HPACK_ENTRY_TYPE_COUNT, +}; + +/** + * HPACK encoding is performed as a 2 step process. + * The first step how each entry will be encoded, and how long the entry will be. + * The second step actully encodes output to a buffer. 
+ */ +struct aws_hpack_encoder_cmd { + size_t encoded_length; + + union { + struct { + struct aws_byte_cursor name_cursor; /* name to encode (if new-name type) */ + struct aws_byte_cursor value_cursor; /* value to encode (if literal type) */ + size_t index; /* index (if indexed type) */ + size_t name_encoded_str_length; /* length of encoded name string, excluding integer (if new-name type) */ + size_t value_encoded_str_length; /* length of encoded value string, excluding integer (if literal type) */ + bool name_uses_huffman; /* encode new name with huffman (if new-name type) */ + bool value_uses_huffman; /* encode value with huffman (if literal type) */ + } header; + + size_t dynamic_table_resize; + } data; + + uint8_t type; /* aws_hpack_entry_type */ +}; + +/** + * Controls whether non-indexed strings will use Huffman encoding. + * In SMALLEST mode, strings will only be sent with Huffman encoding if it makes them smaller. + * + * Note: This does not control compression via "indexing", + * for that, see `aws_http_header_compression`. + * This only controls how string values are encoded when they're not already in a table. + */ +enum aws_hpack_huffman_mode { + AWS_HPACK_HUFFMAN_NEVER, + AWS_HPACK_HUFFMAN_ALWAYS, + AWS_HPACK_HUFFMAN_SMALLEST, +}; + AWS_EXTERN_C_BEGIN /* Library-level init and shutdown */ @@ -75,6 +126,39 @@ int aws_hpack_decode( struct aws_byte_cursor *to_decode, struct aws_hpack_decode_result *result); +/** + * Initialize cmd with details for encoding a header-field. + * This function will mutate the hpack context, so any error is unrecoverable. + * cmds must be fed to aws_hpack_encode() in the order they are initialized. + */ +AWS_HTTP_API +int aws_hpack_pre_encode_header( + struct aws_hpack_context *context, + const struct aws_http_header *header, + enum aws_hpack_huffman_mode huffman_mode, + struct aws_hpack_encoder_cmd *cmd); + +/** + * Initialize cmd with details for encoding a Dynamic Table Size Update (RFC-7541 6.3). 
+ * cmds must be fed to aws_hpack_encode() in the order they are initialized. + */ +AWS_HTTP_API +void aws_hpack_pre_encode_dynamic_table_resize( + struct aws_hpack_context *context, + size_t size, + struct aws_hpack_encoder_cmd *cmd); + +/** + * Encode a cmd to the output buffer. + * At least cmd->encode_length must be available in the buffer. + * cmds must have been initialized in the order that they are passed to aws_hpack_encode(). + */ +AWS_HTTP_API +int aws_hpack_encode( + struct aws_hpack_context *context, + const struct aws_hpack_encoder_cmd *cmd, + struct aws_byte_buf *output); + /* Returns the hpack size of a header (name.len + value.len + 32) [4.1] */ AWS_HTTP_API size_t aws_hpack_get_header_size(const struct aws_http_header *header); @@ -113,16 +197,21 @@ int aws_hpack_decode_integer( uint64_t *integer, bool *complete); +/* #TODOD remove from public API? */ AWS_HTTP_API -size_t aws_hpack_get_encoded_length_string( +int aws_hpack_pre_encode_string( struct aws_hpack_context *context, struct aws_byte_cursor to_encode, - bool huffman_encode); + enum aws_hpack_huffman_mode huffman_mode, + size_t *out_str_length, + bool *out_use_huffman, + size_t *in_out_sum_total_length); AWS_HTTP_API int aws_hpack_encode_string( struct aws_hpack_context *context, - struct aws_byte_cursor *to_encode, + struct aws_byte_cursor to_encode, + size_t encoded_str_length, bool huffman_encode, struct aws_byte_buf *output); diff --git a/source/h2_frames.c b/source/h2_frames.c index e2063f940..94f847dad 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -204,6 +204,8 @@ int aws_h2_frame_header_block_encode( aws_array_list_get_at_ptr(&header_block->header_fields, (void **)&field, i); AWS_ASSERT(field); + /* #TODO don't use index unless header is USE_CACHE */ + /* #TODO need to update hpack as we go or we'll be using wrong indices */ bool found_value = true; const size_t index = aws_hpack_find_index(encoder->hpack, field, &found_value); diff --git a/source/hpack.c 
b/source/hpack.c index 79bae1221..16a5fe62f 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -25,6 +25,10 @@ /* #TODO split hpack encoder/decoder into different types */ +/* #TODO logging pass */ + +/* #TODO test empty strings */ + /* RFC-7540 6.5.2 */ const size_t s_hpack_dynamic_table_initial_size = 4096; const size_t s_hpack_dynamic_table_initial_elements = 512; @@ -611,6 +615,7 @@ int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_ /* If for whatever reason this new header is bigger than the total table size, burn everything to the ground. */ if (AWS_UNLIKELY(header_size > context->dynamic_table.max_size)) { + /* #TODO handle this. It's not an error. It should simply result in an empty table RFC-7541 4.4 */ goto error; } @@ -782,80 +787,105 @@ int aws_hpack_decode_integer( return AWS_OP_SUCCESS; } -size_t aws_hpack_get_encoded_length_string( +int aws_hpack_pre_encode_string( struct aws_hpack_context *context, struct aws_byte_cursor to_encode, - bool huffman_encode) { + enum aws_hpack_huffman_mode huffman_mode, + size_t *out_str_length, + bool *out_use_huffman, + size_t *in_out_sum_total_length) { AWS_PRECONDITION(context); - AWS_PRECONDITION(to_encode.ptr && to_encode.len); + AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode)); + AWS_PRECONDITION(out_str_length); + AWS_PRECONDITION(out_use_huffman); + AWS_PRECONDITION(in_out_sum_total_length); + + /* Get length of encoded string */ + switch (huffman_mode) { + case AWS_HPACK_HUFFMAN_NEVER: + *out_str_length = to_encode.len; + *out_use_huffman = false; + break; + case AWS_HPACK_HUFFMAN_ALWAYS: + *out_str_length = aws_huffman_get_encoded_length(&context->encoder, to_encode); + *out_use_huffman = true; + break; + case AWS_HPACK_HUFFMAN_SMALLEST: + *out_str_length = aws_huffman_get_encoded_length(&context->encoder, to_encode); + if (*out_str_length < to_encode.len) { + *out_use_huffman = true; + } else { + *out_str_length = to_encode.len; + *out_use_huffman = false; + } + break; - 
size_t length = 0; + default: + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } - /* Get the header length */ - size_t encoded_length; - if (huffman_encode) { - encoded_length = aws_huffman_get_encoded_length(&context->encoder, to_encode); - } else { - encoded_length = to_encode.len; + /* Get length of encoded integer */ + size_t int_length = aws_hpack_get_encoded_length_integer(*out_str_length, 7); + + /* Sum */ + size_t total_length; + if (aws_add_size_checked(int_length, *out_str_length, &total_length)) { + return AWS_OP_ERR; } - length += aws_hpack_get_encoded_length_integer(encoded_length, 7); - /* Add the string length */ - length += encoded_length; + if (aws_add_size_checked(total_length, *in_out_sum_total_length, in_out_sum_total_length)) { + return AWS_OP_ERR; + } - return length; + return AWS_OP_SUCCESS; } int aws_hpack_encode_string( struct aws_hpack_context *context, - struct aws_byte_cursor *to_encode, + struct aws_byte_cursor to_encode, + size_t encoded_str_length, bool huffman_encode, struct aws_byte_buf *output) { AWS_PRECONDITION(context); - AWS_PRECONDITION(to_encode); AWS_PRECONDITION(output); if (output->len == output->capacity) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } - struct aws_byte_cursor to_encode_backup = *to_encode; - /* Write the use_huffman bit */ output->buffer[output->len] = huffman_encode << 7; - /* Write the header */ - size_t encoded_length; - if (huffman_encode) { - encoded_length = aws_huffman_get_encoded_length(&context->encoder, *to_encode); - } else { - encoded_length = to_encode->len; - } - if (aws_hpack_encode_integer(encoded_length, 7, output)) { - goto error; + /* Write the length of the string */ + if (aws_hpack_encode_integer(encoded_str_length, 7, output)) { + return AWS_OP_ERR; } - if (huffman_encode) { - aws_huffman_encoder_reset(&context->encoder); - int result = aws_huffman_encode(&context->encoder, to_encode, output); - if (result) { - goto error; - } - } else { - bool result = 
aws_byte_buf_write_from_whole_cursor(output, *to_encode); - if (!result) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; + /* Write the string itself */ + if (to_encode.len) { + if (huffman_encode) { + aws_huffman_encoder_reset(&context->encoder); + int result = aws_huffman_encode(&context->encoder, &to_encode, output); + if (result) { + return AWS_OP_ERR; + } + + /* Huffman supports streaming encoding, but hpack only does whole-string encoding right now */ + if (to_encode.len > 0) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + } else { + bool result = aws_byte_buf_write_from_whole_cursor(output, to_encode); + if (!result) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + return AWS_OP_ERR; + } } - aws_byte_cursor_advance(to_encode, to_encode->len); } - return AWS_OP_SUCCESS; -error: - *to_encode = to_encode_backup; - return AWS_OP_ERR; + return AWS_OP_SUCCESS; } int aws_hpack_decode_string( @@ -1174,3 +1204,222 @@ int aws_hpack_decode( context->progress_entry.state = HPACK_ENTRY_STATE_INIT; return AWS_OP_SUCCESS; } + +/* + * 1xxxxxxx: Indexed Header Field Representation + * 01xxxxxx: Literal Header Field with Incremental Indexing + * 001xxxxx: Dynamic Table Size Update + * 0001xxxx: Literal Header Field Never Indexed + * 0000xxxx: Literal Header Field without Indexing */ +static const uint8_t s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_TYPE_COUNT] = { + [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 7, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME] = 6, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = 6, + [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 5, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME] = 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME] = 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = 4, +}; + +static const uint8_t 
s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_TYPE_COUNT] = { + [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 1 << 7, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME] = 1 << 6, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = 1 << 6, + [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 1 << 5, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME] = 1 << 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = 1 << 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME] = 0 << 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = 0 << 4, +}; + +static const bool s_hpack_entry_encodes_name_str[AWS_HPACK_ENTRY_TYPE_COUNT] = { + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = true, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = true, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = true, +}; + +static const bool s_hpack_entry_encodes_value_str[AWS_HPACK_ENTRY_TYPE_COUNT] = { + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME] = true, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = true, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME] = true, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = true, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME] = true, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = true, +}; + +int aws_hpack_pre_encode_header( + struct aws_hpack_context *context, + const struct aws_http_header *header, + enum aws_hpack_huffman_mode huffman_mode, + struct aws_hpack_encoder_cmd *cmd) { + + AWS_PRECONDITION(cmd); + AWS_PRECONDITION(context); + AWS_PRECONDITION(header); + + AWS_ZERO_STRUCT(*cmd); + + /* Search for header-field in tables */ + bool found_indexed_value; + cmd->data.header.index = aws_hpack_find_index(context, header, &found_indexed_value); + + if 
(header->compression != AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) { + /* If user doesn't want to use indexed value, then don't use it */ + found_indexed_value = false; + } + + if (cmd->data.header.index && found_indexed_value) { + /* Indexed header field - found header name and value together in a table */ + cmd->type = AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD; + + /* will encode the one index, and DONE. */ + const size_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; + cmd->encoded_length = aws_hpack_get_encoded_length_integer(cmd->data.header.index, num_prefix_bits); + return AWS_OP_SUCCESS; + } + + /* Else, Literal header field... */ + + if (cmd->data.header.index) { + /* Literal header field, indexed name - found header name in a table, but not header value */ + switch (header->compression) { + case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: + cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME; + break; + case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: + cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME; + break; + case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: + cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME; + break; + default: + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + /* first encode index of name */ + const size_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; + cmd->encoded_length = aws_hpack_get_encoded_length_integer(cmd->data.header.index, num_prefix_bits); + + } else { + /* Literal header field, new name - did not find header name or value in table, need to send both */ + switch (header->compression) { + case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: + cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME; + break; + case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: + cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME; + break; + case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: + cmd->type 
= AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME; + break; + default: + AWS_ASSERT(0); + } + + /* first encode 0 to show that header-name is not indexed */ + const size_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; + cmd->encoded_length = aws_hpack_get_encoded_length_integer(0, num_prefix_bits); + + /* then encode header-name string */ + cmd->data.header.name_cursor = header->name; + if (aws_hpack_pre_encode_string( + context, + header->name, + huffman_mode, + &cmd->data.header.name_encoded_str_length, + &cmd->data.header.name_uses_huffman, + &cmd->encoded_length)) { + return AWS_OP_ERR; + } + } + + /* then encode header-value string */ + cmd->data.header.value_cursor = header->value; + if (aws_hpack_pre_encode_string( + context, + header->value, + huffman_mode, + &cmd->data.header.value_encoded_str_length, + &cmd->data.header.value_uses_huffman, + &cmd->encoded_length)) { + + return AWS_OP_ERR; + } + + /* If using cache (aka incremental indexing), update dynamic table with this header */ + if (header->compression == AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) { + if (aws_hpack_insert_header(context, header)) { + return AWS_OP_ERR; + } + } + + return AWS_OP_SUCCESS; +} + +void aws_hpack_pre_encode_dynamic_table_resize( + struct aws_hpack_context *context, + size_t size, + struct aws_hpack_encoder_cmd *cmd) { + + AWS_PRECONDITION(context); + AWS_PRECONDITION(cmd); + + AWS_ZERO_STRUCT(*cmd); + + cmd->type = AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE; + cmd->encoded_length = + aws_hpack_get_encoded_length_integer(size, s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]); + cmd->data.dynamic_table_resize = size; +} + +int aws_hpack_encode( + struct aws_hpack_context *context, + const struct aws_hpack_encoder_cmd *cmd, + struct aws_byte_buf *output) { + + AWS_PRECONDITION(context); + AWS_PRECONDITION(cmd); + AWS_PRECONDITION(output); + + const size_t space_available = output->capacity - output->len; + if (space_available < 
cmd->encoded_length) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + /* Write starting bit pattern, along with first integer. */ + const uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[cmd->type]; + const uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; + const size_t first_integer = + cmd->type == AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE ? cmd->data.dynamic_table_resize : cmd->data.header.index; + output->buffer[output->len] = starting_bit_pattern; + if (aws_hpack_encode_integer(first_integer, num_prefix_bits, output)) { + return AWS_OP_ERR; + } + + /* Write name string (if "new-name" type) */ + if (s_hpack_entry_encodes_name_str[cmd->type]) { + if (aws_hpack_encode_string( + context, + cmd->data.header.name_cursor, + cmd->data.header.name_encoded_str_length, + cmd->data.header.name_uses_huffman, + output)) { + return AWS_OP_ERR; + } + } + + /* Write value string (if "literal" type) */ + if (s_hpack_entry_encodes_value_str[cmd->type]) { + if (aws_hpack_encode_string( + context, + cmd->data.header.value_cursor, + cmd->data.header.value_encoded_str_length, + cmd->data.header.value_uses_huffman, + output)) { + return AWS_OP_ERR; + } + } + + return AWS_OP_SUCCESS; +} From db65d0c9d25dcd1be695fd51bad874caa3ddd72a Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Wed, 11 Mar 2020 13:21:57 -0700 Subject: [PATCH 03/35] simplify decoder state machine loop --- source/h2_decoder.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/source/h2_decoder.c b/source/h2_decoder.c index 0cad21b5a..084acb629 100644 --- a/source/h2_decoder.c +++ b/source/h2_decoder.c @@ -130,6 +130,7 @@ struct aws_h2_decoder { bool is_server; struct aws_byte_buf scratch; const struct decoder_state *state; + bool state_changed; /* HTTP/2 connection preface must be first thing received (RFC-7540 3.5): * Server must receive (client must send): magic string, then SETTINGS frame. 
@@ -240,23 +241,16 @@ int aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data) AWS_FATAL_ASSERT(!decoder->has_errored); - /* Run decoder state machine until we're no longer consuming data or changing states. + /* Run decoder state machine until we're no longer changing states. * We don't simply loop `while(data->len)` because some states consume no data, * and these states should run even when there is no data left. */ - size_t prev_data_len = 0; - const struct decoder_state *prev_state = NULL; - while (prev_data_len != data->len || prev_state != decoder->state) { + for (decoder->state_changed = false; decoder->state_changed; decoder->state_changed = false) { - /* Stop if a state requires a minimum amount of data and there's nothing left to consume. */ const uint32_t bytes_required = decoder->state->bytes_required; AWS_ASSERT(bytes_required <= decoder->scratch.capacity); - if (bytes_required > 0 && data->len == 0) { - break; - } - const char *current_state_name = decoder->state->name; - prev_state = decoder->state; - prev_data_len = data->len; + const size_t prev_data_len = data->len; + (void)prev_data_len; if (!decoder->scratch.len && data->len >= bytes_required) { /* Easy case, there is no scratch and we have enough data, so just send it to the state */ @@ -334,6 +328,7 @@ static int s_decoder_switch_state(struct aws_h2_decoder *decoder, const struct d DECODER_LOGF(TRACE, decoder, "Moving from state '%s' to '%s'", decoder->state->name, state->name); decoder->scratch.len = 0; decoder->state = state; + decoder->state_changed = true; return AWS_OP_SUCCESS; } @@ -352,10 +347,12 @@ static int s_decoder_reset_state(struct aws_h2_decoder *decoder) { return aws_raise_error(AWS_ERROR_HTTP_INVALID_FRAME_SIZE); } + DECODER_LOGF(TRACE, decoder, "%s frame complete", aws_h2_frame_type_to_str(decoder->frame_in_progress.type)); + decoder->scratch.len = 0; decoder->state = &s_state_prefix; + decoder->state_changed = true; - DECODER_LOG(TRACE, decoder, 
"Resetting frame in progress"); AWS_ZERO_STRUCT(decoder->frame_in_progress); return AWS_OP_SUCCESS; } From d65ed7b3d28243e5ebc18fd7bad755957fd15949 Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Wed, 11 Mar 2020 13:24:42 -0700 Subject: [PATCH 04/35] hpack encoding expands output buffer ditch 2 pass approach, it was just too complicated --- include/aws/http/private/h2_frames.h | 3 +- include/aws/http/private/hpack.h | 85 +----- source/hpack.c | 441 ++++++++++++--------------- source/http.c | 3 + 4 files changed, 214 insertions(+), 318 deletions(-) diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index bb4a6579d..62649462d 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -17,6 +17,7 @@ */ #include +#include #include @@ -216,7 +217,7 @@ struct aws_h2_frame_encoder { struct aws_hpack_context *hpack; struct aws_h2_frame *current_frame; bool has_errored; - bool use_huffman; /* #TODO on string-by-string basis, use huffman if it makes the string smaller */ + enum aws_hpack_huffman_mode huffman_mode; }; typedef void aws_h2_frame_destroy_fn(struct aws_h2_frame *frame_base); diff --git a/include/aws/http/private/hpack.h b/include/aws/http/private/hpack.h index 3ccf22769..7db2bb0f8 100644 --- a/include/aws/http/private/hpack.h +++ b/include/aws/http/private/hpack.h @@ -43,43 +43,6 @@ struct aws_hpack_decode_result { } data; }; -enum aws_hpack_entry_type { - AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD, /* RFC-7541 6.1 */ - AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME, /* RFC-7541 6.2.1 */ - AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME, - AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME, /* RFC-7541 6.2.2 */ - AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME, - AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME, /* RFC-7541 6.2.3 */ - AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME, - 
AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE, /* RFC-7541 6.3 */ - AWS_HPACK_ENTRY_TYPE_COUNT, -}; - -/** - * HPACK encoding is performed as a 2 step process. - * The first step how each entry will be encoded, and how long the entry will be. - * The second step actully encodes output to a buffer. - */ -struct aws_hpack_encoder_cmd { - size_t encoded_length; - - union { - struct { - struct aws_byte_cursor name_cursor; /* name to encode (if new-name type) */ - struct aws_byte_cursor value_cursor; /* value to encode (if literal type) */ - size_t index; /* index (if indexed type) */ - size_t name_encoded_str_length; /* length of encoded name string, excluding integer (if new-name type) */ - size_t value_encoded_str_length; /* length of encoded value string, excluding integer (if literal type) */ - bool name_uses_huffman; /* encode new name with huffman (if new-name type) */ - bool value_uses_huffman; /* encode value with huffman (if literal type) */ - } header; - - size_t dynamic_table_resize; - } data; - - uint8_t type; /* aws_hpack_entry_type */ -}; - /** * Controls whether non-indexed strings will use Huffman encoding. * In SMALLEST mode, strings will only be sent with Huffman encoding if it makes them smaller. @@ -127,36 +90,25 @@ int aws_hpack_decode( struct aws_hpack_decode_result *result); /** - * Initialize cmd with details for encoding a header-field. + * Encode a header-field into the output. * This function will mutate the hpack context, so any error is unrecoverable. - * cmds must be fed to aws_hpack_encode() in the order they are initialized. + * Note that output will be dynamically resized if it's too short. */ AWS_HTTP_API -int aws_hpack_pre_encode_header( +int aws_hpack_encode_header( struct aws_hpack_context *context, const struct aws_http_header *header, enum aws_hpack_huffman_mode huffman_mode, - struct aws_hpack_encoder_cmd *cmd); + struct aws_byte_buf *output); /** - * Initialize cmd with details for encoding a Dynamic Table Size Update (RFC-7541 6.3). 
- * cmds must be fed to aws_hpack_encode() in the order they are initialized. + * Encode a Dynamic Table Size Update (RFC-7541 6.3) into the output. + * Note that output will be dynamically resized if it's too short. */ AWS_HTTP_API -void aws_hpack_pre_encode_dynamic_table_resize( +int aws_hpack_encode_dynamic_table_resize( struct aws_hpack_context *context, size_t size, - struct aws_hpack_encoder_cmd *cmd); - -/** - * Encode a cmd to the output buffer. - * At least cmd->encode_length must be available in the buffer. - * cmds must have been initialized in the order that they are passed to aws_hpack_encode(). - */ -AWS_HTTP_API -int aws_hpack_encode( - struct aws_hpack_context *context, - const struct aws_hpack_encoder_cmd *cmd, struct aws_byte_buf *output); /* Returns the hpack size of a header (name.len + value.len + 32) [4.1] */ @@ -181,12 +133,10 @@ int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_ AWS_HTTP_API int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size); -/* Public for testing purposes */ -AWS_HTTP_API -size_t aws_hpack_get_encoded_length_integer(uint64_t integer, uint8_t prefix_size); - +/* Public for testing purposes. + * Output will be dynamically resized if it's too short */ AWS_HTTP_API -int aws_hpack_encode_integer(uint64_t integer, uint8_t prefix_size, struct aws_byte_buf *output); +int aws_hpack_encode_integer(uint64_t integer, uint8_t starting_bits, uint8_t prefix_size, struct aws_byte_buf *output); /* Public for testing purposes */ AWS_HTTP_API @@ -197,22 +147,13 @@ int aws_hpack_decode_integer( uint64_t *integer, bool *complete); -/* #TODOD remove from public API? */ -AWS_HTTP_API -int aws_hpack_pre_encode_string( - struct aws_hpack_context *context, - struct aws_byte_cursor to_encode, - enum aws_hpack_huffman_mode huffman_mode, - size_t *out_str_length, - bool *out_use_huffman, - size_t *in_out_sum_total_length); - +/* Public for testing purposes. 
+ * Output will be dynamically resized if it's too short */ AWS_HTTP_API int aws_hpack_encode_string( struct aws_hpack_context *context, struct aws_byte_cursor to_encode, - size_t encoded_str_length, - bool huffman_encode, + enum aws_hpack_huffman_mode huffman_mode, struct aws_byte_buf *output); /* Public for testing purposes */ diff --git a/source/hpack.c b/source/hpack.c index 16a5fe62f..051649050 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -51,6 +51,30 @@ static uint8_t s_masked_right_bits_u8(uint8_t num_masked_bits) { return UINT8_MAX >> cut_bits; } +static int s_append_u8_dynamic(struct aws_byte_buf *output, uint8_t u8) { + struct aws_byte_cursor cursor = aws_byte_cursor_from_array(&u8, 1); + return aws_byte_buf_append_dynamic(output, &cursor); +} + +/* If buffer isn't big enough, grow it intelligently */ +static int s_ensure_space(struct aws_byte_buf *output, size_t required_space) { + size_t available_space = output->capacity - output->len; + if (required_space <= available_space) { + return AWS_OP_SUCCESS; + } + + /* Capacity must grow to at least this size */ + size_t required_capacity; + if (aws_add_size_checked(output->len, required_space, &required_capacity)) { + return AWS_OP_ERR; + } + + /* Prefer to double capacity, but if that's not enough grow to exactly required_capacity */ + size_t double_capacity = aws_add_size_saturating(output->capacity, output->capacity); + size_t reserve = required_capacity > double_capacity ? 
required_capacity : double_capacity; + return aws_byte_buf_reserve(output, reserve); +} + size_t aws_hpack_get_encoded_length_integer(uint64_t integer, uint8_t prefix_size) { const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size); @@ -70,37 +94,38 @@ size_t aws_hpack_get_encoded_length_integer(uint64_t integer, uint8_t prefix_siz } } -int aws_hpack_encode_integer(uint64_t integer, uint8_t prefix_size, struct aws_byte_buf *output) { +int aws_hpack_encode_integer( + uint64_t integer, + uint8_t starting_bits, + uint8_t prefix_size, + struct aws_byte_buf *output) { AWS_ASSERT(prefix_size <= 8); - const struct aws_byte_buf output_backup = *output; - - if (output->len == output->capacity) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size); + AWS_ASSERT((starting_bits & prefix_mask) == 0); + + const size_t output_len_backup = output->len; if (integer < prefix_mask) { /* If the integer fits inside the specified number of bits but won't be all 1's, just write it */ /* Just write out the bits we care about */ - output->buffer[output->len] = (output->buffer[output->len] & ~prefix_mask) | (uint8_t)integer; - ++output->len; + uint8_t first_byte = starting_bits | (uint8_t)integer; + if (s_append_u8_dynamic(output, first_byte)) { + goto error; + } } else { /* Set all of the bits in the first octet to 1 */ - output->buffer[output->len] = (output->buffer[output->len] & ~prefix_mask) | prefix_mask; - ++output->len; + uint8_t first_byte = starting_bits | prefix_mask; + if (s_append_u8_dynamic(output, first_byte)) { + goto error; + } integer -= prefix_mask; const uint64_t hi_57bit_mask = UINT64_MAX - (UINT8_MAX >> 1); do { - if (output->len == output->capacity) { - *output = output_backup; - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } - /* Take top 7 bits from the integer */ uint8_t this_octet = integer % 128; if (integer & hi_57bit_mask) { @@ -108,7 +133,9 @@ int aws_hpack_encode_integer(uint64_t 
integer, uint8_t prefix_size, struct aws_b this_octet += 128; } - aws_byte_buf_write_u8(output, this_octet); + if (s_append_u8_dynamic(output, this_octet)) { + goto error; + } /* Remove the written bits */ integer >>= 7; @@ -116,6 +143,9 @@ int aws_hpack_encode_integer(uint64_t integer, uint8_t prefix_size, struct aws_b } return AWS_OP_SUCCESS; +error: + output->len = output_len_backup; + return AWS_OP_ERR; } struct aws_http_header s_static_header_table[] = { @@ -433,6 +463,8 @@ static const struct aws_http_header *s_get_header_u64(const struct aws_hpack_con return aws_hpack_get_header(context, (size_t)index); } +/* #TODO pass option whether or not to search for values + * #TODO I suspect we're not 100% doing the right thing with empty values */ size_t aws_hpack_find_index( const struct aws_hpack_context *context, const struct aws_http_header *header, @@ -787,105 +819,90 @@ int aws_hpack_decode_integer( return AWS_OP_SUCCESS; } -int aws_hpack_pre_encode_string( +int aws_hpack_encode_string( struct aws_hpack_context *context, struct aws_byte_cursor to_encode, enum aws_hpack_huffman_mode huffman_mode, - size_t *out_str_length, - bool *out_use_huffman, - size_t *in_out_sum_total_length) { + struct aws_byte_buf *output) { AWS_PRECONDITION(context); AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode)); - AWS_PRECONDITION(out_str_length); - AWS_PRECONDITION(out_use_huffman); - AWS_PRECONDITION(in_out_sum_total_length); + AWS_PRECONDITION(output); - /* Get length of encoded string */ + const size_t output_len_backup = output->len; + + /* Determine length of encoded string (and whether or not to use huffman) */ + uint8_t use_huffman; + size_t str_length; switch (huffman_mode) { case AWS_HPACK_HUFFMAN_NEVER: - *out_str_length = to_encode.len; - *out_use_huffman = false; + use_huffman = 0; + str_length = to_encode.len; break; + case AWS_HPACK_HUFFMAN_ALWAYS: - *out_str_length = aws_huffman_get_encoded_length(&context->encoder, to_encode); - *out_use_huffman = true; + 
use_huffman = 1; + str_length = aws_huffman_get_encoded_length(&context->encoder, to_encode); break; + case AWS_HPACK_HUFFMAN_SMALLEST: - *out_str_length = aws_huffman_get_encoded_length(&context->encoder, to_encode); - if (*out_str_length < to_encode.len) { - *out_use_huffman = true; + str_length = aws_huffman_get_encoded_length(&context->encoder, to_encode); + if (str_length < to_encode.len) { + use_huffman = 1; } else { - *out_str_length = to_encode.len; - *out_use_huffman = false; + str_length = to_encode.len; + use_huffman = 0; } break; default: - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - } - - /* Get length of encoded integer */ - size_t int_length = aws_hpack_get_encoded_length_integer(*out_str_length, 7); - - /* Sum */ - size_t total_length; - if (aws_add_size_checked(int_length, *out_str_length, &total_length)) { - return AWS_OP_ERR; - } - - if (aws_add_size_checked(total_length, *in_out_sum_total_length, in_out_sum_total_length)) { - return AWS_OP_ERR; - } - - return AWS_OP_SUCCESS; -} - -int aws_hpack_encode_string( - struct aws_hpack_context *context, - struct aws_byte_cursor to_encode, - size_t encoded_str_length, - bool huffman_encode, - struct aws_byte_buf *output) { - - AWS_PRECONDITION(context); - AWS_PRECONDITION(output); - - if (output->len == output->capacity) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + goto error; } - /* Write the use_huffman bit */ - output->buffer[output->len] = huffman_encode << 7; + /* + * String literals are encoded like so (RFC-7541 5.2): + * H is whether or not data is huffman-encoded. 
+ * + * 0 1 2 3 4 5 6 7 + * +---+---+---+---+---+---+---+---+ + * | H | String Length (7+) | + * +---+---------------------------+ + * | String Data (Length octets) | + * +-------------------------------+ + */ - /* Write the length of the string */ - if (aws_hpack_encode_integer(encoded_str_length, 7, output)) { - return AWS_OP_ERR; + /* Encode string length */ + uint8_t starting_bits = use_huffman << 7; + if (aws_hpack_encode_integer(str_length, starting_bits, 7, output)) { + goto error; } - /* Write the string itself */ - if (to_encode.len) { - if (huffman_encode) { - aws_huffman_encoder_reset(&context->encoder); - int result = aws_huffman_encode(&context->encoder, &to_encode, output); - if (result) { - return AWS_OP_ERR; + /* Encode string data */ + if (str_length > 0) { + if (use_huffman) { + /* Huffman encoder doesn't grow buffer, so we ensure it's big enough here */ + if (s_ensure_space(output, str_length)) { + goto error; } - /* Huffman supports streaming encoding, but hpack only does whole-string encoding right now */ - if (to_encode.len > 0) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + if (aws_huffman_encode(&context->encoder, &to_encode, output)) { + goto error; } + } else { - bool result = aws_byte_buf_write_from_whole_cursor(output, to_encode); - if (!result) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - return AWS_OP_ERR; + if (aws_byte_buf_append_dynamic(output, &to_encode)) { + goto error; } } } return AWS_OP_SUCCESS; + +error: + output->len = output_len_backup; + aws_huffman_encoder_reset(&context->encoder); + return AWS_OP_ERR; } int aws_hpack_decode_string( @@ -1205,221 +1222,155 @@ int aws_hpack_decode( return AWS_OP_SUCCESS; } -/* +/* All types that HPACK might encode/decode (RFC-7541 6 - Binary Format) */ +enum aws_hpack_entry_type { + AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD, /* RFC-7541 6.1 */ + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING, /* RFC-7541 6.2.1 */ + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING, 
/* RFC-7541 6.2.2 */ + AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED, /* RFC-7541 6.2.3 */ + AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE, /* RFC-7541 6.3 */ + AWS_HPACK_ENTRY_TYPE_COUNT, +}; + +/** + * First byte each entry type looks like this (RFC-7541 6): + * The "xxxxx" part is the "N-bit prefix" of the entry's first encoded integer. + * * 1xxxxxxx: Indexed Header Field Representation * 01xxxxxx: Literal Header Field with Incremental Indexing * 001xxxxx: Dynamic Table Size Update * 0001xxxx: Literal Header Field Never Indexed - * 0000xxxx: Literal Header Field without Indexing */ -static const uint8_t s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_TYPE_COUNT] = { - [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 7, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME] = 6, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = 6, - [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 5, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME] = 4, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = 4, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME] = 4, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = 4, -}; - + * 0000xxxx: Literal Header Field without Indexing + */ static const uint8_t s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_TYPE_COUNT] = { [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 1 << 7, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME] = 1 << 6, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = 1 << 6, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 1 << 6, [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 1 << 5, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME] = 1 << 4, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = 1 << 4, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME] = 0 << 4, - 
[AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = 0 << 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 1 << 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 0 << 4, }; -static const bool s_hpack_entry_encodes_name_str[AWS_HPACK_ENTRY_TYPE_COUNT] = { - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = true, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = true, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = true, +static const uint8_t s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_TYPE_COUNT] = { + [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 7, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 6, + [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 5, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 4, + [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 4, }; -static const bool s_hpack_entry_encodes_value_str[AWS_HPACK_ENTRY_TYPE_COUNT] = { - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME] = true, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME] = true, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME] = true, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME] = true, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME] = true, - [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME] = true, -}; +static int s_convert_http_compression_to_literal_entry_type( + enum aws_http_header_compression compression, + enum aws_hpack_entry_type *out_entry_type) { + + switch (compression) { + case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: + *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING; + return AWS_OP_SUCCESS; -int aws_hpack_pre_encode_header( + case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: + *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING; + return AWS_OP_SUCCESS; + + case 
AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: + *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED; + return AWS_OP_SUCCESS; + } + + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); +} + +int aws_hpack_encode_header( struct aws_hpack_context *context, const struct aws_http_header *header, enum aws_hpack_huffman_mode huffman_mode, - struct aws_hpack_encoder_cmd *cmd) { + struct aws_byte_buf *output) { - AWS_PRECONDITION(cmd); AWS_PRECONDITION(context); AWS_PRECONDITION(header); + AWS_PRECONDITION(output); - AWS_ZERO_STRUCT(*cmd); + size_t output_len_backup = output->len; /* Search for header-field in tables */ bool found_indexed_value; - cmd->data.header.index = aws_hpack_find_index(context, header, &found_indexed_value); + size_t header_index = aws_hpack_find_index(context, header, &found_indexed_value); if (header->compression != AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) { /* If user doesn't want to use indexed value, then don't use it */ found_indexed_value = false; } - if (cmd->data.header.index && found_indexed_value) { - /* Indexed header field - found header name and value together in a table */ - cmd->type = AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD; + if (header_index && found_indexed_value) { + /* Indexed header field */ + const enum aws_hpack_entry_type entry_type = AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD; + + /* encode the one index (along with the entry type), and we're done! */ + uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[entry_type]; + uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[entry_type]; + if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) { + goto error; + } - /* will encode the one index, and DONE. */ - const size_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; - cmd->encoded_length = aws_hpack_get_encoded_length_integer(cmd->data.header.index, num_prefix_bits); return AWS_OP_SUCCESS; } /* Else, Literal header field... 
*/ - if (cmd->data.header.index) { - /* Literal header field, indexed name - found header name in a table, but not header value */ - switch (header->compression) { - case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: - cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_INDEXED_NAME; - break; - case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: - cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_INDEXED_NAME; - break; - case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: - cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_INDEXED_NAME; - break; - default: - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - } + /* determine exactly which type of literal header-field to encode. */ + enum aws_hpack_entry_type literal_entry_type; + if (s_convert_http_compression_to_literal_entry_type(header->compression, &literal_entry_type)) { + goto error; + } + + /* the entry type makes up the first few bits of the next integer we encode */ + uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[literal_entry_type]; + uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[literal_entry_type]; - /* first encode index of name */ - const size_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; - cmd->encoded_length = aws_hpack_get_encoded_length_integer(cmd->data.header.index, num_prefix_bits); + if (header_index) { + /* Literal header field, indexed name */ + /* first encode the index of name */ + if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) { + goto error; + } } else { - /* Literal header field, new name - did not find header name or value in table, need to send both */ - switch (header->compression) { - case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: - cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_INCREMENTAL_INDEXING_NEW_NAME; - break; - case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: - cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING_NEW_NAME; - break; - case 
AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: - cmd->type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED_NEW_NAME; - break; - default: - AWS_ASSERT(0); + /* Literal header field, new name */ + + /* first encode index of 0 to indicate that header-name is not indexed */ + if (aws_hpack_encode_integer(0, starting_bit_pattern, num_prefix_bits, output)) { + goto error; } - /* first encode 0 to show that header-name is not indexed */ - const size_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; - cmd->encoded_length = aws_hpack_get_encoded_length_integer(0, num_prefix_bits); - - /* then encode header-name string */ - cmd->data.header.name_cursor = header->name; - if (aws_hpack_pre_encode_string( - context, - header->name, - huffman_mode, - &cmd->data.header.name_encoded_str_length, - &cmd->data.header.name_uses_huffman, - &cmd->encoded_length)) { - return AWS_OP_ERR; + /* next encode header-name string */ + if (aws_hpack_encode_string(context, header->name, huffman_mode, output)) { + goto error; } } - /* then encode header-value string */ - cmd->data.header.value_cursor = header->value; - if (aws_hpack_pre_encode_string( - context, - header->value, - huffman_mode, - &cmd->data.header.value_encoded_str_length, - &cmd->data.header.value_uses_huffman, - &cmd->encoded_length)) { - - return AWS_OP_ERR; + /* then encode header-value string, and we're done encoding! 
*/ + if (aws_hpack_encode_string(context, header->value, huffman_mode, output)) { + goto error; } - /* If using cache (aka incremental indexing), update dynamic table with this header */ - if (header->compression == AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) { + /* if "incremental indexing" type, insert header into the dynamic table */ + if (AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING == literal_entry_type) { if (aws_hpack_insert_header(context, header)) { - return AWS_OP_ERR; + goto error; } } return AWS_OP_SUCCESS; +error: + output->len = output_len_backup; + return AWS_OP_ERR; } -void aws_hpack_pre_encode_dynamic_table_resize( - struct aws_hpack_context *context, - size_t size, - struct aws_hpack_encoder_cmd *cmd) { - - AWS_PRECONDITION(context); - AWS_PRECONDITION(cmd); - - AWS_ZERO_STRUCT(*cmd); - - cmd->type = AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE; - cmd->encoded_length = - aws_hpack_get_encoded_length_integer(size, s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]); - cmd->data.dynamic_table_resize = size; -} - -int aws_hpack_encode( - struct aws_hpack_context *context, - const struct aws_hpack_encoder_cmd *cmd, - struct aws_byte_buf *output) { +int aws_hpack_encode_dynamic_table_resize(struct aws_hpack_context *context, size_t size, struct aws_byte_buf *output) { AWS_PRECONDITION(context); - AWS_PRECONDITION(cmd); AWS_PRECONDITION(output); - const size_t space_available = output->capacity - output->len; - if (space_available < cmd->encoded_length) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } - - /* Write starting bit pattern, along with first integer. */ - const uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[cmd->type]; - const uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[cmd->type]; - const size_t first_integer = - cmd->type == AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE ? 
cmd->data.dynamic_table_resize : cmd->data.header.index; - output->buffer[output->len] = starting_bit_pattern; - if (aws_hpack_encode_integer(first_integer, num_prefix_bits, output)) { - return AWS_OP_ERR; - } - - /* Write name string (if "new-name" type) */ - if (s_hpack_entry_encodes_name_str[cmd->type]) { - if (aws_hpack_encode_string( - context, - cmd->data.header.name_cursor, - cmd->data.header.name_encoded_str_length, - cmd->data.header.name_uses_huffman, - output)) { - return AWS_OP_ERR; - } - } - - /* Write value string (if "literal" type) */ - if (s_hpack_entry_encodes_value_str[cmd->type]) { - if (aws_hpack_encode_string( - context, - cmd->data.header.value_cursor, - cmd->data.header.value_encoded_str_length, - cmd->data.header.value_uses_huffman, - output)) { - return AWS_OP_ERR; - } - } - - return AWS_OP_SUCCESS; + uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; + uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; + return aws_hpack_encode_integer(size, starting_bit_pattern, num_prefix_bits, output); } diff --git a/source/http.c b/source/http.c index 7174778a1..e9bb29184 100644 --- a/source/http.c +++ b/source/http.c @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -418,6 +419,7 @@ void aws_http_library_init(struct aws_allocator *alloc) { s_library_initialized = true; aws_io_library_init(alloc); + aws_compression_library_init(alloc); aws_register_error_info(&s_error_list); aws_register_log_subject_info_list(&s_log_subject_list); s_methods_init(alloc); @@ -438,6 +440,7 @@ void aws_http_library_clean_up(void) { s_headers_clean_up(); s_versions_clean_up(); aws_hpack_static_table_clean_up(); + aws_compression_library_clean_up(); aws_io_library_clean_up(); } From 4ac13c347b2d68bf74dacab5d73def05f92c5073 Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Fri, 13 Mar 2020 10:57:17 -0700 Subject: [PATCH 05/35] Revamp HEADERS/PUSH_PROMISE/DATA 
frames --- include/aws/http/private/h2_frames.h | 41 +- include/aws/http/private/hpack.h | 21 +- source/h2_connection.c | 1 + source/h2_frames.c | 882 +++++++++++++++------------ source/hpack.c | 28 +- 5 files changed, 543 insertions(+), 430 deletions(-) diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index 62649462d..3d666a7b0 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -16,8 +16,8 @@ * permissions and limitations under the License. */ -#include #include +#include #include @@ -79,6 +79,7 @@ enum aws_h2_settings { #define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) #define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) +#define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) /* This magic string must be the very first thing a client sends to the server. * See RFC-7540 3.5 - HTTP/2 Connection Preface */ @@ -101,8 +102,20 @@ struct aws_h2_frame_priority_settings { }; struct aws_h2_frame_header_block { - /* array_list of aws_http_header */ - struct aws_array_list header_fields; + const struct aws_http_headers *headers; + + /* state */ + + enum { + AWS_H2_HEADER_BLOCK_STATE_INIT, + AWS_H2_HEADER_BLOCK_STATE_FIRST_FRAME, + AWS_H2_HEADER_BLOCK_STATE_CONTINUATION, + AWS_H2_HEADER_BLOCK_STATE_COMPLETE, + AWS_H2_HEADER_BLOCK_STATE_ERROR, + } state; + + struct aws_byte_buf whole_encoded_block; /* entire header block is encoded here */ + struct aws_byte_cursor encoded_block_cursor; /* tracks progress sending encoded header-block in fragments */ }; /** @@ -117,13 +130,13 @@ struct aws_h2_frame { struct aws_linked_list_node node; }; -/* Represents a HEADERS frame */ +/* Represents a HEADERS header-block. 
+ * (HEADERS frame followed 0 or more CONTINUATION frames) */ struct aws_h2_frame_headers { struct aws_h2_frame base; /* Flags */ bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */ - bool end_headers; /* AWS_H2_FRAME_F_END_HEADERS */ bool has_priority; /* AWS_H2_FRAME_F_PRIORITY */ /* Payload */ @@ -166,13 +179,11 @@ struct aws_h2_frame_settings { size_t settings_count; }; -/* Represents a PUSH_PROMISE frame */ +/* Represents a PUSH_PROMISE header-block. + * (PUSH_PROMISE frame followed by 0 or more CONTINUATION frames) */ struct aws_h2_frame_push_promise { struct aws_h2_frame base; - /* Flags */ - bool end_headers; /* AWS_H2_FRAME_F_END_HEADERS */ - /* Payload */ uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ uint32_t promised_stream_id; @@ -237,6 +248,10 @@ AWS_EXTERN_C_BEGIN AWS_HTTP_API const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type); +/* Raises AWS_ERROR_INVALID_ARGUMENT if stream_id is 0 or exceeds AWS_H2_MAX_STREAM_ID */ +AWS_HTTP_API +int aws_h2_validate_stream_id(uint32_t stream_id); + /** * The process of encoding a frame looks like: * 1. 
Create a encoder object on the stack and initialize with aws_h2_frame_encoder_init @@ -277,8 +292,8 @@ int aws_h2_encode_data_frame( struct aws_h2_frame_encoder *encoder, uint32_t stream_id, struct aws_input_stream *body_stream, - bool end_stream, - uint8_t padding, + bool body_ends_stream, + uint8_t pad_length, struct aws_byte_buf *output, bool *body_complete); @@ -295,7 +310,7 @@ struct aws_h2_frame *aws_h2_frame_new_headers( uint32_t stream_id, const struct aws_http_headers *headers, bool end_stream, - uint8_t padding, + uint8_t pad_length, const struct aws_h2_frame_priority_settings *optional_priority); AWS_HTTP_API @@ -327,7 +342,7 @@ struct aws_h2_frame *aws_h2_frame_new_push_promise( uint32_t stream_id, uint32_t promised_stream_id, const struct aws_http_headers *headers, - uint8_t padding); + uint8_t pad_length); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_ping( diff --git a/include/aws/http/private/hpack.h b/include/aws/http/private/hpack.h index 7db2bb0f8..eec53ec86 100644 --- a/include/aws/http/private/hpack.h +++ b/include/aws/http/private/hpack.h @@ -15,7 +15,7 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ -#include +#include struct aws_byte_buf; struct aws_byte_cursor; @@ -90,25 +90,24 @@ int aws_hpack_decode( struct aws_hpack_decode_result *result); /** - * Encode a header-field into the output. - * This function will mutate the hpack context, so any error is unrecoverable. + * Must be called at start of header-block. + * If the table has been resized, then a Dynamic Table Size Update (RFC-7541 6.3) is encoded. + * This behavior is described in RFC-7541 4.2. * Note that output will be dynamically resized if it's too short. 
*/ AWS_HTTP_API -int aws_hpack_encode_header( - struct aws_hpack_context *context, - const struct aws_http_header *header, - enum aws_hpack_huffman_mode huffman_mode, - struct aws_byte_buf *output); +int aws_hpack_encode_header_block_start(struct aws_hpack_context *context, struct aws_byte_buf *output); /** - * Encode a Dynamic Table Size Update (RFC-7541 6.3) into the output. + * Encode a header-field into the output. + * This function will mutate the hpack context, so any error is unrecoverable. * Note that output will be dynamically resized if it's too short. */ AWS_HTTP_API -int aws_hpack_encode_dynamic_table_resize( +int aws_hpack_encode_header( struct aws_hpack_context *context, - size_t size, + const struct aws_http_header *header, + enum aws_hpack_huffman_mode huffman_mode, struct aws_byte_buf *output); /* Returns the hpack size of a header (name.len + value.len + 32) [4.1] */ diff --git a/source/h2_connection.c b/source/h2_connection.c index 427a310c7..0c499fd27 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -430,6 +430,7 @@ static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enu * - Stream is complete if it is also done receiving (weird edge case, but theoretically possible) * Else stream has not sent all data: * - Move stream to back of outgoing_streams_list ("round-robin" DATA frames from available streams) + * - Beware getting into a loop, don't read from the same stream twice */ CONNECTION_LOG(ERROR, connection, "DATA frames not supported yet"); aws_raise_error(AWS_ERROR_UNIMPLEMENTED); diff --git a/source/h2_frames.c b/source/h2_frames.c index 94f847dad..ca732035f 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -20,6 +20,8 @@ #include +#include + #include /* #TODO: Don't raise AWS_H2_ERR_* enums, raise AWS_ERROR_* . 
@@ -41,9 +43,11 @@ const struct aws_byte_cursor aws_h2_connection_preface_client_string = static const uint32_t s_31_bit_mask = UINT32_MAX >> 1; static const uint32_t s_u32_top_bit_mask = UINT32_MAX << 31; -static const uint8_t s_indexed_header_field_mask = 1 << 7; -static const uint8_t s_literal_save_field_mask = 1 << 6; -static const uint8_t s_literal_no_forward_save_mask = 1 << 4; +/* All frames begin with a fixed 9-octet prefix */ +static const size_t s_frame_prefix_length = 9; + +/* Bytes to initially reserve for encoding of an entire header block. Buffer will grow if necessary. */ +static const size_t s_encoded_header_block_reserve = 128; /* Value pulled from thin air */ #define DEFINE_FRAME_VTABLE(NAME) \ static aws_h2_frame_destroy_fn s_frame_##NAME##_destroy; \ @@ -53,6 +57,13 @@ static const uint8_t s_literal_no_forward_save_mask = 1 << 4; .encode = s_frame_##NAME##_encode, \ } +static int s_frame_prefix_encode( + enum aws_h2_frame_type type, + uint32_t stream_id, + size_t length, + uint8_t flags, + struct aws_byte_buf *output); + const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type) { switch (type) { case AWS_H2_FRAME_T_DATA: @@ -80,6 +91,43 @@ const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type) { } } +int aws_h2_validate_stream_id(uint32_t stream_id) { + if (stream_id == 0 || stream_id > AWS_H2_STREAM_ID_MAX) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + return AWS_OP_SUCCESS; +} + +/** + * Determine max frame payload length that will: + * 1) fit in output's available space + * 2) obey encoders current MAX_FRAME_SIZE + * + * Assumes no part of the frame has been written yet to output. + * The total length of the would be: returned-payload-len + s_frame_prefix_length + * + * Raises error if there is not enough space available for even a frame prefix. 
+ */ +static int s_get_max_contiguous_payload_length( + const struct aws_h2_frame_encoder *encoder, + const struct aws_byte_buf *output, + size_t *max_payload_length) { + + const size_t space_available = output->capacity - output->len; + + size_t max_payload_given_space_available; + if (aws_sub_size_checked(space_available, s_frame_prefix_length, &max_payload_given_space_available)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + /* #TODO actually check against encoder's current MAX_FRAME_SIZE */ + (void)encoder; + const size_t max_payload_given_settings = AWS_H2_PAYLOAD_MAX; + + *max_payload_length = aws_min_size(max_payload_given_space_available, max_payload_given_settings); + return AWS_OP_SUCCESS; +} + /*********************************************************************************************************************** * Priority **********************************************************************************************************************/ @@ -109,181 +157,268 @@ static int s_frame_priority_settings_encode( return AWS_OP_SUCCESS; } -#if 0 /*********************************************************************************************************************** * Header Block **********************************************************************************************************************/ -int aws_h2_frame_header_block_init(struct aws_h2_frame_header_block *header_block, struct aws_allocator *allocator) { +int aws_h2_frame_header_block_init( + struct aws_h2_frame_header_block *header_block, + struct aws_allocator *allocator, + const struct aws_http_headers *headers) { + AWS_PRECONDITION(header_block); AWS_PRECONDITION(allocator); + AWS_PRECONDITION(headers); - return aws_array_list_init_dynamic(&header_block->header_fields, allocator, 0, sizeof(struct aws_http_header)); -} -void aws_h2_frame_header_block_clean_up(struct aws_h2_frame_header_block *header_block) { - AWS_PRECONDITION(header_block); + AWS_ZERO_STRUCT(*header_block); + if 
(aws_byte_buf_init(&header_block->whole_encoded_block, allocator, s_encoded_header_block_reserve)) { + return AWS_OP_ERR; + } - aws_array_list_clean_up(&header_block->header_fields); + aws_http_headers_acquire((struct aws_http_headers *)headers); + header_block->headers = headers; + return AWS_OP_SUCCESS; } -int aws_h2_frame_header_block_get_encoded_length( - const struct aws_h2_frame_header_block *header_block, - const struct aws_h2_frame_encoder *encoder, - size_t *length) { +void aws_h2_frame_header_block_clean_up(struct aws_h2_frame_header_block *header_block) { AWS_PRECONDITION(header_block); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(length); + aws_http_headers_release((struct aws_http_headers *)header_block->headers); + aws_byte_buf_clean_up(&header_block->whole_encoded_block); + AWS_ZERO_STRUCT(*header_block); +} - *length = 0; +/** + * Encode whole entire header-block. + * The output is intended to be copied into actual frames by something that cares about frame size. + * Output will be dynamically resized if it's too short. 
+ */ +static int s_pre_encode_whole_header_block( + struct aws_h2_frame_encoder *encoder, + const struct aws_http_headers *headers, + struct aws_byte_buf *output) { - AWS_PRECONDITION(header_block); - AWS_PRECONDITION(length); + if (aws_hpack_encode_header_block_start(encoder->hpack, output)) { + return AWS_OP_ERR; + } - const size_t num_headers = aws_array_list_length(&header_block->header_fields); + const size_t num_headers = aws_http_headers_count(headers); for (size_t i = 0; i < num_headers; ++i) { + struct aws_http_header header; + aws_http_headers_get_index(headers, i, &header); + if (aws_hpack_encode_header(encoder->hpack, &header, encoder->huffman_mode, output)) { + return AWS_OP_ERR; + } + } - const struct aws_http_header *field = NULL; - aws_array_list_get_at_ptr(&header_block->header_fields, (void **)&field, i); - AWS_ASSERT(field); - - bool found_value = false; - const size_t index = aws_hpack_find_index(encoder->hpack, field, &found_value); - - uint8_t prefix_size; - /* If a value was found, this is an indexed header */ - if (found_value) { - prefix_size = 7; - } else { - /* If not indexed, determine the appropriate flags and prefixes */ - switch (field->compression) { - case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: - prefix_size = 6; - break; - case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: - prefix_size = 4; - break; - case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: - prefix_size = 5; - break; - default: - /* Unreachable */ - AWS_FATAL_ASSERT(false); - } + return AWS_OP_SUCCESS; +} + +/* Encode the header-block's next frame (or encode nothing if output buffer is too small). + * Pass in the details of the block's first frame (HEADERS or PUSH_PROMISE). 
*/ +int s_encode_single_header_block_frame( + struct aws_h2_frame_header_block *header_block, + const struct aws_h2_frame *first_frame, + uint8_t first_frame_pad_length, + const struct aws_h2_frame_priority_settings *first_frame_priority_settings, + bool first_frame_end_stream, + const uint32_t *first_frame_promised_stream_id, + struct aws_h2_frame_encoder *encoder, + struct aws_byte_buf *output, + bool *waiting_for_more_space) { + + AWS_ASSERT(first_frame->type == AWS_H2_FRAME_T_HEADERS || first_frame->type == AWS_H2_FRAME_T_PUSH_PROMISE); + + /* + * Figure out the details of the next frame to encode. + * The first frame will be either HEADERS or PUSH_PROMISE. + * All subsequent frames will be CONTINUATION + */ + + uint32_t stream_id = first_frame->stream_id; + enum aws_h2_frame_type frame_type; + uint8_t flags = 0; + uint8_t pad_length = 0; + const struct aws_h2_frame_priority_settings *priority_settings = NULL; + const uint32_t *promised_stream_id = NULL; + uint32_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */ + + if (header_block->state == AWS_H2_HEADER_BLOCK_STATE_FIRST_FRAME) { + frame_type = first_frame->type; + + if (first_frame_pad_length > 0) { + flags |= AWS_H2_FRAME_F_PADDED; + pad_length = first_frame_pad_length; + payload_overhead += 1 + pad_length; } - /* Write the index if indexed, or 0 to signal literal name */ - *length += aws_hpack_get_encoded_length_integer(index, prefix_size); + if (first_frame_priority_settings) { + priority_settings = first_frame_priority_settings; + flags |= AWS_H2_FRAME_F_PRIORITY; + payload_overhead += s_frame_priority_settings_size; + } - if (!found_value) { - /* If not an indexed header, check if the name needs to be written */ - if (!index) { - *length += aws_hpack_get_encoded_length_string(encoder->hpack, field->name, encoder->use_huffman); - } + if (first_frame_end_stream) { + flags |= AWS_H2_FRAME_F_END_STREAM; + } - /* Value must be written if the field isn't 
pure indexed */ - *length += aws_hpack_get_encoded_length_string(encoder->hpack, field->value, encoder->use_huffman); + if (first_frame_promised_stream_id) { + promised_stream_id = first_frame_promised_stream_id; + payload_overhead += 4; } + + } else /* CONTINUATION */ { + frame_type = AWS_H2_FRAME_T_CONTINUATION; } - return AWS_OP_SUCCESS; -} + /* + * Figure out what size header-block fragment should go in this frame. + */ -int aws_h2_frame_header_block_encode( - const struct aws_h2_frame_header_block *header_block, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(header_block); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); + size_t max_payload; + if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) { + goto handle_waiting_for_more_space; + } - const size_t num_headers = aws_array_list_length(&header_block->header_fields); - AWS_LOGF(AWS_LL_TRACE, AWS_LS_HTTP_FRAMES, "Encoding header block with %zu headers", num_headers); + size_t max_fragment; + if (aws_sub_size_checked(max_payload, payload_overhead, &max_fragment)) { + goto handle_waiting_for_more_space; + } - for (size_t i = 0; i < num_headers; ++i) { + const size_t fragment_len = aws_min_size(max_fragment, header_block->encoded_block_cursor.len); + if (fragment_len == header_block->encoded_block_cursor.len) { + /* This will finish the header-block */ + flags |= AWS_H2_FRAME_F_END_HEADERS; + } else { + /* If we're not finishing the header-block, is it even worth trying to send this frame now? */ + const size_t even_worth_sending_threshold = s_frame_prefix_length + payload_overhead; + if (fragment_len < even_worth_sending_threshold) { + goto handle_waiting_for_more_space; + } + } + + /* + * Ok, it fits! 
Write the frame + */ + + /* Write the frame prefix */ + if (s_frame_prefix_encode(frame_type, stream_id, fragment_len + payload_overhead, flags, output)) { + goto error; + } - const struct aws_http_header *field = NULL; - aws_array_list_get_at_ptr(&header_block->header_fields, (void **)&field, i); - AWS_ASSERT(field); - - /* #TODO don't use index unless header is USE_CACHE */ - /* #TODO need to update hpack as we go or we'll be using wrong indices */ - bool found_value = true; - const size_t index = aws_hpack_find_index(encoder->hpack, field, &found_value); - - uint8_t mask; - uint8_t prefix_size; - /* If a value was found, this is an indexed header */ - if (found_value) { - mask = s_indexed_header_field_mask; - prefix_size = 7; - } else { - /* If not indexed, determine the appropriate flags and prefixes */ - switch (field->compression) { - case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE: - mask = s_literal_save_field_mask; - prefix_size = 6; - break; - case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE: - mask = 0; /* No bits set, just 4 bit prefix */ - prefix_size = 4; - break; - case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE: - mask = s_literal_no_forward_save_mask; - prefix_size = 4; - break; - default: - /* Unreachable */ - AWS_FATAL_ASSERT(false); - } + /* Write pad length */ + if (flags & AWS_H2_FRAME_F_PADDED) { + AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION); + if (!aws_byte_buf_write_u8(output, pad_length)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } + } - const size_t before_len = output->len; + /* Write priority */ + if (flags & AWS_H2_FRAME_F_PRIORITY) { + AWS_ASSERT(frame_type == AWS_H2_FRAME_T_HEADERS); + if (s_frame_priority_settings_encode(priority_settings, output)) { + goto error; + } + } - /* Write the top bits to signal representation */ - output->buffer[output->len] = mask; + /* Write promised stream ID */ + if (promised_stream_id) { + AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); + if (!aws_byte_buf_write_be32(output, 
*promised_stream_id & s_31_bit_mask)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; + } + } - /* Write the index if indexed, or 0 to signal literal name */ - if (aws_hpack_encode_integer(index, prefix_size, output)) { - return AWS_OP_ERR; + /* Write header-block fragment */ + if (fragment_len > 0) { + struct aws_byte_cursor fragment = header_block->encoded_block_cursor; + fragment.len = fragment_len; + if (!aws_byte_buf_write_from_whole_cursor(output, fragment)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } + } - /* Names and values must be copied to avoid modifying the original struct */ - struct aws_byte_cursor scratch; - - if (!found_value) { - /* If not an indexed header, check if the name needs to be written */ - if (!index) { - scratch = field->name; - if (aws_hpack_encode_string(encoder->hpack, &scratch, encoder->use_huffman, output)) { - return AWS_OP_ERR; - } - AWS_ASSERT(scratch.len == 0); - } - - /* Value must be written if the field isn't pure indexed */ - scratch = field->value; - if (aws_hpack_encode_string(encoder->hpack, &scratch, encoder->use_huffman, output)) { - return AWS_OP_ERR; - } - AWS_ASSERT(scratch.len == 0); - - if (field->compression == AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) { - /* Save for next time */ - aws_hpack_insert_header(encoder->hpack, field); - } + /* Write padding */ + if (flags & AWS_H2_FRAME_F_PADDED) { + if (!aws_byte_buf_write_u8_n(output, 0, pad_length)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } + } - const size_t encoded_bytes = output->len - before_len; - AWS_LOGF(AWS_LL_TRACE, AWS_LS_HTTP_FRAMES, "Encoded header %zu as %zu bytes", i, encoded_bytes); + /* Success! Wrote entire frame. It's safe to change state now */ + header_block->state = flags & AWS_H2_FRAME_F_END_HEADERS ? 
AWS_H2_HEADER_BLOCK_STATE_COMPLETE + : AWS_H2_HEADER_BLOCK_STATE_CONTINUATION; + aws_byte_cursor_advance(&header_block->encoded_block_cursor, fragment_len); + *waiting_for_more_space = false; + return AWS_OP_SUCCESS; + +handle_waiting_for_more_space: + *waiting_for_more_space = true; + return AWS_OP_SUCCESS; + +error: + header_block->state = AWS_H2_HEADER_BLOCK_STATE_ERROR; + return AWS_OP_ERR; +} + +int aws_h2_frame_header_block_encode( + struct aws_h2_frame_header_block *header_block, + const struct aws_h2_frame *first_frame, + uint8_t first_frame_pad_length, + const struct aws_h2_frame_priority_settings *first_frame_priority_settings, + bool first_frame_end_stream, + const uint32_t *first_frame_promised_stream_id, + struct aws_h2_frame_encoder *encoder, + struct aws_byte_buf *output, + bool *complete) { + + *complete = false; + + if (header_block->state < AWS_H2_HEADER_BLOCK_STATE_COMPLETE) { + aws_raise_error(AWS_ERROR_INVALID_STATE); + goto error; + } + + /* Pre-encode the entire header-block the first time we're called. 
*/ + if (header_block->state == AWS_H2_HEADER_BLOCK_STATE_INIT) { + if (s_pre_encode_whole_header_block(encoder, header_block->headers, output)) { + goto error; + } + header_block->state = AWS_H2_HEADER_BLOCK_STATE_FIRST_FRAME; + } + + /* Write frames (HEADER or PUSH_PROMISE, followed by N CONTINUATION frames) + * until we're done writing header-block or the buffer is too full to continue */ + bool waiting_for_more_space = false; + while (header_block->state < AWS_H2_HEADER_BLOCK_STATE_COMPLETE && !waiting_for_more_space) { + if (s_encode_single_header_block_frame( + header_block, + first_frame, + first_frame_pad_length, + first_frame_priority_settings, + first_frame_end_stream, + first_frame_promised_stream_id, + encoder, + output, + &waiting_for_more_space)) { + goto error; + } } + *complete = header_block->state == AWS_H2_HEADER_BLOCK_STATE_COMPLETE; return AWS_OP_SUCCESS; + +error: + header_block->state = AWS_H2_HEADER_BLOCK_STATE_ERROR; + return AWS_OP_ERR; } -#endif // 0 + /*********************************************************************************************************************** * Common Frame Prefix **********************************************************************************************************************/ -static const size_t s_frame_prefix_length = 24; - static void s_init_frame_base( struct aws_h2_frame *frame_base, struct aws_allocator *alloc, @@ -298,20 +433,20 @@ static void s_init_frame_base( } static int s_frame_prefix_encode( - struct aws_h2_frame *frame_base, + enum aws_h2_frame_type type, + uint32_t stream_id, size_t length, uint8_t flags, struct aws_byte_buf *output) { - AWS_PRECONDITION(frame_base); AWS_PRECONDITION(output); - AWS_PRECONDITION(!(frame_base->stream_id & s_u32_top_bit_mask), "Invalid stream ID"); + AWS_PRECONDITION(!(stream_id & s_u32_top_bit_mask), "Invalid stream ID"); AWS_LOGF( AWS_LL_TRACE, AWS_LS_HTTP_FRAMES, "Beginning encode of frame %s: stream: %" PRIu32 " payload length: %zu flags: %" PRIu8, - 
aws_h2_frame_type_to_str(frame_base->type), - frame_base->stream_id, + aws_h2_frame_type_to_str(type), + stream_id, length, flags); @@ -326,7 +461,7 @@ static int s_frame_prefix_encode( return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write type */ - if (!aws_byte_buf_write_u8(output, frame_base->type)) { + if (!aws_byte_buf_write_u8(output, type)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write flags */ @@ -334,13 +469,13 @@ static int s_frame_prefix_encode( return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } /* Write stream id (with reserved first bit) */ - if (!aws_byte_buf_write_be32(output, frame_base->stream_id & s_31_bit_mask)) { + if (!aws_byte_buf_write_be32(output, stream_id & s_31_bit_mask)) { return aws_raise_error(AWS_ERROR_SHORT_BUFFER); } return AWS_OP_SUCCESS; } -#if 0 + /*********************************************************************************************************************** * Encoder **********************************************************************************************************************/ @@ -367,160 +502,189 @@ void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder) { /*********************************************************************************************************************** * DATA **********************************************************************************************************************/ -int aws_h2_frame_data_init(struct aws_h2_frame_data *frame, struct aws_allocator *allocator) { - (void)allocator; - - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_DATA; - - return AWS_OP_SUCCESS; -} -void aws_h2_frame_data_clean_up(struct aws_h2_frame_data *frame) { - AWS_PRECONDITION(frame); - (void)frame; -} - -int aws_h2_frame_data_encode( - struct aws_h2_frame_data *frame, +int aws_h2_encode_data_frame( struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); + uint32_t stream_id, + struct aws_input_stream *body_stream, 
+ bool body_ends_stream, + uint8_t pad_length, + struct aws_byte_buf *output, + bool *body_complete) { + AWS_PRECONDITION(encoder); + AWS_PRECONDITION(body_stream); AWS_PRECONDITION(output); + AWS_PRECONDITION(body_complete); - (void)encoder; + if (aws_h2_validate_stream_id(stream_id)) { + AWS_LOGF_ERROR(AWS_LS_HTTP_FRAMES, "Invalid body stream ID"); + return AWS_OP_ERR; + } - const size_t output_init_len = output->len; + *body_complete = false; + uint8_t flags = 0; - /* Calculate length & flags */ - size_t length = frame->data.len; + /* + * The payload length must precede everything else, but we don't know how + * much data we'll get from the body-stream until we actually read it. + * Therefore, we determine the exact location that the body data should go, + * then stream the body directly into that part of the output buffer. + * Then we will go and write the other parts of the frame in around it. + */ + + size_t bytes_preceding_body = s_frame_prefix_length; + size_t payload_overhead = 0; /* Amount of "payload" that will not contain body (padding) */ + if (pad_length > 0) { + flags |= AWS_H2_FRAME_F_PADDED; - uint8_t flags = 0; - if (frame->end_stream) { - flags |= AWS_H2_FRAME_F_END_STREAM; + /* Padding len is 1st byte of payload (padding itself goes at end of payload) */ + bytes_preceding_body += 1; + payload_overhead = 1 + pad_length; } - if (frame->pad_length) { - flags |= AWS_H2_FRAME_F_PADDED; - length += 1 + frame->pad_length; + + /* Max amount of payload we can do right now */ + size_t max_payload; + if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) { + goto handle_waiting_for_more_space; + } + + /* Max amount of body we can fit in the payload*/ + size_t max_body; + if (aws_sub_size_checked(max_payload, payload_overhead, &max_body) || max_body == 0) { + goto handle_waiting_for_more_space; + } + + /* Limit where body can go by making a sub-buffer */ + struct aws_byte_buf body_sub_buf = + aws_byte_buf_from_empty_array(output->buffer + 
output->len + bytes_preceding_body, max_body); + + /* Read body into sub-buffer */ + if (aws_input_stream_read(body_stream, &body_sub_buf)) { + goto error; + } + + /* Check if we've reached the end of the body */ + struct aws_stream_status body_status; + if (aws_input_stream_get_status(body_stream, &body_status)) { + goto error; + } + + if (body_status.is_end_of_stream) { + *body_complete = true; + if (body_ends_stream) { + flags |= AWS_H2_FRAME_F_END_STREAM; + } + } else { + if (body_sub_buf.len == 0) { + /* This frame would have no useful information, don't even bother sending it */ + goto handle_nothing_to_send_right_now; + } } + /* + * Write in the other parts of the frame. + */ + /* Write the frame prefix */ - if (s_frame_prefix_encode(&frame->base, length, flags, output)) { - goto write_error; + const size_t payload_len = body_sub_buf.len + payload_overhead; + if (s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output)) { + goto error; } /* Write pad length */ - if (frame->pad_length) { - if (!aws_byte_buf_write_u8(output, frame->pad_length)) { - goto write_error; + if (flags & AWS_H2_FRAME_F_PADDED) { + if (!aws_byte_buf_write_u8(output, pad_length)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } } - /* Write data */ - if (!aws_byte_buf_write_from_whole_cursor(output, frame->data)) { - goto write_error; - } + + /* Increment output->len to jump over the body that we already wrote in */ + AWS_ASSERT(output->buffer + output->len == body_sub_buf.buffer && "Streamed DATA to wrong position"); + output->len += body_sub_buf.len; + /* Write padding */ - for (size_t i = 0; i < frame->pad_length; ++i) { - if (!aws_byte_buf_write_u8(output, 0)) { - goto write_error; + if (flags & AWS_H2_FRAME_F_PADDED) { + if (!aws_byte_buf_write_u8_n(output, 0, pad_length)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } } return AWS_OP_SUCCESS; -write_error: - output->len = output_init_len; - return 
aws_raise_error(AWS_H2_ERR_FRAME_SIZE_ERROR); +handle_waiting_for_more_space: + return AWS_OP_SUCCESS; + +handle_nothing_to_send_right_now: + return AWS_OP_SUCCESS; + +error: + return AWS_OP_ERR; } /*********************************************************************************************************************** * HEADERS **********************************************************************************************************************/ -int aws_h2_frame_headers_init(struct aws_h2_frame_headers *frame, struct aws_allocator *allocator) { - (void)allocator; - - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_HEADERS; - - return aws_h2_frame_header_block_init(&frame->header_block, allocator); -} -void aws_h2_frame_headers_clean_up(struct aws_h2_frame_headers *frame) { - AWS_PRECONDITION(frame); +DEFINE_FRAME_VTABLE(headers); - aws_h2_frame_header_block_clean_up(&frame->header_block); -} - -int aws_h2_frame_headers_encode( - struct aws_h2_frame_headers *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); - - const size_t output_init_len = output->len; - - /* Calculate length & flags */ - size_t length = 0; - if (aws_h2_frame_header_block_get_encoded_length(&frame->header_block, encoder, &length)) { - goto compression_error; - } +struct aws_h2_frame *aws_h2_frame_new_headers( + struct aws_allocator *allocator, + uint32_t stream_id, + const struct aws_http_headers *headers, + bool end_stream, + uint8_t pad_length, + const struct aws_h2_frame_priority_settings *optional_priority) { - uint8_t flags = 0; - if (frame->end_stream) { - flags |= AWS_H2_FRAME_F_END_STREAM; - } - if (frame->end_headers) { - flags |= AWS_H2_FRAME_F_END_HEADERS; - } - if (frame->pad_length) { - flags |= AWS_H2_FRAME_F_PADDED; - length += 1 + frame->pad_length; - } - if (frame->has_priority) { - flags |= AWS_H2_FRAME_F_PRIORITY; - length += 
s_frame_priority_settings_size; + struct aws_h2_frame_headers *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_headers)); + if (!frame) { + return NULL; } - /* Write the frame prefix */ - if (s_frame_prefix_encode(&frame->base, length, flags, output)) { - goto write_error; + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_HEADERS, &s_frame_headers_vtable, stream_id); + frame->end_stream = end_stream; + frame->pad_length = pad_length; + if (optional_priority) { + frame->has_priority = true; + frame->priority = *optional_priority; } - /* Write pad length */ - if (frame->pad_length) { - if (!aws_byte_buf_write_u8(output, frame->pad_length)) { - goto write_error; - } - } - /* Write priority */ - if (frame->has_priority) { - if (s_frame_priority_settings_encode(&frame->priority, output)) { - goto write_error; - } - } - /* Write data */ - if (aws_h2_frame_header_block_encode(&frame->header_block, encoder, output)) { - goto compression_error; - } - /* Write padding */ - for (size_t i = 0; i < frame->pad_length; ++i) { - if (!aws_byte_buf_write_u8(output, 0)) { - goto write_error; - } + if (aws_h2_frame_header_block_init(&frame->header_block, allocator, headers)) { + goto error; } - return AWS_OP_SUCCESS; + return &frame->base; +error: + aws_mem_release(allocator, frame); + return NULL; +} +static void s_frame_headers_destroy(struct aws_h2_frame *frame_base) { + struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base); + aws_h2_frame_header_block_clean_up(&frame->header_block); + aws_mem_release(frame->base.alloc, frame); +} -write_error: - output->len = output_init_len; - return aws_raise_error(AWS_H2_ERR_FRAME_SIZE_ERROR); +static int s_frame_headers_encode( + struct aws_h2_frame *frame_base, + struct aws_h2_frame_encoder *encoder, + struct aws_byte_buf *output, + bool *complete) { -compression_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_HTTP_COMPRESSION); + 
(void)encoder; + struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base); + + return aws_h2_frame_header_block_encode( + &frame->header_block, + &frame->base, + frame->pad_length, + frame->has_priority ? &frame->priority : NULL, + frame->end_stream, + NULL /* HEADERS doesn't have promised_stream_id */, + encoder, + output, + complete); } -#endif // 0 + /*********************************************************************************************************************** * PRIORITY **********************************************************************************************************************/ @@ -566,7 +730,7 @@ static int s_frame_priority_encode( } /* Write the frame prefix */ - if (s_frame_prefix_encode(&frame->base, s_frame_priority_length, 0, output)) { + if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, s_frame_priority_length, 0, output)) { return AWS_OP_ERR; } @@ -624,7 +788,7 @@ static int s_frame_rst_stream_encode( } /* Write the frame prefix */ - if (s_frame_prefix_encode(&frame->base, s_frame_rst_stream_length, 0, output)) { + if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, s_frame_rst_stream_length, 0, output)) { return AWS_OP_ERR; } @@ -700,7 +864,7 @@ static int s_frame_settings_encode( flags |= AWS_H2_FRAME_F_ACK; } - if (s_frame_prefix_encode(&frame->base, payload_len, flags, output)) { + if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, payload_len, flags, output)) { return AWS_OP_ERR; } @@ -717,88 +881,64 @@ static int s_frame_settings_encode( return AWS_OP_SUCCESS; } -#if 0 /*********************************************************************************************************************** * PUSH_PROMISE **********************************************************************************************************************/ -int aws_h2_frame_push_promise_init(struct aws_h2_frame_push_promise *frame, struct aws_allocator *allocator) { - 
(void)allocator; - - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_PUSH_PROMISE; - - return aws_h2_frame_header_block_init(&frame->header_block, allocator); -} -void aws_h2_frame_push_promise_clean_up(struct aws_h2_frame_push_promise *frame) { - AWS_PRECONDITION(frame); - - aws_h2_frame_header_block_clean_up(&frame->header_block); -} - -int aws_h2_frame_push_promise_encode( - struct aws_h2_frame_push_promise *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); +DEFINE_FRAME_VTABLE(push_promise); - if (frame->promised_stream_id & s_u32_top_bit_mask) { - return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR); - } - - const size_t output_init_len = output->len; - - /* Write header */ - uint8_t flags = 0; - size_t length = 0; - if (aws_h2_frame_header_block_get_encoded_length(&frame->header_block, encoder, &length)) { - goto compression_error; - } - length += 4; /* Account for promised stream id */ - if (frame->pad_length) { - flags |= AWS_H2_FRAME_F_PADDED; - length += frame->pad_length + 1; - } - if (frame->end_headers) { - flags |= AWS_H2_FRAME_F_END_HEADERS; - } - if (s_frame_prefix_encode(&frame->base, length, flags, output)) { - goto write_error; - } +struct aws_h2_frame *aws_h2_frame_new_push_promise( + struct aws_allocator *allocator, + uint32_t stream_id, + uint32_t promised_stream_id, + const struct aws_http_headers *headers, + uint8_t pad_length) { - /* Write pad length */ - if (frame->pad_length) { - aws_byte_buf_write_u8(output, frame->pad_length); + struct aws_h2_frame_push_promise *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_push_promise)); + if (!frame) { + return NULL; } - /* Write new stream id */ - const uint32_t stream_id_bytes = frame->promised_stream_id & s_31_bit_mask; - aws_byte_buf_write_be32(output, stream_id_bytes); + s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_PUSH_PROMISE, 
&s_frame_push_promise_vtable, stream_id); + frame->promised_stream_id = promised_stream_id; + frame->pad_length = pad_length; - /* Write header block fragment */ - if (aws_h2_frame_header_block_encode(&frame->header_block, encoder, output)) { - goto compression_error; + if (aws_h2_frame_header_block_init(&frame->header_block, allocator, headers)) { + goto error; } - /* Write padding */ - for (size_t i = 0; i < frame->pad_length; ++i) { - if (!aws_byte_buf_write_u8(output, 0)) { - goto write_error; - } - } + return &frame->base; +error: + aws_mem_release(allocator, frame); + return NULL; +} - return AWS_OP_SUCCESS; +static void s_frame_push_promise_destroy(struct aws_h2_frame *frame_base) { + struct aws_h2_frame_push_promise *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_push_promise, base); + aws_h2_frame_header_block_clean_up(&frame->header_block); + aws_mem_release(frame->base.alloc, frame); +} -write_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); +static int s_frame_push_promise_encode( + struct aws_h2_frame *frame_base, + struct aws_h2_frame_encoder *encoder, + struct aws_byte_buf *output, + bool *complete) { -compression_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_HTTP_COMPRESSION); + (void)encoder; + struct aws_h2_frame_push_promise *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_push_promise, base); + + return aws_h2_frame_header_block_encode( + &frame->header_block, + &frame->base, + frame->pad_length, + NULL /* PUSH_PROMISE doesn't have priority_settings */, + false /* PUSH_PROMISE doesn't have end_stream flag */, + &frame->promised_stream_id, + encoder, + output, + complete); } -#endif // 0 + /*********************************************************************************************************************** * PING **********************************************************************************************************************/ @@ -849,7 +989,7 @@ static 
int s_frame_ping_encode( flags |= AWS_H2_FRAME_F_ACK; } - if (s_frame_prefix_encode(&frame->base, AWS_H2_PING_DATA_SIZE, flags, output)) { + if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, AWS_H2_PING_DATA_SIZE, flags, output)) { return AWS_OP_ERR; } @@ -910,7 +1050,7 @@ static int s_frame_goaway_encode( } /* Write the frame prefix */ - if (s_frame_prefix_encode(&frame->base, payload_len, 0, output)) { + if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, payload_len, 0, output)) { return AWS_OP_ERR; } @@ -976,7 +1116,7 @@ static int s_frame_window_update_encode( } /* Write the frame prefix */ - if (s_frame_prefix_encode(&frame->base, s_frame_window_update_length, 0, output)) { + if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, s_frame_window_update_length, 0, output)) { return AWS_OP_ERR; } @@ -989,66 +1129,6 @@ static int s_frame_window_update_encode( return AWS_OP_SUCCESS; } -#if 0 -/*********************************************************************************************************************** - * CONTINUATION - **********************************************************************************************************************/ -int aws_h2_frame_continuation_init(struct aws_h2_frame_continuation *frame, struct aws_allocator *allocator) { - (void)allocator; - - AWS_ZERO_STRUCT(*frame); - frame->base.type = AWS_H2_FRAME_T_CONTINUATION; - - return aws_h2_frame_header_block_init(&frame->header_block, allocator); -} -void aws_h2_frame_continuation_clean_up(struct aws_h2_frame_continuation *frame) { - AWS_PRECONDITION(frame); - - aws_h2_frame_header_block_clean_up(&frame->header_block); -} - -int aws_h2_frame_continuation_encode( - struct aws_h2_frame_continuation *frame, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output) { - AWS_PRECONDITION(frame); - AWS_PRECONDITION(encoder); - AWS_PRECONDITION(output); - - const size_t output_init_len = output->len; - - /* Calculate length & flags */ 
- size_t length = 0; - if (aws_h2_frame_header_block_get_encoded_length(&frame->header_block, encoder, &length)) { - goto compression_error; - } - - uint8_t flags = 0; - if (frame->end_headers) { - flags |= AWS_H2_FRAME_F_END_HEADERS; - } - - /* Write the frame prefix */ - if (s_frame_prefix_encode(&frame->base, length, flags, output)) { - goto write_error; - } - - /* Write the header block */ - if (aws_h2_frame_header_block_encode(&frame->header_block, encoder, output)) { - goto compression_error; - } - - return AWS_OP_SUCCESS; - -write_error: - output->len = output_init_len; - return aws_raise_error(AWS_H2_ERR_FRAME_SIZE_ERROR); - -compression_error: - output->len = output_init_len; - return aws_raise_error(AWS_ERROR_HTTP_COMPRESSION); -} -#endif void aws_h2_frame_destroy(struct aws_h2_frame *frame) { if (frame) { frame->vtable->destroy(frame); @@ -1065,8 +1145,16 @@ int aws_h2_encode_frame( AWS_PRECONDITION(frame); AWS_PRECONDITION(output); AWS_PRECONDITION(frame_complete); - AWS_PRECONDITION(!encoder->has_errored && "Cannot encode after error"); - AWS_PRECONDITION(!encoder->current_frame || (encoder->current_frame == frame) && "Must resume current frame"); + + if (encoder->has_errored) { + AWS_LOGF_ERROR(AWS_LS_HTTP_FRAMES, "Encoder cannot be used again after an error"); + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + if (encoder->current_frame && (encoder->current_frame != frame)) { + AWS_LOGF_ERROR(AWS_LS_HTTP_FRAMES, "Cannot encode new frame until previous frames completes"); + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } *frame_complete = false; diff --git a/source/hpack.c b/source/hpack.c index 051649050..5699b320c 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -1352,7 +1352,7 @@ int aws_hpack_encode_header( goto error; } - /* if "incremental indexing" type, insert header into the dynamic table */ + /* if "incremental indexing" type, insert header into the dynamic table. 
*/ if (AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING == literal_entry_type) { if (aws_hpack_insert_header(context, header)) { goto error; @@ -1365,12 +1365,22 @@ int aws_hpack_encode_header( return AWS_OP_ERR; } -int aws_hpack_encode_dynamic_table_resize(struct aws_hpack_context *context, size_t size, struct aws_byte_buf *output) { - - AWS_PRECONDITION(context); - AWS_PRECONDITION(output); - - uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; - uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; - return aws_hpack_encode_integer(size, starting_bit_pattern, num_prefix_bits, output); +int aws_hpack_encode_header_block_start(struct aws_hpack_context *context, struct aws_byte_buf *output) { +#if 0 // #TODO finish hooking this up + if (context->dynamic_table_size_update.pending) { + if (s_encode_dynamic_table_resize(context, context->dynamic_table_size_update.value, output)) { + uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; + uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; + if (aws_hpack_encode_integer(size, starting_bit_pattern, num_prefix_bits, output)) { + return AWS_OP_ERR; + } + } + context->dynamic_table_size_update.pending = false; + context->dynamic_table_size_update.value = SIZE_MAX; + } +#else + (void)context; + (void)output; +#endif + return AWS_OP_SUCCESS; } From 2171e907a264a9db91bf335e3e636e48d138681d Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Mon, 16 Mar 2020 16:16:02 -0700 Subject: [PATCH 06/35] all tests passing again --- include/aws/http/http.h | 2 - include/aws/http/private/h2_frames.h | 69 +-- include/aws/http/private/hpack.h | 25 +- include/aws/http/request_response.h | 7 + source/h2_connection.c | 2 +- source/h2_decoder.c | 5 +- source/h2_frames.c | 806 ++++++++++++++------------- source/hpack.c | 56 +- source/http.c | 2 - 
source/request_response.c | 25 +- tests/CMakeLists.txt | 1 - tests/test_h2_encoder.c | 233 ++++---- tests/test_h2_headers.c | 188 +++---- tests/test_hpack.c | 67 +-- 14 files changed, 760 insertions(+), 728 deletions(-) diff --git a/include/aws/http/http.h b/include/aws/http/http.h index 2af980319..a83d59ee0 100644 --- a/include/aws/http/http.h +++ b/include/aws/http/http.h @@ -68,8 +68,6 @@ enum aws_http_log_subject { AWS_LS_HTTP_CONNECTION_MANAGER, AWS_LS_HTTP_WEBSOCKET, AWS_LS_HTTP_WEBSOCKET_SETUP, - - AWS_LS_HTTP_FRAMES, }; enum aws_http_version { diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index 3d666a7b0..a556baad5 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -16,7 +16,6 @@ * permissions and limitations under the License. */ -#include #include #include @@ -81,6 +80,12 @@ enum aws_h2_settings { #define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) #define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) +/* Legal min(inclusive) and max(inclusive) for each setting */ +extern const uint32_t aws_h2_settings_bounds[AWS_H2_SETTINGS_END_RANGE][2]; + +/* Initial values for settings RFC-7540 6.5.2 */ +extern const uint32_t aws_h2_settings_initial[AWS_H2_SETTINGS_END_RANGE]; + /* This magic string must be the very first thing a client sends to the server. 
* See RFC-7540 3.5 - HTTP/2 Connection Preface */ extern const struct aws_byte_cursor aws_h2_connection_preface_client_string; @@ -101,23 +106,6 @@ struct aws_h2_frame_priority_settings { uint8_t weight; }; -struct aws_h2_frame_header_block { - const struct aws_http_headers *headers; - - /* state */ - - enum { - AWS_H2_HEADER_BLOCK_STATE_INIT, - AWS_H2_HEADER_BLOCK_STATE_FIRST_FRAME, - AWS_H2_HEADER_BLOCK_STATE_CONTINUATION, - AWS_H2_HEADER_BLOCK_STATE_COMPLETE, - AWS_H2_HEADER_BLOCK_STATE_ERROR, - } state; - - struct aws_byte_buf whole_encoded_block; /* entire header block is encoded here */ - struct aws_byte_cursor encoded_block_cursor; /* tracks progress sending encoded header-block in fragments */ -}; - /** * A frame to be encoded. * (in the case of HEADERS and PUSH_PROMISE, it might turn into multiple frames due to CONTINUATION) @@ -130,19 +118,33 @@ struct aws_h2_frame { struct aws_linked_list_node node; }; -/* Represents a HEADERS header-block. - * (HEADERS frame followed 0 or more CONTINUATION frames) */ +/* Represents a HEADERS or PUSH_PROMISE frame (followed by zero or more CONTINUATION frames) */ struct aws_h2_frame_headers { struct aws_h2_frame base; - /* Flags */ + /* Common data */ + const struct aws_http_headers *headers; + uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ + + /* HEADERS-only data */ bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */ bool has_priority; /* AWS_H2_FRAME_F_PRIORITY */ - - /* Payload */ - uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ struct aws_h2_frame_priority_settings priority; - struct aws_h2_frame_header_block header_block; + + /* PUSH_PROMISE-only data */ + uint32_t promised_stream_id; + + /* State */ + enum { + AWS_H2_HEADERS_STATE_INIT, + AWS_H2_HEADERS_STATE_FIRST_FRAME, + AWS_H2_HEADERS_STATE_CONTINUATION, + AWS_H2_HEADERS_STATE_COMPLETE, + AWS_H2_HEADERS_STATE_ERROR, + } state; + + struct aws_byte_buf whole_encoded_header_block; + struct aws_byte_cursor 
header_block_cursor; /* tracks progress sending encoded header-block in fragments */ }; /* Represents a PRIORITY frame */ @@ -179,17 +181,6 @@ struct aws_h2_frame_settings { size_t settings_count; }; -/* Represents a PUSH_PROMISE header-block. - * (PUSH_PROMISE frame followed by 0 or more CONTINUATION frames) */ -struct aws_h2_frame_push_promise { - struct aws_h2_frame base; - - /* Payload */ - uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ - uint32_t promised_stream_id; - struct aws_h2_frame_header_block header_block; -}; - #define AWS_H2_PING_DATA_SIZE (8) /* Represents a PING frame */ @@ -223,12 +214,11 @@ struct aws_h2_frame_window_update { /* Used to encode a frame */ struct aws_h2_frame_encoder { - /* Larger state */ struct aws_allocator *allocator; + const void *logging_id; struct aws_hpack_context *hpack; struct aws_h2_frame *current_frame; bool has_errored; - enum aws_hpack_huffman_mode huffman_mode; }; typedef void aws_h2_frame_destroy_fn(struct aws_h2_frame *frame_base); @@ -258,7 +248,8 @@ int aws_h2_validate_stream_id(uint32_t stream_id); * 2. Encode the frame using aws_h2_frame_*_encode */ AWS_HTTP_API -int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator); +int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator, void *logging_id); + AWS_HTTP_API void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder); diff --git a/include/aws/http/private/hpack.h b/include/aws/http/private/hpack.h index eec53ec86..7cd6e53af 100644 --- a/include/aws/http/private/hpack.h +++ b/include/aws/http/private/hpack.h @@ -52,9 +52,9 @@ struct aws_hpack_decode_result { * This only controls how string values are encoded when they're not already in a table. 
*/ enum aws_hpack_huffman_mode { + AWS_HPACK_HUFFMAN_SMALLEST, AWS_HPACK_HUFFMAN_NEVER, AWS_HPACK_HUFFMAN_ALWAYS, - AWS_HPACK_HUFFMAN_SMALLEST, }; AWS_EXTERN_C_BEGIN @@ -90,24 +90,13 @@ int aws_hpack_decode( struct aws_hpack_decode_result *result); /** - * Must be called at start of header-block. - * If the table has been resized, then a Dynamic Table Size Update (RFC-7541 6.3) is encoded. - * This behavior is described in RFC-7541 4.2. + * Encode header-block into the output. + * This function will mutate the hpack context, so an error means the context can no longer be used. * Note that output will be dynamically resized if it's too short. */ -AWS_HTTP_API -int aws_hpack_encode_header_block_start(struct aws_hpack_context *context, struct aws_byte_buf *output); - -/** - * Encode a header-field into the output. - * This function will mutate the hpack context, so any error is unrecoverable. - * Note that output will be dynamically resized if it's too short. - */ -AWS_HTTP_API -int aws_hpack_encode_header( +int aws_hpack_encode_header_block( struct aws_hpack_context *context, - const struct aws_http_header *header, - enum aws_hpack_huffman_mode huffman_mode, + const struct aws_http_headers *headers, struct aws_byte_buf *output); /* Returns the hpack size of a header (name.len + value.len + 32) [4.1] */ @@ -132,6 +121,9 @@ int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_ AWS_HTTP_API int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size); +AWS_HTTP_API +void aws_hpack_set_huffman_mode(struct aws_hpack_context *context, enum aws_hpack_huffman_mode mode); + /* Public for testing purposes. 
* Output will be dynamically resized if it's too short */ AWS_HTTP_API @@ -152,7 +144,6 @@ AWS_HTTP_API int aws_hpack_encode_string( struct aws_hpack_context *context, struct aws_byte_cursor to_encode, - enum aws_hpack_huffman_mode huffman_mode, struct aws_byte_buf *output); /* Public for testing purposes */ diff --git a/include/aws/http/request_response.h b/include/aws/http/request_response.h index a51103efb..e82a1eecf 100644 --- a/include/aws/http/request_response.h +++ b/include/aws/http/request_response.h @@ -362,6 +362,13 @@ void aws_http_headers_release(struct aws_http_headers *headers); * The underlying strings are copied. */ AWS_HTTP_API +int aws_http_headers_add_v2(struct aws_http_headers *headers, const struct aws_http_header *header); + +/** + * Add a header with default compression settings. + * The underlying strings are copied. + */ +AWS_HTTP_API int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); /** diff --git a/source/h2_connection.c b/source/h2_connection.c index 0c499fd27..73ca094b1 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -219,7 +219,7 @@ static struct aws_h2_connection *s_connection_new( goto error; } - if (aws_h2_frame_encoder_init(&connection->thread_data.encoder, alloc)) { + if (aws_h2_frame_encoder_init(&connection->thread_data.encoder, alloc, &connection->base)) { CONNECTION_LOGF( ERROR, connection, "Encoder init error %d (%s)", aws_last_error(), aws_error_name(aws_last_error())); goto error; diff --git a/source/h2_decoder.c b/source/h2_decoder.c index 084acb629..9ba900c1b 100644 --- a/source/h2_decoder.c +++ b/source/h2_decoder.c @@ -244,7 +244,8 @@ int aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data) /* Run decoder state machine until we're no longer changing states. * We don't simply loop `while(data->len)` because some states consume no data, * and these states should run even when there is no data left. 
*/ - for (decoder->state_changed = false; decoder->state_changed; decoder->state_changed = false) { + do { + decoder->state_changed = false; const uint32_t bytes_required = decoder->state->bytes_required; AWS_ASSERT(bytes_required <= decoder->scratch.capacity); @@ -302,7 +303,7 @@ int aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data) decoder->scratch.len); } } - } + } while (decoder->state_changed); return AWS_OP_SUCCESS; diff --git a/source/h2_frames.c b/source/h2_frames.c index ca732035f..647642f79 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -36,9 +36,49 @@ /* #TODO: use add_checked and mul_checked */ +#define ENCODER_LOGF(level, encoder, text, ...) \ + AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p " text, (encoder)->logging_id, __VA_ARGS__) + +#define ENCODER_LOG(level, encoder, text) ENCODER_LOGF(level, encoder, "%s", text) + const struct aws_byte_cursor aws_h2_connection_preface_client_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"); +/* Initial values and bounds are from RFC-7540 6.5.2 */ +const uint32_t aws_h2_settings_initial[AWS_H2_SETTINGS_END_RANGE] = { + [AWS_H2_SETTINGS_HEADER_TABLE_SIZE] = 4096, + [AWS_H2_SETTINGS_ENABLE_PUSH] = 1, + [AWS_H2_SETTINGS_MAX_CONCURRENT_STREAMS] = UINT32_MAX, /* "Initially there is no limit to this value" */ + [AWS_H2_SETTINGS_INITIAL_WINDOW_SIZE] = 65535, + [AWS_H2_SETTINGS_MAX_FRAME_SIZE] = 16384, + [AWS_H2_SETTINGS_MAX_HEADER_LIST_SIZE] = UINT32_MAX, /* "The initial value of this setting is unlimited" */ +}; + +const uint32_t aws_h2_settings_bounds[AWS_H2_SETTINGS_END_RANGE][2] = { + [AWS_H2_SETTINGS_HEADER_TABLE_SIZE][0] = 0, + [AWS_H2_SETTINGS_HEADER_TABLE_SIZE][1] = UINT32_MAX, + + [AWS_H2_SETTINGS_ENABLE_PUSH][0] = 0, + [AWS_H2_SETTINGS_ENABLE_PUSH][1] = 1, + + [AWS_H2_SETTINGS_MAX_CONCURRENT_STREAMS][0] = 0, + [AWS_H2_SETTINGS_MAX_CONCURRENT_STREAMS][1] = UINT32_MAX, + + [AWS_H2_SETTINGS_INITIAL_WINDOW_SIZE][0] = 0, + 
[AWS_H2_SETTINGS_INITIAL_WINDOW_SIZE][1] = AWS_H2_WINDOW_UPDATE_MAX, + + [AWS_H2_SETTINGS_MAX_FRAME_SIZE][0] = 16384, + [AWS_H2_SETTINGS_MAX_FRAME_SIZE][1] = AWS_H2_PAYLOAD_MAX, + + [AWS_H2_SETTINGS_MAX_HEADER_LIST_SIZE][0] = 0, + [AWS_H2_SETTINGS_MAX_HEADER_LIST_SIZE][1] = UINT32_MAX, +}; + +/* Put constraints on frames that could get very large given crazy inputs. + * This isn't dictated by the spec, it's here to avoid edge cases where + * were never have a big enough output buffer to encode the frame. */ +static const size_t s_settings_and_goaway_payload_limit = 8192; + /* Stream ids & dependencies should only write the bottom 31 bits */ static const uint32_t s_31_bit_mask = UINT32_MAX >> 1; static const uint32_t s_u32_top_bit_mask = UINT32_MAX << 31; @@ -57,13 +97,6 @@ static const size_t s_encoded_header_block_reserve = 128; /* Value pulled from t .encode = s_frame_##NAME##_encode, \ } -static int s_frame_prefix_encode( - enum aws_h2_frame_type type, - uint32_t stream_id, - size_t length, - uint8_t flags, - struct aws_byte_buf *output); - const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type) { switch (type) { case AWS_H2_FRAME_T_DATA: @@ -104,7 +137,7 @@ int aws_h2_validate_stream_id(uint32_t stream_id) { * 2) obey encoders current MAX_FRAME_SIZE * * Assumes no part of the frame has been written yet to output. - * The total length of the would be: returned-payload-len + s_frame_prefix_length + * The total length of the frame would be: returned-payload-len + s_frame_prefix_length * * Raises error if there is not enough space available for even a frame prefix. 
*/ @@ -157,265 +190,6 @@ static int s_frame_priority_settings_encode( return AWS_OP_SUCCESS; } -/*********************************************************************************************************************** - * Header Block - **********************************************************************************************************************/ -int aws_h2_frame_header_block_init( - struct aws_h2_frame_header_block *header_block, - struct aws_allocator *allocator, - const struct aws_http_headers *headers) { - - AWS_PRECONDITION(header_block); - AWS_PRECONDITION(allocator); - AWS_PRECONDITION(headers); - - AWS_ZERO_STRUCT(*header_block); - if (aws_byte_buf_init(&header_block->whole_encoded_block, allocator, s_encoded_header_block_reserve)) { - return AWS_OP_ERR; - } - - aws_http_headers_acquire((struct aws_http_headers *)headers); - header_block->headers = headers; - return AWS_OP_SUCCESS; -} - -void aws_h2_frame_header_block_clean_up(struct aws_h2_frame_header_block *header_block) { - AWS_PRECONDITION(header_block); - aws_http_headers_release((struct aws_http_headers *)header_block->headers); - aws_byte_buf_clean_up(&header_block->whole_encoded_block); - AWS_ZERO_STRUCT(*header_block); -} - -/** - * Encode whole entire header-block. - * The output is intended to be copied into actual frames by something that cares about frame size. - * Output will be dynamically resized if it's too short. 
- */ -static int s_pre_encode_whole_header_block( - struct aws_h2_frame_encoder *encoder, - const struct aws_http_headers *headers, - struct aws_byte_buf *output) { - - if (aws_hpack_encode_header_block_start(encoder->hpack, output)) { - return AWS_OP_ERR; - } - - const size_t num_headers = aws_http_headers_count(headers); - for (size_t i = 0; i < num_headers; ++i) { - struct aws_http_header header; - aws_http_headers_get_index(headers, i, &header); - if (aws_hpack_encode_header(encoder->hpack, &header, encoder->huffman_mode, output)) { - return AWS_OP_ERR; - } - } - - return AWS_OP_SUCCESS; -} - -/* Encode the header-block's next frame (or encode nothing if output buffer is too small). - * Pass in the details of the block's first frame (HEADERS or PUSH_PROMISE). */ -int s_encode_single_header_block_frame( - struct aws_h2_frame_header_block *header_block, - const struct aws_h2_frame *first_frame, - uint8_t first_frame_pad_length, - const struct aws_h2_frame_priority_settings *first_frame_priority_settings, - bool first_frame_end_stream, - const uint32_t *first_frame_promised_stream_id, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *waiting_for_more_space) { - - AWS_ASSERT(first_frame->type == AWS_H2_FRAME_T_HEADERS || first_frame->type == AWS_H2_FRAME_T_PUSH_PROMISE); - - /* - * Figure out the details of the next frame to encode. - * The first frame will be either HEADERS or PUSH_PROMISE. 
- * All subsequent frames will be CONTINUATION - */ - - uint32_t stream_id = first_frame->stream_id; - enum aws_h2_frame_type frame_type; - uint8_t flags = 0; - uint8_t pad_length = 0; - const struct aws_h2_frame_priority_settings *priority_settings = NULL; - const uint32_t *promised_stream_id = NULL; - uint32_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */ - - if (header_block->state == AWS_H2_HEADER_BLOCK_STATE_FIRST_FRAME) { - frame_type = first_frame->type; - - if (first_frame_pad_length > 0) { - flags |= AWS_H2_FRAME_F_PADDED; - pad_length = first_frame_pad_length; - payload_overhead += 1 + pad_length; - } - - if (first_frame_priority_settings) { - priority_settings = first_frame_priority_settings; - flags |= AWS_H2_FRAME_F_PRIORITY; - payload_overhead += s_frame_priority_settings_size; - } - - if (first_frame_end_stream) { - flags |= AWS_H2_FRAME_F_END_STREAM; - } - - if (first_frame_promised_stream_id) { - promised_stream_id = first_frame_promised_stream_id; - payload_overhead += 4; - } - - } else /* CONTINUATION */ { - frame_type = AWS_H2_FRAME_T_CONTINUATION; - } - - /* - * Figure out what size header-block fragment should go in this frame. - */ - - size_t max_payload; - if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) { - goto handle_waiting_for_more_space; - } - - size_t max_fragment; - if (aws_sub_size_checked(max_payload, payload_overhead, &max_fragment)) { - goto handle_waiting_for_more_space; - } - - const size_t fragment_len = aws_min_size(max_fragment, header_block->encoded_block_cursor.len); - if (fragment_len == header_block->encoded_block_cursor.len) { - /* This will finish the header-block */ - flags |= AWS_H2_FRAME_F_END_HEADERS; - } else { - /* If we're not finishing the header-block, is it even worth trying to send this frame now? 
*/ - const size_t even_worth_sending_threshold = s_frame_prefix_length + payload_overhead; - if (fragment_len < even_worth_sending_threshold) { - goto handle_waiting_for_more_space; - } - } - - /* - * Ok, it fits! Write the frame - */ - - /* Write the frame prefix */ - if (s_frame_prefix_encode(frame_type, stream_id, fragment_len + payload_overhead, flags, output)) { - goto error; - } - - /* Write pad length */ - if (flags & AWS_H2_FRAME_F_PADDED) { - AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION); - if (!aws_byte_buf_write_u8(output, pad_length)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } - } - - /* Write priority */ - if (flags & AWS_H2_FRAME_F_PRIORITY) { - AWS_ASSERT(frame_type == AWS_H2_FRAME_T_HEADERS); - if (s_frame_priority_settings_encode(priority_settings, output)) { - goto error; - } - } - - /* Write promised stream ID */ - if (promised_stream_id) { - AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); - if (!aws_byte_buf_write_be32(output, *promised_stream_id & s_31_bit_mask)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } - } - - /* Write header-block fragment */ - if (fragment_len > 0) { - struct aws_byte_cursor fragment = header_block->encoded_block_cursor; - fragment.len = fragment_len; - if (!aws_byte_buf_write_from_whole_cursor(output, fragment)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } - } - - /* Write padding */ - if (flags & AWS_H2_FRAME_F_PADDED) { - if (!aws_byte_buf_write_u8_n(output, 0, pad_length)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } - } - - /* Success! Wrote entire frame. It's safe to change state now */ - header_block->state = flags & AWS_H2_FRAME_F_END_HEADERS ? 
AWS_H2_HEADER_BLOCK_STATE_COMPLETE - : AWS_H2_HEADER_BLOCK_STATE_CONTINUATION; - aws_byte_cursor_advance(&header_block->encoded_block_cursor, fragment_len); - *waiting_for_more_space = false; - return AWS_OP_SUCCESS; - -handle_waiting_for_more_space: - *waiting_for_more_space = true; - return AWS_OP_SUCCESS; - -error: - header_block->state = AWS_H2_HEADER_BLOCK_STATE_ERROR; - return AWS_OP_ERR; -} - -int aws_h2_frame_header_block_encode( - struct aws_h2_frame_header_block *header_block, - const struct aws_h2_frame *first_frame, - uint8_t first_frame_pad_length, - const struct aws_h2_frame_priority_settings *first_frame_priority_settings, - bool first_frame_end_stream, - const uint32_t *first_frame_promised_stream_id, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { - - *complete = false; - - if (header_block->state < AWS_H2_HEADER_BLOCK_STATE_COMPLETE) { - aws_raise_error(AWS_ERROR_INVALID_STATE); - goto error; - } - - /* Pre-encode the entire header-block the first time we're called. 
*/ - if (header_block->state == AWS_H2_HEADER_BLOCK_STATE_INIT) { - if (s_pre_encode_whole_header_block(encoder, header_block->headers, output)) { - goto error; - } - header_block->state = AWS_H2_HEADER_BLOCK_STATE_FIRST_FRAME; - } - - /* Write frames (HEADER or PUSH_PROMISE, followed by N CONTINUATION frames) - * until we're done writing header-block or the buffer is too full to continue */ - bool waiting_for_more_space = false; - while (header_block->state < AWS_H2_HEADER_BLOCK_STATE_COMPLETE && !waiting_for_more_space) { - if (s_encode_single_header_block_frame( - header_block, - first_frame, - first_frame_pad_length, - first_frame_priority_settings, - first_frame_end_stream, - first_frame_promised_stream_id, - encoder, - output, - &waiting_for_more_space)) { - goto error; - } - } - - *complete = header_block->state == AWS_H2_HEADER_BLOCK_STATE_COMPLETE; - return AWS_OP_SUCCESS; - -error: - header_block->state = AWS_H2_HEADER_BLOCK_STATE_ERROR; - return AWS_OP_ERR; -} - /*********************************************************************************************************************** * Common Frame Prefix **********************************************************************************************************************/ @@ -433,26 +207,28 @@ static void s_init_frame_base( } static int s_frame_prefix_encode( + struct aws_h2_frame_encoder *encoder, enum aws_h2_frame_type type, uint32_t stream_id, size_t length, uint8_t flags, struct aws_byte_buf *output) { + AWS_PRECONDITION(encoder); AWS_PRECONDITION(output); AWS_PRECONDITION(!(stream_id & s_u32_top_bit_mask), "Invalid stream ID"); - AWS_LOGF( - AWS_LL_TRACE, - AWS_LS_HTTP_FRAMES, - "Beginning encode of frame %s: stream: %" PRIu32 " payload length: %zu flags: %" PRIu8, + ENCODER_LOGF( + TRACE, + encoder, + "Encoding frame: type=%s stream_id=%" PRIu32 " payload_length=%zu flags=0x%02X", aws_h2_frame_type_to_str(type), stream_id, length, flags); /* Length must fit in 24 bits */ - /* #TODO Check against 
SETTINGS_MAX_FRAME_SIZE */ if (length > AWS_H2_PAYLOAD_MAX) { + ENCODER_LOGF(ERROR, encoder, "Payload size %zu exceeds max for HTTP/2", length); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } @@ -479,12 +255,14 @@ static int s_frame_prefix_encode( /*********************************************************************************************************************** * Encoder **********************************************************************************************************************/ -int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator) { +int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator, void *logging_id) { + AWS_PRECONDITION(encoder); AWS_PRECONDITION(allocator); AWS_ZERO_STRUCT(*encoder); encoder->allocator = allocator; + encoder->logging_id = logging_id; encoder->hpack = aws_hpack_context_new(allocator, AWS_LS_HTTP_ENCODER, encoder); if (!encoder->hpack) { @@ -517,7 +295,6 @@ int aws_h2_encode_data_frame( AWS_PRECONDITION(body_complete); if (aws_h2_validate_stream_id(stream_id)) { - AWS_LOGF_ERROR(AWS_LS_HTTP_FRAMES, "Invalid body stream ID"); return AWS_OP_ERR; } @@ -525,7 +302,7 @@ int aws_h2_encode_data_frame( uint8_t flags = 0; /* - * The payload length must precede everything else, but we don't know how + * Payload-length is the first thing encoded in a frame, but we don't know how * much data we'll get from the body-stream until we actually read it. * Therefore, we determine the exact location that the body data should go, * then stream the body directly into that part of the output buffer. 
@@ -587,7 +364,7 @@ int aws_h2_encode_data_frame( /* Write the frame prefix */ const size_t payload_len = body_sub_buf.len + payload_overhead; - if (s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output)) { + if (s_frame_prefix_encode(encoder, AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output)) { goto error; } @@ -614,9 +391,11 @@ int aws_h2_encode_data_frame( return AWS_OP_SUCCESS; handle_waiting_for_more_space: + ENCODER_LOGF(TRACE, encoder, "Insufficient space to encode DATA for stream %" PRIu32 " right now", stream_id); return AWS_OP_SUCCESS; handle_nothing_to_send_right_now: + ENCODER_LOGF(INFO, encoder, "Stream %" PRIu32 " produced 0 bytes of body data", stream_id); return AWS_OP_SUCCESS; error: @@ -624,65 +403,338 @@ int aws_h2_encode_data_frame( } /*********************************************************************************************************************** - * HEADERS + * HEADERS / PUSH_PROMISE **********************************************************************************************************************/ DEFINE_FRAME_VTABLE(headers); +DEFINE_FRAME_VTABLE(push_promise); -struct aws_h2_frame *aws_h2_frame_new_headers( +static struct aws_h2_frame *s_frame_new_headers_or_push_promise( struct aws_allocator *allocator, + enum aws_h2_frame_type frame_type, uint32_t stream_id, const struct aws_http_headers *headers, - bool end_stream, uint8_t pad_length, - const struct aws_h2_frame_priority_settings *optional_priority) { + bool end_stream, + const struct aws_h2_frame_priority_settings *optional_priority, + uint32_t promised_stream_id) { + + AWS_PRECONDITION(allocator); + AWS_PRECONDITION(frame_type == AWS_H2_FRAME_T_HEADERS || frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); + AWS_PRECONDITION(headers); + + /* Validate args */ + + if (aws_h2_validate_stream_id(stream_id)) { + return NULL; + } + + if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) { + if (aws_h2_validate_stream_id(promised_stream_id)) { + return NULL; + } 
+ } + + if (optional_priority && aws_h2_validate_stream_id(optional_priority->stream_dependency)) { + return NULL; + } + + /* Create */ struct aws_h2_frame_headers *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_headers)); if (!frame) { return NULL; } - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_HEADERS, &s_frame_headers_vtable, stream_id); - frame->end_stream = end_stream; - frame->pad_length = pad_length; - if (optional_priority) { - frame->has_priority = true; - frame->priority = *optional_priority; + if (aws_byte_buf_init(&frame->whole_encoded_header_block, allocator, s_encoded_header_block_reserve)) { + goto error; } - if (aws_h2_frame_header_block_init(&frame->header_block, allocator, headers)) { - goto error; + const struct aws_h2_frame_vtable *vtable; + if (frame_type == AWS_H2_FRAME_T_HEADERS) { + vtable = &s_frame_headers_vtable; + frame->end_stream = end_stream; + if (optional_priority) { + frame->has_priority = true; + frame->priority = *optional_priority; + } + } else { + vtable = &s_frame_push_promise_vtable; + frame->promised_stream_id = promised_stream_id; } + s_init_frame_base(&frame->base, allocator, frame_type, vtable, stream_id); + + aws_http_headers_acquire((struct aws_http_headers *)headers); + frame->headers = headers; + frame->pad_length = pad_length; + return &frame->base; + error: - aws_mem_release(allocator, frame); + s_frame_headers_destroy(&frame->base); return NULL; } + +struct aws_h2_frame *aws_h2_frame_new_headers( + struct aws_allocator *allocator, + uint32_t stream_id, + const struct aws_http_headers *headers, + bool end_stream, + uint8_t pad_length, + const struct aws_h2_frame_priority_settings *optional_priority) { + + return s_frame_new_headers_or_push_promise( + allocator, + AWS_H2_FRAME_T_HEADERS, + stream_id, + headers, + pad_length, + end_stream, + optional_priority, + 0 /* HEADERS doesn't have promised_stream_id */); +} + +struct aws_h2_frame *aws_h2_frame_new_push_promise( + struct 
aws_allocator *allocator, + uint32_t stream_id, + uint32_t promised_stream_id, + const struct aws_http_headers *headers, + uint8_t pad_length) { + + return s_frame_new_headers_or_push_promise( + allocator, + AWS_H2_FRAME_T_PUSH_PROMISE, + stream_id, + headers, + pad_length, + false /* PUSH_PROMISE doesn't have end_stream flag */, + NULL /* PUSH_PROMISE doesn't have priority_settings */, + promised_stream_id); +} + static void s_frame_headers_destroy(struct aws_h2_frame *frame_base) { struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base); - aws_h2_frame_header_block_clean_up(&frame->header_block); + aws_http_headers_release((struct aws_http_headers *)frame->headers); + aws_byte_buf_clean_up(&frame->whole_encoded_header_block); aws_mem_release(frame->base.alloc, frame); } +static void s_frame_push_promise_destroy(struct aws_h2_frame *frame_base) { + s_frame_headers_destroy(frame_base); +} + +/* Encode the next frame for this header-block (or encode nothing if output buffer is too small). */ +int s_encode_single_header_block_frame( + struct aws_h2_frame_headers *frame, + struct aws_h2_frame_encoder *encoder, + struct aws_byte_buf *output, + bool *waiting_for_more_space) { + + /* + * Figure out the details of the next frame to encode. + * The first frame will be either HEADERS or PUSH_PROMISE. 
+ * All subsequent frames will be CONTINUATION + */ + + enum aws_h2_frame_type frame_type; + uint8_t flags = 0; + uint8_t pad_length = 0; + const struct aws_h2_frame_priority_settings *priority_settings = NULL; + const uint32_t *promised_stream_id = NULL; + uint32_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */ + + if (frame->state == AWS_H2_HEADERS_STATE_FIRST_FRAME) { + frame_type = frame->base.type; + + if (frame->pad_length > 0) { + flags |= AWS_H2_FRAME_F_PADDED; + pad_length = frame->pad_length; + payload_overhead += 1 + pad_length; + } + + if (frame->has_priority) { + priority_settings = &frame->priority; + flags |= AWS_H2_FRAME_F_PRIORITY; + payload_overhead += s_frame_priority_settings_size; + } + + if (frame->end_stream) { + flags |= AWS_H2_FRAME_F_END_STREAM; + } + + if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) { + promised_stream_id = &frame->promised_stream_id; + payload_overhead += 4; + } + + } else /* CONTINUATION */ { + frame_type = AWS_H2_FRAME_T_CONTINUATION; + } + + /* + * Figure out what size header-block fragment should go in this frame. + */ + + size_t max_payload; + if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) { + goto handle_waiting_for_more_space; + } + + size_t max_fragment; + if (aws_sub_size_checked(max_payload, payload_overhead, &max_fragment)) { + goto handle_waiting_for_more_space; + } + + const size_t fragment_len = aws_min_size(max_fragment, frame->header_block_cursor.len); + if (fragment_len == frame->header_block_cursor.len) { + /* This will finish the header-block */ + flags |= AWS_H2_FRAME_F_END_HEADERS; + } else { + /* If we're not finishing the header-block, is it even worth trying to send this frame now? */ + const size_t even_worth_sending_threshold = s_frame_prefix_length + payload_overhead; + if (fragment_len < even_worth_sending_threshold) { + goto handle_waiting_for_more_space; + } + } + + /* + * Ok, it fits! 
Write the frame + */ + + /* Write the frame prefix */ + const size_t payload_len = fragment_len + payload_overhead; + if (s_frame_prefix_encode(encoder, frame_type, frame->base.stream_id, payload_len, flags, output)) { + goto error; + } + + /* Write pad length */ + if (flags & AWS_H2_FRAME_F_PADDED) { + AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION); + if (!aws_byte_buf_write_u8(output, pad_length)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; + } + } + + /* Write priority */ + if (flags & AWS_H2_FRAME_F_PRIORITY) { + AWS_ASSERT(frame_type == AWS_H2_FRAME_T_HEADERS); + if (s_frame_priority_settings_encode(priority_settings, output)) { + goto error; + } + } + + /* Write promised stream ID */ + if (promised_stream_id) { + AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); + if (!aws_byte_buf_write_be32(output, *promised_stream_id & s_31_bit_mask)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; + } + } + + /* Write header-block fragment */ + if (fragment_len > 0) { + struct aws_byte_cursor fragment = aws_byte_cursor_advance(&frame->header_block_cursor, fragment_len); + if (!aws_byte_buf_write_from_whole_cursor(output, fragment)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; + } + } + + /* Write padding */ + if (flags & AWS_H2_FRAME_F_PADDED) { + if (!aws_byte_buf_write_u8_n(output, 0, pad_length)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; + } + } + + /* Success! Wrote entire frame. It's safe to change state now */ + frame->state = + flags & AWS_H2_FRAME_F_END_HEADERS ? 
AWS_H2_HEADERS_STATE_COMPLETE : AWS_H2_HEADERS_STATE_CONTINUATION; + *waiting_for_more_space = false; + return AWS_OP_SUCCESS; + +handle_waiting_for_more_space: + ENCODER_LOGF( + TRACE, + encoder, + "Insufficient space to encode %s for stream %" PRIu32 " right now", + aws_h2_frame_type_to_str(frame->base.type), + frame->base.stream_id); + *waiting_for_more_space = true; + return AWS_OP_SUCCESS; + +error: + frame->state = AWS_H2_HEADERS_STATE_ERROR; + return AWS_OP_ERR; +} + static int s_frame_headers_encode( struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, bool *complete) { - (void)encoder; struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base); - return aws_h2_frame_header_block_encode( - &frame->header_block, - &frame->base, - frame->pad_length, - frame->has_priority ? &frame->priority : NULL, - frame->end_stream, - NULL /* HEADERS doesn't have promised_stream_id */, - encoder, - output, - complete); + if (frame->state >= AWS_H2_HEADERS_STATE_COMPLETE) { + aws_raise_error(AWS_ERROR_INVALID_STATE); + goto error; + } + + /* Pre-encode the entire header-block into another buffer + * the first time we're called. 
*/ + if (frame->state == AWS_H2_HEADERS_STATE_INIT) { + if (aws_hpack_encode_header_block(encoder->hpack, frame->headers, &frame->whole_encoded_header_block)) { + ENCODER_LOGF( + ERROR, + encoder, + "Error doing HPACK encoding on %s of stream %" PRIu32 ": %s", + aws_h2_frame_type_to_str(frame->base.type), + frame->base.stream_id, + aws_error_name(aws_last_error())); + goto error; + } + + frame->header_block_cursor = aws_byte_cursor_from_buf(&frame->whole_encoded_header_block); + frame->state = AWS_H2_HEADERS_STATE_FIRST_FRAME; + } + + /* Write frames (HEADER or PUSH_PROMISE, followed by N CONTINUATION frames) + * until we're done writing header-block or the buffer is too full to continue */ + bool waiting_for_more_space = false; + while (frame->state < AWS_H2_HEADERS_STATE_COMPLETE && !waiting_for_more_space) { + if (s_encode_single_header_block_frame(frame, encoder, output, &waiting_for_more_space)) { + goto error; + } + } + + if (waiting_for_more_space) { + ENCODER_LOGF( + TRACE, + encoder, + "Insufficient space to finish encoding %s header-block for stream %" PRIu32 " right now", + aws_h2_frame_type_to_str(frame->base.type), + frame->base.stream_id); + } + + *complete = frame->state == AWS_H2_HEADERS_STATE_COMPLETE; + return AWS_OP_SUCCESS; + +error: + frame->state = AWS_H2_HEADERS_STATE_ERROR; + return AWS_OP_ERR; +} + +static int s_frame_push_promise_encode( + struct aws_h2_frame *frame_base, + struct aws_h2_frame_encoder *encoder, + struct aws_byte_buf *output, + bool *complete) { + + return s_frame_headers_encode(frame_base, encoder, output, complete); } /*********************************************************************************************************************** @@ -696,6 +748,13 @@ struct aws_h2_frame *aws_h2_frame_new_priority( uint32_t stream_id, const struct aws_h2_frame_priority_settings *priority) { + AWS_PRECONDITION(allocator); + AWS_PRECONDITION(priority); + + if (aws_h2_validate_stream_id(stream_id) || 
aws_h2_validate_stream_id(priority->stream_dependency)) { + return NULL; + } + struct aws_h2_frame_priority *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_priority)); if (!frame) { return NULL; @@ -724,13 +783,19 @@ static int s_frame_priority_encode( const size_t space_available = output->capacity - output->len; /* If we can't encode the whole frame at once, try again later */ - if (total_len < space_available) { + if (total_len > space_available) { + ENCODER_LOGF( + TRACE, + encoder, + "Insufficient space to encode PRIORITY for stream %" PRIu32 " right now", + frame->base.stream_id); + *complete = false; return AWS_OP_SUCCESS; } /* Write the frame prefix */ - if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, s_frame_priority_length, 0, output)) { + if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, s_frame_priority_length, 0, output)) { return AWS_OP_ERR; } @@ -754,6 +819,10 @@ struct aws_h2_frame *aws_h2_frame_new_rst_stream( uint32_t stream_id, enum aws_h2_error_codes error_code) { + if (aws_h2_validate_stream_id(stream_id)) { + return NULL; + } + struct aws_h2_frame_rst_stream *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_rst_stream)); if (!frame) { return NULL; @@ -762,7 +831,7 @@ struct aws_h2_frame *aws_h2_frame_new_rst_stream( s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_RST_STREAM, &s_frame_rst_stream_vtable, stream_id); frame->error_code = error_code; - return AWS_OP_SUCCESS; + return &frame->base; } static void s_frame_rst_stream_destroy(struct aws_h2_frame *frame_base) { @@ -782,13 +851,18 @@ static int s_frame_rst_stream_encode( const size_t space_available = output->capacity - output->len; /* If we can't encode the whole frame at once, try again later */ - if (total_len < space_available) { + if (total_len > space_available) { + ENCODER_LOGF( + TRACE, + encoder, + "Insufficient space to encode RST_STREAM for stream %" PRIu32 " right now", + 
frame->base.stream_id); *complete = false; return AWS_OP_SUCCESS; } /* Write the frame prefix */ - if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, s_frame_rst_stream_length, 0, output)) { + if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, s_frame_rst_stream_length, 0, output)) { return AWS_OP_ERR; } @@ -816,6 +890,14 @@ struct aws_h2_frame *aws_h2_frame_new_settings( AWS_PRECONDITION(!ack || num_settings == 0, "Settings ACK must be empty"); AWS_PRECONDITION(settings_array || num_settings == 0); + /* Check against insane edge case of too many settings to fit in a frame. + * Arbitrarily choosing half the default payload size */ + size_t max_settings = s_settings_and_goaway_payload_limit / s_frame_setting_length; + if (num_settings > max_settings) { + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + struct aws_h2_frame_settings *frame; struct aws_h2_frame_setting *array_alloc; const size_t sizeof_settings_array = sizeof(struct aws_h2_frame_setting) * num_settings; @@ -849,11 +931,11 @@ static int s_frame_settings_encode( struct aws_h2_frame_settings *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_settings, base); const size_t payload_len = frame->settings_count * s_frame_setting_length; - const size_t total_len = s_frame_prefix_length + payload_len; - const size_t space_available = output->capacity - output->len; /* If we can't encode the whole frame at once, try again later */ - if (total_len < space_available) { + size_t max_payload; + if (s_get_max_contiguous_payload_length(encoder, output, &max_payload) || max_payload < payload_len) { + ENCODER_LOG(TRACE, encoder, "Insufficient space to encode SETTINGS right now"); *complete = false; return AWS_OP_SUCCESS; } @@ -864,7 +946,7 @@ static int s_frame_settings_encode( flags |= AWS_H2_FRAME_F_ACK; } - if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, payload_len, flags, output)) { + if (s_frame_prefix_encode(encoder, 
frame->base.type, frame->base.stream_id, payload_len, flags, output)) { return AWS_OP_ERR; } @@ -881,64 +963,6 @@ static int s_frame_settings_encode( return AWS_OP_SUCCESS; } -/*********************************************************************************************************************** - * PUSH_PROMISE - **********************************************************************************************************************/ -DEFINE_FRAME_VTABLE(push_promise); - -struct aws_h2_frame *aws_h2_frame_new_push_promise( - struct aws_allocator *allocator, - uint32_t stream_id, - uint32_t promised_stream_id, - const struct aws_http_headers *headers, - uint8_t pad_length) { - - struct aws_h2_frame_push_promise *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_push_promise)); - if (!frame) { - return NULL; - } - - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_PUSH_PROMISE, &s_frame_push_promise_vtable, stream_id); - frame->promised_stream_id = promised_stream_id; - frame->pad_length = pad_length; - - if (aws_h2_frame_header_block_init(&frame->header_block, allocator, headers)) { - goto error; - } - - return &frame->base; -error: - aws_mem_release(allocator, frame); - return NULL; -} - -static void s_frame_push_promise_destroy(struct aws_h2_frame *frame_base) { - struct aws_h2_frame_push_promise *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_push_promise, base); - aws_h2_frame_header_block_clean_up(&frame->header_block); - aws_mem_release(frame->base.alloc, frame); -} - -static int s_frame_push_promise_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { - - (void)encoder; - struct aws_h2_frame_push_promise *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_push_promise, base); - - return aws_h2_frame_header_block_encode( - &frame->header_block, - &frame->base, - frame->pad_length, - NULL /* PUSH_PROMISE doesn't have priority_settings */, - 
false /* PUSH_PROMISE doesn't have end_stream flag */, - &frame->promised_stream_id, - encoder, - output, - complete); -} - /*********************************************************************************************************************** * PING **********************************************************************************************************************/ @@ -978,7 +1002,8 @@ static int s_frame_ping_encode( const size_t space_available = output->capacity - output->len; /* If we can't encode the whole frame at once, try again later */ - if (total_len < space_available) { + if (total_len > space_available) { + ENCODER_LOG(TRACE, encoder, "Insufficient space to encode PING right now"); *complete = false; return AWS_OP_SUCCESS; } @@ -989,7 +1014,7 @@ static int s_frame_ping_encode( flags |= AWS_H2_FRAME_F_ACK; } - if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, AWS_H2_PING_DATA_SIZE, flags, output)) { + if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, AWS_H2_PING_DATA_SIZE, flags, output)) { return AWS_OP_ERR; } @@ -1013,6 +1038,18 @@ struct aws_h2_frame *aws_h2_frame_new_goaway( enum aws_h2_error_codes error_code, struct aws_byte_cursor debug_data) { + /* If debug_data is too long, don't sent it. + * It's more important that the GOAWAY frame gets sent. */ + if (debug_data.len > s_settings_and_goaway_payload_limit) { + AWS_LOGF_WARN( + AWS_LS_HTTP_ENCODER, + "Sending GOAWAY without debug-data. Debug-data size %zu exceeds internal limit of %zu", + debug_data.len, + s_settings_and_goaway_payload_limit); + + debug_data.len = 0; + } + struct aws_h2_frame_goaway *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_goaway)); if (!frame) { return NULL; @@ -1038,19 +1075,19 @@ static int s_frame_goaway_encode( (void)encoder; struct aws_h2_frame_goaway *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_goaway, base); - /* # TODO: handle max payload len. simply truncate debug data? 
 */
     const size_t payload_len = 8 + frame->debug_data.len;
     const size_t total_len = s_frame_prefix_length + payload_len;
     const size_t space_available = output->capacity - output->len;
 
     /* If we can't encode the whole frame at once, try again later */
-    if (total_len < space_available) {
+    if (total_len > space_available) {
+        ENCODER_LOG(TRACE, encoder, "Insufficient space to encode GOAWAY right now");
         *complete = false;
         return AWS_OP_SUCCESS;
     }
 
     /* Write the frame prefix */
-    if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, payload_len, 0, output)) {
+    if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, payload_len, 0, output)) {
         return AWS_OP_ERR;
     }
 
@@ -1077,6 +1114,12 @@ struct aws_h2_frame *aws_h2_frame_new_window_update(
     uint32_t stream_id,
     uint32_t window_size_increment) {
 
+    /* Note: stream_id may be zero or non-zero */
+    if (stream_id > AWS_H2_STREAM_ID_MAX) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
     if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) {
         aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
         return NULL;
@@ -1110,13 +1153,15 @@ static int s_frame_window_update_encode(
     const size_t space_available = output->capacity - output->len;
 
     /* If we can't encode the whole frame at once, try again later */
-    if (total_len < space_available) {
+    if (total_len > space_available) {
+        ENCODER_LOG(TRACE, encoder, "Insufficient space to encode WINDOW_UPDATE right now");
         *complete = false;
         return AWS_OP_SUCCESS;
     }
 
     /* Write the frame prefix */
-    if (s_frame_prefix_encode(frame->base.type, frame->base.stream_id, s_frame_window_update_length, 0, output)) {
+    if (s_frame_prefix_encode(
+        encoder, frame->base.type, frame->base.stream_id, s_frame_window_update_length, 0, output)) {
         return AWS_OP_ERR;
     }
 
@@ -1147,18 +1192,25 @@ int aws_h2_encode_frame(
     AWS_PRECONDITION(frame_complete);
 
     if (encoder->has_errored) {
-        AWS_LOGF_ERROR(AWS_LS_HTTP_FRAMES, "Encoder cannot be used again after an error");
+        ENCODER_LOG(ERROR, 
encoder, "Encoder cannot be used again after an error"); return aws_raise_error(AWS_ERROR_INVALID_STATE); } if (encoder->current_frame && (encoder->current_frame != frame)) { - AWS_LOGF_ERROR(AWS_LS_HTTP_FRAMES, "Cannot encode new frame until previous frames completes"); + ENCODER_LOG(ERROR, encoder, "Cannot encode new frame until previous frame completes"); return aws_raise_error(AWS_ERROR_INVALID_STATE); } *frame_complete = false; if (frame->vtable->encode(frame, encoder, output, frame_complete)) { + ENCODER_LOGF( + ERROR, + encoder, + "Failed to encode frame type=%s stream_id=%" PRIu32 ", %s", + aws_h2_frame_type_to_str(frame->type), + frame->stream_id, + aws_error_name(aws_last_error())); encoder->has_errored = true; return AWS_OP_ERR; } diff --git a/source/hpack.c b/source/hpack.c index 5699b320c..b0a6e75e6 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -75,25 +75,6 @@ static int s_ensure_space(struct aws_byte_buf *output, size_t required_space) { return aws_byte_buf_reserve(output, reserve); } -size_t aws_hpack_get_encoded_length_integer(uint64_t integer, uint8_t prefix_size) { - const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size); - - if (integer < prefix_mask) { - /* If the integer fits inside the specified number of bits but won't be all 1's, then that's all she wrote */ - - return 1; - } else { - integer -= prefix_mask; - - size_t num_bytes = 1; - do { - ++num_bytes; - integer >>= 7; - } while (integer); - return num_bytes; - } -} - int aws_hpack_encode_integer( uint64_t integer, uint8_t starting_bits, @@ -249,6 +230,7 @@ void aws_hpack_static_table_clean_up() { struct aws_hpack_context { struct aws_allocator *allocator; + enum aws_hpack_huffman_mode huffman_mode; enum aws_http_log_subject log_subject; void *log_id; @@ -343,6 +325,7 @@ struct aws_hpack_context *aws_hpack_context_new( return NULL; } context->allocator = allocator; + context->huffman_mode = AWS_HPACK_HUFFMAN_SMALLEST; context->log_subject = log_subject; context->log_id = 
log_id; @@ -422,6 +405,10 @@ void aws_hpack_context_destroy(struct aws_hpack_context *context) { aws_mem_release(context->allocator, context); } +void aws_hpack_set_huffman_mode(struct aws_hpack_context *context, enum aws_hpack_huffman_mode mode) { + context->huffman_mode = mode; +} + size_t aws_hpack_get_header_size(const struct aws_http_header *header) { return header->name.len + header->value.len + 32; } @@ -822,7 +809,6 @@ int aws_hpack_decode_integer( int aws_hpack_encode_string( struct aws_hpack_context *context, struct aws_byte_cursor to_encode, - enum aws_hpack_huffman_mode huffman_mode, struct aws_byte_buf *output) { AWS_PRECONDITION(context); @@ -834,7 +820,7 @@ int aws_hpack_encode_string( /* Determine length of encoded string (and whether or not to use huffman) */ uint8_t use_huffman; size_t str_length; - switch (huffman_mode) { + switch (context->huffman_mode) { case AWS_HPACK_HUFFMAN_NEVER: use_huffman = 0; str_length = to_encode.len; @@ -1279,10 +1265,9 @@ static int s_convert_http_compression_to_literal_entry_type( return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } -int aws_hpack_encode_header( +static int s_encode_header_field( struct aws_hpack_context *context, const struct aws_http_header *header, - enum aws_hpack_huffman_mode huffman_mode, struct aws_byte_buf *output) { AWS_PRECONDITION(context); @@ -1342,13 +1327,13 @@ int aws_hpack_encode_header( } /* next encode header-name string */ - if (aws_hpack_encode_string(context, header->name, huffman_mode, output)) { + if (aws_hpack_encode_string(context, header->name, output)) { goto error; } } /* then encode header-value string, and we're done encoding! 
*/ - if (aws_hpack_encode_string(context, header->value, huffman_mode, output)) { + if (aws_hpack_encode_string(context, header->value, output)) { goto error; } @@ -1365,8 +1350,14 @@ int aws_hpack_encode_header( return AWS_OP_ERR; } -int aws_hpack_encode_header_block_start(struct aws_hpack_context *context, struct aws_byte_buf *output) { +int aws_hpack_encode_header_block( + struct aws_hpack_context *context, + const struct aws_http_headers *headers, + struct aws_byte_buf *output) { + #if 0 // #TODO finish hooking this up + /* Encode a dynamic table size update at the beginning of the first header-block + * following the change to the dynamic table size RFC-7541 4.2 */ if (context->dynamic_table_size_update.pending) { if (s_encode_dynamic_table_resize(context, context->dynamic_table_size_update.value, output)) { uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE]; @@ -1378,9 +1369,16 @@ int aws_hpack_encode_header_block_start(struct aws_hpack_context *context, struc context->dynamic_table_size_update.pending = false; context->dynamic_table_size_update.value = SIZE_MAX; } -#else - (void)context; - (void)output; #endif + + const size_t num_headers = aws_http_headers_count(headers); + for (size_t i = 0; i < num_headers; ++i) { + struct aws_http_header header; + aws_http_headers_get_index(headers, i, &header); + if (s_encode_header_field(context, &header, output)) { + return AWS_OP_ERR; + } + } + return AWS_OP_SUCCESS; } diff --git a/source/http.c b/source/http.c index e9bb29184..2a3b23782 100644 --- a/source/http.c +++ b/source/http.c @@ -143,8 +143,6 @@ static struct aws_log_subject_info s_log_subject_infos[] = { DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "connection-manager", "HTTP connection manager"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET, "websocket", "Websocket"), DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET_SETUP, "websocket-setup", "Websocket setup"), - - 
DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_FRAMES, "http-frames", "HTTP frame library"), }; static struct aws_log_subject_info_list s_log_subject_list = { diff --git a/source/request_response.c b/source/request_response.c index 9b2bd0866..2a14d7c23 100644 --- a/source/request_response.c +++ b/source/request_response.c @@ -97,21 +97,21 @@ void aws_http_headers_acquire(struct aws_http_headers *headers) { aws_atomic_fetch_add(&headers->refcount, 1); } -int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { +int aws_http_headers_add_v2(struct aws_http_headers *headers, const struct aws_http_header *header) { AWS_PRECONDITION(headers); - AWS_PRECONDITION(aws_byte_cursor_is_valid(&name) && aws_byte_cursor_is_valid(&value)); + AWS_PRECONDITION(header); + AWS_PRECONDITION(aws_byte_cursor_is_valid(&header->name) && aws_byte_cursor_is_valid(&header->value)); - if (name.len == 0) { + if (header->name.len == 0) { return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME); } size_t total_len; - if (aws_add_size_checked(name.len, value.len, &total_len)) { + if (aws_add_size_checked(header->name.len, header->value.len, &total_len)) { return AWS_OP_ERR; } - struct aws_http_header header = {.name = name, .value = value}; - + struct aws_http_header header_copy = *header; /* Store our own copy of the strings. * We put the name and value into the same allocation. 
*/ uint8_t *strmem = aws_mem_acquire(headers->alloc, total_len); @@ -120,10 +120,10 @@ int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_curso } struct aws_byte_buf strbuf = aws_byte_buf_from_empty_array(strmem, total_len); - aws_byte_buf_append_and_update(&strbuf, &header.name); - aws_byte_buf_append_and_update(&strbuf, &header.value); + aws_byte_buf_append_and_update(&strbuf, &header_copy.name); + aws_byte_buf_append_and_update(&strbuf, &header_copy.value); - if (aws_array_list_push_back(&headers->array_list, &header)) { + if (aws_array_list_push_back(&headers->array_list, &header_copy)) { goto error; } @@ -134,6 +134,11 @@ int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_curso return AWS_OP_ERR; } +int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { + struct aws_http_header header = {.name = name, .value = value}; + return aws_http_headers_add_v2(headers, &header); +} + void aws_http_headers_clear(struct aws_http_headers *headers) { AWS_PRECONDITION(headers); @@ -235,7 +240,7 @@ int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aw const size_t orig_count = aws_http_headers_count(headers); for (size_t i = 0; i < count; ++i) { - if (aws_http_headers_add(headers, array[i].name, array[i].value)) { + if (aws_http_headers_add_v2(headers, &array[i])) { goto error; } } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d7853fc94..49024220c 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -222,7 +222,6 @@ add_test_case(h2_encoder_push_promise) add_test_case(h2_encoder_ping) add_test_case(h2_encoder_goaway) add_test_case(h2_encoder_window_update) -add_test_case(h2_encoder_continuation) add_test_case(h2_decoder_sanity_check) add_h2_decoder_test_set(h2_decoder_data) diff --git a/tests/test_h2_encoder.c b/tests/test_h2_encoder.c index 9b1e8d39d..5d18491b6 100644 --- a/tests/test_h2_encoder.c +++ 
b/tests/test_h2_encoder.c @@ -15,6 +15,7 @@ #include #include +#include static int s_fixture_init(struct aws_allocator *allocator, void *ctx) { (void)ctx; @@ -34,22 +35,31 @@ static int s_fixture_clean_up(struct aws_allocator *allocator, int setup_res, vo AWS_TEST_CASE_FIXTURE(NAME, s_fixture_init, s_test_##NAME, s_fixture_clean_up, NULL); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) +#define DEFINE_STATIC_HEADER(_key, _value, _behavior) \ + { \ + .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_key), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \ + .compression = AWS_HTTP_HEADER_COMPRESSION_##_behavior, \ + } + /* Run the given frame's encoder and check that it outputs the expected bytes */ -static int s_encode( +static int s_encode_frame( struct aws_allocator *allocator, - struct aws_h2_frame_base *frame, + struct aws_h2_frame *frame, const uint8_t *expected, size_t expected_size) { struct aws_h2_frame_encoder encoder; - ASSERT_SUCCESS(aws_h2_frame_encoder_init(&encoder, allocator)); + ASSERT_SUCCESS(aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/)); struct aws_byte_buf buffer; + /* Allocate more room than necessary, easier to debug the full output than a failed aws_h2_encode_frame() call */ ASSERT_SUCCESS(aws_byte_buf_init(&buffer, allocator, expected_size * 2)); - ASSERT_SUCCESS(aws_h2_encode_frame(&encoder, frame, &buffer)); + bool frame_complete; + ASSERT_SUCCESS(aws_h2_encode_frame(&encoder, frame, &buffer, &frame_complete)); ASSERT_BIN_ARRAYS_EQUALS(expected, expected_size, buffer.buffer, buffer.len); + ASSERT_UINT_EQUALS(true, frame_complete); aws_byte_buf_clean_up(&buffer); aws_h2_frame_encoder_clean_up(&encoder); @@ -59,12 +69,15 @@ static int s_encode( TEST_CASE(h2_encoder_data) { (void)ctx; - struct aws_h2_frame_data frame; - ASSERT_SUCCESS(aws_h2_frame_data_init(&frame, allocator)); - frame.base.stream_id = 0x76543210; - frame.end_stream = true; - frame.pad_length = 2; - frame.data = 
aws_byte_cursor_from_c_str("hello"); + struct aws_h2_frame_encoder encoder; + ASSERT_SUCCESS(aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/)); + + struct aws_byte_buf output; + ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 1024)); + + struct aws_byte_cursor body_src = aws_byte_cursor_from_c_str("hello"); + struct aws_input_stream *body = aws_input_stream_new_from_cursor(allocator, &body_src); + ASSERT_NOT_NULL(body); /* clang-format off */ uint8_t expected[] = { @@ -79,30 +92,48 @@ TEST_CASE(h2_encoder_data) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_data_clean_up(&frame); + bool body_complete; + ASSERT_SUCCESS(aws_h2_encode_data_frame( + &encoder, + 0x76543210 /*stream_id*/, + body, + true /*body_ends_stream*/, + 2 /*pad_length*/, + &output, + &body_complete)); + + ASSERT_BIN_ARRAYS_EQUALS(expected, sizeof(expected), output.buffer, output.len); + ASSERT_UINT_EQUALS(true, body_complete); + + aws_byte_buf_clean_up(&output); + aws_input_stream_destroy(body); + aws_h2_frame_encoder_clean_up(&encoder); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_headers) { (void)ctx; - struct aws_h2_frame_headers frame; - ASSERT_SUCCESS(aws_h2_frame_headers_init(&frame, allocator)); - frame.base.stream_id = 0x76543210; - frame.end_headers = true; - frame.end_stream = true; - frame.pad_length = 2; - frame.has_priority = true; - frame.priority.stream_dependency_exclusive = true; - frame.priority.stream_dependency = 0x01234567; - frame.priority.weight = 9; + struct aws_http_headers *headers = aws_http_headers_new(allocator); + ASSERT_NOT_NULL(headers); - /* Intentionally leaving header block fragment empty. 
Header block encoding is tested elsewhere */ + struct aws_http_header h = DEFINE_STATIC_HEADER(":status", "302", USE_CACHE); + + ASSERT_SUCCESS(aws_http_headers_add_v2(headers, &h)); + + struct aws_h2_frame_priority_settings priority = { + .stream_dependency_exclusive = true, + .stream_dependency = 0x01234567, + .weight = 9, + }; + + struct aws_h2_frame *frame = aws_h2_frame_new_headers( + allocator, 0x76543210 /*stream_id*/, headers, true /*end_stream*/, 2 /*pad_length*/, &priority); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { - 0x00, 0x00, 0x08, /* Length (24) */ + 0x00, 0x00, 12, /* Length (24) */ AWS_H2_FRAME_T_HEADERS, /* Type (8) */ AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_PRIORITY, /* Flags (8) */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ @@ -110,25 +141,28 @@ TEST_CASE(h2_encoder_headers) { 0x02, /* Pad Length (8) - F_PADDED */ 0x81, 0x23, 0x45, 0x67, /* Exclusive (1) | Stream Dependency (31) - F_PRIORITY*/ 0x09, /* Weight (8) - F_PRIORITY */ - /* Header Block Fragment (*) */ + 0x48, 0x82, 0x64, 0x02, /* ":status: 302" - indexed name, huffman-compressed value */ 0x00, 0x00 /* Padding (*) - F_PADDED */ }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_headers_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); + aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_priority) { (void)ctx; - struct aws_h2_frame_priority frame; - ASSERT_SUCCESS(aws_h2_frame_priority_init(&frame, allocator)); - frame.base.stream_id = 0x76543210; - frame.priority.stream_dependency_exclusive = true; - frame.priority.stream_dependency = 0x01234567; - frame.priority.weight = 9; + struct aws_h2_frame_priority_settings priority = { + .stream_dependency_exclusive = true, + .stream_dependency = 0x01234567, + 
.weight = 9, + }; + + struct aws_h2_frame *frame = aws_h2_frame_new_priority(allocator, 0x76543210 /*stream_id*/, &priority); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { @@ -142,19 +176,17 @@ TEST_CASE(h2_encoder_priority) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - - aws_h2_frame_priority_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_rst_stream) { (void)ctx; - struct aws_h2_frame_rst_stream frame; - ASSERT_SUCCESS(aws_h2_frame_rst_stream_init(&frame, allocator)); - frame.base.stream_id = 0x76543210; - frame.error_code = 0xFEEDBEEF; + struct aws_h2_frame *frame = + aws_h2_frame_new_rst_stream(allocator, 0x76543210 /*stream_id*/, 0xFEEDBEEF /*error_code*/); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { @@ -167,8 +199,8 @@ TEST_CASE(h2_encoder_rst_stream) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_rst_stream_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } @@ -181,10 +213,9 @@ TEST_CASE(h2_encoder_settings) { {.id = 0xFFFF, .value = 0xFFFFFFFF}, /* max value */ }; - struct aws_h2_frame_settings frame; - ASSERT_SUCCESS(aws_h2_frame_settings_init(&frame, allocator)); - frame.settings_array = settings; - frame.settings_count = AWS_ARRAY_SIZE(settings); + struct aws_h2_frame *frame = + aws_h2_frame_new_settings(allocator, settings, AWS_ARRAY_SIZE(settings), false /*ack*/); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { @@ -202,17 +233,17 @@ TEST_CASE(h2_encoder_settings) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_settings_clean_up(&frame); + 
ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_settings_ack) { (void)ctx; - struct aws_h2_frame_settings frame; - ASSERT_SUCCESS(aws_h2_frame_settings_init(&frame, allocator)); - frame.ack = true; + struct aws_h2_frame *frame = + aws_h2_frame_new_settings(allocator, NULL /*settings_array*/, 0 /*num_settings*/, true /*ack*/); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { @@ -224,51 +255,58 @@ TEST_CASE(h2_encoder_settings_ack) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_settings_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_push_promise) { (void)ctx; - struct aws_h2_frame_push_promise frame; - ASSERT_SUCCESS(aws_h2_frame_push_promise_init(&frame, allocator)); - frame.base.stream_id = 0x00000001; - frame.promised_stream_id = 0x76543210; - frame.end_headers = true; - frame.pad_length = 2; + struct aws_http_header headers_array[] = { + DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), + DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), + DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), + DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), + }; + struct aws_http_headers *headers = aws_http_headers_new(allocator); + ASSERT_NOT_NULL(headers); + ASSERT_SUCCESS(aws_http_headers_add_array(headers, headers_array, AWS_ARRAY_SIZE(headers_array))); - /* Intentionally leaving header block fragment empty. 
Header block encoding is tested elsewhere */ + struct aws_h2_frame *frame = aws_h2_frame_new_push_promise( + allocator, 0x00000001 /*stream_id*/, 0x76543210 /*promised_stream_id*/, headers, 2 /*pad_length*/); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { - 0x00, 0x00, 0x07, /* Length (24) */ + 0x00, 0x00, 24, /* Length (24) */ AWS_H2_FRAME_T_PUSH_PROMISE,/* Type (8) */ AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED, /* Flags (8) */ 0x00, 0x00, 0x00, 0x01, /* Reserved (1) | Stream Identifier (31) */ /* PUSH_PROMISE */ 0x02, /* Pad Length (8) | F_PADDED */ 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Promised Stream ID (31) */ - /* Header Block Fragment (*) */ + + /* Header Block Fragment (*) (values from RFC-7541 example C.4.1) */ + 0x82, 0x86, 0x84, 0x41, 0x8c, 0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff, + 0x00, 0x00, /* Padding (*) | F_PADDED*/ }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_push_promise_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); + aws_http_headers_release(headers); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_ping) { (void)ctx; - struct aws_h2_frame_ping frame; - ASSERT_SUCCESS(aws_h2_frame_ping_init(&frame, allocator)); - frame.ack = true; - for (uint8_t i = 0; i < AWS_H2_PING_DATA_SIZE; ++i) { - frame.opaque_data[i] = i; - } + uint8_t opaque_data[AWS_H2_PING_DATA_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7}; + + struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, true /*ack*/, opaque_data); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { @@ -281,19 +319,20 @@ TEST_CASE(h2_encoder_ping) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_ping_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + 
aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_goaway) { (void)ctx; - struct aws_h2_frame_goaway frame; - ASSERT_SUCCESS(aws_h2_frame_goaway_init(&frame, allocator)); - frame.last_stream_id = 0x77665544; - frame.error_code = 0xFFEEDDCC; - frame.debug_data = aws_byte_cursor_from_c_str("goodbye"); + struct aws_h2_frame *frame = aws_h2_frame_new_goaway( + allocator, + 0x77665544 /*last_stream_id*/, + 0xFFEEDDCC /*error_code*/, + aws_byte_cursor_from_c_str("goodbye") /*debug_data*/); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { @@ -308,18 +347,17 @@ TEST_CASE(h2_encoder_goaway) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_goaway_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } TEST_CASE(h2_encoder_window_update) { (void)ctx; - struct aws_h2_frame_window_update frame; - ASSERT_SUCCESS(aws_h2_frame_window_update_init(&frame, allocator)); - frame.base.stream_id = 0x76543210; - frame.window_size_increment = 0x7FFFFFFF; + struct aws_h2_frame *frame = + aws_h2_frame_new_window_update(allocator, 0x76543210 /*stream_id*/, 0x7FFFFFFF /*window_size_increment*/); + ASSERT_NOT_NULL(frame); /* clang-format off */ uint8_t expected[] = { @@ -332,32 +370,7 @@ TEST_CASE(h2_encoder_window_update) { }; /* clang-format on */ - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_window_update_clean_up(&frame); - return AWS_OP_SUCCESS; -} - -TEST_CASE(h2_encoder_continuation) { - (void)ctx; - - struct aws_h2_frame_continuation frame; - ASSERT_SUCCESS(aws_h2_frame_continuation_init(&frame, allocator)); - frame.base.stream_id = 0x76543210; - frame.end_headers = true; - - /* Intentionally leaving header block fragment empty. 
Header block encoding is tested elsewhere */ - - /* clang-format off */ - uint8_t expected[] = { - 0x00, 0x00, 0x00, /* Length (24) */ - AWS_H2_FRAME_T_CONTINUATION,/* Type (8) */ - AWS_H2_FRAME_F_END_HEADERS, /* Flags (8) */ - 0x76, 0x54, 0x32, 0x10, /* Reserved (1) | Stream Identifier (31) */ - /* CONTINUATION */ - }; - /* clang-format on */ - - ASSERT_SUCCESS(s_encode(allocator, &frame.base, expected, sizeof(expected))); - aws_h2_frame_continuation_clean_up(&frame); + ASSERT_SUCCESS(s_encode_frame(allocator, frame, expected, sizeof(expected))); + aws_h2_frame_destroy(frame); return AWS_OP_SUCCESS; } diff --git a/tests/test_h2_headers.c b/tests/test_h2_headers.c index a123c1712..5dcbcc299 100644 --- a/tests/test_h2_headers.c +++ b/tests/test_h2_headers.c @@ -29,25 +29,23 @@ typedef int(header_clean_up_fn)(void *); /* Header compare function */ static int s_header_block_eq( - const struct aws_array_list *l_header_fields, - const struct aws_array_list *r_header_fields) { + const struct aws_http_headers *l_header_fields, + const struct aws_http_headers *r_header_fields) { - const size_t l_size = aws_array_list_length(l_header_fields); - const size_t r_size = aws_array_list_length(r_header_fields); + const size_t l_size = aws_http_headers_count(l_header_fields); + const size_t r_size = aws_http_headers_count(r_header_fields); ASSERT_UINT_EQUALS(l_size, r_size); for (size_t i = 0; i < l_size; ++i) { - const struct aws_http_header *l_field = NULL; - aws_array_list_get_at_ptr(l_header_fields, (void **)&l_field, i); - AWS_FATAL_ASSERT(l_field); + struct aws_http_header l_field; + ASSERT_SUCCESS(aws_http_headers_get_index(l_header_fields, i, &l_field)); - const struct aws_http_header *r_field = NULL; - aws_array_list_get_at_ptr(r_header_fields, (void **)&r_field, i); - AWS_FATAL_ASSERT(r_field); + struct aws_http_header r_field; + ASSERT_SUCCESS(aws_http_headers_get_index(r_header_fields, i, &r_field)); - ASSERT_INT_EQUALS(l_field->compression, r_field->compression); - 
ASSERT_TRUE(aws_byte_cursor_eq(&l_field->name, &r_field->name)); - ASSERT_TRUE(aws_byte_cursor_eq(&l_field->value, &r_field->value)); + ASSERT_INT_EQUALS(l_field.compression, r_field.compression); + ASSERT_TRUE(aws_byte_cursor_eq(&l_field.name, &r_field.name)); + ASSERT_TRUE(aws_byte_cursor_eq(&l_field.value, &r_field.value)); } return AWS_OP_SUCCESS; @@ -62,13 +60,12 @@ struct header_test_fixture { struct aws_allocator *allocator; bool one_byte_at_a_time; /* T: decode one byte at a time. F: decode whole buffer at once */ - struct aws_h2_frame_encoder encoder; + struct aws_hpack_context *encoder; struct aws_hpack_context *decoder; - struct aws_h2_frame_headers headers_to_encode; + struct aws_http_headers *headers_to_encode; struct aws_byte_buf expected_encoding_buf; - struct aws_array_list decoded_headers; /* array_list of aws_http_header */ - struct aws_byte_buf decoder_storage_buf; /* string storage */ + struct aws_http_headers *decoded_headers; }; static int s_header_test_before(struct aws_allocator *allocator, void *ctx) { @@ -78,17 +75,16 @@ static int s_header_test_before(struct aws_allocator *allocator, void *ctx) { aws_http_library_init(allocator); - ASSERT_SUCCESS(aws_h2_frame_encoder_init(&fixture->encoder, allocator)); - + fixture->encoder = aws_hpack_context_new(allocator, AWS_LS_HTTP_ENCODER, NULL); + ASSERT_NOT_NULL(fixture->encoder); fixture->decoder = aws_hpack_context_new(allocator, AWS_LS_HTTP_DECODER, NULL); ASSERT_NOT_NULL(fixture->decoder); - ASSERT_SUCCESS(aws_h2_frame_headers_init(&fixture->headers_to_encode, allocator)); - fixture->headers_to_encode.base.stream_id = 1; + fixture->headers_to_encode = aws_http_headers_new(allocator); + ASSERT_NOT_NULL(fixture->headers_to_encode); ASSERT_SUCCESS(aws_byte_buf_init(&fixture->expected_encoding_buf, allocator, S_BUFFER_SIZE)); - ASSERT_SUCCESS( - aws_array_list_init_dynamic(&fixture->decoded_headers, allocator, 8, sizeof(struct aws_http_header))); - 
ASSERT_SUCCESS(aws_byte_buf_init(&fixture->decoder_storage_buf, allocator, S_BUFFER_SIZE)); + fixture->decoded_headers = aws_http_headers_new(allocator); + ASSERT_NOT_NULL(fixture->decoded_headers); return AWS_OP_SUCCESS; } @@ -106,27 +102,18 @@ static int s_header_test_run(struct aws_allocator *allocator, void *ctx) { struct aws_byte_buf output_buffer; ASSERT_SUCCESS(aws_byte_buf_init(&output_buffer, allocator, S_BUFFER_SIZE)); - /* Encode the header */ - ASSERT_SUCCESS(aws_h2_frame_headers_encode(&fixture->headers_to_encode, &fixture->encoder, &output_buffer)); - - /* Skip past the 9 byte frame header, this is tested elsewhere */ - struct aws_byte_cursor header_block_output = aws_byte_cursor_from_buf(&output_buffer); - aws_byte_cursor_advance(&header_block_output, 9); + /* Encode the headers */ + ASSERT_SUCCESS(aws_hpack_encode_header_block(fixture->encoder, fixture->headers_to_encode, &output_buffer)); - /* Compare the remainder of encoded output against the expected header block fragment */ + /* Compare the encoded output against the expected header block fragment */ ASSERT_BIN_ARRAYS_EQUALS( fixture->expected_encoding_buf.buffer, fixture->expected_encoding_buf.len, - header_block_output.ptr, - header_block_output.len); + output_buffer.buffer, + output_buffer.len); /* Decode */ - - /* Skip past the 9 byte frame header, this is tested elsewhere */ struct aws_byte_cursor payload = aws_byte_cursor_from_buf(&output_buffer); - aws_byte_cursor_advance(&payload, 9); - - /* Decode the buffer */ while (payload.len) { struct aws_hpack_decode_result result; @@ -139,19 +126,12 @@ static int s_header_test_run(struct aws_allocator *allocator, void *ctx) { } if (result.type == AWS_HPACK_DECODE_T_HEADER_FIELD) { - struct aws_http_header header_field = result.data.header_field; - - /* Backup string values */ - ASSERT_SUCCESS(aws_byte_buf_append_and_update(&fixture->decoder_storage_buf, &header_field.name)); - 
ASSERT_SUCCESS(aws_byte_buf_append_and_update(&fixture->decoder_storage_buf, &header_field.value)); - - ASSERT_SUCCESS(aws_array_list_push_back(&fixture->decoded_headers, &header_field)); + ASSERT_SUCCESS(aws_http_headers_add_v2(fixture->decoded_headers, &result.data.header_field)); } } /* Compare the headers */ - ASSERT_SUCCESS( - s_header_block_eq(&fixture->headers_to_encode.header_block.header_fields, &fixture->decoded_headers)); + ASSERT_SUCCESS(s_header_block_eq(fixture->headers_to_encode, fixture->decoded_headers)); aws_byte_buf_clean_up(&output_buffer); return AWS_OP_SUCCESS; @@ -170,12 +150,11 @@ static int s_header_test_after(struct aws_allocator *allocator, int setup_res, v } /* Tear down the fixture */ - aws_byte_buf_clean_up(&fixture->decoder_storage_buf); - aws_array_list_clean_up(&fixture->decoded_headers); - aws_h2_frame_headers_clean_up(&fixture->headers_to_encode); + aws_http_headers_release(fixture->decoded_headers); aws_byte_buf_clean_up(&fixture->expected_encoding_buf); + aws_http_headers_release(fixture->headers_to_encode); aws_hpack_context_destroy(fixture->decoder); - aws_h2_frame_encoder_clean_up(&fixture->encoder); + aws_hpack_context_destroy(fixture->encoder); } aws_http_library_clean_up(); @@ -200,10 +179,9 @@ static int s_header_test_after(struct aws_allocator *allocator, int setup_res, v s_header_test_after, \ &s_##t_name##_one_byte_at_a_time_fixture) -#define DEFINE_STATIC_HEADER(_name, _key, _value, _behavior) \ - static const struct aws_http_header _name = { \ - .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_key), \ - .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \ +#define DEFINE_STATIC_HEADER(_key, _value, _behavior) \ + { \ + .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_key), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \ .compression = AWS_HTTP_HEADER_COMPRESSION_##_behavior, \ } @@ -217,8 +195,12 @@ HEADER_TEST(h2_header_empty_payload, s_test_empty_payload, NULL); /* RFC-7541 - Header Field Representation 
Examples - C.2.1. Literal Header Field with Indexing */ static int s_test_ex_2_1_init(struct header_test_fixture *fixture) { - DEFINE_STATIC_HEADER(header, "custom-key", "custom-header", USE_CACHE); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header); + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); + + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER("custom-key", "custom-header", USE_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x40, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2d, 0x6b, 0x65, 0x79, 0x0d, @@ -233,8 +215,12 @@ HEADER_TEST(h2_header_ex_2_1, s_test_ex_2_1_init, NULL); /* RFC-7541 - Header Field Representation Examples - C.2.2. Literal Header Field without Indexing */ static int s_test_ex_2_2_init(struct header_test_fixture *fixture) { - DEFINE_STATIC_HEADER(header, ":path", "/sample/path", NO_CACHE); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header); + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); + + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER(":path", "/sample/path", NO_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x04, 0x0c, 0x2f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2f, 0x70, 0x61, 0x74, 0x68}; @@ -248,8 +234,12 @@ HEADER_TEST(h2_header_ex_2_2, s_test_ex_2_2_init, NULL); /* RFC-7541 - Header Field Representation Examples - C.2.3. 
Literal Header Field Never Indexed */ static int s_test_ex_2_3_init(struct header_test_fixture *fixture) { - DEFINE_STATIC_HEADER(header, "password", "secret", NO_FORWARD_CACHE); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header); + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); + + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER("password", "secret", NO_FORWARD_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x10, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74}; @@ -263,8 +253,12 @@ HEADER_TEST(h2_header_ex_2_3, s_test_ex_2_3_init, NULL); /* RFC-7541 - Header Field Representation Examples - C.2.3. Indexed Header Field */ static int s_test_ex_2_4_init(struct header_test_fixture *fixture) { - DEFINE_STATIC_HEADER(header, ":method", "GET", USE_CACHE); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header); + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); + + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x82, @@ -278,15 +272,15 @@ HEADER_TEST(h2_header_ex_2_4, s_test_ex_2_4_init, NULL); /* RFC-7541 - Request Examples without Huffman Coding - C.3.1. 
First Request */ static int s_test_ex_3_1_init(struct header_test_fixture *fixture) { - DEFINE_STATIC_HEADER(header_method, ":method", "GET", USE_CACHE); - DEFINE_STATIC_HEADER(header_scheme, ":scheme", "http", USE_CACHE); - DEFINE_STATIC_HEADER(header_path, ":path", "/", USE_CACHE); - DEFINE_STATIC_HEADER(header_authority, ":authority", "www.example.com", USE_CACHE); + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_method); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_scheme); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_path); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_authority); + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), + DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), + DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), + DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x82, 0x86, 0x84, 0x41, 0x0f, 0x77, 0x77, 0x77, 0x2e, 0x65, @@ -301,17 +295,15 @@ HEADER_TEST(h2_header_ex_3_1, s_test_ex_3_1_init, NULL); /* RFC-7541 - Request Examples with Huffman Coding - C.4.1. 
First Request */ static int s_test_ex_4_1_init(struct header_test_fixture *fixture) { - fixture->encoder.use_huffman = true; + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_ALWAYS); - DEFINE_STATIC_HEADER(header_method, ":method", "GET", USE_CACHE); - DEFINE_STATIC_HEADER(header_scheme, ":scheme", "http", USE_CACHE); - DEFINE_STATIC_HEADER(header_path, ":path", "/", USE_CACHE); - DEFINE_STATIC_HEADER(header_authority, ":authority", "www.example.com", USE_CACHE); - - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_method); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_scheme); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_path); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_authority); + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER(":method", "GET", USE_CACHE), + DEFINE_STATIC_HEADER(":scheme", "http", USE_CACHE), + DEFINE_STATIC_HEADER(":path", "/", USE_CACHE), + DEFINE_STATIC_HEADER(":authority", "www.example.com", USE_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x82, 0x86, 0x84, 0x41, 0x8c, 0xf1, 0xe3, 0xc2, 0xe5, 0xf2, 0x3a, 0x6b, 0xa0, 0xab, 0x90, 0xf4, 0xff}; @@ -325,15 +317,15 @@ HEADER_TEST(h2_header_ex_4_1, s_test_ex_4_1_init, NULL); /* RFC-7541 - Response Examples without Huffman Coding - C.5.1. 
First Response */ static int s_test_ex_5_1_init(struct header_test_fixture *fixture) { - DEFINE_STATIC_HEADER(header_status, ":status", "302", USE_CACHE); - DEFINE_STATIC_HEADER(header_cache_control, "cache-control", "private", USE_CACHE); - DEFINE_STATIC_HEADER(header_date, "date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE); - DEFINE_STATIC_HEADER(header_location, "location", "https://www.example.com", USE_CACHE); + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_NEVER); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_status); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_cache_control); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_date); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_location); + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER(":status", "302", USE_CACHE), + DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), + DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE), + DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x48, 0x03, 0x33, 0x30, 0x32, 0x58, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x61, 0x1d, 0x4d, 0x6f, @@ -350,17 +342,15 @@ HEADER_TEST(h2_header_ex_5_1, s_test_ex_5_1_init, NULL); /* RFC-7541 - Response Examples with Huffman Coding - C.6.1. 
First Response */ static int s_test_ex_6_1_init(struct header_test_fixture *fixture) { - fixture->encoder.use_huffman = true; + aws_hpack_set_huffman_mode(fixture->encoder, AWS_HPACK_HUFFMAN_ALWAYS); - DEFINE_STATIC_HEADER(header_status, ":status", "302", USE_CACHE); - DEFINE_STATIC_HEADER(header_cache_control, "cache-control", "private", USE_CACHE); - DEFINE_STATIC_HEADER(header_date, "date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE); - DEFINE_STATIC_HEADER(header_location, "location", "https://www.example.com", USE_CACHE); - - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_status); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_cache_control); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_date); - aws_array_list_push_back(&fixture->headers_to_encode.header_block.header_fields, &header_location); + struct aws_http_header headers[] = { + DEFINE_STATIC_HEADER(":status", "302", USE_CACHE), + DEFINE_STATIC_HEADER("cache-control", "private", USE_CACHE), + DEFINE_STATIC_HEADER("date", "Mon, 21 Oct 2013 20:13:21 GMT", USE_CACHE), + DEFINE_STATIC_HEADER("location", "https://www.example.com", USE_CACHE), + }; + ASSERT_SUCCESS(aws_http_headers_add_array(fixture->headers_to_encode, headers, AWS_ARRAY_SIZE(headers))); static const uint8_t encoded[] = { 0x48, 0x82, 0x64, 0x02, 0x58, 0x85, 0xae, 0xc3, 0x77, 0x1a, 0x4b, 0x61, 0x96, 0xd0, 0x7a, 0xbe, 0x94, 0x10, diff --git a/tests/test_hpack.c b/tests/test_hpack.c index eb2c340df..ed473445f 100644 --- a/tests/test_hpack.c +++ b/tests/test_hpack.c @@ -18,6 +18,8 @@ #include +/* #TODO test that buffer is resized if space is insufficient */ + AWS_TEST_CASE(hpack_encode_integer, test_hpack_encode_integer) static int test_hpack_encode_integer(struct aws_allocator *allocator, void *ctx) { (void)allocator; @@ -28,13 +30,12 @@ static int test_hpack_encode_integer(struct aws_allocator *allocator, void *ctx) 
uint8_t zeros[4]; AWS_ZERO_ARRAY(zeros); - uint8_t output_buffer[4]; - struct aws_byte_buf output_buf; + struct aws_byte_buf output; + ASSERT_SUCCESS(aws_byte_buf_init(&output, allocator, 4)); /* Test 10 in 5 bits */ - AWS_ZERO_ARRAY(output_buffer); - output_buf = aws_byte_buf_from_empty_array(output_buffer, AWS_ARRAY_SIZE(output_buffer)); - ASSERT_SUCCESS(aws_hpack_encode_integer(10, 5, &output_buf)); + aws_byte_buf_secure_zero(&output); + ASSERT_SUCCESS(aws_hpack_encode_integer(10, 0, 5, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 @@ -42,15 +43,13 @@ static int test_hpack_encode_integer(struct aws_allocator *allocator, void *ctx) * | X | X | X | 0 | 1 | 0 | 1 | 0 | 10 * +---+---+---+---+---+---+---+---+ */ - ASSERT_UINT_EQUALS(1, output_buf.len); - ASSERT_UINT_EQUALS(10, output_buffer[0]); - ASSERT_BIN_ARRAYS_EQUALS(zeros, 3, &output_buffer[1], 3); - ASSERT_UINT_EQUALS(1, aws_hpack_get_encoded_length_integer(10, 5)); + ASSERT_UINT_EQUALS(1, output.len); + ASSERT_UINT_EQUALS(10, output.buffer[0]); + ASSERT_BIN_ARRAYS_EQUALS(zeros, 3, &output.buffer[1], 3); /* Test full first byte (6 bits) */ - AWS_ZERO_ARRAY(output_buffer); - output_buf = aws_byte_buf_from_empty_array(output_buffer, AWS_ARRAY_SIZE(output_buffer)); - ASSERT_SUCCESS(aws_hpack_encode_integer(63, 6, &output_buf)); + aws_byte_buf_secure_zero(&output); + ASSERT_SUCCESS(aws_hpack_encode_integer(63, 0, 6, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 @@ -60,16 +59,14 @@ static int test_hpack_encode_integer(struct aws_allocator *allocator, void *ctx) * | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 * +---+---+---+---+---+---+---+---+ */ - ASSERT_UINT_EQUALS(2, output_buf.len); - ASSERT_UINT_EQUALS(63, output_buffer[0]); - ASSERT_UINT_EQUALS(0, output_buffer[1]); - ASSERT_BIN_ARRAYS_EQUALS(zeros, 2, &output_buffer[2], 2); - ASSERT_UINT_EQUALS(2, aws_hpack_get_encoded_length_integer(63, 6)); + ASSERT_UINT_EQUALS(2, output.len); + ASSERT_UINT_EQUALS(63, output.buffer[0]); + ASSERT_UINT_EQUALS(0, output.buffer[1]); + 
ASSERT_BIN_ARRAYS_EQUALS(zeros, 2, &output.buffer[2], 2); /* Test 42 in 8 bits */ - AWS_ZERO_ARRAY(output_buffer); - output_buf = aws_byte_buf_from_empty_array(output_buffer, AWS_ARRAY_SIZE(output_buffer)); - ASSERT_SUCCESS(aws_hpack_encode_integer(42, 8, &output_buf)); + aws_byte_buf_secure_zero(&output); + ASSERT_SUCCESS(aws_hpack_encode_integer(42, 0, 8, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 @@ -77,15 +74,13 @@ static int test_hpack_encode_integer(struct aws_allocator *allocator, void *ctx) * | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 42 * +---+---+---+---+---+---+---+---+ */ - ASSERT_UINT_EQUALS(1, output_buf.len); - ASSERT_UINT_EQUALS(42, output_buffer[0]); - ASSERT_BIN_ARRAYS_EQUALS(zeros, 3, &output_buffer[1], 3); - ASSERT_UINT_EQUALS(1, aws_hpack_get_encoded_length_integer(42, 8)); + ASSERT_UINT_EQUALS(1, output.len); + ASSERT_UINT_EQUALS(42, output.buffer[0]); + ASSERT_BIN_ARRAYS_EQUALS(zeros, 3, &output.buffer[1], 3); /* Test 1337 with 5bit prefix */ - AWS_ZERO_ARRAY(output_buffer); - output_buf = aws_byte_buf_from_empty_array(output_buffer, AWS_ARRAY_SIZE(output_buffer)); - ASSERT_SUCCESS(aws_hpack_encode_integer(1337, 5, &output_buf)); + aws_byte_buf_secure_zero(&output); + ASSERT_SUCCESS(aws_hpack_encode_integer(1337, 0, 5, &output)); /** * Expected: * 0 1 2 3 4 5 6 7 @@ -95,19 +90,13 @@ static int test_hpack_encode_integer(struct aws_allocator *allocator, void *ctx) * | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 10 * +---+---+---+---+---+---+---+---+ */ - ASSERT_UINT_EQUALS(3, output_buf.len); - ASSERT_UINT_EQUALS(UINT8_MAX >> 3, output_buffer[0]); - ASSERT_UINT_EQUALS(154, output_buffer[1]); - ASSERT_UINT_EQUALS(10, output_buffer[2]); - ASSERT_UINT_EQUALS(0, output_buffer[3]); - ASSERT_UINT_EQUALS(3, aws_hpack_get_encoded_length_integer(1337, 5)); - - /* Test 1337 with 5bit prefix and insufficient output space */ - AWS_ZERO_ARRAY(output_buffer); - output_buf = aws_byte_buf_from_empty_array(output_buffer, 2); - ASSERT_FAILS(aws_hpack_encode_integer(1337, 5, 
&output_buf)); - ASSERT_UINT_EQUALS(0, output_buf.len); + ASSERT_UINT_EQUALS(3, output.len); + ASSERT_UINT_EQUALS(UINT8_MAX >> 3, output.buffer[0]); + ASSERT_UINT_EQUALS(154, output.buffer[1]); + ASSERT_UINT_EQUALS(10, output.buffer[2]); + ASSERT_UINT_EQUALS(0, output.buffer[3]); + aws_byte_buf_clean_up(&output); return AWS_OP_SUCCESS; } From fa207f39ca20b24a550b4103d7d5e4c86c5bace9 Mon Sep 17 00:00:00 2001 From: "Jonathan M. Henson" Date: Fri, 6 Mar 2020 15:03:12 -0800 Subject: [PATCH 07/35] Exposed options for toggling read back pressure behavior, updated to match aws-c-io api changes. (#194) --- include/aws/http/connection.h | 5 ++++ include/aws/http/connection_manager.h | 5 ++++ include/aws/http/server.h | 5 ++++ source/connection.c | 36 +++++++++++++-------------- source/connection_manager.c | 13 +++++++--- 5 files changed, 41 insertions(+), 23 deletions(-) diff --git a/include/aws/http/connection.h b/include/aws/http/connection.h index d59eb29df..dc8dfb5f3 100644 --- a/include/aws/http/connection.h +++ b/include/aws/http/connection.h @@ -208,6 +208,11 @@ struct aws_http_client_connection_options { * See `aws_http_on_client_connection_shutdown_fn`. */ aws_http_on_client_connection_shutdown_fn *on_shutdown; + + /** + * If set to true, read back pressure mechanism will be enabled. + */ + bool enable_read_back_pressure; }; /** diff --git a/include/aws/http/connection_manager.h b/include/aws/http/connection_manager.h index 8c12d5640..acd7d99b1 100644 --- a/include/aws/http/connection_manager.h +++ b/include/aws/http/connection_manager.h @@ -64,6 +64,11 @@ struct aws_http_connection_manager_options { */ void *shutdown_complete_user_data; aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback; + + /** + * If set to true, the read back pressure mechanism will be enabled. 
+ */ + bool enable_read_back_pressure; }; AWS_EXTERN_C_BEGIN diff --git a/include/aws/http/server.h b/include/aws/http/server.h index fa7029336..ef9eadfcd 100644 --- a/include/aws/http/server.h +++ b/include/aws/http/server.h @@ -103,6 +103,11 @@ struct aws_http_server_options { * Optional. */ aws_http_server_on_destroy_fn *on_destroy_complete; + + /** + * If set to true, read back pressure mechanism will be enabled. + */ + bool enable_read_back_pressure; }; /** diff --git a/source/connection.c b/source/connection.c index 343c5085b..c8b3b485a 100644 --- a/source/connection.c +++ b/source/connection.c @@ -487,26 +487,23 @@ struct aws_http_server *aws_http_server_new(const struct aws_http_server_options s_server_lock_synced_data(server); if (options->tls_options) { server->is_using_tls = true; - - server->socket = aws_server_bootstrap_new_tls_socket_listener( - options->bootstrap, - options->endpoint, - options->socket_options, - options->tls_options, - s_server_bootstrap_on_accept_channel_setup, - s_server_bootstrap_on_accept_channel_shutdown, - s_server_bootstrap_on_server_listener_destroy, - server); - } else { - server->socket = aws_server_bootstrap_new_socket_listener( - options->bootstrap, - options->endpoint, - options->socket_options, - s_server_bootstrap_on_accept_channel_setup, - s_server_bootstrap_on_accept_channel_shutdown, - s_server_bootstrap_on_server_listener_destroy, - server); } + + struct aws_server_socket_channel_bootstrap_options bootstrap_options = { + .enable_read_back_pressure = options->enable_read_back_pressure, + .tls_options = options->tls_options, + .bootstrap = options->bootstrap, + .socket_options = options->socket_options, + .incoming_callback = s_server_bootstrap_on_accept_channel_setup, + .shutdown_callback = s_server_bootstrap_on_accept_channel_shutdown, + .destroy_callback = s_server_bootstrap_on_server_listener_destroy, + .host_name = options->endpoint->address, + .port = options->endpoint->port, + .user_data = server, + }; + + 
server->socket = aws_server_bootstrap_new_socket_listener(&bootstrap_options); + s_server_unlock_synced_data(server); if (!server->socket) { @@ -769,6 +766,7 @@ int aws_http_client_connect_internal( .tls_options = options->tls_options, .setup_callback = s_client_bootstrap_on_channel_setup, .shutdown_callback = s_client_bootstrap_on_channel_shutdown, + .enable_read_back_pressure = options->enable_read_back_pressure, .user_data = http_bootstrap, }; diff --git a/source/connection_manager.c b/source/connection_manager.c index 2ec5b4e93..cce7f11d9 100644 --- a/source/connection_manager.c +++ b/source/connection_manager.c @@ -207,6 +207,11 @@ struct aws_http_connection_manager { * a hybrid atomic/lock solution felt excessively complicated and delicate. */ size_t external_ref_count; + + /* + * if set to true, read back pressure mechanism will be enabled. + */ + bool enable_read_back_pressure; }; struct aws_http_connection_manager_snapshot { @@ -560,12 +565,11 @@ struct aws_http_connection_manager *aws_http_connection_manager_new( } struct aws_http_connection_manager *manager = - aws_mem_acquire(allocator, sizeof(struct aws_http_connection_manager)); + aws_mem_calloc(allocator, 1, sizeof(struct aws_http_connection_manager)); if (manager == NULL) { return NULL; } - AWS_ZERO_STRUCT(*manager); manager->allocator = allocator; if (aws_mutex_init(&manager->lock)) { @@ -612,6 +616,7 @@ struct aws_http_connection_manager *aws_http_connection_manager_new( manager->external_ref_count = 1; manager->shutdown_complete_callback = options->shutdown_complete_callback; manager->shutdown_complete_user_data = options->shutdown_complete_user_data; + manager->enable_read_back_pressure = options->enable_read_back_pressure; AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Successfully created", (void *)manager); @@ -686,6 +691,7 @@ static int s_aws_http_connection_manager_new_connection(struct aws_http_connecti options.socket_options = &manager->socket_options; options.on_setup = 
s_aws_http_connection_manager_on_connection_setup; options.on_shutdown = s_aws_http_connection_manager_on_connection_shutdown; + options.enable_read_back_pressure = manager->enable_read_back_pressure; if (aws_http_connection_monitoring_options_is_valid(&manager->monitoring_options)) { options.monitoring_options = &manager->monitoring_options; @@ -844,13 +850,12 @@ void aws_http_connection_manager_acquire_connection( AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Acquire connection", (void *)manager); struct aws_http_connection_acquisition *request = - aws_mem_acquire(manager->allocator, sizeof(struct aws_http_connection_acquisition)); + aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_http_connection_acquisition)); if (request == NULL) { callback(NULL, aws_last_error(), user_data); return; } - AWS_ZERO_STRUCT(*request); request->callback = callback; request->user_data = user_data; From 041ab771a5ad859738ea20a8769e8cadd094ec50 Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Mon, 9 Mar 2020 18:28:34 -0700 Subject: [PATCH 08/35] Fix bug when new request has same memory address as old request. (#195) As seen in real life --- source/h1_connection.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/h1_connection.c b/source/h1_connection.c index b8f765cf4..045e6cd09 100644 --- a/source/h1_connection.c +++ b/source/h1_connection.c @@ -650,7 +650,7 @@ static void s_client_update_incoming_stream_ptr(struct h1_connection *connection */ static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct h1_connection *connection) { struct aws_h1_stream *current = connection->thread_data.outgoing_stream; - struct aws_h1_stream *prev = current; + bool current_changed = false; int err; /* If current stream is done sending data... */ @@ -683,6 +683,7 @@ static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct h1_connection * } current = NULL; + current_changed = true; } /* If current stream is NULL, look for more work. 
*/ @@ -720,6 +721,7 @@ static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct h1_connection * /* We found a stream to work on! */ current = stream; + current_changed = true; break; } @@ -733,7 +735,7 @@ static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct h1_connection * } /* Update current incoming and outgoing streams. */ - if (prev != current) { + if (current_changed) { AWS_LOGF_TRACE( AWS_LS_HTTP_CONNECTION, "id=%p: Current outgoing stream is now %p.", From db6778c72702d3a290bba37d9f673c18d6662fed Mon Sep 17 00:00:00 2001 From: Justin Boswell Date: Tue, 10 Mar 2020 16:50:43 -0700 Subject: [PATCH 09/35] Enabled compilation on VS 2015 (#196) * Enabled compilation on VS 2015 * Fix VS narrowing warning * Updated to v0.5.3 of builder --- .github/workflows/ci.yml | 36 +++++++++++++++++++++++++++--------- source/hpack.c | 2 +- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9f0f5eea8..382a3ebe1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,10 +7,11 @@ on: - '!master' env: - BUILDER_VERSION: v0.3.1 + BUILDER_VERSION: v0.5.3 BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-http LINUX_BASE_IMAGE: ubuntu-16-x64 + RUN: ${{ github.run_id }}-${{ github.run_number }} jobs: linux-compat: @@ -29,7 +30,7 @@ jobs: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u awslabs --password-stdin export DOCKER_IMAGE=docker.pkg.github.com/awslabs/aws-crt-builder/aws-crt-${{ matrix.image }}:${{ env.BUILDER_VERSION }} docker pull $DOCKER_IMAGE - docker run --env GITHUB_REF $DOCKER_IMAGE -p ${{ env.PACKAGE_NAME }} build manylinux-default-default-default-default + docker run --env GITHUB_REF $DOCKER_IMAGE build -p ${{ env.PACKAGE_NAME }} downstream al2: runs-on: ubuntu-latest @@ -40,7 +41,7 @@ jobs: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u awslabs --password-stdin export 
DOCKER_IMAGE=docker.pkg.github.com/awslabs/aws-crt-builder/aws-crt-al2-x64:${{ env.BUILDER_VERSION }} docker pull $DOCKER_IMAGE - docker run --env GITHUB_REF $DOCKER_IMAGE -p ${{ env.PACKAGE_NAME }} build al2-default-default-default-default-downstream + docker run --env GITHUB_REF $DOCKER_IMAGE build -p ${{ env.PACKAGE_NAME }} downstream clang-compat: runs-on: ubuntu-latest @@ -54,7 +55,7 @@ jobs: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u awslabs --password-stdin export DOCKER_IMAGE=docker.pkg.github.com/awslabs/aws-crt-builder/aws-crt-${{ env.LINUX_BASE_IMAGE }}:${{ env.BUILDER_VERSION }} docker pull $DOCKER_IMAGE - docker run --env GITHUB_REF $DOCKER_IMAGE -p ${{ env.PACKAGE_NAME }} build linux-clang-${{ matrix.version }}-linux-x64 + docker run --env GITHUB_REF $DOCKER_IMAGE build -p ${{ env.PACKAGE_NAME }} --compiler=clang-${{ matrix.version }} gcc-compat: runs-on: ubuntu-latest @@ -68,23 +69,40 @@ jobs: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u awslabs --password-stdin export DOCKER_IMAGE=docker.pkg.github.com/awslabs/aws-crt-builder/aws-crt-${{ env.LINUX_BASE_IMAGE }}:${{ env.BUILDER_VERSION }} docker pull $DOCKER_IMAGE - docker run --env GITHUB_REF $DOCKER_IMAGE -p ${{ env.PACKAGE_NAME }} build linux-gcc-${{ matrix.version }}-linux-x64 + docker run --env GITHUB_REF $DOCKER_IMAGE build -p ${{ env.PACKAGE_NAME }} --compiler=gcc-${{ matrix.version }} windows: runs-on: windows-latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | - python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_VERSION }}/builder', 'builder.pyz')" - python builder.pyz -p ${{ env.PACKAGE_NAME }} build + python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" + python builder.pyz build -p ${{ env.PACKAGE_NAME }} + + windows-vc14: + runs-on: windows-latest 
+ strategy: + matrix: + arch: [x86, x64] + steps: + - uses: ilammy/msvc-dev-cmd@v1 + with: + toolset: 14.0 + arch: ${{ matrix.arch }} + uwp: false + spectre: true + - name: Build ${{ env.PACKAGE_NAME }} + consumers + run: | + python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" + python builder.pyz build -p ${{ env.PACKAGE_NAME }} osx: runs-on: macos-latest steps: - name: Build ${{ env.PACKAGE_NAME }} + consumers run: | - python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_VERSION }}/builder', 'builder')" + python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder')" chmod a+x builder - ./builder -p ${{ env.PACKAGE_NAME }} build default-downstream + ./builder build -p ${{ env.PACKAGE_NAME }} downstream diff --git a/source/hpack.c b/source/hpack.c index b0a6e75e6..ad1959275 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -1180,7 +1180,7 @@ int aws_hpack_decode( HPACK_LOG(ERROR, context, "Dynamic table update size is absurdly large"); return aws_raise_error(AWS_ERROR_HTTP_COMPRESSION); } - size_t size = *size64; + size_t size = (size_t)*size64; HPACK_LOGF(TRACE, context, "Dynamic table size update %zu", size); if (aws_hpack_resize_dynamic_table(context, size)) { From dd7a6155a4ea62dad8db3ac79c022645ac041598 Mon Sep 17 00:00:00 2001 From: "Jonathan M. Henson" Date: Fri, 13 Mar 2020 10:50:00 -0700 Subject: [PATCH 10/35] Fix proxies connect and tests (#198) Ignore connection: close on 200/OK responses to a CONNECT Request, since the proxy is obviously drunk and needs to hail an uber to get home from the bar safely. Fix the broken tests from the tcp back pressure refactor in aws-c-io. 
--- .github/workflows/ci.yml | 2 +- builder.json | 7 ++----- include/aws/http/private/http_impl.h | 2 ++ source/h1_connection.c | 9 ++++++++- source/h1_stream.c | 1 + source/http.c | 1 + tests/proxy_test_helper.c | 10 +++++++--- tests/test_connection.c | 8 ++++++-- tests/test_h1_client.c | 6 ++++-- tests/test_proxy.c | 4 ++-- tests/test_websocket_handler.c | 22 +++++++++++++++------- 11 files changed, 49 insertions(+), 23 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 382a3ebe1..aeb055c7d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ on: - '!master' env: - BUILDER_VERSION: v0.5.3 + BUILDER_VERSION: v0.5.8 BUILDER_HOST: https://d19elf31gohf1l.cloudfront.net PACKAGE_NAME: aws-c-http LINUX_BASE_IMAGE: ubuntu-16-x64 diff --git a/builder.json b/builder.json index 8033334ad..d72e590e2 100644 --- a/builder.json +++ b/builder.json @@ -13,11 +13,8 @@ "downstream": [ { "name": "aws-c-auth" } ], - "test": [ - ["ctest", "{build_dir}", "-v", "--output-on-failure"], + "test_steps": [ + "test", ["{python}", "{project_dir}/integration-testing/http_client_test.py", "{install_dir}/bin/elasticurl{exe}"] - ], - "cmake_args": [ - "-DS2N_NO_PQ_ASM=ON" ] } diff --git a/include/aws/http/private/http_impl.h b/include/aws/http/private/http_impl.h index 03cd5800d..0cfff6a0b 100644 --- a/include/aws/http/private/http_impl.h +++ b/include/aws/http/private/http_impl.h @@ -26,6 +26,7 @@ enum aws_http_method { AWS_HTTP_METHOD_UNKNOWN, /* Unrecognized value. */ AWS_HTTP_METHOD_GET, AWS_HTTP_METHOD_HEAD, + AWS_HTTP_METHOD_CONNECT, AWS_HTTP_METHOD_COUNT, /* Number of enums */ }; @@ -50,6 +51,7 @@ enum aws_http_status { AWS_HTTP_STATUS_UNKNOWN = -1, /* Invalid status code. 
Not using 0 because it's technically a legal value */ AWS_HTTP_STATUS_100_CONTINUE = 100, AWS_HTTP_STATUS_101_SWITCHING_PROTOCOLS = 101, + AWS_HTTP_STATUS_200_OK = 200, AWS_HTTP_STATUS_204_NO_CONTENT = 204, AWS_HTTP_STATUS_304_NOT_MODIFIED = 304, }; diff --git a/source/h1_connection.c b/source/h1_connection.c index 045e6cd09..731224139 100644 --- a/source/h1_connection.c +++ b/source/h1_connection.c @@ -985,7 +985,14 @@ static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void /* RFC-7230 section 6.1. * "Connection: close" header signals that a connection will not persist after the current request/response */ if (header->name == AWS_HTTP_HEADER_CONNECTION) { - if (aws_byte_cursor_eq_c_str(&header->value_data, "close")) { + /* Certain L7 proxies send a connection close header on a 200/OK response to a CONNECT request. This is nutty + * behavior, but the obviously desired behavior on a 200 CONNECT response is to leave the connection open + * for the tunneling. */ + bool ignore_connection_close = incoming_stream->base.request_method == AWS_HTTP_METHOD_CONNECT && + incoming_stream->base.client_data && + incoming_stream->base.client_data->response_status == AWS_HTTP_STATUS_200_OK; + + if (!ignore_connection_close && aws_byte_cursor_eq_c_str_ignore_case(&header->value_data, "close")) { AWS_LOGF_TRACE( AWS_LS_HTTP_STREAM, "id=%p: Received 'Connection: close' header. 
This will be the final stream on this connection.", diff --git a/source/h1_stream.c b/source/h1_stream.c index 222ed2ccf..1f030c94d 100644 --- a/source/h1_stream.c +++ b/source/h1_stream.c @@ -15,6 +15,7 @@ #include #include + #include static void s_stream_destroy(struct aws_http_stream *stream_base) { diff --git a/source/http.c b/source/http.c index 2a3b23782..663b81e72 100644 --- a/source/http.c +++ b/source/http.c @@ -201,6 +201,7 @@ static struct aws_byte_cursor s_method_enum_to_str[AWS_HTTP_METHOD_COUNT]; /* fo static void s_methods_init(struct aws_allocator *alloc) { s_method_enum_to_str[AWS_HTTP_METHOD_GET] = aws_http_method_get; s_method_enum_to_str[AWS_HTTP_METHOD_HEAD] = aws_http_method_head; + s_method_enum_to_str[AWS_HTTP_METHOD_CONNECT] = aws_http_method_connect; s_init_str_to_enum_hash_table( &s_method_str_to_enum, diff --git a/tests/proxy_test_helper.c b/tests/proxy_test_helper.c index e84cc4a89..d71d30111 100644 --- a/tests/proxy_test_helper.c +++ b/tests/proxy_test_helper.c @@ -254,7 +254,9 @@ int proxy_tester_create_testing_channel_connection(struct proxy_tester *tester) tester->testing_channel->channel_shutdown = s_testing_channel_shutdown_callback; tester->testing_channel->channel_shutdown_user_data = tester; - struct aws_http_connection *connection = aws_http_connection_new_http1_1_client(tester->alloc, SIZE_MAX); + /* Use small window so that we can observe it opening in tests. + * Channel may wait until the window is small before issuing the increment command. 
*/ + struct aws_http_connection *connection = aws_http_connection_new_http1_1_client(tester->alloc, 256); ASSERT_NOT_NULL(connection); connection->user_data = tester->http_bootstrap->user_data; @@ -266,9 +268,9 @@ int proxy_tester_create_testing_channel_connection(struct proxy_tester *tester) ASSERT_SUCCESS(aws_channel_slot_insert_end(tester->testing_channel->channel, slot)); ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &connection->channel_handler)); connection->vtable->on_channel_handler_installed(&connection->channel_handler, slot); + testing_channel_drain_queued_tasks(tester->testing_channel); tester->client_connection = connection; - testing_channel_drain_queued_tasks(tester->testing_channel); return AWS_OP_SUCCESS; } @@ -309,7 +311,9 @@ int proxy_tester_send_connect_response(struct proxy_tester *tester) { if (tester->failure_type == PTFT_CONNECT_REQUEST) { response_string = "HTTP/1.0 401 Unauthorized\r\n\r\n"; } else { - response_string = "HTTP/1.0 200 Connection established\r\n\r\n"; + /* adding close here because it's an edge case we need to exercise. The desired behavior is that it has + * absolutely no effect. 
*/ + response_string = "HTTP/1.0 200 Connection established\r\nconnection: close\r\n\r\n"; } /* send response */ diff --git a/tests/test_connection.c b/tests/test_connection.c index 32f31cdb8..3626a60de 100644 --- a/tests/test_connection.c +++ b/tests/test_connection.c @@ -530,7 +530,7 @@ static bool s_tester_new_client_shutdown_pred(void *user_data) { return tester->new_client_shut_down; } -/* when we shutdown the server, no more new connection will be accept */ +/* when we shutdown the server, no more new connection will be accepted */ static int s_test_connection_server_shutting_down_new_connection_setup_fail( struct aws_allocator *allocator, void *ctx) { @@ -591,9 +591,13 @@ static int s_test_connection_server_shutting_down_new_connection_setup_fail( ASSERT_FAILS(s_tester_wait(&tester, s_tester_connection_setup_pred)); /* wait for the client side connection */ s_tester_wait(&tester, s_tester_new_client_setup_pred); - if (tester.new_client_connection) { + + if (tester.new_client_connection && !tester.client_connection_is_shutdown) { /* wait for it to shut down, we do not need to call shut down, the socket will know */ ASSERT_SUCCESS(s_tester_wait(&tester, s_tester_new_client_shutdown_pred)); + } + + if (tester.new_client_connection) { aws_http_connection_release(tester.new_client_connection); } diff --git a/tests/test_h1_client.c b/tests/test_h1_client.c index 5f038665e..7c7c9b4af 100644 --- a/tests/test_h1_client.c +++ b/tests/test_h1_client.c @@ -71,7 +71,9 @@ static int s_tester_init(struct tester *tester, struct aws_allocator *alloc) { struct aws_testing_channel_options test_channel_options = {.clock_fn = aws_high_res_clock_get_ticks}; ASSERT_SUCCESS(testing_channel_init(&tester->testing_channel, alloc, &test_channel_options)); - tester->connection = aws_http_connection_new_http1_1_client(alloc, SIZE_MAX); + /* Use small window so that we can observe it opening in tests. + * Channel may wait until the window is small before issuing the increment command. 
*/ + tester->connection = aws_http_connection_new_http1_1_client(alloc, 256); ASSERT_NOT_NULL(tester->connection); struct aws_channel_slot *slot = aws_channel_slot_new(tester->testing_channel.channel); @@ -1501,7 +1503,7 @@ H1_CLIENT_TEST_CASE(h1_client_window_shrinks_if_user_says_so) { /* check result */ size_t window_update = testing_channel_last_window_update(&tester.testing_channel); size_t message_sans_body = strlen(response_str) - 9; - ASSERT_TRUE(window_update == message_sans_body); + ASSERT_UINT_EQUALS(message_sans_body, window_update); /* clean up */ ASSERT_SUCCESS(s_response_tester_clean_up(&response)); diff --git a/tests/test_proxy.c b/tests/test_proxy.c index 92d257cf7..f1b7e0a1e 100644 --- a/tests/test_proxy.c +++ b/tests/test_proxy.c @@ -326,8 +326,8 @@ static int s_test_https_proxy_connection_failure_tls(struct aws_allocator *alloc ASSERT_SUCCESS(proxy_tester_verify_connection_attempt_was_to_proxy( &tester, aws_byte_cursor_from_c_str(s_proxy_host_name), s_proxy_port)); - ASSERT_TRUE(tester.client_connection == NULL); - ASSERT_TRUE(tester.wait_result != AWS_ERROR_SUCCESS); + ASSERT_NULL(tester.client_connection); + ASSERT_TRUE(AWS_ERROR_SUCCESS != tester.wait_result); ASSERT_SUCCESS(proxy_tester_clean_up(&tester)); diff --git a/tests/test_websocket_handler.c b/tests/test_websocket_handler.c index b5a7c377d..5232642fa 100644 --- a/tests/test_websocket_handler.c +++ b/tests/test_websocket_handler.c @@ -24,6 +24,10 @@ # pragma warning(disable : 4204) /* non-constant aggregate initializer */ #endif +/* Use small window so that we can observe it opening in tests. + * Channel may wait until the window is small before issuing the increment command. 
*/ +static const size_t s_default_initial_window_size = 256; + #define TEST_CASE(NAME) \ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) @@ -487,7 +491,7 @@ static int s_tester_init(struct tester *tester, struct aws_allocator *alloc) { struct aws_websocket_handler_options ws_options = { .allocator = alloc, .channel = tester->testing_channel.channel, - .initial_window_size = SIZE_MAX, + .initial_window_size = s_default_initial_window_size, .user_data = tester, .on_incoming_frame_begin = s_on_incoming_frame_begin, .on_incoming_frame_payload = s_on_incoming_frame_payload, @@ -496,6 +500,7 @@ static int s_tester_init(struct tester *tester, struct aws_allocator *alloc) { }; tester->websocket = aws_websocket_handler_new(&ws_options); ASSERT_NOT_NULL(tester->websocket); + testing_channel_drain_queued_tasks(&tester->testing_channel); aws_websocket_decoder_init(&tester->written_frame_decoder, s_on_written_frame, s_on_written_frame_payload, tester); aws_websocket_encoder_init(&tester->readpush_encoder, s_stream_readpush_payload, tester); @@ -530,6 +535,8 @@ static int s_install_downstream_handler(struct tester *tester, size_t initial_wi tester->is_midchannel_handler = true; ASSERT_SUCCESS(testing_channel_install_downstream_handler(&tester->testing_channel, initial_window)); + testing_channel_drain_queued_tasks(&tester->testing_channel); + return AWS_OP_SUCCESS; } @@ -1693,6 +1700,7 @@ static int s_window_manual_increment_common(struct aws_allocator *allocator, boo /* Assert that window did not fully re-open*/ uint64_t frame_minus_payload_size = aws_websocket_frame_encoded_size(&pushing.def) - pushing.def.payload_length; + ASSERT_UINT_EQUALS(frame_minus_payload_size, testing_channel_last_window_update(&tester.testing_channel)); /* Manually increment window */ @@ -1722,7 +1730,7 @@ TEST_CASE(websocket_midchannel_sanity_check) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); - 
ASSERT_SUCCESS(s_install_downstream_handler(&tester, SIZE_MAX)); + ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); ASSERT_SUCCESS(s_tester_clean_up(&tester)); return AWS_OP_SUCCESS; } @@ -1731,7 +1739,7 @@ TEST_CASE(websocket_midchannel_write_message) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); - ASSERT_SUCCESS(s_install_downstream_handler(&tester, SIZE_MAX)); + ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); /* Write data */ struct aws_byte_cursor writing = aws_byte_cursor_from_c_str("My hat it has three corners"); @@ -1749,7 +1757,7 @@ TEST_CASE(websocket_midchannel_write_multiple_messages) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); - ASSERT_SUCCESS(s_install_downstream_handler(&tester, SIZE_MAX)); + ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); struct aws_byte_cursor writing[] = { aws_byte_cursor_from_c_str("My hat it has three corners."), @@ -1774,7 +1782,7 @@ TEST_CASE(websocket_midchannel_write_huge_message) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); - ASSERT_SUCCESS(s_install_downstream_handler(&tester, SIZE_MAX)); + ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); /* Fill big buffer with random data */ struct aws_byte_buf writing; @@ -1801,7 +1809,7 @@ TEST_CASE(websocket_midchannel_read_message) { (void)ctx; struct tester tester; ASSERT_SUCCESS(s_tester_init(&tester, allocator)); - ASSERT_SUCCESS(s_install_downstream_handler(&tester, SIZE_MAX)); + ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); struct readpush_frame pushing = { .payload = aws_byte_cursor_from_c_str("Hello hello can you hear me Joe?"), @@ -1822,7 +1830,7 @@ TEST_CASE(websocket_midchannel_read_multiple_messages) { (void)ctx; struct tester tester; 
ASSERT_SUCCESS(s_tester_init(&tester, allocator)); - ASSERT_SUCCESS(s_install_downstream_handler(&tester, SIZE_MAX)); + ASSERT_SUCCESS(s_install_downstream_handler(&tester, s_default_initial_window_size)); /* Read a mix of different frame types, most of which shouldn't get passed along to next handler. */ struct readpush_frame pushing[] = { From e1de8aadb0c416f3d8e45a44284f096efa9b3c10 Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Wed, 18 Mar 2020 17:04:37 -0700 Subject: [PATCH 11/35] Fix fuzz tests --- include/aws/http/private/h2_frames.h | 8 +- source/h2_frames.c | 9 +- source/hpack.c | 2 + tests/fuzz/fuzz_h2_decoder_correct.c | 424 ++++++++++++++++----------- 4 files changed, 258 insertions(+), 185 deletions(-) diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index a556baad5..4bb5819b0 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -160,7 +160,7 @@ struct aws_h2_frame_rst_stream { struct aws_h2_frame base; /* Payload */ - enum aws_h2_error_codes error_code; + uint32_t error_code; }; /* A h2 setting and its value, used in SETTINGS frame */ @@ -200,7 +200,7 @@ struct aws_h2_frame_goaway { /* Payload */ uint32_t last_stream_id; - enum aws_h2_error_codes error_code; + uint32_t error_code; struct aws_byte_cursor debug_data; }; @@ -314,7 +314,7 @@ AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_rst_stream( struct aws_allocator *allocator, uint32_t stream_id, - enum aws_h2_error_codes error_code); + uint32_t error_code); AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_settings( @@ -345,7 +345,7 @@ AWS_HTTP_API struct aws_h2_frame *aws_h2_frame_new_goaway( struct aws_allocator *allocator, uint32_t last_stream_id, - enum aws_h2_error_codes error_code, + uint32_t error_code, struct aws_byte_cursor debug_data); AWS_HTTP_API diff --git a/source/h2_frames.c b/source/h2_frames.c index 647642f79..c5effe0d7 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -264,7 +264,7 
@@ int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_a encoder->allocator = allocator; encoder->logging_id = logging_id; - encoder->hpack = aws_hpack_context_new(allocator, AWS_LS_HTTP_ENCODER, encoder); + encoder->hpack = aws_hpack_context_new(allocator, AWS_LS_HTTP_ENCODER, logging_id); if (!encoder->hpack) { return AWS_OP_ERR; } @@ -817,7 +817,7 @@ static const size_t s_frame_rst_stream_length = 4; struct aws_h2_frame *aws_h2_frame_new_rst_stream( struct aws_allocator *allocator, uint32_t stream_id, - enum aws_h2_error_codes error_code) { + uint32_t error_code) { if (aws_h2_validate_stream_id(stream_id)) { return NULL; @@ -1035,7 +1035,7 @@ DEFINE_FRAME_VTABLE(goaway); struct aws_h2_frame *aws_h2_frame_new_goaway( struct aws_allocator *allocator, uint32_t last_stream_id, - enum aws_h2_error_codes error_code, + uint32_t error_code, struct aws_byte_cursor debug_data) { /* If debug_data is too long, don't sent it. @@ -1057,7 +1057,8 @@ struct aws_h2_frame *aws_h2_frame_new_goaway( s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_GOAWAY, &s_frame_goaway_vtable, 0); frame->last_stream_id = last_stream_id; - frame->error_code = error_code, frame->debug_data = debug_data; + frame->error_code = error_code; + frame->debug_data = debug_data; return &frame->base; } diff --git a/source/hpack.c b/source/hpack.c index ad1959275..fe52256d3 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -861,6 +861,7 @@ int aws_hpack_encode_string( /* Encode string length */ uint8_t starting_bits = use_huffman << 7; if (aws_hpack_encode_integer(str_length, starting_bits, 7, output)) { + HPACK_LOGF(ERROR, context, "Error encoding HPACK integer: %s", aws_error_name(aws_last_error())); goto error; } @@ -873,6 +874,7 @@ int aws_hpack_encode_string( } if (aws_huffman_encode(&context->encoder, &to_encode, output)) { + HPACK_LOGF(ERROR, context, "Error from Huffman encoder: %s", aws_error_name(aws_last_error())); goto error; } diff --git 
a/tests/fuzz/fuzz_h2_decoder_correct.c b/tests/fuzz/fuzz_h2_decoder_correct.c index c79e02170..c672a2b9b 100644 --- a/tests/fuzz/fuzz_h2_decoder_correct.c +++ b/tests/fuzz/fuzz_h2_decoder_correct.c @@ -20,13 +20,18 @@ #include #include +#include + +#include #include -static const uint32_t FRAME_HEADER_SIZE = 3 + 1 + 1 + 4; +static const uint32_t FRAME_PREFIX_SIZE = 3 + 1 + 1 + 4; static const uint32_t MAX_PAYLOAD_SIZE = 16384; -static void s_generate_header_block(struct aws_byte_cursor *input, struct aws_h2_frame_header_block *header_block) { +static struct aws_http_headers *s_generate_headers(struct aws_allocator *allocator, struct aws_byte_cursor *input) { + + struct aws_http_headers *headers = aws_http_headers_new(allocator); /* Requires 4 bytes: type, size, and then 1 each for name & value */ while (input->len >= 4) { @@ -73,40 +78,54 @@ static void s_generate_header_block(struct aws_byte_cursor *input, struct aws_h2 header.name = aws_byte_cursor_advance(input, name_len); header.value = aws_byte_cursor_advance(input, value_len); - aws_array_list_push_back(&header_block->header_fields, &header); + aws_http_headers_add_v2(headers, &header); } + + return headers; } -static void s_generate_stream_id(struct aws_byte_cursor *input, uint32_t *stream_id) { - aws_byte_cursor_read_be32(input, stream_id); - /* Top bit of stream-id is ignored by decoder */ - if ((*stream_id & (UINT32_MAX >> 1)) == 0) { - *stream_id = 1; - } +static uint32_t s_generate_stream_id(struct aws_byte_cursor *input) { + uint32_t stream_id = 0; + aws_byte_cursor_read_be32(input, &stream_id); + return aws_min_u32(AWS_H2_STREAM_ID_MAX, aws_max_u32(1, stream_id)); } /* Server-initiated stream-IDs must be even */ -static void s_generate_even_stream_id(struct aws_byte_cursor *input, uint32_t *stream_id) { - aws_byte_cursor_read_be32(input, stream_id); +static uint32_t s_generate_even_stream_id(struct aws_byte_cursor *input) { + uint32_t stream_id = 0; + aws_byte_cursor_read_be32(input, &stream_id); + 
stream_id = aws_min_u32(AWS_H2_STREAM_ID_MAX, aws_max_u32(2, stream_id)); - if (*stream_id % 2 != 0) { - *stream_id += 1; + if (stream_id % 2 != 0) { + stream_id -= 1; } - /* Top bit of stream-id is ignored by decoder */ - if ((*stream_id & (UINT32_MAX >> 1)) == 0) { - *stream_id = 2; - } + return stream_id; } -AWS_EXTERN_C_BEGIN +static struct aws_h2_frame_priority_settings s_generate_priority(struct aws_byte_cursor *input) { + struct aws_h2_frame_priority_settings priority; + priority.stream_dependency = s_generate_stream_id(input); -int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { + uint8_t exclusive = 0; + aws_byte_cursor_read_u8(input, &exclusive); + priority.stream_dependency_exclusive = (bool)exclusive; - if (size < FRAME_HEADER_SIZE) { - return 0; - } + aws_byte_cursor_read_u8(input, &priority.weight); + + return priority; +} +AWS_EXTERN_C_BEGIN + +/** + * This test generates valid frames from the random input. + * It feeds these frames through the encoder and ensures that they're output without error. + * Then it feeds the encoder's output to the decoder and ensures that it does not report an error. + * It does not currently investigate the outputs to see if they line up with they inputs, + * it just checks for errors from the encoder & decoder. 
+ */ +int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { /* Setup allocator and parameters */ struct aws_allocator *allocator = aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_BYTES, 0); struct aws_byte_cursor input = aws_byte_cursor_from_array(data, size); @@ -125,7 +144,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { /* Create the encoder */ struct aws_h2_frame_encoder encoder; - aws_h2_frame_encoder_init(&encoder, allocator); + aws_h2_frame_encoder_init(&encoder, allocator, NULL /*logging_id*/); /* Create the decoder */ const struct aws_h2_decoder_vtable decoder_vtable = {0}; @@ -138,204 +157,255 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { /* Init the buffer */ struct aws_byte_buf frame_data; - aws_byte_buf_init(&frame_data, allocator, FRAME_HEADER_SIZE + MAX_PAYLOAD_SIZE); + aws_byte_buf_init(&frame_data, allocator, FRAME_PREFIX_SIZE + MAX_PAYLOAD_SIZE); - /* Generate the frame to decode */ - { - uint8_t frame_type = 0; - aws_byte_cursor_read_u8(&input, &frame_type); + /* + * Generate the frame to decode + */ - /* Hijack the top bit of the type to figure out if we should use huffman encoding */ - encoder.use_huffman = (frame_type >> 7) == 1; + uint8_t frame_type = 0; + aws_byte_cursor_read_u8(&input, &frame_type); - switch (frame_type % (AWS_H2_FRAME_T_UNKNOWN + 1)) { - case AWS_H2_FRAME_T_DATA: { - struct aws_h2_frame_data frame; - aws_h2_frame_data_init(&frame, allocator); + /* figure out if we should use huffman encoding */ + uint8_t huffman_choice = 0; + aws_byte_cursor_read_u8(&input, &huffman_choice); + aws_hpack_set_huffman_mode(encoder.hpack, huffman_choice % 3); - s_generate_stream_id(&input, &frame.base.stream_id); - aws_byte_cursor_read_u8(&input, &frame.pad_length); + switch (frame_type % (AWS_H2_FRAME_T_UNKNOWN + 1)) { + case AWS_H2_FRAME_T_DATA: { + uint32_t stream_id = s_generate_stream_id(&input); - uint32_t payload_len = input.len; - if (payload_len > MAX_PAYLOAD_SIZE - 
frame.pad_length) { - payload_len = MAX_PAYLOAD_SIZE - frame.pad_length; - } + uint8_t flags = 0; + aws_byte_cursor_read_u8(&input, &flags); + bool body_ends_stream = flags & AWS_H2_FRAME_F_END_STREAM; - frame.data = aws_byte_cursor_advance(&input, payload_len); + uint8_t pad_length = 0; + aws_byte_cursor_read_u8(&input, &pad_length); - aws_h2_frame_data_encode(&frame, &encoder, &frame_data); - aws_h2_frame_data_clean_up(&frame); - break; - } - case AWS_H2_FRAME_T_HEADERS: { - struct aws_h2_frame_headers frame; - aws_h2_frame_headers_init(&frame, allocator); + /* Allow body to exceed available space. Data encoder should just write what it can fit */ + struct aws_input_stream *body = aws_input_stream_new_from_cursor(allocator, &input); - s_generate_stream_id(&input, &frame.base.stream_id); + bool body_complete; + AWS_FATAL_ASSERT( + aws_h2_encode_data_frame( + &encoder, stream_id, body, (bool)body_ends_stream, pad_length, &frame_data, &body_complete) == + AWS_OP_SUCCESS); - s_generate_header_block(&input, &frame.header_block); + struct aws_stream_status body_status; + aws_input_stream_get_status(body, &body_status); + AWS_FATAL_ASSERT(body_complete == body_status.is_end_of_stream) + aws_input_stream_destroy(body); + break; + } + case AWS_H2_FRAME_T_HEADERS: { + uint32_t stream_id = s_generate_stream_id(&input); - aws_h2_frame_headers_encode(&frame, &encoder, &frame_data); - aws_h2_frame_headers_clean_up(&frame); - break; - } - case AWS_H2_FRAME_T_PRIORITY: { - struct aws_h2_frame_priority frame; - aws_h2_frame_priority_init(&frame, allocator); + uint8_t flags = 0; + aws_byte_cursor_read_u8(&input, &flags); + bool end_stream = flags & AWS_H2_FRAME_F_END_STREAM; + bool use_priority = flags & AWS_H2_FRAME_F_PRIORITY; - s_generate_stream_id(&input, &frame.base.stream_id); + uint8_t pad_length = 0; + aws_byte_cursor_read_u8(&input, &pad_length); - uint32_t stream_dependency = 0; - aws_byte_cursor_read_be32(&input, &stream_dependency); + struct 
aws_h2_frame_priority_settings priority = s_generate_priority(&input); + struct aws_h2_frame_priority_settings *priority_ptr = use_priority ? &priority : NULL; - frame.priority.stream_dependency = stream_dependency & (UINT32_MAX >> 1); - frame.priority.stream_dependency_exclusive = stream_dependency & (1ULL << 31); + /* generate headers last since it uses up the rest of input */ + struct aws_http_headers *headers = s_generate_headers(allocator, &input); - aws_byte_cursor_read_u8(&input, &frame.priority.weight); + struct aws_h2_frame *frame = + aws_h2_frame_new_headers(allocator, stream_id, headers, end_stream, pad_length, priority_ptr); + AWS_FATAL_ASSERT(frame); - aws_h2_frame_priority_encode(&frame, &encoder, &frame_data); - aws_h2_frame_priority_clean_up(&frame); - break; - } - case AWS_H2_FRAME_T_RST_STREAM: { - struct aws_h2_frame_rst_stream frame; - aws_h2_frame_rst_stream_init(&frame, allocator); + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); - s_generate_stream_id(&input, &frame.base.stream_id); + aws_h2_frame_destroy(frame); + aws_http_headers_release(headers); + break; + } + case AWS_H2_FRAME_T_PRIORITY: { + uint32_t stream_id = s_generate_stream_id(&input); + struct aws_h2_frame_priority_settings priority = s_generate_priority(&input); - aws_byte_cursor_read_be32(&input, &frame.error_code); + struct aws_h2_frame *frame = aws_h2_frame_new_priority(allocator, stream_id, &priority); + AWS_FATAL_ASSERT(frame); - aws_h2_frame_rst_stream_encode(&frame, &encoder, &frame_data); - aws_h2_frame_rst_stream_clean_up(&frame); - break; - } - case AWS_H2_FRAME_T_SETTINGS: { - struct aws_h2_frame_settings frame; - aws_h2_frame_settings_init(&frame, allocator); - frame.settings_count = input.len / 6; - frame.settings_array = - aws_mem_calloc(allocator, frame.settings_count, sizeof(struct aws_h2_frame_settings)); - - for (size_t i = 0; i < 
frame.settings_count; ++i) { - aws_byte_cursor_read_be16(&input, &frame.settings_array[i].id); - aws_byte_cursor_read_be32(&input, &frame.settings_array[i].value); - } + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); - aws_h2_frame_settings_encode(&frame, &encoder, &frame_data); - aws_mem_release(allocator, frame.settings_array); - aws_h2_frame_settings_clean_up(&frame); - break; - } - case AWS_H2_FRAME_T_PUSH_PROMISE: { - struct aws_h2_frame_push_promise frame; - aws_h2_frame_push_promise_init(&frame, allocator); + aws_h2_frame_destroy(frame); + break; + } + case AWS_H2_FRAME_T_RST_STREAM: { + uint32_t stream_id = s_generate_stream_id(&input); - s_generate_stream_id(&input, &frame.base.stream_id); - s_generate_even_stream_id(&input, &frame.promised_stream_id); + uint32_t error_code = 0; + aws_byte_cursor_read_be32(&input, &error_code); - s_generate_header_block(&input, &frame.header_block); + struct aws_h2_frame *frame = aws_h2_frame_new_rst_stream(allocator, stream_id, error_code); + AWS_FATAL_ASSERT(frame); - aws_h2_frame_push_promise_encode(&frame, &encoder, &frame_data); - aws_h2_frame_push_promise_clean_up(&frame); - break; - } - case AWS_H2_FRAME_T_PING: { - struct aws_h2_frame_ping frame; - aws_h2_frame_ping_init(&frame, allocator); - - if (input.len >= AWS_H2_PING_DATA_SIZE) { - memcpy(frame.opaque_data, input.ptr, AWS_H2_PING_DATA_SIZE); - aws_byte_cursor_advance(&input, AWS_H2_PING_DATA_SIZE); - frame.ack = frame.opaque_data[0] != 0; - } else if (input.len >= 1) { - frame.ack = *input.ptr != 0; - } + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); - aws_h2_frame_ping_encode(&frame, &encoder, &frame_data); - aws_h2_frame_ping_clean_up(&frame); - break; + aws_h2_frame_destroy(frame); + break; + } + case 
AWS_H2_FRAME_T_SETTINGS: { + uint8_t flags = 0; + aws_byte_cursor_read_u8(&input, &flags); + + bool ack = flags & AWS_H2_FRAME_F_ACK; + + size_t settings_count = 0; + struct aws_h2_frame_setting *settings_array = NULL; + + if (!ack) { + /* There is an internal limit to the number of settings, but it's pretty high */ + settings_count = aws_min_size(input.len / 6, 1024); + if (settings_count > 0) { + settings_array = aws_mem_calloc(allocator, settings_count, sizeof(struct aws_h2_frame_settings)); + for (size_t i = 0; i < settings_count; ++i) { + aws_byte_cursor_read_be16(&input, &settings_array[i].id); + aws_byte_cursor_read_be32(&input, &settings_array[i].value); + } + } } - case AWS_H2_FRAME_T_GOAWAY: { - struct aws_h2_frame_goaway frame; - aws_h2_frame_goaway_init(&frame, allocator); - aws_byte_cursor_read_be32(&input, &frame.last_stream_id); - aws_byte_cursor_read_be32(&input, &frame.error_code); + struct aws_h2_frame *frame = aws_h2_frame_new_settings(allocator, settings_array, settings_count, ack); + AWS_FATAL_ASSERT(frame); - uint32_t debug_data_size = input.len; - if (debug_data_size > MAX_PAYLOAD_SIZE - 8) { - debug_data_size = MAX_PAYLOAD_SIZE - 8; - } - frame.debug_data = aws_byte_cursor_advance(&input, debug_data_size); + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); - aws_h2_frame_goaway_encode(&frame, &encoder, &frame_data); - aws_h2_frame_goaway_clean_up(&frame); - break; - } - case AWS_H2_FRAME_T_WINDOW_UPDATE: { - struct aws_h2_frame_window_update frame; - aws_h2_frame_window_update_init(&frame, allocator); + aws_h2_frame_destroy(frame); + aws_mem_release(allocator, settings_array); + break; + } + case AWS_H2_FRAME_T_PUSH_PROMISE: { + uint32_t stream_id = s_generate_stream_id(&input); + uint32_t promised_stream_id = s_generate_even_stream_id(&input); - /* WINDOW_UPDATE's stream-id can be zero or non-zero */ - 
aws_byte_cursor_read_be32(&input, &frame.base.stream_id); + uint8_t pad_length = 0; + aws_byte_cursor_read_u8(&input, &pad_length); - aws_byte_cursor_read_be32(&input, &frame.window_size_increment); + /* generate headers last since it uses up the rest of input */ + struct aws_http_headers *headers = s_generate_headers(allocator, &input); - aws_h2_frame_window_update_encode(&frame, &encoder, &frame_data); - aws_h2_frame_window_update_clean_up(&frame); - break; + struct aws_h2_frame *frame = + aws_h2_frame_new_push_promise(allocator, stream_id, promised_stream_id, headers, pad_length); + AWS_FATAL_ASSERT(frame); + + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); + + aws_h2_frame_destroy(frame); + aws_http_headers_release(headers); + break; + } + case AWS_H2_FRAME_T_PING: { + uint8_t flags; + aws_byte_cursor_read_u8(&input, &flags); + bool ack = flags & AWS_H2_FRAME_F_ACK; + + uint8_t opaque_data[AWS_H2_PING_DATA_SIZE] = {0}; + size_t copy_len = aws_min_size(input.len, AWS_H2_PING_DATA_SIZE); + if (copy_len > 0) { + struct aws_byte_cursor copy = aws_byte_cursor_advance(&input, copy_len); + memcpy(opaque_data, copy.ptr, copy.len); } - case AWS_H2_FRAME_T_CONTINUATION: { - uint32_t stream_id; - s_generate_stream_id(&input, &stream_id); - /* HEADERS frame must precede CONTINUATION */ - struct aws_h2_frame_headers headers_frame; - aws_h2_frame_headers_init(&headers_frame, allocator); + struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, ack, opaque_data); + AWS_FATAL_ASSERT(frame); - headers_frame.base.stream_id = stream_id; + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); - aws_h2_frame_headers_encode(&headers_frame, &encoder, &frame_data); - aws_h2_frame_headers_clean_up(&headers_frame); + 
aws_h2_frame_destroy(frame); + break; + } + case AWS_H2_FRAME_T_GOAWAY: { + uint32_t last_stream_id = s_generate_stream_id(&input); - /* Now do the CONTINUATION frame */ - struct aws_h2_frame_continuation continuation_frame; - aws_h2_frame_continuation_init(&continuation_frame, allocator); + uint32_t error_code = 0; + aws_byte_cursor_read_be32(&input, &error_code); - continuation_frame.base.stream_id = stream_id; - s_generate_header_block(&input, &continuation_frame.header_block); + uint32_t debug_data_size = aws_min_u32(input.len, MAX_PAYLOAD_SIZE - FRAME_PREFIX_SIZE); + struct aws_byte_cursor debug_data = aws_byte_cursor_advance(&input, debug_data_size); - aws_h2_frame_continuation_encode(&continuation_frame, &encoder, &frame_data); - aws_h2_frame_continuation_clean_up(&continuation_frame); - break; - } - case AWS_H2_FRAME_T_UNKNOWN: { - /* #YOLO roll our own frame */ - uint32_t payload_length = input.len - (FRAME_HEADER_SIZE - 1); - if (payload_length > MAX_PAYLOAD_SIZE) { - payload_length = MAX_PAYLOAD_SIZE; - } + struct aws_h2_frame *frame = aws_h2_frame_new_goaway(allocator, last_stream_id, error_code, debug_data); + AWS_FATAL_ASSERT(frame); - /* Write payload length */ - aws_byte_buf_write_be24(&frame_data, payload_length); + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); - /* Write type */ - aws_byte_buf_write_u8(&frame_data, frame_type); + aws_h2_frame_destroy(frame); + break; + } + case AWS_H2_FRAME_T_WINDOW_UPDATE: { + /* WINDOW_UPDATE's stream-id can be zero or non-zero */ + uint32_t stream_id = 0; + aws_byte_cursor_read_be32(&input, &stream_id); + stream_id = aws_min_u32(stream_id, AWS_H2_STREAM_ID_MAX); - /* Write flags & stream id */ - aws_byte_buf_write_from_whole_cursor(&frame_data, aws_byte_cursor_advance(&input, 5)); + uint32_t window_size_increment = 0; + aws_byte_cursor_read_be32(&input, &window_size_increment); + 
window_size_increment = aws_min_u32(window_size_increment, AWS_H2_WINDOW_UPDATE_MAX); - /* Write payload */ - aws_byte_buf_write_from_whole_cursor(&frame_data, aws_byte_cursor_advance(&input, payload_length)); - break; - } - default: { - AWS_FATAL_ASSERT(false); - } + struct aws_h2_frame *frame = aws_h2_frame_new_window_update(allocator, stream_id, window_size_increment); + AWS_FATAL_ASSERT(frame); + + bool frame_complete; + AWS_FATAL_ASSERT(aws_h2_encode_frame(&encoder, frame, &frame_data, &frame_complete) == AWS_OP_SUCCESS); + AWS_FATAL_ASSERT(frame_complete == true); + + aws_h2_frame_destroy(frame); + break; + } + case AWS_H2_FRAME_T_CONTINUATION: + /* We don't directly create CONTINUATION frames (they occur when HEADERS or PUSH_PROMISE gets too big) */ + frame_type = AWS_H2_FRAME_T_UNKNOWN; + /* fallthrough */ + case AWS_H2_FRAME_T_UNKNOWN: { + /* #YOLO roll our own frame */ + uint32_t payload_length = aws_min_u32(input.len, MAX_PAYLOAD_SIZE - FRAME_PREFIX_SIZE); + + /* Write payload length */ + aws_byte_buf_write_be24(&frame_data, payload_length); + + /* Write type */ + aws_byte_buf_write_u8(&frame_data, frame_type); + + /* Write flags */ + uint8_t flags = 0; + aws_byte_cursor_read_u8(&input, &flags); + aws_byte_buf_write_u8(&frame_data, flags); + + /* Write stream-id */ + uint32_t stream_id = 0; + aws_byte_cursor_read_be32(&input, &stream_id); + aws_byte_buf_write_be32(&frame_data, stream_id); + + /* Write payload */ + aws_byte_buf_write_from_whole_cursor(&frame_data, aws_byte_cursor_advance(&input, payload_length)); + break; + } + default: { + AWS_FATAL_ASSERT(false); } } /* Decode whatever we got */ + AWS_FATAL_ASSERT(frame_data.len > 0); struct aws_byte_cursor to_decode = aws_byte_cursor_from_buf(&frame_data); int err = aws_h2_decode(decoder, &to_decode); AWS_FATAL_ASSERT(err == AWS_OP_SUCCESS); @@ -351,7 +421,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { atexit(aws_http_library_clean_up); /* Check for leaks */ - ASSERT_UINT_EQUALS(0, 
aws_mem_tracer_count(allocator)); + AWS_FATAL_ASSERT(aws_mem_tracer_count(allocator) == 0); allocator = aws_mem_tracer_destroy(allocator); return 0; From 281ac170bd54019331e34c0a0d859284dc0c1790 Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Wed, 18 Mar 2020 17:12:16 -0700 Subject: [PATCH 12/35] clang-format --- source/http.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/http.c b/source/http.c index 9fd14911d..868feca2e 100644 --- a/source/http.c +++ b/source/http.c @@ -14,10 +14,10 @@ */ #include +#include #include #include #include -#include #include #include @@ -122,7 +122,7 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_INVALID_FRAME_SIZE, "Received frame with an illegal frame size"), - AWS_DEFINE_ERROR_INFO_HTTP( + AWS_DEFINE_ERROR_INFO_HTTP( AWS_ERROR_HTTP_COMPRESSION, "Error compressing or decompressing HPACK headers"), }; From 707324578e1a15ce9e4cd377d13e4f3d0834edaa Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Wed, 18 Mar 2020 17:37:04 -0700 Subject: [PATCH 13/35] tweaks --- include/aws/http/private/h2_frames.h | 8 +++----- include/aws/http/request_response.h | 3 +-- source/h2_frames.c | 23 ++++++++--------------- 3 files changed, 12 insertions(+), 22 deletions(-) diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index 4bb5819b0..585e984e7 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -74,11 +74,9 @@ enum aws_h2_settings { AWS_H2_SETTINGS_END_RANGE, /* End of known values */ }; -/* Payload must fit in 3 bytes */ -#define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) - -#define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) -#define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) +#define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) /* must fit in 3 bytes */ +#define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) /* cannot use high bit */ +#define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) /* cannot use high bit */ /* Legal min(inclusive) and 
max(inclusive) for each setting */ extern const uint32_t aws_h2_settings_bounds[AWS_H2_SETTINGS_END_RANGE][2]; diff --git a/include/aws/http/request_response.h b/include/aws/http/request_response.h index ee2dccd22..a81c735e9 100644 --- a/include/aws/http/request_response.h +++ b/include/aws/http/request_response.h @@ -365,8 +365,7 @@ AWS_HTTP_API int aws_http_headers_add_v2(struct aws_http_headers *headers, const struct aws_http_header *header); /** - * Add a header with default compression settings. - * The underlying strings are copied. + * Deprecated. Use aws_http_headers_add_v2(). */ AWS_HTTP_API int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); diff --git a/source/h2_frames.c b/source/h2_frames.c index c5effe0d7..9d5c85278 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -24,18 +24,6 @@ #include -/* #TODO: Don't raise AWS_H2_ERR_* enums, raise AWS_ERROR_* . - * Actually, maybe do NOT raise H2-specific errors, because those are for *receiving* bad data, - * and errors from the encoder are user error??? - * Also, if encoder raises error corresponding to AWS_H2_ERR, should - * we send that code in the GOAWAY, or always treat encoder errors as AWS_H2_ERR_INTERNAL? - * Like, you're only supposed to inform peer of errors that were their fault, right? */ - -/* #TODO: when is the right time to validate every possible input? - * while encoding? while making new frame? in actual user-facing API? */ - -/* #TODO: use add_checked and mul_checked */ - #define ENCODER_LOGF(level, encoder, text, ...) 
\ AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p " text, (encoder)->logging_id, __VA_ARGS__) @@ -331,7 +319,7 @@ int aws_h2_encode_data_frame( goto handle_waiting_for_more_space; } - /* Limit where body can go by making a sub-buffer */ + /* Use a sub-buffer to limit where body can go */ struct aws_byte_buf body_sub_buf = aws_byte_buf_from_empty_array(output->buffer + output->len + bytes_preceding_body, max_body); @@ -541,7 +529,7 @@ int s_encode_single_header_block_frame( uint8_t pad_length = 0; const struct aws_h2_frame_priority_settings *priority_settings = NULL; const uint32_t *promised_stream_id = NULL; - uint32_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */ + size_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */ if (frame->state == AWS_H2_HEADERS_STATE_FIRST_FRAME) { frame_type = frame->base.type; @@ -887,9 +875,14 @@ struct aws_h2_frame *aws_h2_frame_new_settings( size_t num_settings, bool ack) { - AWS_PRECONDITION(!ack || num_settings == 0, "Settings ACK must be empty"); AWS_PRECONDITION(settings_array || num_settings == 0); + /* Cannot send settings in an ACK frame */ + if (ack && num_settings > 0) { + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + /* Check against insane edge case of too many settings to fit in a frame. 
* Arbitrarily choosing half the default payload size */ size_t max_settings = s_settings_and_goaway_payload_limit / s_frame_setting_length; From 57fb1c89fa7a5d4dfd8239af34de7f16578d6475 Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Wed, 18 Mar 2020 20:03:22 -0700 Subject: [PATCH 14/35] tweaks --- source/h2_connection.c | 4 ++-- source/h2_frames.c | 32 ++++++++++---------------------- source/hpack.c | 2 -- 3 files changed, 12 insertions(+), 26 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 73ca094b1..9bd3c376f 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -393,7 +393,7 @@ static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enu aws_h2_frame_type_to_str(frame->type), frame->stream_id, msg->message_data.capacity); - aws_raise_error(AWS_ERROR_SHORT_BUFFER); + aws_raise_error(AWS_ERROR_INVALID_STATE); goto error; } @@ -521,11 +521,11 @@ static int s_send_connection_preface_client_string(struct aws_h2_connection *con return AWS_OP_ERR; } +/* #TODO actually fill with settings */ /* #TODO track which SETTINGS frames have been ACK'd */ static int s_enqueue_settings_frame(struct aws_h2_connection *connection) { struct aws_allocator *alloc = connection->base.alloc; - /* #TODO actually fill with settings */ struct aws_h2_frame *settings_frame = aws_h2_frame_new_settings(alloc, NULL, 0, false /*ack*/); if (!settings_frame) { return AWS_OP_ERR; diff --git a/source/h2_frames.c b/source/h2_frames.c index 9d5c85278..9cce243ef 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -64,7 +64,7 @@ const uint32_t aws_h2_settings_bounds[AWS_H2_SETTINGS_END_RANGE][2] = { /* Put constraints on frames that could get very large given crazy inputs. * This isn't dictated by the spec, it's here to avoid edge cases where - * were never have a big enough output buffer to encode the frame. */ + * we'd never have a big enough output buffer to encode the frame. 
*/ static const size_t s_settings_and_goaway_payload_limit = 8192; /* Stream ids & dependencies should only write the bottom 31 bits */ @@ -394,7 +394,6 @@ int aws_h2_encode_data_frame( * HEADERS / PUSH_PROMISE **********************************************************************************************************************/ DEFINE_FRAME_VTABLE(headers); -DEFINE_FRAME_VTABLE(push_promise); static struct aws_h2_frame *s_frame_new_headers_or_push_promise( struct aws_allocator *allocator, @@ -437,20 +436,17 @@ static struct aws_h2_frame *s_frame_new_headers_or_push_promise( goto error; } - const struct aws_h2_frame_vtable *vtable; if (frame_type == AWS_H2_FRAME_T_HEADERS) { - vtable = &s_frame_headers_vtable; frame->end_stream = end_stream; if (optional_priority) { frame->has_priority = true; frame->priority = *optional_priority; } } else { - vtable = &s_frame_push_promise_vtable; frame->promised_stream_id = promised_stream_id; } - s_init_frame_base(&frame->base, allocator, frame_type, vtable, stream_id); + s_init_frame_base(&frame->base, allocator, frame_type, &s_frame_headers_vtable, stream_id); aws_http_headers_acquire((struct aws_http_headers *)headers); frame->headers = headers; @@ -507,10 +503,6 @@ static void s_frame_headers_destroy(struct aws_h2_frame *frame_base) { aws_mem_release(frame->base.alloc, frame); } -static void s_frame_push_promise_destroy(struct aws_h2_frame *frame_base) { - s_frame_headers_destroy(frame_base); -} - /* Encode the next frame for this header-block (or encode nothing if output buffer is too small). 
*/ int s_encode_single_header_block_frame( struct aws_h2_frame_headers *frame, @@ -716,15 +708,6 @@ static int s_frame_headers_encode( return AWS_OP_ERR; } -static int s_frame_push_promise_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { - - return s_frame_headers_encode(frame_base, encoder, output, complete); -} - /*********************************************************************************************************************** * PRIORITY **********************************************************************************************************************/ @@ -883,10 +866,15 @@ struct aws_h2_frame *aws_h2_frame_new_settings( return NULL; } - /* Check against insane edge case of too many settings to fit in a frame. - * Arbitrarily choosing half the default payload size */ + /* Check against insane edge case of too many settings to fit in a frame. */ size_t max_settings = s_settings_and_goaway_payload_limit / s_frame_setting_length; if (num_settings > max_settings) { + AWS_LOGF_ERROR( + AWS_LS_HTTP_ENCODER, + "Cannot create SETTINGS frame with %zu settings, this exceeds internal limit of %zu", + num_settings, + max_settings); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } @@ -1148,7 +1136,7 @@ static int s_frame_window_update_encode( /* If we can't encode the whole frame at once, try again later */ if (total_len > space_available) { - ENCODER_LOG(TRACE, encoder, "Insufficient space to encode PING right now"); + ENCODER_LOG(TRACE, encoder, "Insufficient space to encode WINDOW_UPDATE right now"); *complete = false; return AWS_OP_SUCCESS; } diff --git a/source/hpack.c b/source/hpack.c index fe52256d3..dba34d8f8 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -25,8 +25,6 @@ /* #TODO split hpack encoder/decoder into different types */ -/* #TODO logging pass */ - /* #TODO test empty strings */ /* RFC-7540 6.5.2 */ From 6b7ae83031f869065513416194fa5ccfcb25140d Mon 
Sep 17 00:00:00 2001 From: Michael Graeb Date: Thu, 19 Mar 2020 13:56:41 -0700 Subject: [PATCH 15/35] Simplify how most frames are encoded: - Use common struct - Pre-encode the entire frame - Incrementally copy that to aws_io_message whenever encode() is called. This is simpler/better because: 1) more shared code 2) unique payload-writing code all goes in the one new() function, instead of being spread across the new() and encode() functions 3) less chance of incorrect size calculations, since we're encoding to a buffer of the exact correct length --- include/aws/http/private/h2_frames.h | 91 +---- source/h2_frames.c | 544 +++++++++++++-------------- tests/fuzz/fuzz_h2_decoder_correct.c | 9 +- 3 files changed, 261 insertions(+), 383 deletions(-) diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index 585e984e7..eb73d1026 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -77,6 +77,7 @@ enum aws_h2_settings { #define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) /* must fit in 3 bytes */ #define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) /* cannot use high bit */ #define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) /* cannot use high bit */ +#define AWS_H2_PING_DATA_SIZE (8) /* Legal min(inclusive) and max(inclusive) for each setting */ extern const uint32_t aws_h2_settings_bounds[AWS_H2_SETTINGS_END_RANGE][2]; @@ -116,100 +117,12 @@ struct aws_h2_frame { struct aws_linked_list_node node; }; -/* Represents a HEADERS or PUSH_PROMISE frame (followed by zero or more CONTINUATION frames) */ -struct aws_h2_frame_headers { - struct aws_h2_frame base; - - /* Common data */ - const struct aws_http_headers *headers; - uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ - - /* HEADERS-only data */ - bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */ - bool has_priority; /* AWS_H2_FRAME_F_PRIORITY */ - struct aws_h2_frame_priority_settings priority; - - /* PUSH_PROMISE-only data */ - uint32_t 
promised_stream_id; - - /* State */ - enum { - AWS_H2_HEADERS_STATE_INIT, - AWS_H2_HEADERS_STATE_FIRST_FRAME, - AWS_H2_HEADERS_STATE_CONTINUATION, - AWS_H2_HEADERS_STATE_COMPLETE, - AWS_H2_HEADERS_STATE_ERROR, - } state; - - struct aws_byte_buf whole_encoded_header_block; - struct aws_byte_cursor header_block_cursor; /* tracks progress sending encoded header-block in fragments */ -}; - -/* Represents a PRIORITY frame */ -struct aws_h2_frame_priority { - struct aws_h2_frame base; - - /* Payload */ - struct aws_h2_frame_priority_settings priority; -}; - -/* Represents a RST_STREAM frame */ -struct aws_h2_frame_rst_stream { - struct aws_h2_frame base; - - /* Payload */ - uint32_t error_code; -}; - /* A h2 setting and its value, used in SETTINGS frame */ struct aws_h2_frame_setting { uint16_t id; /* aws_h2_settings */ uint32_t value; }; -/* Represents a SETTINGS frame */ -struct aws_h2_frame_settings { - struct aws_h2_frame base; - - /* Flags */ - bool ack; /* AWS_H2_FRAME_F_ACK */ - - /* Payload */ - struct aws_h2_frame_setting *settings_array; - size_t settings_count; -}; - -#define AWS_H2_PING_DATA_SIZE (8) - -/* Represents a PING frame */ -struct aws_h2_frame_ping { - struct aws_h2_frame base; - - /* Flags */ - bool ack; /* AWS_H2_FRAME_F_ACK */ - - /* Payload */ - uint8_t opaque_data[AWS_H2_PING_DATA_SIZE]; -}; - -/* Represents a GOAWAY frame */ -struct aws_h2_frame_goaway { - struct aws_h2_frame base; - - /* Payload */ - uint32_t last_stream_id; - uint32_t error_code; - struct aws_byte_cursor debug_data; -}; - -/* Represents a WINDOW_UPDATE frame */ -struct aws_h2_frame_window_update { - struct aws_h2_frame base; - - /* Payload */ - uint32_t window_size_increment; -}; - /* Used to encode a frame */ struct aws_h2_frame_encoder { struct aws_allocator *allocator; @@ -243,7 +156,7 @@ int aws_h2_validate_stream_id(uint32_t stream_id); /** * The process of encoding a frame looks like: * 1. 
Create a encoder object on the stack and initialize with aws_h2_frame_encoder_init - * 2. Encode the frame using aws_h2_frame_*_encode + * 2. Encode the frame using aws_h2_encode_frame() */ AWS_HTTP_API int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator, void *logging_id); diff --git a/source/h2_frames.c b/source/h2_frames.c index 9cce243ef..40bd40801 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -62,11 +62,6 @@ const uint32_t aws_h2_settings_bounds[AWS_H2_SETTINGS_END_RANGE][2] = { [AWS_H2_SETTINGS_MAX_HEADER_LIST_SIZE][1] = UINT32_MAX, }; -/* Put constraints on frames that could get very large given crazy inputs. - * This isn't dictated by the spec, it's here to avoid edge cases where - * we'd never have a big enough output buffer to encode the frame. */ -static const size_t s_settings_and_goaway_payload_limit = 8192; - /* Stream ids & dependencies should only write the bottom 31 bits */ static const uint32_t s_31_bit_mask = UINT32_MAX >> 1; static const uint32_t s_u32_top_bit_mask = UINT32_MAX << 31; @@ -164,6 +159,14 @@ static int s_frame_priority_settings_encode( return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } + /* PRIORITY is encoded as (RFC-7540 6.3): + * +-+-------------------------------------------------------------+ + * |E| Stream Dependency (31) | + * +-+-------------+-----------------------------------------------+ + * | Weight (8) | + * +-+-------------+ + */ + /* Write the top 4 bytes */ uint32_t top_bytes = priority->stream_dependency | ((uint32_t)priority->stream_dependency_exclusive << 31); if (!aws_byte_buf_write_be32(output, top_bytes)) { @@ -195,28 +198,26 @@ static void s_init_frame_base( } static int s_frame_prefix_encode( - struct aws_h2_frame_encoder *encoder, enum aws_h2_frame_type type, uint32_t stream_id, size_t length, uint8_t flags, struct aws_byte_buf *output) { - AWS_PRECONDITION(encoder); AWS_PRECONDITION(output); AWS_PRECONDITION(!(stream_id & s_u32_top_bit_mask), 
"Invalid stream ID"); - ENCODER_LOGF( - TRACE, - encoder, - "Encoding frame: type=%s stream_id=%" PRIu32 " payload_length=%zu flags=0x%02X", - aws_h2_frame_type_to_str(type), - stream_id, - length, - flags); + /* Frame prefix is encoded like this (RFC-7540 4.1): + * +-----------------------------------------------+ + * | Length (24) | + * +---------------+---------------+---------------+ + * | Type (8) | Flags (8) | + * +-+-------------+---------------+-------------------------------+ + * |R| Stream Identifier (31) | + * +=+=============================================================+ + */ /* Length must fit in 24 bits */ if (length > AWS_H2_PAYLOAD_MAX) { - ENCODER_LOGF(ERROR, encoder, "Payload size %zu exceeds max for HTTP/2", length); return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } @@ -352,7 +353,7 @@ int aws_h2_encode_data_frame( /* Write the frame prefix */ const size_t payload_len = body_sub_buf.len + payload_overhead; - if (s_frame_prefix_encode(encoder, AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output)) { + if (s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output)) { goto error; } @@ -395,6 +396,35 @@ int aws_h2_encode_data_frame( **********************************************************************************************************************/ DEFINE_FRAME_VTABLE(headers); +/* Represents a HEADERS or PUSH_PROMISE frame (followed by zero or more CONTINUATION frames) */ +struct aws_h2_frame_headers { + struct aws_h2_frame base; + + /* Common data */ + const struct aws_http_headers *headers; + uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */ + + /* HEADERS-only data */ + bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */ + bool has_priority; /* AWS_H2_FRAME_F_PRIORITY */ + struct aws_h2_frame_priority_settings priority; + + /* PUSH_PROMISE-only data */ + uint32_t promised_stream_id; + + /* State */ + enum { + AWS_H2_HEADERS_STATE_INIT, + AWS_H2_HEADERS_STATE_FIRST_FRAME, /* header-block 
pre-encoded, no frames written yet */ + AWS_H2_HEADERS_STATE_CONTINUATION, /* first frame written, need to write CONTINUATION frames now */ + AWS_H2_HEADERS_STATE_COMPLETE, + AWS_H2_HEADERS_STATE_ERROR, + } state; + + struct aws_byte_buf whole_encoded_header_block; + struct aws_byte_cursor header_block_cursor; /* tracks progress sending encoded header-block in fragments */ +}; + static struct aws_h2_frame *s_frame_new_headers_or_push_promise( struct aws_allocator *allocator, enum aws_h2_frame_type frame_type, @@ -583,7 +613,7 @@ int s_encode_single_header_block_frame( /* Write the frame prefix */ const size_t payload_len = fragment_len + payload_overhead; - if (s_frame_prefix_encode(encoder, frame_type, frame->base.stream_id, payload_len, flags, output)) { + if (s_frame_prefix_encode(frame_type, frame->base.stream_id, payload_len, flags, output)) { goto error; } @@ -709,147 +739,177 @@ static int s_frame_headers_encode( } /*********************************************************************************************************************** - * PRIORITY + * aws_h2_frame_prebuilt - Used by small simple frame types that we can pre-encode at the time of creation. + * The pre-encoded buffer is then just copied bit-by-bit during the actual "encode()" function. + * + * It's safe to pre-encode a frame if it doesn't query/mutate any external state. So PING is totally great + * to pre-encode, but HEADERS (which queries MAX_FRAME_SIZE and mutates the HPACK table) would be a bad candidate. 
**********************************************************************************************************************/ -DEFINE_FRAME_VTABLE(priority); -static const size_t s_frame_priority_length = 5; +struct aws_h2_frame_prebuilt { + struct aws_h2_frame base; + struct aws_byte_buf encoded_buf; /* pre-encoded H2 frame */ + struct aws_byte_cursor send_progress; /* tracks progress sending encoded buffer */ +}; -struct aws_h2_frame *aws_h2_frame_new_priority( +DEFINE_FRAME_VTABLE(prebuilt); + +/* Can't pre-encode a frame unless it's guaranteed to fit, regardless of current settings. */ +static size_t s_prebuilt_payload_max(void) { + return aws_h2_settings_bounds[AWS_H2_SETTINGS_MAX_FRAME_SIZE][0]; +} + +/* Create aws_h2_frame_prebuilt and encode frame prefix into frame->encoded_buf. + * Caller must encode the payload to fill the rest of the encoded_buf. */ +static struct aws_h2_frame_prebuilt *s_h2_frame_new_prebuilt( struct aws_allocator *allocator, + enum aws_h2_frame_type type, uint32_t stream_id, - const struct aws_h2_frame_priority_settings *priority) { + size_t payload_len, + uint8_t flags) { - AWS_PRECONDITION(allocator); - AWS_PRECONDITION(priority); - - if (aws_h2_validate_stream_id(stream_id) || aws_h2_validate_stream_id(priority->stream_dependency)) { + if (payload_len > s_prebuilt_payload_max()) { + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } - struct aws_h2_frame_priority *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_priority)); - if (!frame) { + const size_t encoded_frame_len = s_frame_prefix_length + payload_len; + + /* Use single allocation for frame and buffer storage */ + struct aws_h2_frame_prebuilt *frame; + void *storage; + if (!aws_mem_acquire_many( + allocator, 2, &frame, sizeof(struct aws_h2_frame_prebuilt), &storage, encoded_frame_len)) { return NULL; } - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_PRIORITY, &s_frame_priority_vtable, stream_id); - frame->priority = *priority; + 
AWS_ZERO_STRUCT(*frame); + s_init_frame_base(&frame->base, allocator, type, &s_frame_prebuilt_vtable, stream_id); + frame->encoded_buf = aws_byte_buf_from_empty_array(storage, encoded_frame_len); + frame->send_progress = aws_byte_cursor_from_array(storage, encoded_frame_len); - return &frame->base; + if (s_frame_prefix_encode(type, stream_id, payload_len, flags, &frame->encoded_buf)) { + goto error; + } + + return frame; + +error: + s_frame_prebuilt_destroy(&frame->base); + return NULL; } -static void s_frame_priority_destroy(struct aws_h2_frame *frame_base) { +static void s_frame_prebuilt_destroy(struct aws_h2_frame *frame_base) { aws_mem_release(frame_base->alloc, frame_base); } -static int s_frame_priority_encode( +static int s_frame_prebuilt_encode( struct aws_h2_frame *frame_base, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, bool *complete) { - (void)encoder; - struct aws_h2_frame_priority *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_priority, base); + struct aws_h2_frame_prebuilt *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_prebuilt, base); - const size_t total_len = s_frame_prefix_length + s_frame_priority_length; - const size_t space_available = output->capacity - output->len; - - /* If we can't encode the whole frame at once, try again later */ - if (total_len > space_available) { + if (frame->send_progress.len == frame->encoded_buf.len) { ENCODER_LOGF( TRACE, encoder, - "Insufficient space to encode PRIORITY for stream %" PRIu32 " right now", + "Encoding frame type=%s stream_id=%" PRIu32 " - begin", + aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id); - - *complete = false; - return AWS_OP_SUCCESS; } - /* Write the frame prefix */ - if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, s_frame_priority_length, 0, output)) { - return AWS_OP_ERR; + /* Write as much of the pre-encoded frame as will fit */ + size_t chunk_len = aws_min_size(frame->send_progress.len, 
output->capacity - output->len); + if (chunk_len) { + struct aws_byte_cursor chunk = aws_byte_cursor_advance(&frame->send_progress, chunk_len); + aws_byte_buf_write_from_whole_cursor(output, chunk); } - /* Write the priority settings */ - if (s_frame_priority_settings_encode(&frame->priority, output)) { - return AWS_OP_ERR; - } - - *complete = true; + *complete = frame->send_progress.len == 0; return AWS_OP_SUCCESS; } /*********************************************************************************************************************** - * RST_STREAM + * PRIORITY **********************************************************************************************************************/ -DEFINE_FRAME_VTABLE(rst_stream); -static const size_t s_frame_rst_stream_length = 4; - -struct aws_h2_frame *aws_h2_frame_new_rst_stream( +struct aws_h2_frame *aws_h2_frame_new_priority( struct aws_allocator *allocator, uint32_t stream_id, - uint32_t error_code) { + const struct aws_h2_frame_priority_settings *priority) { - if (aws_h2_validate_stream_id(stream_id)) { + AWS_PRECONDITION(allocator); + AWS_PRECONDITION(priority); + + if (aws_h2_validate_stream_id(stream_id) || aws_h2_validate_stream_id(priority->stream_dependency)) { return NULL; } - struct aws_h2_frame_rst_stream *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_rst_stream)); + /* PRIORITY can be pre-encoded */ + const uint8_t flags = 0; + const size_t payload_len = s_frame_priority_settings_size; + + struct aws_h2_frame_prebuilt *frame = + s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PRIORITY, stream_id, payload_len, flags); if (!frame) { return NULL; } - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_RST_STREAM, &s_frame_rst_stream_vtable, stream_id); - frame->error_code = error_code; + /* Write the priority settings */ + if (s_frame_priority_settings_encode(priority, &frame->encoded_buf)) { + goto error; + } return &frame->base; +error: + aws_h2_frame_destroy(&frame->base); + return NULL; 
} -static void s_frame_rst_stream_destroy(struct aws_h2_frame *frame_base) { - aws_mem_release(frame_base->alloc, frame_base); -} +/*********************************************************************************************************************** + * RST_STREAM + **********************************************************************************************************************/ +static const size_t s_frame_rst_stream_length = 4; -static int s_frame_rst_stream_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { +struct aws_h2_frame *aws_h2_frame_new_rst_stream( + struct aws_allocator *allocator, + uint32_t stream_id, + uint32_t error_code) { - (void)encoder; - struct aws_h2_frame_rst_stream *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_rst_stream, base); + if (aws_h2_validate_stream_id(stream_id)) { + return NULL; + } - const size_t total_len = s_frame_prefix_length + s_frame_rst_stream_length; - const size_t space_available = output->capacity - output->len; + /* RST_STREAM can be pre-encoded */ + const uint8_t flags = 0; + const size_t payload_len = s_frame_rst_stream_length; - /* If we can't encode the whole frame at once, try again later */ - if (total_len > space_available) { - ENCODER_LOGF( - TRACE, - encoder, - "Insufficient space to encode RST_STREAM for stream %" PRIu32 " right now", - frame->base.stream_id); - *complete = false; - return AWS_OP_SUCCESS; + struct aws_h2_frame_prebuilt *frame = + s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_RST_STREAM, stream_id, payload_len, flags); + if (!frame) { + return NULL; } - /* Write the frame prefix */ - if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, s_frame_rst_stream_length, 0, output)) { - return AWS_OP_ERR; + /* Write RST_STREAM payload (RFC-7540 6.4): + * +---------------------------------------------------------------+ + * | Error Code (32) | + * 
+---------------------------------------------------------------+ + */ + if (!aws_byte_buf_write_be32(&frame->encoded_buf, error_code)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } - /* Write the error_code */ - if (!aws_byte_buf_write_be32(output, frame->error_code)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + return &frame->base; - *complete = true; - return AWS_OP_SUCCESS; +error: + aws_h2_frame_destroy(&frame->base); + return NULL; } /*********************************************************************************************************************** * SETTINGS **********************************************************************************************************************/ -DEFINE_FRAME_VTABLE(settings); static const size_t s_frame_setting_length = 6; struct aws_h2_frame *aws_h2_frame_new_settings( @@ -867,11 +927,11 @@ struct aws_h2_frame *aws_h2_frame_new_settings( } /* Check against insane edge case of too many settings to fit in a frame. */ - size_t max_settings = s_settings_and_goaway_payload_limit / s_frame_setting_length; + const size_t max_settings = s_prebuilt_payload_max() / s_frame_setting_length; if (num_settings > max_settings) { AWS_LOGF_ERROR( AWS_LS_HTTP_ENCODER, - "Cannot create SETTINGS frame with %zu settings, this exceeds internal limit of %zu", + "Cannot create SETTINGS frame with %zu settings, the limit is %zu.", num_settings, max_settings); @@ -879,139 +939,82 @@ struct aws_h2_frame *aws_h2_frame_new_settings( return NULL; } - struct aws_h2_frame_settings *frame; - struct aws_h2_frame_setting *array_alloc; - const size_t sizeof_settings_array = sizeof(struct aws_h2_frame_setting) * num_settings; - if (!aws_mem_acquire_many( - allocator, 2, &frame, sizeof(struct aws_h2_frame_settings), &array_alloc, sizeof_settings_array)) { - return NULL; - } - - AWS_ZERO_STRUCT(*frame); - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_SETTINGS, &s_frame_settings_vtable, 0); - frame->ack = ack; - 
frame->settings_count = num_settings; - if (num_settings) { - frame->settings_array = memcpy(array_alloc, settings_array, sizeof_settings_array); - } - - return &frame->base; -} - -static void s_frame_settings_destroy(struct aws_h2_frame *frame_base) { - aws_mem_release(frame_base->alloc, frame_base); -} - -static int s_frame_settings_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { - - (void)encoder; - struct aws_h2_frame_settings *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_settings, base); - - const size_t payload_len = frame->settings_count * s_frame_setting_length; - - /* If we can't encode the whole frame at once, try again later */ - size_t max_payload; - if (s_get_max_contiguous_payload_length(encoder, output, &max_payload) || max_payload < payload_len) { - ENCODER_LOG(TRACE, encoder, "Insufficient space to encode SETTINGS right now"); - *complete = false; - return AWS_OP_SUCCESS; - } - - /* Write the frame prefix */ - uint8_t flags = 0; - if (frame->ack) { - flags |= AWS_H2_FRAME_F_ACK; - } + /* SETTINGS can be pre-encoded */ + const uint8_t flags = ack ? 
AWS_H2_FRAME_F_ACK : 0; + const size_t payload_len = num_settings * s_frame_setting_length; + const uint32_t stream_id = 0; - if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, payload_len, flags, output)) { - return AWS_OP_ERR; + struct aws_h2_frame_prebuilt *frame = + s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_SETTINGS, stream_id, payload_len, flags); + if (!frame) { + return NULL; } - /* Write the payload */ - for (size_t i = 0; i < frame->settings_count; ++i) { - if (!aws_byte_buf_write_be16(output, frame->settings_array[i].id) || - !aws_byte_buf_write_be32(output, frame->settings_array[i].value)) { + /* Write the settings, each one is encoded like (RFC-7540 6.5.1): + * +-------------------------------+ + * | Identifier (16) | + * +-------------------------------+-------------------------------+ + * | Value (32) | + * +---------------------------------------------------------------+ + */ + for (size_t i = 0; i < num_settings; ++i) { + if (!aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id) || + !aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } } - *complete = true; - return AWS_OP_SUCCESS; + return &frame->base; + +error: + aws_h2_frame_destroy(&frame->base); + return NULL; } /*********************************************************************************************************************** * PING **********************************************************************************************************************/ -DEFINE_FRAME_VTABLE(ping); - struct aws_h2_frame *aws_h2_frame_new_ping( struct aws_allocator *allocator, bool ack, const uint8_t opaque_data[AWS_H2_PING_DATA_SIZE]) { - struct aws_h2_frame_ping *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_ping)); + /* PING can be pre-encoded */ + const uint8_t flags = ack ? 
AWS_H2_FRAME_F_ACK : 0; + const size_t payload_len = AWS_H2_PING_DATA_SIZE; + const uint32_t stream_id = 0; + + struct aws_h2_frame_prebuilt *frame = + s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PING, stream_id, payload_len, flags); if (!frame) { return NULL; } - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_PING, &s_frame_ping_vtable, 0); - frame->ack = ack; - memcpy(frame->opaque_data, opaque_data, AWS_H2_PING_DATA_SIZE); - - return &frame->base; -} - -static void s_frame_ping_destroy(struct aws_h2_frame *frame_base) { - aws_mem_release(frame_base->alloc, frame_base); -} - -static int s_frame_ping_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { - - (void)encoder; - struct aws_h2_frame_ping *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_ping, base); - - const size_t total_len = s_frame_prefix_length + AWS_H2_PING_DATA_SIZE; - const size_t space_available = output->capacity - output->len; - - /* If we can't encode the whole frame at once, try again later */ - if (total_len > space_available) { - ENCODER_LOG(TRACE, encoder, "Insufficient space to encode PING right now"); - *complete = false; - return AWS_OP_SUCCESS; - } - - /* Write the frame prefix */ - uint8_t flags = 0; - if (frame->ack) { - flags |= AWS_H2_FRAME_F_ACK; - } - - if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, AWS_H2_PING_DATA_SIZE, flags, output)) { - return AWS_OP_ERR; + /* Write the PING payload (RFC-7540 6.7): + * +---------------------------------------------------------------+ + * | | + * | Opaque Data (64) | + * | | + * +---------------------------------------------------------------+ + */ + if (!aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_H2_PING_DATA_SIZE)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } - /* Write the opaque_data */ - if (!aws_byte_buf_write(output, frame->opaque_data, AWS_H2_PING_DATA_SIZE)) { - return 
aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + return &frame->base; - *complete = true; - return AWS_OP_SUCCESS; +error: + aws_h2_frame_destroy(&frame->base); + return NULL; } /*********************************************************************************************************************** * GOAWAY **********************************************************************************************************************/ -DEFINE_FRAME_VTABLE(goaway); +static const size_t s_frame_goaway_length_min = 8; struct aws_h2_frame *aws_h2_frame_new_goaway( struct aws_allocator *allocator, @@ -1021,74 +1024,54 @@ struct aws_h2_frame *aws_h2_frame_new_goaway( /* If debug_data is too long, don't sent it. * It's more important that the GOAWAY frame gets sent. */ - if (debug_data.len > s_settings_and_goaway_payload_limit) { + const size_t debug_data_max = s_prebuilt_payload_max() - s_frame_goaway_length_min; + if (debug_data.len > debug_data_max) { AWS_LOGF_WARN( AWS_LS_HTTP_ENCODER, "Sending GOAWAY without debug-data. 
Debug-data size %zu exceeds internal limit of %zu", debug_data.len, - s_settings_and_goaway_payload_limit); + debug_data_max); debug_data.len = 0; } - struct aws_h2_frame_goaway *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_goaway)); + /* GOAWAY can be pre-encoded */ + const uint8_t flags = 0; + const uint8_t payload_len = debug_data.len + s_frame_goaway_length_min; + const uint32_t stream_id = 0; + + struct aws_h2_frame_prebuilt *frame = + s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_GOAWAY, stream_id, payload_len, flags); if (!frame) { return NULL; } - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_GOAWAY, &s_frame_goaway_vtable, 0); - frame->last_stream_id = last_stream_id; - frame->error_code = error_code; - frame->debug_data = debug_data; - - return &frame->base; -} - -static void s_frame_goaway_destroy(struct aws_h2_frame *frame_base) { - aws_mem_release(frame_base->alloc, frame_base); -} - -static int s_frame_goaway_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { - - (void)encoder; - struct aws_h2_frame_goaway *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_goaway, base); - - const size_t payload_len = 8 + frame->debug_data.len; - const size_t total_len = s_frame_prefix_length + payload_len; - const size_t space_available = output->capacity - output->len; - - /* If we can't encode the whole frame at once, try again later */ - if (total_len > space_available) { - ENCODER_LOG(TRACE, encoder, "Insufficient space to encode GOAWAY right now"); - *complete = false; - return AWS_OP_SUCCESS; - } - - /* Write the frame prefix */ - if (s_frame_prefix_encode(encoder, frame->base.type, frame->base.stream_id, payload_len, 0, output)) { - return AWS_OP_ERR; - } - - /* Write the payload */ - if (!aws_byte_buf_write_be32(output, frame->last_stream_id & s_31_bit_mask) || - !aws_byte_buf_write_be32(output, frame->error_code) || - 
!aws_byte_buf_write_from_whole_cursor(output, frame->debug_data)) { + /* Write the GOAWAY payload (RFC-7540 6.8): + * +-+-------------------------------------------------------------+ + * |R| Last-Stream-ID (31) | + * +-+-------------------------------------------------------------+ + * | Error Code (32) | + * +---------------------------------------------------------------+ + * | Additional Debug Data (*) | + * +---------------------------------------------------------------+ + */ + if (!aws_byte_buf_write_be32(&frame->encoded_buf, last_stream_id & s_31_bit_mask) || + !aws_byte_buf_write_be32(&frame->encoded_buf, error_code) || + !aws_byte_buf_write_from_whole_cursor(&frame->encoded_buf, debug_data)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } - *complete = true; - return AWS_OP_SUCCESS; + return &frame->base; +error: + aws_h2_frame_destroy(&frame->base); + return NULL; } /*********************************************************************************************************************** * WINDOW_UPDATE **********************************************************************************************************************/ -DEFINE_FRAME_VTABLE(window_update); static const size_t s_frame_window_update_length = 4; struct aws_h2_frame *aws_h2_frame_new_window_update( @@ -1103,57 +1086,40 @@ struct aws_h2_frame *aws_h2_frame_new_window_update( } if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) { + AWS_LOGF_ERROR( + AWS_LS_HTTP_ENCODER, + "Window increment size %" PRIu32 " exceeds HTTP/2 max %" PRIu32, + window_size_increment, + AWS_H2_WINDOW_UPDATE_MAX); aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } - struct aws_h2_frame_window_update *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_window_update)); + /* WINDOW_UPDATE can be pre-encoded */ + const uint8_t flags = 0; + const size_t payload_len = s_frame_window_update_length; + + struct aws_h2_frame_prebuilt *frame = + 
s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, payload_len, flags); if (!frame) { return NULL; } - s_init_frame_base(&frame->base, allocator, AWS_H2_FRAME_T_WINDOW_UPDATE, &s_frame_window_update_vtable, stream_id); - frame->window_size_increment = window_size_increment; - - return &frame->base; -} - -static void s_frame_window_update_destroy(struct aws_h2_frame *frame_base) { - aws_mem_release(frame_base->alloc, frame_base); -} - -static int s_frame_window_update_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct aws_byte_buf *output, - bool *complete) { - - (void)encoder; - struct aws_h2_frame_window_update *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_window_update, base); - - const size_t total_len = s_frame_prefix_length + s_frame_window_update_length; - const size_t space_available = output->capacity - output->len; - - /* If we can't encode the whole frame at once, try again later */ - if (total_len > space_available) { - ENCODER_LOG(TRACE, encoder, "Insufficient space to encode WINDOW_UPDATE right now"); - *complete = false; - return AWS_OP_SUCCESS; - } - - /* Write the frame prefix */ - if (s_frame_prefix_encode( - encoder, frame->base.type, frame->base.stream_id, s_frame_window_update_length, 0, output)) { - return AWS_OP_ERR; + /* Write the WINDOW_UPDATE payload (RFC-7540 6.9): + * +-+-------------------------------------------------------------+ + * |R| Window Size Increment (31) | + * +-+-------------------------------------------------------------+ + */ + if (!aws_byte_buf_write_be32(&frame->encoded_buf, window_size_increment)) { + aws_raise_error(AWS_ERROR_SHORT_BUFFER); + goto error; } - /* Write the error_code */ - if (!aws_byte_buf_write_be32(output, frame->window_size_increment)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + return &frame->base; - *complete = true; - return AWS_OP_SUCCESS; +error: + aws_h2_frame_destroy(&frame->base); + return NULL; } void 
aws_h2_frame_destroy(struct aws_h2_frame *frame) { diff --git a/tests/fuzz/fuzz_h2_decoder_correct.c b/tests/fuzz/fuzz_h2_decoder_correct.c index c672a2b9b..9662762a0 100644 --- a/tests/fuzz/fuzz_h2_decoder_correct.c +++ b/tests/fuzz/fuzz_h2_decoder_correct.c @@ -266,10 +266,9 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { struct aws_h2_frame_setting *settings_array = NULL; if (!ack) { - /* There is an internal limit to the number of settings, but it's pretty high */ - settings_count = aws_min_size(input.len / 6, 1024); + settings_count = aws_min_size(input.len / 6, MAX_PAYLOAD_SIZE); if (settings_count > 0) { - settings_array = aws_mem_calloc(allocator, settings_count, sizeof(struct aws_h2_frame_settings)); + settings_array = aws_mem_calloc(allocator, settings_count, sizeof(struct aws_h2_frame_setting)); for (size_t i = 0; i < settings_count; ++i) { aws_byte_cursor_read_be16(&input, &settings_array[i].id); aws_byte_cursor_read_be32(&input, &settings_array[i].value); @@ -338,8 +337,8 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { uint32_t error_code = 0; aws_byte_cursor_read_be32(&input, &error_code); - uint32_t debug_data_size = aws_min_u32(input.len, MAX_PAYLOAD_SIZE - FRAME_PREFIX_SIZE); - struct aws_byte_cursor debug_data = aws_byte_cursor_advance(&input, debug_data_size); + /* Pass debug_data that might be too large (it will get truncated if necessary) */ + struct aws_byte_cursor debug_data = aws_byte_cursor_advance(&input, input.len); struct aws_h2_frame *frame = aws_h2_frame_new_goaway(allocator, last_stream_id, error_code, debug_data); AWS_FATAL_ASSERT(frame); From 1413e1a05be8f6c7825590c278dda5205255897c Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Thu, 19 Mar 2020 14:01:21 -0700 Subject: [PATCH 16/35] THANK YOU MSVC COMPILER WARNING YOU SAVED MY ASS --- source/h2_frames.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/h2_frames.c b/source/h2_frames.c index 40bd40801..ba033ec50 100644 
--- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -1037,7 +1037,7 @@ struct aws_h2_frame *aws_h2_frame_new_goaway( /* GOAWAY can be pre-encoded */ const uint8_t flags = 0; - const uint8_t payload_len = debug_data.len + s_frame_goaway_length_min; + const size_t payload_len = debug_data.len + s_frame_goaway_length_min; const uint32_t stream_id = 0; struct aws_h2_frame_prebuilt *frame = From 3a048e77805ab7801e98ee22bf2a1558555a65bf Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Thu, 19 Mar 2020 16:53:49 -0700 Subject: [PATCH 17/35] replaced lots of AWS_ERROR_SHORT_BUFFER error-handling with asserts. If this kind of error happens now, it's programmer error --- source/h2_frames.c | 207 +++++++++++++-------------------------------- 1 file changed, 61 insertions(+), 146 deletions(-) diff --git a/source/h2_frames.c b/source/h2_frames.c index ba033ec50..224b06cd6 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -149,15 +149,12 @@ static int s_get_max_contiguous_payload_length( **********************************************************************************************************************/ static size_t s_frame_priority_settings_size = 5; -static int s_frame_priority_settings_encode( +static void s_frame_priority_settings_encode( const struct aws_h2_frame_priority_settings *priority, struct aws_byte_buf *output) { AWS_PRECONDITION(priority); AWS_PRECONDITION(output); - - if (priority->stream_dependency & s_u32_top_bit_mask) { - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - } + AWS_PRECONDITION((priority->stream_dependency & s_u32_top_bit_mask) == 0); /* PRIORITY is encoded as (RFC-7540 6.3): * +-+-------------------------------------------------------------+ @@ -166,19 +163,16 @@ static int s_frame_priority_settings_encode( * | Weight (8) | * +-+-------------+ */ + bool all_wrote = true; /* Write the top 4 bytes */ uint32_t top_bytes = priority->stream_dependency | ((uint32_t)priority->stream_dependency_exclusive << 31); - if 
(!aws_byte_buf_write_be32(output, top_bytes)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + all_wrote &= aws_byte_buf_write_be32(output, top_bytes); /* Write the priority weight */ - if (!aws_byte_buf_write_u8(output, priority->weight)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + all_wrote &= aws_byte_buf_write_u8(output, priority->weight); - return AWS_OP_SUCCESS; + AWS_ASSERT(all_wrote); } /*********************************************************************************************************************** @@ -197,7 +191,7 @@ static void s_init_frame_base( frame_base->stream_id = stream_id; } -static int s_frame_prefix_encode( +static void s_frame_prefix_encode( enum aws_h2_frame_type type, uint32_t stream_id, size_t length, @@ -205,6 +199,7 @@ static int s_frame_prefix_encode( struct aws_byte_buf *output) { AWS_PRECONDITION(output); AWS_PRECONDITION(!(stream_id & s_u32_top_bit_mask), "Invalid stream ID"); + AWS_PRECONDITION(length <= AWS_H2_PAYLOAD_MAX); /* Frame prefix is encoded like this (RFC-7540 4.1): * +-----------------------------------------------+ @@ -215,30 +210,21 @@ static int s_frame_prefix_encode( * |R| Stream Identifier (31) | * +=+=============================================================+ */ - - /* Length must fit in 24 bits */ - if (length > AWS_H2_PAYLOAD_MAX) { - return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - } + bool all_wrote = true; /* Write length */ - if (!aws_byte_buf_write_be24(output, (uint32_t)length)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + all_wrote &= aws_byte_buf_write_be24(output, (uint32_t)length); + /* Write type */ - if (!aws_byte_buf_write_u8(output, type)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + all_wrote &= aws_byte_buf_write_u8(output, type); + /* Write flags */ - if (!aws_byte_buf_write_u8(output, flags)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + all_wrote &= aws_byte_buf_write_u8(output, flags); + /* Write stream id (with reserved 
first bit) */ - if (!aws_byte_buf_write_be32(output, stream_id & s_31_bit_mask)) { - return aws_raise_error(AWS_ERROR_SHORT_BUFFER); - } + all_wrote &= aws_byte_buf_write_be32(output, stream_id & s_31_bit_mask); - return AWS_OP_SUCCESS; + AWS_ASSERT(all_wrote); } /*********************************************************************************************************************** @@ -350,19 +336,15 @@ int aws_h2_encode_data_frame( /* * Write in the other parts of the frame. */ + bool all_wrote = true; /* Write the frame prefix */ const size_t payload_len = body_sub_buf.len + payload_overhead; - if (s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output)) { - goto error; - } + s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output); /* Write pad length */ if (flags & AWS_H2_FRAME_F_PADDED) { - if (!aws_byte_buf_write_u8(output, pad_length)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + all_wrote &= aws_byte_buf_write_u8(output, pad_length); } /* Increment output->len to jump over the body that we already wrote in */ @@ -371,12 +353,10 @@ int aws_h2_encode_data_frame( /* Write padding */ if (flags & AWS_H2_FRAME_F_PADDED) { - if (!aws_byte_buf_write_u8_n(output, 0, pad_length)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + all_wrote &= aws_byte_buf_write_u8_n(output, 0, pad_length); } + AWS_ASSERT(all_wrote); return AWS_OP_SUCCESS; handle_waiting_for_more_space: @@ -418,7 +398,6 @@ struct aws_h2_frame_headers { AWS_H2_HEADERS_STATE_FIRST_FRAME, /* header-block pre-encoded, no frames written yet */ AWS_H2_HEADERS_STATE_CONTINUATION, /* first frame written, need to write CONTINUATION frames now */ AWS_H2_HEADERS_STATE_COMPLETE, - AWS_H2_HEADERS_STATE_ERROR, } state; struct aws_byte_buf whole_encoded_header_block; @@ -534,7 +513,7 @@ static void s_frame_headers_destroy(struct aws_h2_frame *frame_base) { } /* Encode the next frame for this header-block (or encode nothing if output 
buffer is too small). */ -int s_encode_single_header_block_frame( +void s_encode_single_header_block_frame( struct aws_h2_frame_headers *frame, struct aws_h2_frame_encoder *encoder, struct aws_byte_buf *output, @@ -610,61 +589,48 @@ int s_encode_single_header_block_frame( /* * Ok, it fits! Write the frame */ + bool all_wrote = true; /* Write the frame prefix */ const size_t payload_len = fragment_len + payload_overhead; - if (s_frame_prefix_encode(frame_type, frame->base.stream_id, payload_len, flags, output)) { - goto error; - } + s_frame_prefix_encode(frame_type, frame->base.stream_id, payload_len, flags, output); /* Write pad length */ if (flags & AWS_H2_FRAME_F_PADDED) { AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION); - if (!aws_byte_buf_write_u8(output, pad_length)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + all_wrote &= aws_byte_buf_write_u8(output, pad_length); } /* Write priority */ if (flags & AWS_H2_FRAME_F_PRIORITY) { AWS_ASSERT(frame_type == AWS_H2_FRAME_T_HEADERS); - if (s_frame_priority_settings_encode(priority_settings, output)) { - goto error; - } + s_frame_priority_settings_encode(priority_settings, output); } /* Write promised stream ID */ if (promised_stream_id) { AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); - if (!aws_byte_buf_write_be32(output, *promised_stream_id & s_31_bit_mask)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + all_wrote &= aws_byte_buf_write_be32(output, *promised_stream_id & s_31_bit_mask); } /* Write header-block fragment */ if (fragment_len > 0) { struct aws_byte_cursor fragment = aws_byte_cursor_advance(&frame->header_block_cursor, fragment_len); - if (!aws_byte_buf_write_from_whole_cursor(output, fragment)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + all_wrote &= aws_byte_buf_write_from_whole_cursor(output, fragment); } /* Write padding */ if (flags & AWS_H2_FRAME_F_PADDED) { - if (!aws_byte_buf_write_u8_n(output, 0, pad_length)) { - 
aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + all_wrote &= aws_byte_buf_write_u8_n(output, 0, pad_length); } + AWS_ASSERT(all_wrote); + /* Success! Wrote entire frame. It's safe to change state now */ frame->state = flags & AWS_H2_FRAME_F_END_HEADERS ? AWS_H2_HEADERS_STATE_COMPLETE : AWS_H2_HEADERS_STATE_CONTINUATION; *waiting_for_more_space = false; - return AWS_OP_SUCCESS; + return; handle_waiting_for_more_space: ENCODER_LOGF( @@ -674,11 +640,6 @@ int s_encode_single_header_block_frame( aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id); *waiting_for_more_space = true; - return AWS_OP_SUCCESS; - -error: - frame->state = AWS_H2_HEADERS_STATE_ERROR; - return AWS_OP_ERR; } static int s_frame_headers_encode( @@ -689,11 +650,6 @@ static int s_frame_headers_encode( struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base); - if (frame->state >= AWS_H2_HEADERS_STATE_COMPLETE) { - aws_raise_error(AWS_ERROR_INVALID_STATE); - goto error; - } - /* Pre-encode the entire header-block into another buffer * the first time we're called. 
*/ if (frame->state == AWS_H2_HEADERS_STATE_INIT) { @@ -716,9 +672,7 @@ static int s_frame_headers_encode( * until we're done writing header-block or the buffer is too full to continue */ bool waiting_for_more_space = false; while (frame->state < AWS_H2_HEADERS_STATE_COMPLETE && !waiting_for_more_space) { - if (s_encode_single_header_block_frame(frame, encoder, output, &waiting_for_more_space)) { - goto error; - } + s_encode_single_header_block_frame(frame, encoder, output, &waiting_for_more_space); } if (waiting_for_more_space) { @@ -734,7 +688,6 @@ static int s_frame_headers_encode( return AWS_OP_SUCCESS; error: - frame->state = AWS_H2_HEADERS_STATE_ERROR; return AWS_OP_ERR; } @@ -767,10 +720,7 @@ static struct aws_h2_frame_prebuilt *s_h2_frame_new_prebuilt( size_t payload_len, uint8_t flags) { - if (payload_len > s_prebuilt_payload_max()) { - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return NULL; - } + AWS_PRECONDITION(payload_len <= s_prebuilt_payload_max()); const size_t encoded_frame_len = s_frame_prefix_length + payload_len; @@ -787,15 +737,10 @@ static struct aws_h2_frame_prebuilt *s_h2_frame_new_prebuilt( frame->encoded_buf = aws_byte_buf_from_empty_array(storage, encoded_frame_len); frame->send_progress = aws_byte_cursor_from_array(storage, encoded_frame_len); - if (s_frame_prefix_encode(type, stream_id, payload_len, flags, &frame->encoded_buf)) { - goto error; - } + /* Write frame prefix */ + s_frame_prefix_encode(type, stream_id, payload_len, flags, &frame->encoded_buf); return frame; - -error: - s_frame_prebuilt_destroy(&frame->base); - return NULL; } static void s_frame_prebuilt_destroy(struct aws_h2_frame *frame_base) { @@ -819,12 +764,13 @@ static int s_frame_prebuilt_encode( frame->base.stream_id); } + bool all_wrote = true; + /* Write as much of the pre-encoded frame as will fit */ size_t chunk_len = aws_min_size(frame->send_progress.len, output->capacity - output->len); - if (chunk_len) { - struct aws_byte_cursor chunk = 
aws_byte_cursor_advance(&frame->send_progress, chunk_len); - aws_byte_buf_write_from_whole_cursor(output, chunk); - } + struct aws_byte_cursor chunk = aws_byte_cursor_advance(&frame->send_progress, chunk_len); + all_wrote &= aws_byte_buf_write_from_whole_cursor(output, chunk); + AWS_ASSERT(all_wrote); *complete = frame->send_progress.len == 0; return AWS_OP_SUCCESS; @@ -856,14 +802,9 @@ struct aws_h2_frame *aws_h2_frame_new_priority( } /* Write the priority settings */ - if (s_frame_priority_settings_encode(priority, &frame->encoded_buf)) { - goto error; - } + s_frame_priority_settings_encode(priority, &frame->encoded_buf); return &frame->base; -error: - aws_h2_frame_destroy(&frame->base); - return NULL; } /*********************************************************************************************************************** @@ -895,16 +836,11 @@ struct aws_h2_frame *aws_h2_frame_new_rst_stream( * | Error Code (32) | * +---------------------------------------------------------------+ */ - if (!aws_byte_buf_write_be32(&frame->encoded_buf, error_code)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + bool all_wrote = true; + all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code); + AWS_ASSERT(all_wrote); return &frame->base; - -error: - aws_h2_frame_destroy(&frame->base); - return NULL; } /*********************************************************************************************************************** @@ -957,20 +893,14 @@ struct aws_h2_frame *aws_h2_frame_new_settings( * | Value (32) | * +---------------------------------------------------------------+ */ + bool all_wrote = true; for (size_t i = 0; i < num_settings; ++i) { - if (!aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id) || - !aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value)) { - - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + all_wrote &= aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id); + all_wrote 
&= aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value); } + AWS_ASSERT(all_wrote); return &frame->base; - -error: - aws_h2_frame_destroy(&frame->base); - return NULL; } /*********************************************************************************************************************** @@ -999,16 +929,11 @@ struct aws_h2_frame *aws_h2_frame_new_ping( * | | * +---------------------------------------------------------------+ */ - if (!aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_H2_PING_DATA_SIZE)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + bool all_wrote = true; + all_wrote &= aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_H2_PING_DATA_SIZE); + AWS_ASSERT(all_wrote); return &frame->base; - -error: - aws_h2_frame_destroy(&frame->base); - return NULL; } /*********************************************************************************************************************** @@ -1055,18 +980,13 @@ struct aws_h2_frame *aws_h2_frame_new_goaway( * | Additional Debug Data (*) | * +---------------------------------------------------------------+ */ - if (!aws_byte_buf_write_be32(&frame->encoded_buf, last_stream_id & s_31_bit_mask) || - !aws_byte_buf_write_be32(&frame->encoded_buf, error_code) || - !aws_byte_buf_write_from_whole_cursor(&frame->encoded_buf, debug_data)) { - - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + bool all_wrote = true; + all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, last_stream_id & s_31_bit_mask); + all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code); + all_wrote &= aws_byte_buf_write_from_whole_cursor(&frame->encoded_buf, debug_data); + AWS_ASSERT(all_wrote); return &frame->base; -error: - aws_h2_frame_destroy(&frame->base); - return NULL; } /*********************************************************************************************************************** @@ -1110,16 +1030,11 @@ struct aws_h2_frame *aws_h2_frame_new_window_update( * |R| 
Window Size Increment (31) | * +-+-------------------------------------------------------------+ */ - if (!aws_byte_buf_write_be32(&frame->encoded_buf, window_size_increment)) { - aws_raise_error(AWS_ERROR_SHORT_BUFFER); - goto error; - } + bool all_wrote = true; + all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, window_size_increment); + AWS_ASSERT(all_wrote); return &frame->base; - -error: - aws_h2_frame_destroy(&frame->base); - return NULL; } void aws_h2_frame_destroy(struct aws_h2_frame *frame) { From 2a987a9f478b84d45d2fc684c894644847f3d41d Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Thu, 19 Mar 2020 17:15:29 -0700 Subject: [PATCH 18/35] compiler warning --- source/h2_frames.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/h2_frames.c b/source/h2_frames.c index 224b06cd6..88e5166f4 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -155,6 +155,7 @@ static void s_frame_priority_settings_encode( AWS_PRECONDITION(priority); AWS_PRECONDITION(output); AWS_PRECONDITION((priority->stream_dependency & s_u32_top_bit_mask) == 0); + (void)s_u32_top_bit_mask; /* PRIORITY is encoded as (RFC-7540 6.3): * +-+-------------------------------------------------------------+ From 1fb8fbac6520f8e47dfaff480bda588a0e12facb Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 19 Mar 2020 17:47:40 -0700 Subject: [PATCH 19/35] insert ping ACK to the front of the queue --- source/h2_connection.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/source/h2_connection.c b/source/h2_connection.c index 86a19d5da..df29f2b41 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -298,6 +298,13 @@ void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connecti aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &frame->node); } +void aws_h2_connection_enqueue_outgoing_frame_from_head(struct aws_h2_connection *connection, struct aws_h2_frame_base *frame) { + AWS_PRECONDITION(frame->type 
!= AWS_H2_FRAME_T_DATA); + AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); + + aws_linked_list_push_front(&connection->thread_data.outgoing_frames_queue, &frame->node); +} + static void s_on_channel_write_complete( struct aws_channel *channel, struct aws_io_message *message, @@ -548,6 +555,19 @@ static int s_enqueue_settings_frame(struct aws_h2_connection *connection) { return AWS_OP_ERR; } +static int s_enqueue_ping_frame(struct aws_h2_connection *connection, struct aws_h2_frame_ping *ping_frame) { + + if (ping_frame->ack) { + /* PING responses SHOULD be given higher priority than any other frame, so it will be inserted at the head of the + * queue */ + aws_h2_connection_enqueue_outgoing_frame_from_head(connection, &ping_frame->base); + } + else { + aws_h2_connection_enqueue_outgoing_frame(connection, &ping_frame->base); + } + return AWS_OP_SUCCESS; +} + static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(slot->channel)); struct aws_h2_connection *connection = handler->impl; From 618d038e77ee10c15859cd5308fe09282cd2e012 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 20 Mar 2020 09:40:43 -0700 Subject: [PATCH 20/35] auto send ping ack frame back --- source/h2_connection.c | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index a9ee7605f..f70396b5d 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -61,6 +61,8 @@ static struct aws_http_stream *s_connection_make_request( static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enum aws_task_status status); +static int s_decoder_on_ping(uint8_t opaque_data[AWS_H2_PING_DATA_SIZE], void *userdata); + static struct 
aws_http_connection_vtable s_h2_connection_vtable = { .channel_handler_vtable = { @@ -84,6 +86,7 @@ static struct aws_http_connection_vtable s_h2_connection_vtable = { static const struct aws_h2_decoder_vtable s_h2_decoder_vtable = { .on_data = NULL, + .on_ping = s_decoder_on_ping, }; static void s_lock_synced_data(struct aws_h2_connection *connection) { @@ -297,7 +300,9 @@ void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connecti aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &frame->node); } -void aws_h2_connection_enqueue_outgoing_frame_from_head(struct aws_h2_connection *connection, struct aws_h2_frame_base *frame) { +void aws_h2_connection_enqueue_outgoing_frame_from_head( + struct aws_h2_connection *connection, + struct aws_h2_frame *frame) { AWS_PRECONDITION(frame->type != AWS_H2_FRAME_T_DATA); AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); @@ -499,6 +504,25 @@ static void s_try_write_outgoing_frames(struct aws_h2_connection *connection) { s_outgoing_frames_task(&connection->outgoing_frames_task, connection, AWS_TASK_STATUS_RUN_READY); } +/* Decoder callbacks */ +static int s_decoder_on_ping(uint8_t opaque_data[AWS_H2_PING_DATA_SIZE], void *userdata) { + struct aws_h2_connection *connection = userdata; + + /* send a PING frame with the ACK flag set in response, with an identical payload. 
*/ + struct aws_h2_frame *ping_ack_frame = aws_h2_frame_new_ping(connection->base.alloc, true, opaque_data); + if (!ping_ack_frame) { + goto error; + } + /* PING responses SHOULD be given higher priority than any other frame, so it will be inserted at the head of the + * queue */ + aws_h2_connection_enqueue_outgoing_frame_from_head(connection, ping_ack_frame); + s_try_write_outgoing_frames(connection); + return AWS_OP_SUCCESS; +error: + CONNECTION_LOGF(ERROR, connection, "Ping ACK frame failed to be sent, error %s", aws_error_name(aws_last_error())); + return AWS_OP_ERR; +} + static int s_send_connection_preface_client_string(struct aws_h2_connection *connection) { /* Just send the magic string on its own aws_io_message. */ @@ -542,19 +566,6 @@ static int s_enqueue_settings_frame(struct aws_h2_connection *connection) { return AWS_OP_SUCCESS; } -static int s_enqueue_ping_frame(struct aws_h2_connection *connection, struct aws_h2_frame_ping *ping_frame) { - - if (ping_frame->ack) { - /* PING responses SHOULD be given higher priority than any other frame, so it will be inserted at the head of the - * queue */ - aws_h2_connection_enqueue_outgoing_frame_from_head(connection, &ping_frame->base); - } - else { - aws_h2_connection_enqueue_outgoing_frame(connection, &ping_frame->base); - } - return AWS_OP_SUCCESS; -} - static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(slot->channel)); struct aws_h2_connection *connection = handler->impl; From a53f0cbea312eb8ec3871405b04701b6502e9c3f Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Fri, 20 Mar 2020 22:33:19 -0700 Subject: [PATCH 21/35] h2_test_helper.h Extract useful machinery from test_h2_decoder.c for use with other tests. 
--- include/aws/http/private/h1_decoder.h | 2 +- include/aws/http/private/h1_encoder.h | 2 +- include/aws/http/private/h2_decoder.h | 2 +- include/aws/http/private/h2_frames.h | 5 +- include/aws/http/private/hpack.h | 2 +- source/h1_decoder.c | 4 +- source/h2_decoder.c | 2 +- source/h2_frames.c | 5 +- source/hpack.c | 4 +- tests/h2_test_helper.c | 460 ++++++++++++++++++ tests/h2_test_helper.h | 132 ++++++ tests/test_h2_decoder.c | 645 +++++--------------------- 12 files changed, 733 insertions(+), 532 deletions(-) create mode 100644 tests/h2_test_helper.c create mode 100644 tests/h2_test_helper.h diff --git a/include/aws/http/private/h1_decoder.h b/include/aws/http/private/h1_decoder.h index c732a8a53..0592bcc2c 100644 --- a/include/aws/http/private/h1_decoder.h +++ b/include/aws/http/private/h1_decoder.h @@ -81,7 +81,7 @@ AWS_HTTP_API struct aws_h1_decoder *aws_h1_decoder_new(struct aws_h1_decoder_par AWS_HTTP_API void aws_h1_decoder_destroy(struct aws_h1_decoder *decoder); AWS_HTTP_API int aws_h1_decode(struct aws_h1_decoder *decoder, struct aws_byte_cursor *data); -AWS_HTTP_API void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, void *id); +AWS_HTTP_API void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id); AWS_HTTP_API void aws_h1_decoder_set_body_headers_ignored(struct aws_h1_decoder *decoder, bool body_headers_ignored); /* RFC-7230 section 4.2 Message Format */ diff --git a/include/aws/http/private/h1_encoder.h b/include/aws/http/private/h1_encoder.h index 27090a760..0faa8e928 100644 --- a/include/aws/http/private/h1_encoder.h +++ b/include/aws/http/private/h1_encoder.h @@ -43,7 +43,7 @@ struct aws_h1_encoder { enum aws_h1_encoder_state state; struct aws_h1_encoder_message *message; uint64_t progress_bytes; - void *logging_id; + const void *logging_id; }; AWS_EXTERN_C_BEGIN diff --git a/include/aws/http/private/h2_decoder.h b/include/aws/http/private/h2_decoder.h index 79d366110..dffb41e85 100644 --- 
a/include/aws/http/private/h2_decoder.h +++ b/include/aws/http/private/h2_decoder.h @@ -89,7 +89,7 @@ struct aws_h2_decoder_params { struct aws_allocator *alloc; const struct aws_h2_decoder_vtable *vtable; void *userdata; - void *logging_id; + const void *logging_id; bool is_server; /* If true, do not expect the connection preface and immediately accept any frame type. diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index 4b414f1f2..9e5ddd62b 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -159,7 +159,10 @@ int aws_h2_validate_stream_id(uint32_t stream_id); * 2. Encode the frame using aws_h2_encode_frame() */ AWS_HTTP_API -int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator, void *logging_id); +int aws_h2_frame_encoder_init( + struct aws_h2_frame_encoder *encoder, + struct aws_allocator *allocator, + const void *logging_id); AWS_HTTP_API void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder); diff --git a/include/aws/http/private/hpack.h b/include/aws/http/private/hpack.h index 7cd6e53af..f9c6108c9 100644 --- a/include/aws/http/private/hpack.h +++ b/include/aws/http/private/hpack.h @@ -71,7 +71,7 @@ AWS_HTTP_API struct aws_hpack_context *aws_hpack_context_new( struct aws_allocator *allocator, enum aws_http_log_subject log_subject, - void *log_id); + const void *log_id); AWS_HTTP_API void aws_hpack_context_destroy(struct aws_hpack_context *context); diff --git a/source/h1_decoder.c b/source/h1_decoder.c index 0003d94f6..221f7ef16 100644 --- a/source/h1_decoder.c +++ b/source/h1_decoder.c @@ -50,7 +50,7 @@ struct aws_h1_decoder { bool body_headers_ignored; bool body_headers_forbidden; enum aws_http_header_block header_block; - void *logging_id; + const void *logging_id; /* User callbacks and settings. 
*/ struct aws_h1_decoder_vtable vtable; @@ -766,7 +766,7 @@ enum aws_http_header_block aws_h1_decoder_get_header_block(const struct aws_h1_d return decoder->header_block; } -void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, void *id) { +void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id) { decoder->logging_id = id; } diff --git a/source/h2_decoder.c b/source/h2_decoder.c index 9ba900c1b..bf55b5a2a 100644 --- a/source/h2_decoder.c +++ b/source/h2_decoder.c @@ -125,7 +125,7 @@ static const struct decoder_state *s_state_frames[] = { struct aws_h2_decoder { /* Implementation data. */ struct aws_allocator *alloc; - void *logging_id; + const void *logging_id; struct aws_hpack_context *hpack; bool is_server; struct aws_byte_buf scratch; diff --git a/source/h2_frames.c b/source/h2_frames.c index 0f1234432..4c3ce9896 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -231,7 +231,10 @@ static void s_frame_prefix_encode( /*********************************************************************************************************************** * Encoder **********************************************************************************************************************/ -int aws_h2_frame_encoder_init(struct aws_h2_frame_encoder *encoder, struct aws_allocator *allocator, void *logging_id) { +int aws_h2_frame_encoder_init( + struct aws_h2_frame_encoder *encoder, + struct aws_allocator *allocator, + const void *logging_id) { AWS_PRECONDITION(encoder); AWS_PRECONDITION(allocator); diff --git a/source/hpack.c b/source/hpack.c index 09119278e..1e26e8da3 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -230,7 +230,7 @@ struct aws_hpack_context { enum aws_hpack_huffman_mode huffman_mode; enum aws_http_log_subject log_subject; - void *log_id; + const void *log_id; struct aws_huffman_encoder encoder; struct aws_huffman_decoder decoder; @@ -316,7 +316,7 @@ struct aws_hpack_context { struct aws_hpack_context *aws_hpack_context_new( 
struct aws_allocator *allocator, enum aws_http_log_subject log_subject, - void *log_id) { + const void *log_id) { struct aws_hpack_context *context = aws_mem_calloc(allocator, 1, sizeof(struct aws_hpack_context)); if (!context) { diff --git a/tests/h2_test_helper.c b/tests/h2_test_helper.c new file mode 100644 index 000000000..27d56b82e --- /dev/null +++ b/tests/h2_test_helper.c @@ -0,0 +1,460 @@ +/* + * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +#include "h2_test_helper.h" + +#include + +static const void *s_logging_id = (void *)0xAAAAAAAA; + +/******************************************************************************* + * h2_decoded_frame + ******************************************************************************/ +static int s_frame_init( + struct h2_decoded_frame *frame, + struct aws_allocator *alloc, + enum aws_h2_frame_type type, + uint32_t stream_id) { + + AWS_ZERO_STRUCT(*frame); + frame->type = type; + frame->stream_id = stream_id; + frame->headers = aws_http_headers_new(alloc); + ASSERT_SUCCESS(aws_array_list_init_dynamic(&frame->settings, alloc, 16, sizeof(struct aws_h2_frame_setting))); + ASSERT_SUCCESS(aws_byte_buf_init(&frame->data, alloc, 1024)); + return AWS_OP_SUCCESS; +} + +static void s_frame_clean_up(struct h2_decoded_frame *frame) { + aws_http_headers_release(frame->headers); + aws_array_list_clean_up(&frame->settings); + aws_byte_buf_clean_up(&frame->data); +} + +int 
h2_decoded_frame_check_finished( + const struct h2_decoded_frame *frame, + enum aws_h2_frame_type expected_type, + uint32_t expected_stream_id) { + + ASSERT_INT_EQUALS(expected_type, frame->type); + ASSERT_UINT_EQUALS(expected_stream_id, frame->stream_id); + ASSERT_TRUE(frame->finished); + return AWS_OP_SUCCESS; +} + +/******************************************************************************* + * h2_decode_tester + ******************************************************************************/ + +size_t h2_decode_tester_frame_count(const struct h2_decode_tester *decode_tester) { + return aws_array_list_length(&decode_tester->frames); +} + +struct h2_decoded_frame *h2_decode_tester_get_frame(const struct h2_decode_tester *decode_tester, size_t i) { + AWS_FATAL_ASSERT(h2_decode_tester_frame_count(decode_tester) > i); + struct h2_decoded_frame *frame = NULL; + aws_array_list_get_at_ptr(&decode_tester->frames, (void **)&frame, i); + return frame; +} + +struct h2_decoded_frame *h2_decode_tester_latest_frame(const struct h2_decode_tester *decode_tester) { + size_t frame_count = h2_decode_tester_frame_count(decode_tester); + AWS_FATAL_ASSERT(frame_count != 0); + return h2_decode_tester_get_frame(decode_tester, frame_count - 1); +} + +int h2_decode_tester_check_data_across_frames( + const struct h2_decode_tester *decode_tester, + uint32_t stream_id, + struct aws_byte_cursor expected, + bool expect_end_stream) { + + struct aws_byte_buf data; + ASSERT_SUCCESS(aws_byte_buf_init(&data, decode_tester->alloc, 128)); + + bool found_end_stream = false; + + for (size_t frame_i = 0; frame_i < h2_decode_tester_frame_count(decode_tester); ++frame_i) { + struct h2_decoded_frame *frame = h2_decode_tester_get_frame(decode_tester, frame_i); + + if (frame->type == AWS_H2_FRAME_T_DATA && frame->stream_id == stream_id) { + struct aws_byte_cursor frame_data = aws_byte_cursor_from_buf(&frame->data); + ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&data, &frame_data)); + + found_end_stream = 
frame->end_stream; + } + } + + ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, data.buffer, data.len); + ASSERT_UINT_EQUALS(expect_end_stream, found_end_stream); + + aws_byte_buf_clean_up(&data); + return AWS_OP_SUCCESS; +} + +int h2_decode_tester_check_data_str_across_frames( + const struct h2_decode_tester *decode_tester, + uint32_t stream_id, + const char *expected, + bool expect_end_stream) { + + return h2_decode_tester_check_data_across_frames( + decode_tester, stream_id, aws_byte_cursor_from_c_str(expected), expect_end_stream); +} + +/* decode-tester begins recording a new frame's data */ +static int s_begin_new_frame( + struct h2_decode_tester *decode_tester, + enum aws_h2_frame_type type, + uint32_t stream_id, + struct h2_decoded_frame **out_frame) { + + /* If there's a previous frame, assert that we know it was finished. + * If this fails, some on_X_begin(), on_X_i(), on_X_end() loop didn't fire correctly. + * It should be impossible for an unrelated callback to fire during these loops */ + if (aws_array_list_length(&decode_tester->frames) > 0) { + const struct h2_decoded_frame *prev_frame = h2_decode_tester_latest_frame(decode_tester); + ASSERT_TRUE(prev_frame->finished); + } + + /* Create new frame */ + struct h2_decoded_frame new_frame; + ASSERT_SUCCESS(s_frame_init(&new_frame, decode_tester->alloc, type, stream_id)); + ASSERT_SUCCESS(aws_array_list_push_back(&decode_tester->frames, &new_frame)); + + if (out_frame) { + aws_array_list_get_at_ptr( + &decode_tester->frames, (void **)out_frame, aws_array_list_length(&decode_tester->frames) - 1); + } + return AWS_OP_SUCCESS; +} + +/* decode-tester stops recording the latest frame's data */ +static int s_end_current_frame( + struct h2_decode_tester *decode_tester, + enum aws_h2_frame_type type, + uint32_t stream_id) { + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); + ASSERT_FALSE(frame->finished); + frame->finished = true; + 
ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, type, stream_id)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_headers_begin(uint32_t stream_id, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_HEADERS, stream_id, NULL /*out_frame*/)); + return AWS_OP_SUCCESS; +} + +static int s_on_header(bool is_push_promise, uint32_t stream_id, const struct aws_http_header *header, void *userdata) { + + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); + + /* Validate */ + if (is_push_promise) { + ASSERT_INT_EQUALS(AWS_H2_FRAME_T_PUSH_PROMISE, frame->type); + } else { + ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, frame->type); + } + + ASSERT_FALSE(frame->finished); + ASSERT_UINT_EQUALS(frame->stream_id, stream_id); + + /* Stash header */ + ASSERT_SUCCESS(aws_http_headers_add_header(frame->headers, header)); + + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_headers_i(uint32_t stream_id, const struct aws_http_header *header, void *userdata) { + return s_on_header(false /* is_push_promise */, stream_id, header, userdata); +} + +static int s_decoder_on_headers_end(uint32_t stream_id, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_HEADERS, stream_id)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_push_promise_begin(uint32_t stream_id, uint32_t promised_stream_id, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_PUSH_PROMISE, stream_id, &frame /*out_frame*/)); + + frame->promised_stream_id = promised_stream_id; + + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_push_promise_i(uint32_t stream_id, const struct aws_http_header *header, void *userdata) { + return 
s_on_header(true /* is_push_promise */, stream_id, header, userdata); +} + +static int s_decoder_on_push_promise_end(uint32_t stream_id, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_PUSH_PROMISE, stream_id)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_data(uint32_t stream_id, struct aws_byte_cursor data, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + + /* Pretend each on_data callback is a full DATA frame for the purposes of these tests */ + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_DATA, stream_id, &frame)); + + /* Stash data*/ + ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&frame->data, &data)); + + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_DATA, stream_id)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_end_stream(uint32_t stream_id, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); + + /* Validate */ + + /* on_end_stream should fire IMMEDIATELY after on_data OR after on_headers_end. 
+ * This timing lets the user close the stream from this callback without waiting for any trailing data/headers + */ + ASSERT_TRUE(frame->finished); + ASSERT_TRUE(frame->type == AWS_H2_FRAME_T_HEADERS || frame->type == AWS_H2_FRAME_T_DATA); + ASSERT_UINT_EQUALS(frame->stream_id, stream_id); + + ASSERT_FALSE(frame->end_stream); + + /* Stash */ + frame->end_stream = true; + + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_rst_stream(uint32_t stream_id, uint32_t error_code, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_RST_STREAM, stream_id, &frame)); + + /* Stash data*/ + frame->error_code = error_code; + + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_RST_STREAM, stream_id)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_settings_begin(void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0, &frame)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_settings_i(uint16_t setting_id, uint32_t value, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); + + /* Validate */ + ASSERT_INT_EQUALS(AWS_H2_FRAME_T_SETTINGS, frame->type); + ASSERT_FALSE(frame->finished); + + /* Stash setting */ + struct aws_h2_frame_setting setting = {setting_id, value}; + ASSERT_SUCCESS(aws_array_list_push_back(&frame->settings, &setting)); + + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_settings_end(void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_settings_ack(void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct 
h2_decoded_frame *frame; + + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/, &frame)); + + /* Stash data*/ + frame->ack = true; + + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_ping(uint8_t opaque_data[AWS_H2_PING_DATA_SIZE], void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/, &frame)); + + /* Stash data*/ + memcpy(frame->ping_opaque_data, opaque_data, AWS_H2_PING_DATA_SIZE); + + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_ping_ack(uint8_t opaque_data[AWS_H2_PING_DATA_SIZE], void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/, &frame)); + + /* Stash data*/ + memcpy(frame->ping_opaque_data, opaque_data, AWS_H2_PING_DATA_SIZE); + frame->ack = true; + + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_PING, 0 /*stream_id*/)); + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_goaway_begin( + uint32_t last_stream, + uint32_t error_code, + uint32_t debug_data_length, + void *userdata) { + + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_GOAWAY, 0, &frame)); + + frame->goaway_last_stream_id = last_stream; + frame->error_code = error_code; + frame->goaway_debug_data_remaining = debug_data_length; + + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_goaway_i(struct aws_byte_cursor debug_data, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame = 
h2_decode_tester_latest_frame(decode_tester); + + /* Validate */ + ASSERT_INT_EQUALS(AWS_H2_FRAME_T_GOAWAY, frame->type); + ASSERT_FALSE(frame->finished); + ASSERT_TRUE(frame->goaway_debug_data_remaining >= debug_data.len); + + frame->goaway_debug_data_remaining -= (uint32_t)debug_data.len; + + /* Stash data */ + ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&frame->data, &debug_data)); + + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_goaway_end(void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_GOAWAY, 0)); + + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(decode_tester); + ASSERT_UINT_EQUALS(0, frame->goaway_debug_data_remaining); + + return AWS_OP_SUCCESS; +} + +static int s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata) { + struct h2_decode_tester *decode_tester = userdata; + struct h2_decoded_frame *frame; + ASSERT_SUCCESS(s_begin_new_frame(decode_tester, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, &frame)); + + frame->window_size_increment = window_size_increment; + + ASSERT_SUCCESS(s_end_current_frame(decode_tester, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id)); + + return AWS_OP_SUCCESS; +} + +static struct aws_h2_decoder_vtable s_decoder_vtable = { + .on_headers_begin = s_decoder_on_headers_begin, + .on_headers_i = s_decoder_on_headers_i, + .on_headers_end = s_decoder_on_headers_end, + .on_push_promise_begin = s_decoder_on_push_promise_begin, + .on_push_promise_i = s_decoder_on_push_promise_i, + .on_push_promise_end = s_decoder_on_push_promise_end, + .on_data = s_decoder_on_data, + .on_end_stream = s_decoder_on_end_stream, + .on_rst_stream = s_decoder_on_rst_stream, + .on_settings_begin = s_decoder_on_settings_begin, + .on_settings_i = s_decoder_on_settings_i, + .on_settings_end = s_decoder_on_settings_end, + .on_settings_ack = s_decoder_on_settings_ack, + .on_ping = s_decoder_on_ping, + .on_ping_ack = 
s_decoder_on_ping_ack, + .on_goaway_begin = s_decoder_on_goaway_begin, + .on_goaway_i = s_decoder_on_goaway_i, + .on_goaway_end = s_decoder_on_goaway_end, + .on_window_update = s_decoder_on_window_update, +}; + +int h2_decode_tester_init(struct h2_decode_tester *decode_tester, const struct h2_decode_tester_options *options) { + AWS_ZERO_STRUCT(*decode_tester); + decode_tester->alloc = options->alloc; + + struct aws_h2_decoder_params decoder_params = { + .alloc = options->alloc, + .vtable = &s_decoder_vtable, + .userdata = decode_tester, + .logging_id = s_logging_id, + .is_server = options->is_server, + .skip_connection_preface = options->skip_connection_preface, + }; + decode_tester->decoder = aws_h2_decoder_new(&decoder_params); + ASSERT_NOT_NULL(decode_tester->decoder); + + ASSERT_SUCCESS( + aws_array_list_init_dynamic(&decode_tester->frames, options->alloc, 16, sizeof(struct h2_decoded_frame))); + return AWS_OP_SUCCESS; +} + +void h2_decode_tester_clean_up(struct h2_decode_tester *decode_tester) { + aws_h2_decoder_destroy(decode_tester->decoder); + + for (size_t i = 0; i < aws_array_list_length(&decode_tester->frames); ++i) { + struct h2_decoded_frame *frame; + aws_array_list_get_at_ptr(&decode_tester->frames, (void **)&frame, i); + s_frame_clean_up(frame); + } + aws_array_list_clean_up(&decode_tester->frames); + + AWS_ZERO_STRUCT(*decode_tester); +} + +/******************************************************************************* + * h2_fake_peer + ******************************************************************************/ + +int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_options *options) { + AWS_ZERO_STRUCT(*peer); + peer->alloc = options->alloc; + peer->testing_channel = options->testing_channel; + + ASSERT_SUCCESS(aws_h2_frame_encoder_init(&peer->encoder, peer->alloc, s_logging_id)); + + struct h2_decode_tester_options decode_options = {.alloc = options->alloc, .is_server = options->is_server}; + 
ASSERT_SUCCESS(h2_decode_tester_init(&peer->decode, &decode_options)); + return AWS_OP_SUCCESS; +} + +void h2_fake_peer_clean_up(struct h2_fake_peer *peer) { + aws_h2_frame_encoder_clean_up(&peer->encoder); + h2_decode_tester_clean_up(&peer->decode); + AWS_ZERO_STRUCT(peer); +} diff --git a/tests/h2_test_helper.h b/tests/h2_test_helper.h new file mode 100644 index 000000000..de2118683 --- /dev/null +++ b/tests/h2_test_helper.h @@ -0,0 +1,132 @@ +#ifndef AWS_HTTP_H2_TEST_HELPER_H +#define AWS_HTTP_H2_TEST_HELPER_H +/* + * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +#include +#include +#include +#include + +/** + * Information gathered about a given frame from decoder callbacks. + * These aren't 1:1 with literal H2 frames: + * - The decoder hides the existence of CONTINUATION frames, + * their data continues the preceding HEADERS or PUSH_PROMISE frame. + * + * - A DATA frame could appear as N on_data callbacks. + * + * - The on_end_stream callback fires after all other callbacks for that frame, + * so we count it as part of the preceding "finished" frame. 
+ */ +struct h2_decoded_frame { + /* If true, we expect no further callbacks regarding this frame */ + bool finished; + + enum aws_h2_frame_type type; /* All frame types have this */ + uint32_t stream_id; /* All frame types have this */ + + /* + * Everything else is only found in certain frame types + */ + + bool end_stream; /* HEADERS and DATA might have this */ + bool ack; /* PING and SETTINGS might have this */ + + uint32_t error_code; /* RST_STREAM and GOAWAY have this */ + uint32_t promised_stream_id; /* PUSH_PROMISE has this */ + uint32_t goaway_last_stream_id; /* GOAWAY has this */ + uint32_t goaway_debug_data_remaining; /* GOAWAY has this*/ + uint8_t ping_opaque_data[AWS_H2_PING_DATA_SIZE]; /* PING has this */ + uint32_t window_size_increment; /* WINDOW_UPDATE has this */ + + struct aws_http_headers *headers; /* HEADERS and PUSH_PROMISE have this */ + struct aws_array_list settings; /* contains aws_h2_frame_setting, SETTINGS has this */ + struct aws_byte_buf data /* DATA has this */; +}; + +/** + * Check that: + * - frame finished (ex: if HEADERS frame, then on_headers_end() fired) + * - frame was in fact using the expected type and stream_id. + */ +int h2_decoded_frame_check_finished( + const struct h2_decoded_frame *frame, + enum aws_h2_frame_type expected_type, + uint32_t expected_stream_id); + +/** + * Translates decoder callbacks into an array-list of h2_decoded_frames. 
+ */
+struct h2_decode_tester {
+    struct aws_allocator *alloc;
+    struct aws_h2_decoder *decoder;
+    struct aws_array_list frames; /* contains h2_decoded_frame */
+};
+
+struct h2_decode_tester_options {
+    struct aws_allocator *alloc;
+    bool is_server;
+    bool skip_connection_preface;
+};
+
+int h2_decode_tester_init(struct h2_decode_tester *decode_tester, const struct h2_decode_tester_options *options);
+void h2_decode_tester_clean_up(struct h2_decode_tester *decode_tester);
+
+size_t h2_decode_tester_frame_count(const struct h2_decode_tester *decode_tester);
+struct h2_decoded_frame *h2_decode_tester_get_frame(const struct h2_decode_tester *decode_tester, size_t i);
+struct h2_decoded_frame *h2_decode_tester_latest_frame(const struct h2_decode_tester *decode_tester);
+
+/**
+ * Compare data (which may be split across N frames) against expected
+ */
+int h2_decode_tester_check_data_across_frames(
+    const struct h2_decode_tester *decode_tester,
+    uint32_t stream_id,
+    struct aws_byte_cursor expected,
+    bool expect_end_stream);
+
+/**
+ * Compare data (which may be split across N frames) against expected
+ */
+int h2_decode_tester_check_data_str_across_frames(
+    const struct h2_decode_tester *decode_tester,
+    uint32_t stream_id,
+    const char *expected,
+    bool expect_end_stream);
+
+/**
+ * Fake HTTP/2 peer.
+ * Can decode H2 frames that are written to the testing channel.
+ * Can encode H2 frames and push them into the channel in the read direction. 
+ */ +struct h2_fake_peer { + struct aws_allocator *alloc; + struct testing_channel *testing_channel; + + struct aws_h2_frame_encoder encoder; + struct h2_decode_tester decode; +}; + +struct h2_fake_peer_options { + struct aws_allocator *alloc; + struct testing_channel *testing_channel; + bool is_server; +}; + +int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_options *options); +void h2_fake_peer_clean_up(struct h2_fake_peer *peer); + +#endif /* AWS_HTTP_H2_TEST_HELPER_H */ diff --git a/tests/test_h2_decoder.c b/tests/test_h2_decoder.c index 140199276..7ba70abe1 100644 --- a/tests/test_h2_decoder.c +++ b/tests/test_h2_decoder.c @@ -12,45 +12,13 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ -#include +#include "h2_test_helper.h" #include -/* Information gathered about a given frame from decoder callbacks. - * These aren't 1:1 with literal H2 frames: - * - The decoder hides the existence of CONTINUATION frames, - * their data continues the preceding HEADERS or PUSH_PROMISE frame. - * - * - A DATA frame could appear as N on_data callbacks. - * - * - The on_end_stream callback fires after all other callbacks for that frame, - * so we count it as part of the preceding "finished" frame. 
- */ -struct frame { - enum aws_h2_frame_type type; - uint32_t stream_id; - - /* If true, we expect no further callbacks regarding this frame */ - bool finished; - - struct aws_array_list headers; /* contains aws_http_header */ - struct aws_array_list settings; /* contains aws_h2_frame_setting */ - struct aws_byte_buf data; - - bool end_stream; - uint32_t error_code; - uint32_t promised_stream_id; - bool ack; - uint32_t goaway_last_stream_id; - uint32_t goaway_debug_data_remaining; - uint8_t ping_opaque_data[AWS_H2_PING_DATA_SIZE]; - uint32_t window_size_increment; -}; - struct fixture { struct aws_allocator *allocator; - struct aws_h2_decoder *decoder; - struct aws_array_list frames; /* contains frame */ + struct h2_decode_tester decode; /* If true, run decoder over input one byte at a time */ bool one_byte_at_a_time; @@ -64,363 +32,23 @@ struct fixture { bool skip_connection_preface; }; -static int s_frame_init( - struct frame *frame, - struct aws_allocator *allocator, - enum aws_h2_frame_type type, - uint32_t stream_id) { - AWS_ZERO_STRUCT(*frame); - frame->type = type; - frame->stream_id = stream_id; - ASSERT_SUCCESS(aws_array_list_init_dynamic(&frame->headers, allocator, 16, sizeof(struct aws_http_header))); - ASSERT_SUCCESS(aws_array_list_init_dynamic(&frame->settings, allocator, 16, sizeof(struct aws_h2_frame_setting))); - ASSERT_SUCCESS(aws_byte_buf_init(&frame->data, allocator, 1024)); - return AWS_OP_SUCCESS; -} - -static void s_frame_clean_up(struct frame *frame) { - aws_array_list_clean_up(&frame->headers); - aws_array_list_clean_up(&frame->settings); - aws_byte_buf_clean_up(&frame->data); -} - -static int s_validate_finished_frame(struct frame *frame, enum aws_h2_frame_type type, uint32_t stream_id) { - ASSERT_INT_EQUALS(type, frame->type); - ASSERT_UINT_EQUALS(stream_id, frame->stream_id); - ASSERT_TRUE(frame->finished); - return AWS_OP_SUCCESS; -} - -static struct frame *s_latest_frame(struct fixture *fixture) { - 
AWS_FATAL_ASSERT(aws_array_list_length(&fixture->frames) > 0); - struct frame *frame = NULL; - aws_array_list_get_at_ptr(&fixture->frames, (void **)&frame, aws_array_list_length(&fixture->frames) - 1); - return frame; -} - -/* fixture begins recording a new frame's data */ -static int s_begin_new_frame( - struct fixture *fixture, - enum aws_h2_frame_type type, - uint32_t stream_id, - struct frame **out_frame) { - - /* If there's a previous frame, assert that we know it was finished. - * If this fails, some on_X_begin(), on_X_i(), on_X_end() loop didn't fire correctly. - * It should be impossible for an unrelated callback to fire during these loops */ - if (aws_array_list_length(&fixture->frames) > 0) { - struct frame *prev_frame = s_latest_frame(fixture); - ASSERT_TRUE(prev_frame->finished); - } - - /* Create new frame */ - struct frame new_frame; - ASSERT_SUCCESS(s_frame_init(&new_frame, fixture->allocator, type, stream_id)); - ASSERT_SUCCESS(aws_array_list_push_back(&fixture->frames, &new_frame)); - - if (out_frame) { - aws_array_list_get_at_ptr(&fixture->frames, (void **)out_frame, aws_array_list_length(&fixture->frames) - 1); - } - return AWS_OP_SUCCESS; -} - -/* fixture stops recording the latest frame's data */ -static int s_end_current_frame(struct fixture *fixture, enum aws_h2_frame_type type, uint32_t stream_id) { - struct frame *frame = s_latest_frame(fixture); - ASSERT_FALSE(frame->finished); - frame->finished = true; - ASSERT_SUCCESS(s_validate_finished_frame(frame, type, stream_id)); - return AWS_OP_SUCCESS; -} - -static int s_on_header(bool is_push_promise, uint32_t stream_id, const struct aws_http_header *header, void *userdata) { - - struct fixture *fixture = userdata; - struct frame *frame = s_latest_frame(fixture); - - /* validate */ - if (is_push_promise) { - ASSERT_INT_EQUALS(AWS_H2_FRAME_T_PUSH_PROMISE, frame->type); - } else { - ASSERT_INT_EQUALS(AWS_H2_FRAME_T_HEADERS, frame->type); - } - - ASSERT_FALSE(frame->finished); - 
ASSERT_UINT_EQUALS(frame->stream_id, stream_id); - - /* Stash header strings in frame->data. - * DO NOT resize buffer or pointers will get messed up */ - struct aws_http_header header_field = *header; - ASSERT_SUCCESS(aws_byte_buf_append_and_update(&frame->data, &header_field.name)); - ASSERT_SUCCESS(aws_byte_buf_append_and_update(&frame->data, &header_field.value)); - - ASSERT_SUCCESS(aws_array_list_push_back(&frame->headers, &header_field)); - - return AWS_OP_SUCCESS; -} - -/**************************** DECODER CALLBACKS *******************************/ - -static int s_decoder_on_headers_begin(uint32_t stream_id, void *userdata) { - struct fixture *fixture = userdata; - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_HEADERS, stream_id, NULL /*out_frame*/)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_headers_i(uint32_t stream_id, const struct aws_http_header *header, void *userdata) { - - return s_on_header(false /* is_push_promise */, stream_id, header, userdata); -} - -static int s_decoder_on_headers_end(uint32_t stream_id, void *userdata) { - struct fixture *fixture = userdata; - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_HEADERS, stream_id)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_push_promise_begin(uint32_t stream_id, uint32_t promised_stream_id, void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_PUSH_PROMISE, stream_id, &frame /*out_frame*/)); - - frame->promised_stream_id = promised_stream_id; - - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_push_promise_i(uint32_t stream_id, const struct aws_http_header *header, void *userdata) { - - return s_on_header(true /* is_push_promise */, stream_id, header, userdata); -} - -static int s_decoder_on_push_promise_end(uint32_t stream_id, void *userdata) { - struct fixture *fixture = userdata; - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_PUSH_PROMISE, 
stream_id)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_data(uint32_t stream_id, struct aws_byte_cursor data, void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - - /* Pretend each on_data callback is a full DATA frame for the purposes of these tests */ - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_DATA, stream_id, &frame)); - - /* Stash data*/ - ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&frame->data, &data)); - - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_DATA, stream_id)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_end_stream(uint32_t stream_id, void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame = s_latest_frame(fixture); - - /* Validate */ - - /* on_end_stream should fire IMMEDIATELY after on_data OR after on_headers_end. - * This timing lets the user close the stream from this callback without waiting for any trailing data/headers - */ - ASSERT_TRUE(frame->finished); - ASSERT_TRUE(frame->type == AWS_H2_FRAME_T_HEADERS || frame->type == AWS_H2_FRAME_T_DATA); - ASSERT_UINT_EQUALS(frame->stream_id, stream_id); - - ASSERT_FALSE(frame->end_stream); - - /* Stash */ - frame->end_stream = true; - - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_rst_stream(uint32_t stream_id, uint32_t error_code, void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_RST_STREAM, stream_id, &frame)); - - /* Stash data*/ - frame->error_code = error_code; - - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_RST_STREAM, stream_id)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_settings_begin(void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_SETTINGS, 0, &frame)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_settings_i(uint16_t setting_id, uint32_t value, void *userdata) { - 
struct fixture *fixture = userdata; - struct frame *frame = s_latest_frame(fixture); - - /* Validate */ - ASSERT_INT_EQUALS(AWS_H2_FRAME_T_SETTINGS, frame->type); - ASSERT_FALSE(frame->finished); - - /* Stash setting */ - struct aws_h2_frame_setting setting = {setting_id, value}; - ASSERT_SUCCESS(aws_array_list_push_back(&frame->settings, &setting)); - - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_settings_end(void *userdata) { - struct fixture *fixture = userdata; - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_SETTINGS, 0)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_settings_ack(void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/, &frame)); - - /* Stash data*/ - frame->ack = true; - - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_ping(uint8_t opaque_data[AWS_H2_PING_DATA_SIZE], void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_PING, 0 /*stream_id*/, &frame)); - - /* Stash data*/ - memcpy(frame->ping_opaque_data, opaque_data, AWS_H2_PING_DATA_SIZE); - - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_PING, 0 /*stream_id*/)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_ping_ack(uint8_t opaque_data[AWS_H2_PING_DATA_SIZE], void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_PING, 0 /*stream_id*/, &frame)); - - /* Stash data*/ - memcpy(frame->ping_opaque_data, opaque_data, AWS_H2_PING_DATA_SIZE); - frame->ack = true; - - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_PING, 0 /*stream_id*/)); - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_goaway_begin( - uint32_t last_stream, - uint32_t error_code, - uint32_t 
debug_data_length, - void *userdata) { - - struct fixture *fixture = userdata; - struct frame *frame; - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_GOAWAY, 0, &frame)); - - frame->goaway_last_stream_id = last_stream; - frame->error_code = error_code; - frame->goaway_debug_data_remaining = debug_data_length; - - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_goaway_i(struct aws_byte_cursor debug_data, void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame = s_latest_frame(fixture); - - /* Validate */ - ASSERT_INT_EQUALS(AWS_H2_FRAME_T_GOAWAY, frame->type); - ASSERT_FALSE(frame->finished); - ASSERT_TRUE(frame->goaway_debug_data_remaining >= debug_data.len); - - frame->goaway_debug_data_remaining -= (uint32_t)debug_data.len; - - /* Stash data */ - ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&frame->data, &debug_data)); - - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_goaway_end(void *userdata) { - struct fixture *fixture = userdata; - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_GOAWAY, 0)); - - struct frame *frame = s_latest_frame(fixture); - ASSERT_UINT_EQUALS(0, frame->goaway_debug_data_remaining); - - return AWS_OP_SUCCESS; -} - -static int s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata) { - struct fixture *fixture = userdata; - struct frame *frame; - ASSERT_SUCCESS(s_begin_new_frame(fixture, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, &frame)); - - frame->window_size_increment = window_size_increment; - - ASSERT_SUCCESS(s_end_current_frame(fixture, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id)); - - return AWS_OP_SUCCESS; -} - -static struct aws_h2_decoder_vtable s_decoder_vtable = { - .on_headers_begin = s_decoder_on_headers_begin, - .on_headers_i = s_decoder_on_headers_i, - .on_headers_end = s_decoder_on_headers_end, - .on_push_promise_begin = s_decoder_on_push_promise_begin, - .on_push_promise_i = s_decoder_on_push_promise_i, - .on_push_promise_end = 
s_decoder_on_push_promise_end, - .on_data = s_decoder_on_data, - .on_end_stream = s_decoder_on_end_stream, - .on_rst_stream = s_decoder_on_rst_stream, - .on_settings_begin = s_decoder_on_settings_begin, - .on_settings_i = s_decoder_on_settings_i, - .on_settings_end = s_decoder_on_settings_end, - .on_settings_ack = s_decoder_on_settings_ack, - .on_ping = s_decoder_on_ping, - .on_ping_ack = s_decoder_on_ping_ack, - .on_goaway_begin = s_decoder_on_goaway_begin, - .on_goaway_i = s_decoder_on_goaway_i, - .on_goaway_end = s_decoder_on_goaway_end, - .on_window_update = s_decoder_on_window_update, -}; - -/************************** END DECODER CALLBACKS *****************************/ - /* Note that init() and clean_up() are called multiple times in "split tests", * which re-runs the test at each possible split point */ static int s_fixture_init(struct fixture *fixture, struct aws_allocator *allocator) { fixture->allocator = allocator; - ASSERT_SUCCESS(aws_array_list_init_dynamic(&fixture->frames, allocator, 2, sizeof(struct frame))); - struct aws_h2_decoder_params options = { + struct h2_decode_tester_options options = { .alloc = allocator, - .vtable = &s_decoder_vtable, - .userdata = fixture, .is_server = fixture->is_server, .skip_connection_preface = fixture->skip_connection_preface, }; - fixture->decoder = aws_h2_decoder_new(&options); - ASSERT_NOT_NULL(fixture->decoder); + ASSERT_SUCCESS(h2_decode_tester_init(&fixture->decode, &options)); + return AWS_OP_SUCCESS; } static void s_fixture_clean_up(struct fixture *fixture) { - for (size_t i = 0; i < aws_array_list_length(&fixture->frames); ++i) { - struct frame *frame; - aws_array_list_get_at_ptr(&fixture->frames, (void **)&frame, i); - s_frame_clean_up(frame); - } - aws_array_list_clean_up(&fixture->frames); - aws_h2_decoder_destroy(fixture->decoder); + h2_decode_tester_clean_up(&fixture->decode); } static int s_fixture_test_setup(struct aws_allocator *allocator, void *ctx) { @@ -519,7 +147,7 @@ static int 
s_decode_all(struct fixture *fixture, struct aws_byte_cursor input) { /* Decode input one byte at a time */ while (input.len) { struct aws_byte_cursor one_byte = aws_byte_cursor_advance(&input, 1); - if (aws_h2_decode(fixture->decoder, &one_byte)) { + if (aws_h2_decode(fixture->decode.decoder, &one_byte)) { return AWS_OP_ERR; } ASSERT_UINT_EQUALS(0, one_byte.len); @@ -534,19 +162,19 @@ static int s_decode_all(struct fixture *fixture, struct aws_byte_cursor input) { } struct aws_byte_cursor first_chunk = aws_byte_cursor_advance(&input, fixture->split_i); - if (aws_h2_decode(fixture->decoder, &first_chunk)) { + if (aws_h2_decode(fixture->decode.decoder, &first_chunk)) { return AWS_OP_ERR; } ASSERT_UINT_EQUALS(0, first_chunk.len); - if (aws_h2_decode(fixture->decoder, &input)) { + if (aws_h2_decode(fixture->decode.decoder, &input)) { return AWS_OP_ERR; } ASSERT_UINT_EQUALS(0, input.len); } else { /* Decode buffer all at once */ - if (aws_h2_decode(fixture->decoder, &input)) { + if (aws_h2_decode(fixture->decode.decoder, &input)) { return AWS_OP_ERR; } ASSERT_UINT_EQUALS(0, input.len); @@ -555,37 +183,6 @@ static int s_decode_all(struct fixture *fixture, struct aws_byte_cursor input) { return AWS_OP_SUCCESS; } -/* Compare data (which might be split across N frames) to expected string */ -static int s_check_data_across_frames( - struct fixture *fixture, - uint32_t stream_id, - const char *expected, - bool expect_end_stream) { - - struct aws_byte_buf data; - ASSERT_SUCCESS(aws_byte_buf_init(&data, fixture->allocator, 128)); - - bool found_end_stream = false; - - for (size_t frame_i = 0; frame_i < aws_array_list_length(&fixture->frames); ++frame_i) { - struct frame *frame; - aws_array_list_get_at_ptr(&fixture->frames, (void **)&frame, frame_i); - - if (frame->type == AWS_H2_FRAME_T_DATA && frame->stream_id == stream_id) { - struct aws_byte_cursor frame_data = aws_byte_cursor_from_buf(&frame->data); - ASSERT_SUCCESS(aws_byte_buf_append_dynamic(&data, &frame_data)); - - 
found_end_stream = frame->end_stream; - } - } - - ASSERT_BIN_ARRAYS_EQUALS(expected, strlen(expected), data.buffer, data.len); - ASSERT_UINT_EQUALS(expect_end_stream, found_end_stream); - - aws_byte_buf_clean_up(&data); - return AWS_OP_SUCCESS; -} - /* Test DATA frame */ H2_DECODER_TEST_CASE(h2_decoder_data) { (void)allocator; @@ -605,9 +202,10 @@ H2_DECODER_TEST_CASE(h2_decoder_data) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); - ASSERT_SUCCESS(s_check_data_across_frames(fixture, 0x76543210 /*stream_id*/, "hello", true /*end_stream*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); + ASSERT_SUCCESS(h2_decode_tester_check_data_str_across_frames( + &fixture->decode, 0x76543210 /*stream_id*/, "hello", true /*end_stream*/)); return AWS_OP_SUCCESS; } @@ -632,9 +230,10 @@ H2_DECODER_TEST_CASE(h2_decoder_data_padded) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); - ASSERT_SUCCESS(s_check_data_across_frames(fixture, 0x76543210 /*stream_id*/, "hello", false /*end_stream*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); + ASSERT_SUCCESS(h2_decode_tester_check_data_str_across_frames( + &fixture->decode, 0x76543210 /*stream_id*/, "hello", false /*end_stream*/)); return AWS_OP_SUCCESS; } @@ -659,9 +258,10 @@ H2_DECODER_TEST_CASE(h2_decoder_data_pad_length_zero) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); - ASSERT_SUCCESS(s_check_data_across_frames(fixture, 0x76543210 /*stream_id*/, "hello", true /*end_stream*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); + ASSERT_SUCCESS(h2_decode_tester_check_data_str_across_frames( + &fixture->decode, 0x76543210 /*stream_id*/, "hello", true /*end_stream*/)); return AWS_OP_SUCCESS; } @@ -684,7 +284,8 @@ H2_DECODER_TEST_CASE(h2_decoder_data_empty) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - ASSERT_SUCCESS(s_check_data_across_frames(fixture, 0x76543210 /*stream_id*/, "", false /*end_stream*/)); + ASSERT_SUCCESS(h2_decode_tester_check_data_str_across_frames( + &fixture->decode, 0x76543210 /*stream_id*/, "", false /*end_stream*/)); return AWS_OP_SUCCESS; } @@ -709,7 +310,8 @@ H2_DECODER_TEST_CASE(h2_decoder_data_empty_padded) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - ASSERT_SUCCESS(s_check_data_across_frames(fixture, 0x76543210 /*stream_id*/, "", false /*end_stream*/)); + ASSERT_SUCCESS(h2_decode_tester_check_data_str_across_frames( + &fixture->decode, 0x76543210 /*stream_id*/, "", false /*end_stream*/)); return AWS_OP_SUCCESS; } @@ -735,9 +337,10 @@ H2_DECODER_TEST_CASE(h2_decoder_data_ignores_unknown_flags) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); - ASSERT_SUCCESS(s_check_data_across_frames(fixture, 0x76543210 /*stream_id*/, "hello", true /*end_stream*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x76543210 /*stream_id*/)); + ASSERT_SUCCESS(h2_decode_tester_check_data_str_across_frames( + &fixture->decode, 0x76543210 /*stream_id*/, "hello", true /*end_stream*/)); return AWS_OP_SUCCESS; } @@ -805,26 +408,26 @@ H2_DECODER_TEST_CASE(h2_decoder_stream_id_ignores_reserved_bit) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_DATA, 0x7FFFFFFF /*stream_id*/)); - ASSERT_SUCCESS(s_check_data_across_frames(fixture, 0x7FFFFFFF /*stream_id*/, "hello", true /*end_stream*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_DATA, 0x7FFFFFFF /*stream_id*/)); + ASSERT_SUCCESS(h2_decode_tester_check_data_str_across_frames( + &fixture->decode, 0x7FFFFFFF /*stream_id*/, "hello", true /*end_stream*/)); return AWS_OP_SUCCESS; } static int s_check_header( - struct frame *frame, + struct h2_decoded_frame *frame, size_t header_idx, const char *name, const char *value, enum aws_http_header_compression compression) { - ASSERT_TRUE(header_idx < aws_array_list_length(&frame->headers)); - struct aws_http_header *header_field; - aws_array_list_get_at_ptr(&frame->headers, (void **)&header_field, header_idx); + struct aws_http_header header_field; + ASSERT_SUCCESS(aws_http_headers_get_index(frame->headers, header_idx, &header_field)); - ASSERT_BIN_ARRAYS_EQUALS(name, strlen(name), header_field->name.ptr, header_field->name.len); - ASSERT_BIN_ARRAYS_EQUALS(value, strlen(value), header_field->value.ptr, header_field->value.len); - ASSERT_INT_EQUALS(compression, header_field->compression); + ASSERT_BIN_ARRAYS_EQUALS(name, strlen(name), header_field.name.ptr, header_field.name.len); + ASSERT_BIN_ARRAYS_EQUALS(value, strlen(value), header_field.value.ptr, header_field.value.len); + ASSERT_INT_EQUALS(compression, header_field.compression); return AWS_OP_SUCCESS; } @@ -849,9 +452,9 @@ H2_DECODER_TEST_CASE(h2_decoder_headers) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(1, 
aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; @@ -879,9 +482,9 @@ H2_DECODER_TEST_CASE(h2_decoder_headers_padded) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(1, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); return AWS_OP_SUCCESS; } @@ -910,9 +513,9 @@ H2_DECODER_TEST_CASE(h2_decoder_headers_priority) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(1, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); return AWS_OP_SUCCESS; } @@ 
-942,9 +545,9 @@ H2_DECODER_TEST_CASE(h2_decoder_headers_ignores_unknown_flags) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(1, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; @@ -1055,9 +658,9 @@ H2_DECODER_TEST_CASE(h2_decoder_continuation) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(2, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_TRUE(frame->end_stream); @@ -1095,9 +698,9 @@ H2_DECODER_TEST_CASE(h2_decoder_continuation_ignores_unknown_flags) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, 
AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(2, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); return AWS_OP_SUCCESS; @@ -1135,9 +738,9 @@ H2_DECODER_TEST_CASE(h2_decoder_continuation_header_field_spans_frames) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(1, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_FALSE(frame->end_stream); return AWS_OP_SUCCESS; @@ -1180,9 +783,9 @@ H2_DECODER_TEST_CASE(h2_decoder_continuation_many_frames) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(3, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 
0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, "hi", "mom", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); @@ -1225,9 +828,9 @@ H2_DECODER_TEST_CASE(h2_decoder_continuation_empty_payloads) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); - ASSERT_UINT_EQUALS(1, aws_array_list_length(&frame->headers)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_HEADERS, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_TRUE(frame->end_stream); return AWS_OP_SUCCESS; @@ -1368,7 +971,7 @@ H2_DECODER_TEST_CASE(h2_decoder_priority) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Our implementation currently chooses to ignore PRIORITY frames, so no callbacks should have fired */ - ASSERT_UINT_EQUALS(0, aws_array_list_length(&fixture->frames)); + ASSERT_UINT_EQUALS(0, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } @@ -1392,7 +995,7 @@ H2_DECODER_TEST_CASE(h2_decoder_priority_ignores_unknown_flags) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Our implementation currently chooses to ignore PRIORITY frames, so no callbacks should have fired */ - ASSERT_UINT_EQUALS(0, aws_array_list_length(&fixture->frames)); + 
ASSERT_UINT_EQUALS(0, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } @@ -1484,9 +1087,9 @@ H2_DECODER_TEST_CASE(h2_decoder_rst_stream) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - ASSERT_UINT_EQUALS(1, aws_array_list_length(&fixture->frames)); - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_RST_STREAM, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_RST_STREAM, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(0xFFEEDDCC, frame->error_code); return AWS_OP_SUCCESS; } @@ -1510,9 +1113,9 @@ H2_DECODER_TEST_CASE(h2_decoder_rst_stream_ignores_unknown_flags) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - ASSERT_UINT_EQUALS(1, aws_array_list_length(&fixture->frames)); - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_RST_STREAM, 0x76543210 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_RST_STREAM, 0x76543210 /*stream_id*/)); ASSERT_UINT_EQUALS(0xFFEEDDCC, frame->error_code); return AWS_OP_SUCCESS; } @@ -1605,9 +1208,9 @@ H2_DECODER_TEST_CASE(h2_decoder_settings) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - ASSERT_UINT_EQUALS(1, aws_array_list_length(&fixture->frames)); - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_FALSE(frame->ack); ASSERT_UINT_EQUALS(2, aws_array_list_length(&frame->settings)); @@ -1641,9 +1244,9 @@ H2_DECODER_TEST_CASE(h2_decoder_settings_empty) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - ASSERT_UINT_EQUALS(1, aws_array_list_length(&fixture->frames)); - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_FALSE(frame->ack); ASSERT_UINT_EQUALS(0, aws_array_list_length(&frame->settings)); @@ -1668,9 +1271,9 @@ H2_DECODER_TEST_CASE(h2_decoder_settings_ack) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - ASSERT_UINT_EQUALS(1, aws_array_list_length(&fixture->frames)); - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_TRUE(frame->ack); ASSERT_UINT_EQUALS(0, aws_array_list_length(&frame->settings)); @@ -1701,9 +1304,9 @@ H2_DECODER_TEST_CASE(h2_decoder_settings_ignores_unknown_ids) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - ASSERT_UINT_EQUALS(1, aws_array_list_length(&fixture->frames)); - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_FALSE(frame->ack); ASSERT_UINT_EQUALS(1, aws_array_list_length(&frame->settings)); @@ -1734,9 +1337,9 @@ H2_DECODER_TEST_CASE(h2_decoder_settings_ignores_unknown_flags) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - ASSERT_UINT_EQUALS(1, aws_array_list_length(&fixture->frames)); - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); + ASSERT_UINT_EQUALS(1, h2_decode_tester_frame_count(&fixture->decode)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_SETTINGS, 0 /*stream_id*/)); ASSERT_TRUE(frame->ack); ASSERT_UINT_EQUALS(0, aws_array_list_length(&frame->settings)); @@ -1837,10 +1440,10 @@ H2_DECODER_TEST_CASE(h2_decoder_push_promise) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); - ASSERT_UINT_EQUALS(3, aws_array_list_length(&frame->headers)); + ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", "/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); @@ -1874,10 +1477,10 @@ H2_DECODER_TEST_CASE(h2_decoder_push_promise_ignores_unknown_flags) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + 
ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); - ASSERT_UINT_EQUALS(3, aws_array_list_length(&frame->headers)); + ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", "/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); @@ -1925,10 +1528,10 @@ H2_DECODER_TEST_CASE(h2_decoder_push_promise_continuation) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); - ASSERT_UINT_EQUALS(3, aws_array_list_length(&frame->headers)); + ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", "/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); @@ -2042,8 +1645,8 @@ H2_DECODER_TEST_CASE(h2_decoder_ping) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_PING, 0x0 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PING, 0x0 /*stream_id*/)); ASSERT_BIN_ARRAYS_EQUALS("pingpong", AWS_H2_PING_DATA_SIZE, frame->ping_opaque_data, AWS_H2_PING_DATA_SIZE); ASSERT_FALSE(frame->ack); return AWS_OP_SUCCESS; @@ -2068,8 +1671,8 @@ H2_DECODER_TEST_CASE(h2_decoder_ping_ack) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_PING, 0x0 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_PING, 0x0 /*stream_id*/)); ASSERT_BIN_ARRAYS_EQUALS("pingpong", AWS_H2_PING_DATA_SIZE, frame->ping_opaque_data, AWS_H2_PING_DATA_SIZE); ASSERT_TRUE(frame->ack); return AWS_OP_SUCCESS; @@ -2161,8 +1764,8 @@ H2_DECODER_TEST_CASE(h2_decoder_goaway) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_GOAWAY, 0x0 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_GOAWAY, 0x0 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->goaway_last_stream_id); ASSERT_UINT_EQUALS(0xFEEDBEEF, frame->error_code); ASSERT_BIN_ARRAYS_EQUALS("bye", 3, frame->data.buffer, frame->data.len); @@ -2191,8 +1794,8 @@ H2_DECODER_TEST_CASE(h2_decoder_goaway_empty) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_GOAWAY, 0x0 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_GOAWAY, 0x0 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->goaway_last_stream_id); ASSERT_UINT_EQUALS(0xFEEDBEEF, frame->error_code); ASSERT_BIN_ARRAYS_EQUALS("", 0, frame->data.buffer, frame->data.len); @@ -2265,8 +1868,8 @@ H2_DECODER_TEST_CASE(h2_decoder_window_update_connection) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. */ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_WINDOW_UPDATE, 0x0 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_WINDOW_UPDATE, 0x0 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->window_size_increment); return AWS_OP_SUCCESS; @@ -2292,8 +1895,8 @@ H2_DECODER_TEST_CASE(h2_decoder_window_update_stream) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate. 
*/ - struct frame *frame = s_latest_frame(fixture); - ASSERT_SUCCESS(s_validate_finished_frame(frame, AWS_H2_FRAME_T_WINDOW_UPDATE, 0x1 /*stream_id*/)); + struct h2_decoded_frame *frame = h2_decode_tester_latest_frame(&fixture->decode); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(frame, AWS_H2_FRAME_T_WINDOW_UPDATE, 0x1 /*stream_id*/)); ASSERT_UINT_EQUALS(0x7F000001, frame->window_size_increment); return AWS_OP_SUCCESS; @@ -2377,7 +1980,7 @@ H2_DECODER_TEST_CASE(h2_decoder_unknown_frame_type_ignored) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* No callbacks should have fired about any of these frames */ - ASSERT_UINT_EQUALS(0, aws_array_list_length(&fixture->frames)); + ASSERT_UINT_EQUALS(0, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } @@ -2386,11 +1989,11 @@ static int s_get_finished_frame_i( size_t i, enum aws_h2_frame_type type, uint32_t stream_id, - struct frame **out_frame) { + struct h2_decoded_frame **out_frame) { - ASSERT_TRUE(i < aws_array_list_length(&fixture->frames)); - aws_array_list_get_at_ptr(&fixture->frames, (void **)out_frame, i); - ASSERT_SUCCESS(s_validate_finished_frame(*out_frame, type, stream_id)); + ASSERT_TRUE(i < h2_decode_tester_frame_count(&fixture->decode)); + *out_frame = h2_decode_tester_get_frame(&fixture->decode, i); + ASSERT_SUCCESS(h2_decoded_frame_check_finished(*out_frame, type, stream_id)); return AWS_OP_SUCCESS; } @@ -2506,11 +2109,11 @@ H2_DECODER_TEST_CASE(h2_decoder_many_frames_in_a_row) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); size_t frame_i = 0; - struct frame *frame; + struct h2_decoded_frame *frame; /* Validate HEADERS (and its CONTINUATION) */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_HEADERS, 0x1 /*stream-id*/, &frame)); - ASSERT_UINT_EQUALS(2, aws_array_list_length(&frame->headers)); + ASSERT_UINT_EQUALS(2, aws_http_headers_count(frame->headers)); 
ASSERT_SUCCESS(s_check_header(frame, 0, ":status", "302", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, "cache-control", "private", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_FALSE(frame->end_stream); @@ -2522,7 +2125,7 @@ H2_DECODER_TEST_CASE(h2_decoder_many_frames_in_a_row) { /* Validate PUSH_PROMISE (and its CONTINUATION) */ ASSERT_SUCCESS(s_get_finished_frame_i(fixture, frame_i++, AWS_H2_FRAME_T_PUSH_PROMISE, 0x1 /*stream-id*/, &frame)); ASSERT_UINT_EQUALS(2, frame->promised_stream_id); - ASSERT_UINT_EQUALS(3, aws_array_list_length(&frame->headers)); + ASSERT_UINT_EQUALS(3, aws_http_headers_count(frame->headers)); ASSERT_SUCCESS(s_check_header(frame, 0, ":method", "GET", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 1, ":scheme", "https", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); ASSERT_SUCCESS(s_check_header(frame, 2, ":path", "/index.html", AWS_HTTP_HEADER_COMPRESSION_USE_CACHE)); @@ -2556,7 +2159,7 @@ H2_DECODER_TEST_CASE(h2_decoder_many_frames_in_a_row) { ASSERT_TRUE(frame->ack); /* Ensure no further frames reported */ - ASSERT_UINT_EQUALS(frame_i, aws_array_list_length(&fixture->frames)); + ASSERT_UINT_EQUALS(frame_i, h2_decode_tester_frame_count(&fixture->decode)); return AWS_OP_SUCCESS; } @@ -2588,9 +2191,9 @@ H2_DECODER_ON_CLIENT_PREFACE_TEST(h2_decoder_preface_from_server) { ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - ASSERT_UINT_EQUALS(2, aws_array_list_length(&fixture->frames)); + ASSERT_UINT_EQUALS(2, h2_decode_tester_frame_count(&fixture->decode)); - struct frame *frame; + struct h2_decoded_frame *frame; ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 0, AWS_H2_FRAME_T_SETTINGS, 0 /*stream-id*/, &frame)); ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 1, AWS_H2_FRAME_T_PING, 0 /*stream-id*/, &frame)); @@ -2686,9 +2289,9 @@ H2_DECODER_ON_SERVER_PREFACE_TEST(h2_decoder_preface_from_client) { 
ASSERT_SUCCESS(s_decode_all(fixture, aws_byte_cursor_from_array(input, sizeof(input)))); /* Validate */ - ASSERT_UINT_EQUALS(2, aws_array_list_length(&fixture->frames)); + ASSERT_UINT_EQUALS(2, h2_decode_tester_frame_count(&fixture->decode)); - struct frame *frame; + struct h2_decoded_frame *frame; ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 0, AWS_H2_FRAME_T_SETTINGS, 0 /*stream-id*/, &frame)); ASSERT_SUCCESS(s_get_finished_frame_i(fixture, 1, AWS_H2_FRAME_T_PING, 0 /*stream-id*/, &frame)); From 3bbc224759616ba9abb18e70f7ab26485fbe7039 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Sat, 21 Mar 2020 00:57:52 -0700 Subject: [PATCH 22/35] merge with master --- include/aws/http/private/h2_frames.h | 10 +- include/aws/http/request_response.h | 7 - source/h2_frames.c | 320 +-------------------------- source/hpack.c | 28 --- source/request_response.c | 12 - tests/test_h2_encoder.c | 4 - tests/test_h2_headers.c | 4 - 7 files changed, 5 insertions(+), 380 deletions(-) diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index dcfa87ec0..33f9e63e8 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -112,15 +112,13 @@ struct aws_h2_frame_priority_settings { struct aws_h2_frame { const struct aws_h2_frame_vtable *vtable; struct aws_allocator *alloc; -<<<<<<< HEAD - enum aws_h2_frame_type type; - uint32_t stream_id; - struct aws_linked_list_node node; -======= struct aws_linked_list_node node; enum aws_h2_frame_type type; uint32_t stream_id; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 + + /* If true, frame will be sent before those with normal priority. + * Useful for frames like PING ACK where low latency is important. 
*/ + bool high_priority; }; /* A h2 setting and its value, used in SETTINGS frame */ diff --git a/include/aws/http/request_response.h b/include/aws/http/request_response.h index 6d0d3315d..aeada382d 100644 --- a/include/aws/http/request_response.h +++ b/include/aws/http/request_response.h @@ -362,18 +362,11 @@ void aws_http_headers_release(struct aws_http_headers *headers); * The underlying strings are copied. */ AWS_HTTP_API -<<<<<<< HEAD -int aws_http_headers_add_v2(struct aws_http_headers *headers, const struct aws_http_header *header); - -/** - * Deprecated. Use aws_http_headers_add_v2(). -======= int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header); /** * Add a header. * The underlying strings are copied. ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 */ AWS_HTTP_API int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value); diff --git a/source/h2_frames.c b/source/h2_frames.c index 146608119..63f454296 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -163,18 +163,6 @@ static void s_frame_priority_settings_encode( * | Weight (8) | * +-+-------------+ */ -<<<<<<< HEAD - bool all_wrote = true; - - /* Write the top 4 bytes */ - uint32_t top_bytes = priority->stream_dependency | ((uint32_t)priority->stream_dependency_exclusive << 31); - all_wrote &= aws_byte_buf_write_be32(output, top_bytes); - - /* Write the priority weight */ - all_wrote &= aws_byte_buf_write_u8(output, priority->weight); - - AWS_ASSERT(all_wrote); -======= bool writes_ok = true; /* Write the top 4 bytes */ @@ -185,7 +173,6 @@ static void s_frame_priority_settings_encode( writes_ok &= aws_byte_buf_write_u8(output, priority->weight); AWS_ASSERT(writes_ok); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } /*********************************************************************************************************************** @@ -223,23 +210,6 @@ static void 
s_frame_prefix_encode( * |R| Stream Identifier (31) | * +=+=============================================================+ */ -<<<<<<< HEAD - bool all_wrote = true; - - /* Write length */ - all_wrote &= aws_byte_buf_write_be24(output, (uint32_t)length); - - /* Write type */ - all_wrote &= aws_byte_buf_write_u8(output, type); - - /* Write flags */ - all_wrote &= aws_byte_buf_write_u8(output, flags); - - /* Write stream id (with reserved first bit) */ - all_wrote &= aws_byte_buf_write_be32(output, stream_id & s_31_bit_mask); - - AWS_ASSERT(all_wrote); -======= bool writes_ok = true; /* Write length */ @@ -255,7 +225,6 @@ static void s_frame_prefix_encode( writes_ok &= aws_byte_buf_write_be32(output, stream_id); AWS_ASSERT(writes_ok); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } /*********************************************************************************************************************** @@ -364,12 +333,6 @@ int aws_h2_encode_data_frame( } } -<<<<<<< HEAD - /* - * Write in the other parts of the frame. - */ - bool all_wrote = true; -======= ENCODER_LOGF( TRACE, encoder, @@ -382,7 +345,6 @@ int aws_h2_encode_data_frame( * Write in the other parts of the frame. 
*/ bool writes_ok = true; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 /* Write the frame prefix */ const size_t payload_len = body_sub_buf.len + payload_overhead; @@ -390,11 +352,7 @@ int aws_h2_encode_data_frame( /* Write pad length */ if (flags & AWS_H2_FRAME_F_PADDED) { -<<<<<<< HEAD - all_wrote &= aws_byte_buf_write_u8(output, pad_length); -======= writes_ok &= aws_byte_buf_write_u8(output, pad_length); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } /* Increment output->len to jump over the body that we already wrote in */ @@ -403,17 +361,10 @@ int aws_h2_encode_data_frame( /* Write padding */ if (flags & AWS_H2_FRAME_F_PADDED) { -<<<<<<< HEAD - all_wrote &= aws_byte_buf_write_u8_n(output, 0, pad_length); - } - - AWS_ASSERT(all_wrote); -======= writes_ok &= aws_byte_buf_write_u8_n(output, 0, pad_length); } AWS_ASSERT(writes_ok); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 return AWS_OP_SUCCESS; handle_waiting_for_more_space: @@ -612,43 +563,6 @@ void s_encode_single_header_block_frame( promised_stream_id = &frame->promised_stream_id; payload_overhead += 4; } -<<<<<<< HEAD - - } else /* CONTINUATION */ { - frame_type = AWS_H2_FRAME_T_CONTINUATION; - } - - /* - * Figure out what size header-block fragment should go in this frame. - */ - - size_t max_payload; - if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) { - goto handle_waiting_for_more_space; - } - - size_t max_fragment; - if (aws_sub_size_checked(max_payload, payload_overhead, &max_fragment)) { - goto handle_waiting_for_more_space; - } - - const size_t fragment_len = aws_min_size(max_fragment, frame->header_block_cursor.len); - if (fragment_len == frame->header_block_cursor.len) { - /* This will finish the header-block */ - flags |= AWS_H2_FRAME_F_END_HEADERS; - } else { - /* If we're not finishing the header-block, is it even worth trying to send this frame now? 
*/ - const size_t even_worth_sending_threshold = s_frame_prefix_length + payload_overhead; - if (fragment_len < even_worth_sending_threshold) { - goto handle_waiting_for_more_space; - } - } - - /* - * Ok, it fits! Write the frame - */ - bool all_wrote = true; -======= } else /* CONTINUATION */ { frame_type = AWS_H2_FRAME_T_CONTINUATION; @@ -693,7 +607,6 @@ void s_encode_single_header_block_frame( (flags & AWS_H2_FRAME_F_END_STREAM) ? " END_STREAM" : ""); bool writes_ok = true; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 /* Write the frame prefix */ const size_t payload_len = fragment_len + payload_overhead; @@ -702,11 +615,7 @@ void s_encode_single_header_block_frame( /* Write pad length */ if (flags & AWS_H2_FRAME_F_PADDED) { AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION); -<<<<<<< HEAD - all_wrote &= aws_byte_buf_write_u8(output, pad_length); -======= writes_ok &= aws_byte_buf_write_u8(output, pad_length); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } /* Write priority */ @@ -718,36 +627,21 @@ void s_encode_single_header_block_frame( /* Write promised stream ID */ if (promised_stream_id) { AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE); -<<<<<<< HEAD - all_wrote &= aws_byte_buf_write_be32(output, *promised_stream_id & s_31_bit_mask); -======= writes_ok &= aws_byte_buf_write_be32(output, *promised_stream_id); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } /* Write header-block fragment */ if (fragment_len > 0) { struct aws_byte_cursor fragment = aws_byte_cursor_advance(&frame->header_block_cursor, fragment_len); -<<<<<<< HEAD - all_wrote &= aws_byte_buf_write_from_whole_cursor(output, fragment); -======= writes_ok &= aws_byte_buf_write_from_whole_cursor(output, fragment); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } /* Write padding */ if (flags & AWS_H2_FRAME_F_PADDED) { -<<<<<<< HEAD - all_wrote &= aws_byte_buf_write_u8_n(output, 0, pad_length); - } - - AWS_ASSERT(all_wrote); -======= writes_ok &= 
aws_byte_buf_write_u8_n(output, 0, pad_length); } AWS_ASSERT(writes_ok); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 /* Success! Wrote entire frame. It's safe to change state now */ frame->state = @@ -786,105 +680,6 @@ static int s_frame_headers_encode( aws_error_name(aws_last_error())); goto error; } -<<<<<<< HEAD - - frame->header_block_cursor = aws_byte_cursor_from_buf(&frame->whole_encoded_header_block); - frame->state = AWS_H2_HEADERS_STATE_FIRST_FRAME; - } - - /* Write frames (HEADER or PUSH_PROMISE, followed by N CONTINUATION frames) - * until we're done writing header-block or the buffer is too full to continue */ - bool waiting_for_more_space = false; - while (frame->state < AWS_H2_HEADERS_STATE_COMPLETE && !waiting_for_more_space) { - s_encode_single_header_block_frame(frame, encoder, output, &waiting_for_more_space); - } - - if (waiting_for_more_space) { - ENCODER_LOGF( - TRACE, - encoder, - "Insufficient space to finish encoding %s header-block for stream %" PRIu32 " right now", - aws_h2_frame_type_to_str(frame->base.type), - frame->base.stream_id); - } - - *complete = frame->state == AWS_H2_HEADERS_STATE_COMPLETE; - return AWS_OP_SUCCESS; - -error: - return AWS_OP_ERR; -} - -/*********************************************************************************************************************** - * aws_h2_frame_prebuilt - Used by small simple frame types that we can pre-encode at the time of creation. - * The pre-encoded buffer is then just copied bit-by-bit during the actual "encode()" function. - * - * It's safe to pre-encode a frame if it doesn't query/mutate any external state. So PING is totally great - * to pre-encode, but HEADERS (which queries MAX_FRAME_SIZE and mutates the HPACK table) would be a bad candidate. 
- **********************************************************************************************************************/ -struct aws_h2_frame_prebuilt { - struct aws_h2_frame base; - struct aws_byte_buf encoded_buf; /* pre-encoded H2 frame */ - struct aws_byte_cursor send_progress; /* tracks progress sending encoded buffer */ -}; - -DEFINE_FRAME_VTABLE(prebuilt); - -/* Can't pre-encode a frame unless it's guaranteed to fit, regardless of current settings. */ -static size_t s_prebuilt_payload_max(void) { - return aws_h2_settings_bounds[AWS_H2_SETTINGS_MAX_FRAME_SIZE][0]; -} - -/* Create aws_h2_frame_prebuilt and encode frame prefix into frame->encoded_buf. - * Caller must encode the payload to fill the rest of the encoded_buf. */ -static struct aws_h2_frame_prebuilt *s_h2_frame_new_prebuilt( - struct aws_allocator *allocator, - enum aws_h2_frame_type type, - uint32_t stream_id, - size_t payload_len, - uint8_t flags) { - - AWS_PRECONDITION(payload_len <= s_prebuilt_payload_max()); - - const size_t encoded_frame_len = s_frame_prefix_length + payload_len; - - /* Use single allocation for frame and buffer storage */ - struct aws_h2_frame_prebuilt *frame; - void *storage; - if (!aws_mem_acquire_many( - allocator, 2, &frame, sizeof(struct aws_h2_frame_prebuilt), &storage, encoded_frame_len)) { - return NULL; - } - - AWS_ZERO_STRUCT(*frame); - s_init_frame_base(&frame->base, allocator, type, &s_frame_prebuilt_vtable, stream_id); - frame->encoded_buf = aws_byte_buf_from_empty_array(storage, encoded_frame_len); - frame->send_progress = aws_byte_cursor_from_array(storage, encoded_frame_len); - - /* Write frame prefix */ - s_frame_prefix_encode(type, stream_id, payload_len, flags, &frame->encoded_buf); - - return frame; -} - -static void s_frame_prebuilt_destroy(struct aws_h2_frame *frame_base) { - aws_mem_release(frame_base->alloc, frame_base); -} - -static int s_frame_prebuilt_encode( - struct aws_h2_frame *frame_base, - struct aws_h2_frame_encoder *encoder, - struct 
aws_byte_buf *output, - bool *complete) { - - struct aws_h2_frame_prebuilt *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_prebuilt, base); - - if (frame->send_progress.len == frame->encoded_buf.len) { - ENCODER_LOGF( - TRACE, - encoder, - "Encoding frame type=%s stream_id=%" PRIu32 " - begin", -======= frame->header_block_cursor = aws_byte_cursor_from_buf(&frame->whole_encoded_header_block); frame->state = AWS_H2_HEADERS_STATE_FIRST_FRAME; @@ -1001,22 +796,10 @@ static int s_frame_prebuilt_encode( TRACE, encoder, "Resume encoding frame type=%s stream_id=%" PRIu32, ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 aws_h2_frame_type_to_str(frame->base.type), frame->base.stream_id); } -<<<<<<< HEAD - bool all_wrote = true; - - /* Write as much of the pre-encoded frame as will fit */ - size_t chunk_len = aws_min_size(frame->send_progress.len, output->capacity - output->len); - struct aws_byte_cursor chunk = aws_byte_cursor_advance(&frame->send_progress, chunk_len); - all_wrote &= aws_byte_buf_write_from_whole_cursor(output, chunk); - AWS_ASSERT(all_wrote); - - *complete = frame->send_progress.len == 0; -======= bool writes_ok = true; /* Copy as much as we can from cursor (pre-encoded frame contents) to output. 
@@ -1038,7 +821,6 @@ static int s_frame_prebuilt_encode( *complete = false; } ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 return AWS_OP_SUCCESS; } @@ -1102,15 +884,9 @@ struct aws_h2_frame *aws_h2_frame_new_rst_stream( * | Error Code (32) | * +---------------------------------------------------------------+ */ -<<<<<<< HEAD - bool all_wrote = true; - all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code); - AWS_ASSERT(all_wrote); -======= bool writes_ok = true; writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code); AWS_ASSERT(writes_ok); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 return &frame->base; } @@ -1165,21 +941,12 @@ struct aws_h2_frame *aws_h2_frame_new_settings( * | Value (32) | * +---------------------------------------------------------------+ */ -<<<<<<< HEAD - bool all_wrote = true; - for (size_t i = 0; i < num_settings; ++i) { - all_wrote &= aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id); - all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value); - } - AWS_ASSERT(all_wrote); -======= bool writes_ok = true; for (size_t i = 0; i < num_settings; ++i) { writes_ok &= aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id); writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value); } AWS_ASSERT(writes_ok); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 return &frame->base; } @@ -1210,16 +977,10 @@ struct aws_h2_frame *aws_h2_frame_new_ping( * | | * +---------------------------------------------------------------+ */ -<<<<<<< HEAD - bool all_wrote = true; - all_wrote &= aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_H2_PING_DATA_SIZE); - AWS_ASSERT(all_wrote); -======= bool writes_ok = true; writes_ok &= aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_H2_PING_DATA_SIZE); AWS_ASSERT(writes_ok); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 - + frame->base.high_priority = ack; return &frame->base; } @@ 
-1247,59 +1008,6 @@ struct aws_h2_frame *aws_h2_frame_new_goaway( debug_data.len = 0; } -<<<<<<< HEAD - /* GOAWAY can be pre-encoded */ - const uint8_t flags = 0; - const size_t payload_len = debug_data.len + s_frame_goaway_length_min; - const uint32_t stream_id = 0; - - struct aws_h2_frame_prebuilt *frame = - s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_GOAWAY, stream_id, payload_len, flags); - if (!frame) { - return NULL; - } - - /* Write the GOAWAY payload (RFC-7540 6.8): - * +-+-------------------------------------------------------------+ - * |R| Last-Stream-ID (31) | - * +-+-------------------------------------------------------------+ - * | Error Code (32) | - * +---------------------------------------------------------------+ - * | Additional Debug Data (*) | - * +---------------------------------------------------------------+ - */ - bool all_wrote = true; - all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, last_stream_id & s_31_bit_mask); - all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code); - all_wrote &= aws_byte_buf_write_from_whole_cursor(&frame->encoded_buf, debug_data); - AWS_ASSERT(all_wrote); - - return &frame->base; -} - -/*********************************************************************************************************************** - * WINDOW_UPDATE - **********************************************************************************************************************/ -static const size_t s_frame_window_update_length = 4; - -struct aws_h2_frame *aws_h2_frame_new_window_update( - struct aws_allocator *allocator, - uint32_t stream_id, - uint32_t window_size_increment) { - - /* Note: stream_id may be zero or non-zero */ - if (stream_id > AWS_H2_STREAM_ID_MAX) { - aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); - return NULL; - } - - if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) { - AWS_LOGF_ERROR( - AWS_LS_HTTP_ENCODER, - "Window increment size %" PRIu32 " exceeds HTTP/2 max %" PRIu32, - 
window_size_increment, - AWS_H2_WINDOW_UPDATE_MAX); -======= /* It would be illegal to send a lower value, this is unrecoverable */ AWS_FATAL_ASSERT(last_stream_id <= AWS_H2_STREAM_ID_MAX); @@ -1344,35 +1052,10 @@ struct aws_h2_frame *aws_h2_frame_new_window_update( /* Note: stream_id may be zero or non-zero */ if (stream_id > AWS_H2_STREAM_ID_MAX) { ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); return NULL; } -<<<<<<< HEAD - /* WINDOW_UPDATE can be pre-encoded */ - const uint8_t flags = 0; - const size_t payload_len = s_frame_window_update_length; - - struct aws_h2_frame_prebuilt *frame = - s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, payload_len, flags); - if (!frame) { - return NULL; - } - - /* Write the WINDOW_UPDATE payload (RFC-7540 6.9): - * +-+-------------------------------------------------------------+ - * |R| Window Size Increment (31) | - * +-+-------------------------------------------------------------+ - */ - bool all_wrote = true; - all_wrote &= aws_byte_buf_write_be32(&frame->encoded_buf, window_size_increment); - AWS_ASSERT(all_wrote); - - return &frame->base; -} - -======= if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) { AWS_LOGF_ERROR( AWS_LS_HTTP_ENCODER, @@ -1405,7 +1088,6 @@ struct aws_h2_frame *aws_h2_frame_new_window_update( return &frame->base; } ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 void aws_h2_frame_destroy(struct aws_h2_frame *frame) { if (frame) { frame->vtable->destroy(frame); diff --git a/source/hpack.c b/source/hpack.c index 53e7f7eca..09119278e 100644 --- a/source/hpack.c +++ b/source/hpack.c @@ -69,11 +69,7 @@ static int s_ensure_space(struct aws_byte_buf *output, size_t required_space) { /* Prefer to double capacity, but if that's not enough grow to exactly required_capacity */ size_t double_capacity = aws_add_size_saturating(output->capacity, output->capacity); -<<<<<<< HEAD - size_t reserve = required_capacity > double_capacity ? 
required_capacity : double_capacity; -======= size_t reserve = aws_max_size(required_capacity, double_capacity); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 return aws_byte_buf_reserve(output, reserve); } @@ -87,11 +83,7 @@ int aws_hpack_encode_integer( const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size); AWS_ASSERT((starting_bits & prefix_mask) == 0); -<<<<<<< HEAD - const size_t output_len_backup = output->len; -======= const size_t original_len = output->len; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 if (integer < prefix_mask) { /* If the integer fits inside the specified number of bits but won't be all 1's, just write it */ @@ -131,11 +123,7 @@ int aws_hpack_encode_integer( return AWS_OP_SUCCESS; error: -<<<<<<< HEAD - output->len = output_len_backup; -======= output->len = original_len; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 return AWS_OP_ERR; } @@ -824,11 +812,7 @@ int aws_hpack_encode_string( AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode)); AWS_PRECONDITION(output); -<<<<<<< HEAD - const size_t output_len_backup = output->len; -======= const size_t original_len = output->len; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 /* Determine length of encoded string (and whether or not to use huffman) */ uint8_t use_huffman; @@ -901,11 +885,7 @@ int aws_hpack_encode_string( return AWS_OP_SUCCESS; error: -<<<<<<< HEAD - output->len = output_len_backup; -======= output->len = original_len; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 aws_huffman_encoder_reset(&context->encoder); return AWS_OP_ERR; } @@ -1290,11 +1270,7 @@ static int s_encode_header_field( AWS_PRECONDITION(header); AWS_PRECONDITION(output); -<<<<<<< HEAD - size_t output_len_backup = output->len; -======= size_t original_len = output->len; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 /* Search for header-field in tables */ bool found_indexed_value; @@ -1366,11 +1342,7 @@ static int s_encode_header_field( return AWS_OP_SUCCESS; error: -<<<<<<< 
HEAD - output->len = output_len_backup; -======= output->len = original_len; ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 return AWS_OP_ERR; } diff --git a/source/request_response.c b/source/request_response.c index 22452b9f3..5f9fbbf20 100644 --- a/source/request_response.c +++ b/source/request_response.c @@ -97,11 +97,7 @@ void aws_http_headers_acquire(struct aws_http_headers *headers) { aws_atomic_fetch_add(&headers->refcount, 1); } -<<<<<<< HEAD -int aws_http_headers_add_v2(struct aws_http_headers *headers, const struct aws_http_header *header) { -======= int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header) { ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 AWS_PRECONDITION(headers); AWS_PRECONDITION(header); AWS_PRECONDITION(aws_byte_cursor_is_valid(&header->name) && aws_byte_cursor_is_valid(&header->value)); @@ -140,11 +136,7 @@ int aws_http_headers_add_header(struct aws_http_headers *headers, const struct a int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) { struct aws_http_header header = {.name = name, .value = value}; -<<<<<<< HEAD - return aws_http_headers_add_v2(headers, &header); -======= return aws_http_headers_add_header(headers, &header); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } void aws_http_headers_clear(struct aws_http_headers *headers) { @@ -248,11 +240,7 @@ int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aw const size_t orig_count = aws_http_headers_count(headers); for (size_t i = 0; i < count; ++i) { -<<<<<<< HEAD - if (aws_http_headers_add_v2(headers, &array[i])) { -======= if (aws_http_headers_add_header(headers, &array[i])) { ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 goto error; } } diff --git a/tests/test_h2_encoder.c b/tests/test_h2_encoder.c index 6b8521fa9..d573dd865 100644 --- a/tests/test_h2_encoder.c +++ b/tests/test_h2_encoder.c @@ -119,11 +119,7 @@ 
TEST_CASE(h2_encoder_headers) { struct aws_http_header h = DEFINE_STATIC_HEADER(":status", "302", USE_CACHE); -<<<<<<< HEAD - ASSERT_SUCCESS(aws_http_headers_add_v2(headers, &h)); -======= ASSERT_SUCCESS(aws_http_headers_add_header(headers, &h)); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 struct aws_h2_frame_priority_settings priority = { .stream_dependency_exclusive = true, diff --git a/tests/test_h2_headers.c b/tests/test_h2_headers.c index d4b21d787..b0979cbfd 100644 --- a/tests/test_h2_headers.c +++ b/tests/test_h2_headers.c @@ -126,11 +126,7 @@ static int s_header_test_run(struct aws_allocator *allocator, void *ctx) { } if (result.type == AWS_HPACK_DECODE_T_HEADER_FIELD) { -<<<<<<< HEAD - ASSERT_SUCCESS(aws_http_headers_add_v2(fixture->decoded_headers, &result.data.header_field)); -======= ASSERT_SUCCESS(aws_http_headers_add_header(fixture->decoded_headers, &result.data.header_field)); ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 } } From 6e46d9c01a73026b75c785dfaef0131948f565fa Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Sat, 21 Mar 2020 22:51:47 -0700 Subject: [PATCH 23/35] test_h2_client.c is using fake peer --- tests/h2_test_helper.c | 14 ++++++++++++++ tests/h2_test_helper.h | 8 ++++++++ tests/test_h2_client.c | 39 ++++++++++++++++++--------------------- 3 files changed, 40 insertions(+), 21 deletions(-) diff --git a/tests/h2_test_helper.c b/tests/h2_test_helper.c index 27d56b82e..168c601f6 100644 --- a/tests/h2_test_helper.c +++ b/tests/h2_test_helper.c @@ -16,6 +16,7 @@ #include "h2_test_helper.h" #include +#include static const void *s_logging_id = (void *)0xAAAAAAAA; @@ -458,3 +459,16 @@ void h2_fake_peer_clean_up(struct h2_fake_peer *peer) { h2_decode_tester_clean_up(&peer->decode); AWS_ZERO_STRUCT(peer); } + +int h2_fake_peer_decode_messages_from_testing_channel(struct h2_fake_peer *peer) { + struct aws_byte_buf msg_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&msg_buf, peer->alloc, 128)); + 
ASSERT_SUCCESS(testing_channel_drain_written_messages(peer->testing_channel, &msg_buf)); + + struct aws_byte_cursor msg_cursor = aws_byte_cursor_from_buf(&msg_buf); + ASSERT_SUCCESS(aws_h2_decode(peer->decode.decoder, &msg_cursor)); + ASSERT_UINT_EQUALS(0, msg_cursor.len); + + aws_byte_buf_clean_up(&msg_buf); + return AWS_OP_SUCCESS; +} diff --git a/tests/h2_test_helper.h b/tests/h2_test_helper.h index de2118683..b243f11e3 100644 --- a/tests/h2_test_helper.h +++ b/tests/h2_test_helper.h @@ -129,4 +129,12 @@ struct h2_fake_peer_options { int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_options *options); void h2_fake_peer_clean_up(struct h2_fake_peer *peer); +/* Pop all written messages off the testing-channel and run them through the peer's decode-tester */ +int h2_fake_peer_decode_messages_from_testing_channel(struct h2_fake_peer *peer); + +int h2_fake_peer_send_frame(struct h2_fake_peer *peer, struct aws_h2_frame *frame); + +/* Peer sends the connection preface with default settings */ +int h2_fake_peer_send_connection_preface_default_settings(struct h2_fake_peer *peer); + #endif /* AWS_HTTP_H2_TEST_HELPER_H */ diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 5d2ef3edb..ad3df0867 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -13,8 +13,8 @@ * permissions and limitations under the License. 
*/ +#include "h2_test_helper.h" #include - #include #include @@ -27,6 +27,7 @@ struct tester { struct aws_allocator *alloc; struct aws_http_connection *connection; struct testing_channel testing_channel; + struct h2_fake_peer peer; } s_tester; static int s_tester_init(struct aws_allocator *alloc, void *ctx) { @@ -50,11 +51,19 @@ static int s_tester_init(struct aws_allocator *alloc, void *ctx) { s_tester.connection->vtable->on_channel_handler_installed(&s_tester.connection->channel_handler, slot); } + struct h2_fake_peer_options peer_options = { + .alloc = alloc, + .testing_channel = &s_tester.testing_channel, + .is_server = true, + }; + ASSERT_SUCCESS(h2_fake_peer_init(&s_tester.peer, &peer_options)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); return AWS_OP_SUCCESS; } static int s_tester_clean_up(void) { + h2_fake_peer_clean_up(&s_tester.peer); aws_http_connection_release(s_tester.connection); ASSERT_SUCCESS(testing_channel_clean_up(&s_tester.testing_channel)); aws_http_library_clean_up(); @@ -102,30 +111,18 @@ TEST_CASE(h2_client_request_create) { return s_tester_clean_up(); } -/* Test that client automatically sends the HTTP/2 Connection Preface */ +/* Test that client automatically sends the HTTP/2 Connection Preface (magic string, followed by SETTINGS frame) */ TEST_CASE(h2_client_connection_preface_sent) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); - struct aws_byte_buf expected; - ASSERT_SUCCESS(aws_byte_buf_init(&expected, s_tester.alloc, 1024)); - - ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&expected, aws_h2_connection_preface_client_string)); - - /* clang-format off */ - uint8_t expected_settings[] = { - 0x00, 0x00, 0x00, /* Length (24) */ - AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ - 0x00, /* Flags (8) */ - 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ - }; - /* clang-format on */ - - ASSERT_TRUE(aws_byte_buf_write(&expected, expected_settings, sizeof(expected_settings))); - - 
ASSERT_SUCCESS(testing_channel_check_written_messages( - &s_tester.testing_channel, s_tester.alloc, aws_byte_cursor_from_buf(&expected))); + /* Have the fake peer to run its decoder on what the client has written. + * The decoder will raise an error if it doesn't receive the "client connection preface string" first. */ + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); - aws_byte_buf_clean_up(&expected); + /* Now check that client sent SETTINGS frame */ + struct h2_decoded_frame *first_written_frame = h2_decode_tester_get_frame(&s_tester.peer.decode, 0); + ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_SETTINGS, first_written_frame->type); + ASSERT_FALSE(first_written_frame->ack); return s_tester_clean_up(); } From 37e386aae6a24a78fb1f38e65e75cc0f33906354 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Sun, 22 Mar 2020 11:04:14 -0700 Subject: [PATCH 24/35] test_for_ping_ack --- source/h2_connection.c | 75 +++++++++++++++++++++---------------- tests/CMakeLists.txt | 1 + tests/test_h2_client.c | 84 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 129 insertions(+), 31 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index ef22a3811..fd48ee2e8 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -297,16 +297,22 @@ void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connecti AWS_PRECONDITION(frame->type != AWS_H2_FRAME_T_DATA); AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); - aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &frame->node); -} - -void aws_h2_connection_enqueue_outgoing_frame_from_head( - struct aws_h2_connection *connection, - struct aws_h2_frame *frame) { - AWS_PRECONDITION(frame->type != AWS_H2_FRAME_T_DATA); - AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); - - aws_linked_list_push_front(&connection->thread_data.outgoing_frames_queue, 
&frame->node); + if (frame->high_priority) { + /* Check from the head of the queue, and find a node with normal priority, and insert before it */ + struct aws_linked_list_node *iter = aws_linked_list_begin(&connection->thread_data.outgoing_frames_queue); + /* one past the last element */ + const struct aws_linked_list_node *end = aws_linked_list_end(&connection->thread_data.outgoing_frames_queue); + while (iter != end) { + struct aws_h2_frame *frame_i = AWS_CONTAINER_OF(iter, struct aws_h2_frame, node); + if (!frame_i->high_priority) { + break; + } + iter = iter->next; + } + aws_linked_list_insert_before(iter, &frame->node); + } else { + aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &frame->node); + } } static void s_on_channel_write_complete( @@ -382,21 +388,6 @@ static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enu while (!aws_linked_list_empty(outgoing_frames_queue)) { struct aws_linked_list_node *frame_node = aws_linked_list_front(outgoing_frames_queue); struct aws_h2_frame *frame = AWS_CONTAINER_OF(frame_node, struct aws_h2_frame, node); -<<<<<<< HEAD - - bool frame_complete; - if (aws_h2_encode_frame(&connection->thread_data.encoder, frame, &msg->message_data, &frame_complete)) { - CONNECTION_LOGF( - ERROR, - connection, - "Error encoding frame: type=%s stream=%" PRIu32 " error=%s", - aws_h2_frame_type_to_str(frame->type), - frame->stream_id, - aws_error_name(aws_last_error())); - goto error; - } - -======= bool frame_complete; if (aws_h2_encode_frame(&connection->thread_data.encoder, frame, &msg->message_data, &frame_complete)) { @@ -410,7 +401,6 @@ static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enu goto error; } ->>>>>>> 64aa5fbc363f8a6c99abe4278720ff15fbe2f957 if (!frame_complete) { if (msg->message_data.len == 0) { /* We're in trouble if an empty message isn't big enough for this frame to do any work with */ @@ -531,7 +521,7 @@ static int s_decoder_on_ping(uint8_t 
opaque_data[AWS_H2_PING_DATA_SIZE], void *u } /* PING responses SHOULD be given higher priority than any other frame, so it will be inserted at the head of the * queue */ - aws_h2_connection_enqueue_outgoing_frame_from_head(connection, ping_ack_frame); + aws_h2_connection_enqueue_outgoing_frame(connection, ping_ack_frame); s_try_write_outgoing_frames(connection); return AWS_OP_SUCCESS; error: @@ -782,16 +772,39 @@ static int s_handler_process_read_message( struct aws_channel_handler *handler, struct aws_channel_slot *slot, struct aws_io_message *message) { - - (void)handler; (void)slot; - (void)message; + struct aws_h2_connection *connection = handler->impl; + + CONNECTION_LOGF( + TRACE, + connection, + "id=%p: H2 connection Begin processing message of size %zu.", + (void *)&connection->base, + message->message_data.len); /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, * so we can just keep the aws_channel's read-window wide open */ + struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); + if (connection->thread_data.is_reading_stopped) { + CONNECTION_LOGF( + ERROR, + connection, + "id=%p: Cannot process message because connection is shutting down.", + (void *)&connection->base); + + aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); + goto shutdown; + } /* #TODO update read window by however much we just read */ + aws_h2_decode(connection->thread_data.decoder, &message_cursor); - return aws_raise_error(AWS_ERROR_UNIMPLEMENTED); + return AWS_OP_SUCCESS; +shutdown: + if (message) { + aws_mem_release(message->allocator, message); + } + s_stop(connection, true, true, true, aws_last_error()); + return AWS_OP_SUCCESS; } static int s_handler_process_write_message( diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 49024220c..eb2a1437b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -300,6 +300,7 @@ add_h2_decoder_test_set(h2_decoder_err_bad_preface_from_client_3) 
add_test_case(h2_client_sanity_check) add_test_case(h2_client_request_create) add_test_case(h2_client_connection_preface_sent) +add_test_case(h2_client_ping_ack) add_test_case(server_new_destroy) add_test_case(connection_setup_shutdown) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 5d2ef3edb..b9bb5d6cd 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -14,6 +14,7 @@ */ #include +#include #include #include @@ -22,11 +23,16 @@ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) +#define MAX_FRAME_SIZE 16384 + /* Singleton used by tests in this file */ struct tester { struct aws_allocator *alloc; struct aws_http_connection *connection; struct testing_channel testing_channel; + + struct aws_h2_frame_encoder encoder; + struct aws_array_list frames; /* contains frame */ } s_tester; static int s_tester_init(struct aws_allocator *alloc, void *ctx) { @@ -50,6 +56,7 @@ static int s_tester_init(struct aws_allocator *alloc, void *ctx) { s_tester.connection->vtable->on_channel_handler_installed(&s_tester.connection->channel_handler, slot); } + ASSERT_SUCCESS(aws_h2_frame_encoder_init(&s_tester.encoder, alloc, NULL /*logging_id*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); return AWS_OP_SUCCESS; } @@ -57,10 +64,44 @@ static int s_tester_init(struct aws_allocator *alloc, void *ctx) { static int s_tester_clean_up(void) { aws_http_connection_release(s_tester.connection); ASSERT_SUCCESS(testing_channel_clean_up(&s_tester.testing_channel)); + aws_h2_frame_encoder_clean_up(&s_tester.encoder); aws_http_library_clean_up(); return AWS_OP_SUCCESS; } +/* send the frame into the testing channel */ +static int s_send_frame(struct aws_h2_frame *frame) { + ASSERT_NOT_NULL(frame); + struct aws_byte_buf buffer; + + /* Allocate more room than necessary, easier to debug the full output than a failed aws_h2_encode_frame() call */ + ASSERT_SUCCESS(aws_byte_buf_init(&buffer, 
s_tester.alloc, MAX_FRAME_SIZE)); + bool frame_complete; + ASSERT_SUCCESS(aws_h2_encode_frame(&s_tester.encoder, frame, &buffer, &frame_complete)); + ASSERT_UINT_EQUALS(true, frame_complete); + ASSERT_SUCCESS(testing_channel_push_read_data(&s_tester.testing_channel, aws_byte_cursor_from_buf(&buffer))); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + + /* clean up */ + aws_byte_buf_clean_up(&buffer); + aws_h2_frame_destroy(frame); + return AWS_OP_SUCCESS; +} + +static int s_tester_send_default_setting() { + struct aws_h2_frame_setting settings[] = { + {.id = AWS_H2_SETTINGS_ENABLE_PUSH, .value = 1}, /* real world value */ + {.id = 0x0000, .value = 0x00000000}, /* min value */ + {.id = 0xFFFF, .value = 0xFFFFFFFF}, /* max value */ + }; + + struct aws_h2_frame *frame = + aws_h2_frame_new_settings(s_tester.alloc, settings, AWS_ARRAY_SIZE(settings), false /*ack*/); + ASSERT_SUCCESS(s_send_frame(frame)); + + return AWS_OP_SUCCESS; +} + /* Test the common setup/teardown used by all tests in this file */ TEST_CASE(h2_client_sanity_check) { ASSERT_SUCCESS(s_tester_init(allocator, ctx)); @@ -129,3 +170,46 @@ TEST_CASE(h2_client_connection_preface_sent) { return s_tester_clean_up(); } + +/* Test that client will automatically send the PING ACK frame back, when the PING frame is received */ +TEST_CASE(h2_client_ping_ack) { + ASSERT_SUCCESS(s_tester_init(allocator, ctx)); + + /* Connection preface requires that SETTINGS be sent first (RFC-7540 3.5). 
*/ + ASSERT_SUCCESS(s_tester_send_default_setting()); + + uint8_t opaque_data[AWS_H2_PING_DATA_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7}; + + struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, false /*ack*/, opaque_data); + ASSERT_NOT_NULL(frame); + + /* clang-format off */ + uint8_t expected_settings[] = { + /* SETTINGS FRAME - empty settings frame is acceptable in preface */ + 0x00, 0x00, 0x00, /* Length (24) */ + AWS_H2_FRAME_T_SETTINGS, /* Type (8) */ + 0x00, /* Flags (8) */ + 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ + + /* PING FRAME - send another frame to be sure decoder is now functioning normally */ + 0x00, 0x00, 0x08, /* Length (24) */ + AWS_H2_FRAME_T_PING, /* Type (8) */ + 0x1, /* Flags (8) */ + 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ + /* PING */ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* Opaque Data (64) */ + }; + /* clang-format on */ + + ASSERT_SUCCESS(s_send_frame(frame)); + struct aws_byte_buf expected; + ASSERT_SUCCESS(aws_byte_buf_init(&expected, s_tester.alloc, 1024)); + + ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&expected, aws_h2_connection_preface_client_string)); + ASSERT_TRUE(aws_byte_buf_write(&expected, expected_settings, sizeof(expected_settings))); + ASSERT_SUCCESS(testing_channel_check_written_messages( + &s_tester.testing_channel, s_tester.alloc, aws_byte_cursor_from_buf(&expected))); + + aws_byte_buf_clean_up(&expected); + return s_tester_clean_up(); +} From 1f62b817bd262bce71bbb8cc214324ea33c92baf Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Sun, 22 Mar 2020 22:15:50 -0700 Subject: [PATCH 25/35] more h2_fake_peer functionality --- tests/h2_test_helper.c | 39 +++++++++++++++++++++++++++++++++++++++ tests/h2_test_helper.h | 23 +++++++++++++++++++++-- 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/tests/h2_test_helper.c b/tests/h2_test_helper.c index 168c601f6..4a6e3ef49 100644 --- a/tests/h2_test_helper.c +++ b/tests/h2_test_helper.c @@ 
-446,6 +446,7 @@ int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_optio AWS_ZERO_STRUCT(*peer); peer->alloc = options->alloc; peer->testing_channel = options->testing_channel; + peer->is_server = options->is_server; ASSERT_SUCCESS(aws_h2_frame_encoder_init(&peer->encoder, peer->alloc, s_logging_id)); @@ -472,3 +473,41 @@ int h2_fake_peer_decode_messages_from_testing_channel(struct h2_fake_peer *peer) aws_byte_buf_clean_up(&msg_buf); return AWS_OP_SUCCESS; } + +int h2_fake_peer_send_frame(struct h2_fake_peer *peer, struct aws_h2_frame *frame) { + bool frame_complete = false; + while (!frame_complete) { + struct aws_io_message *msg = aws_channel_acquire_message_from_pool( + peer->testing_channel->channel, AWS_IO_MESSAGE_APPLICATION_DATA, g_aws_channel_max_fragment_size); + ASSERT_NOT_NULL(msg); + + ASSERT_SUCCESS(aws_h2_encode_frame(&peer->encoder, frame, &msg->message_data, &frame_complete)); + ASSERT_TRUE(msg->message_data.len != 0); + + ASSERT_SUCCESS(testing_channel_push_read_message(peer->testing_channel, msg)); + } + + aws_h2_frame_destroy(frame); + return AWS_OP_SUCCESS; +} + +int h2_fake_peer_send_connection_preface(struct h2_fake_peer *peer, struct aws_h2_frame *settings) { + if (!peer->is_server) { + /* Client must first send magic string */ + ASSERT_SUCCESS(testing_channel_push_read_data(peer->testing_channel, aws_h2_connection_preface_client_string)); + } + + /* Both server and client send SETTINGS as first proper frame */ + ASSERT_SUCCESS(h2_fake_peer_send_frame(peer, settings)); + + return AWS_OP_SUCCESS; +} + +int h2_fake_peer_send_connection_preface_default_settings(struct h2_fake_peer *peer) { + /* Empty SETTINGS frame means "everything default" */ + struct aws_h2_frame *settings = aws_h2_frame_new_settings(peer->alloc, NULL, 0, false /*ack*/); + ASSERT_NOT_NULL(settings); + + ASSERT_SUCCESS(h2_fake_peer_send_connection_preface(peer, settings)); + return AWS_OP_SUCCESS; +} diff --git a/tests/h2_test_helper.h 
b/tests/h2_test_helper.h index b243f11e3..36cd870c4 100644 --- a/tests/h2_test_helper.h +++ b/tests/h2_test_helper.h @@ -67,6 +67,8 @@ int h2_decoded_frame_check_finished( enum aws_h2_frame_type expected_type, uint32_t expected_stream_id); +/******************************************************************************/ + /** * Translates decoder callbacks into an array-list of h2_decoded_frames. */ @@ -107,6 +109,8 @@ int h2_decode_tester_check_data_str_across_frames( const char *expected, bool expect_end_stream); +/******************************************************************************/ + /** * Fake HTTP/2 peer. * Can decode H2 frames that are are written to the testing channel. @@ -118,6 +122,7 @@ struct h2_fake_peer { struct aws_h2_frame_encoder encoder; struct h2_decode_tester decode; + bool is_server; }; struct h2_fake_peer_options { @@ -129,12 +134,26 @@ struct h2_fake_peer_options { int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_options *options); void h2_fake_peer_clean_up(struct h2_fake_peer *peer); -/* Pop all written messages off the testing-channel and run them through the peer's decode-tester */ +/** + * Pop all written messages off the testing-channel and run them through the peer's decode-tester + */ int h2_fake_peer_decode_messages_from_testing_channel(struct h2_fake_peer *peer); +/** + * Encode frame and push it into the testing-channel in the read-direction. + * Takes ownership of frame and destroys after sending. + */ int h2_fake_peer_send_frame(struct h2_fake_peer *peer, struct aws_h2_frame *frame); -/* Peer sends the connection preface with default settings */ +/** + * Peer sends the connection preface with specified settings. + * Takes ownership of frame and destroys after sending + */ +int h2_fake_peer_send_connection_preface(struct h2_fake_peer *peer, struct aws_h2_frame *settings); + +/** + * Peer sends the connection preface with default settings. 
+ */ int h2_fake_peer_send_connection_preface_default_settings(struct h2_fake_peer *peer); #endif /* AWS_HTTP_H2_TEST_HELPER_H */ From 8c9bd17b74f5712a575fc6d762cb834f4f4cce0d Mon Sep 17 00:00:00 2001 From: Michael Graeb Date: Sun, 22 Mar 2020 22:39:12 -0700 Subject: [PATCH 26/35] msvc warning --- tests/h2_test_helper.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/h2_test_helper.c b/tests/h2_test_helper.c index 4a6e3ef49..d2e63ac1c 100644 --- a/tests/h2_test_helper.c +++ b/tests/h2_test_helper.c @@ -18,8 +18,6 @@ #include #include -static const void *s_logging_id = (void *)0xAAAAAAAA; - /******************************************************************************* * h2_decoded_frame ******************************************************************************/ @@ -413,7 +411,6 @@ int h2_decode_tester_init(struct h2_decode_tester *decode_tester, const struct h .alloc = options->alloc, .vtable = &s_decoder_vtable, .userdata = decode_tester, - .logging_id = s_logging_id, .is_server = options->is_server, .skip_connection_preface = options->skip_connection_preface, }; @@ -448,7 +445,7 @@ int h2_fake_peer_init(struct h2_fake_peer *peer, const struct h2_fake_peer_optio peer->testing_channel = options->testing_channel; peer->is_server = options->is_server; - ASSERT_SUCCESS(aws_h2_frame_encoder_init(&peer->encoder, peer->alloc, s_logging_id)); + ASSERT_SUCCESS(aws_h2_frame_encoder_init(&peer->encoder, peer->alloc, NULL /*logging_id*/)); struct h2_decode_tester_options decode_options = {.alloc = options->alloc, .is_server = options->is_server}; ASSERT_SUCCESS(h2_decode_tester_init(&peer->decode, &decode_options)); From 8c8623151cc9d46f5e0e51e83dd48b4a2d78ded0 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 09:24:45 -0700 Subject: [PATCH 27/35] delete the message --- source/h2_connection.c | 5 +++++ tests/test_h2_client.c | 14 +++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git 
a/source/h2_connection.c b/source/h2_connection.c index fd48ee2e8..9ea359b97 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -798,6 +798,11 @@ static int s_handler_process_read_message( /* #TODO update read window by however much we just read */ aws_h2_decode(connection->thread_data.decoder, &message_cursor); + /* release message */ + if (message) { + aws_mem_release(message->allocator, message); + message = NULL; + } return AWS_OP_SUCCESS; shutdown: if (message) { diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index b9bb5d6cd..5f6a95502 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -183,6 +183,12 @@ TEST_CASE(h2_client_ping_ack) { struct aws_h2_frame *frame = aws_h2_frame_new_ping(allocator, false /*ack*/, opaque_data); ASSERT_NOT_NULL(frame); + ASSERT_SUCCESS(s_send_frame(frame)); + struct aws_byte_buf expected; + ASSERT_SUCCESS(aws_byte_buf_init(&expected, s_tester.alloc, 1024)); + + /* The channel will receive the preface and the ping ACK frame */ + ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&expected, aws_h2_connection_preface_client_string)); /* clang-format off */ uint8_t expected_settings[] = { /* SETTINGS FRAME - empty settings frame is acceptable in preface */ @@ -194,18 +200,12 @@ TEST_CASE(h2_client_ping_ack) { /* PING FRAME - send another frame to be sure decoder is now functioning normally */ 0x00, 0x00, 0x08, /* Length (24) */ AWS_H2_FRAME_T_PING, /* Type (8) */ - 0x1, /* Flags (8) */ + 0x1, /* Flags (8) ACK */ 0x00, 0x00, 0x00, 0x00, /* Reserved (1) | Stream Identifier (31) */ /* PING */ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* Opaque Data (64) */ }; /* clang-format on */ - - ASSERT_SUCCESS(s_send_frame(frame)); - struct aws_byte_buf expected; - ASSERT_SUCCESS(aws_byte_buf_init(&expected, s_tester.alloc, 1024)); - - ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&expected, aws_h2_connection_preface_client_string)); ASSERT_TRUE(aws_byte_buf_write(&expected, expected_settings, 
sizeof(expected_settings))); ASSERT_SUCCESS(testing_channel_check_written_messages( &s_tester.testing_channel, s_tester.alloc, aws_byte_cursor_from_buf(&expected))); From 30acbe3d4c7b9c80f6c41ed01277d22c2b6fd333 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 09:30:06 -0700 Subject: [PATCH 28/35] prototype function --- tests/test_h2_client.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 5f6a95502..6aa5a8ac1 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -88,7 +88,7 @@ static int s_send_frame(struct aws_h2_frame *frame) { return AWS_OP_SUCCESS; } -static int s_tester_send_default_setting() { +static int s_tester_send_default_setting(void) { struct aws_h2_frame_setting settings[] = { {.id = AWS_H2_SETTINGS_ENABLE_PUSH, .value = 1}, /* real world value */ {.id = 0x0000, .value = 0x00000000}, /* min value */ @@ -186,7 +186,7 @@ TEST_CASE(h2_client_ping_ack) { ASSERT_SUCCESS(s_send_frame(frame)); struct aws_byte_buf expected; ASSERT_SUCCESS(aws_byte_buf_init(&expected, s_tester.alloc, 1024)); - + /* The channel will receive the preface and the ping ACK frame */ ASSERT_TRUE(aws_byte_buf_write_from_whole_cursor(&expected, aws_h2_connection_preface_client_string)); /* clang-format off */ @@ -209,7 +209,7 @@ TEST_CASE(h2_client_ping_ack) { ASSERT_TRUE(aws_byte_buf_write(&expected, expected_settings, sizeof(expected_settings))); ASSERT_SUCCESS(testing_channel_check_written_messages( &s_tester.testing_channel, s_tester.alloc, aws_byte_cursor_from_buf(&expected))); - + aws_byte_buf_clean_up(&expected); return s_tester_clean_up(); } From b2ad8a3f3b78432376447aee3fa31656a64d4793 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 10:45:32 -0700 Subject: [PATCH 29/35] tiny changes for making the code clear --- source/h2_connection.c | 8 +++++--- tests/test_h2_client.c | 10 +++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git 
a/source/h2_connection.c b/source/h2_connection.c index 9ea359b97..3198eb98c 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -782,9 +782,6 @@ static int s_handler_process_read_message( (void *)&connection->base, message->message_data.len); - /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, - * so we can just keep the aws_channel's read-window wide open */ - struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); if (connection->thread_data.is_reading_stopped) { CONNECTION_LOGF( ERROR, @@ -795,6 +792,10 @@ static int s_handler_process_read_message( aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); goto shutdown; } + /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, + * so we can just keep the aws_channel's read-window wide open */ + struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); + /* #TODO update read window by however much we just read */ aws_h2_decode(connection->thread_data.decoder, &message_cursor); @@ -808,6 +809,7 @@ static int s_handler_process_read_message( if (message) { aws_mem_release(message->allocator, message); } + /* stop reading, stop writing, schedule sutdown */ s_stop(connection, true, true, true, aws_last_error()); return AWS_OP_SUCCESS; } diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index d500b99ed..4b079bcc0 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -147,13 +147,13 @@ TEST_CASE(h2_client_ping_ack) { * The decoder will raise an error if it doesn't receive the "client connection preface string" first. 
*/ ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); - /* Now check that client sent PING ACK frame, it should be the last frame received by peer + /* Now check that client sent PING ACK frame, it should be the latest frame received by peer * The last frame should be a ping type with ack on, and identical payload */ - struct h2_decoded_frame *last_written_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); - ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_PING, last_written_frame->type); - ASSERT_TRUE(last_written_frame->ack); + struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); + ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_PING, latest_frame->type); + ASSERT_TRUE(latest_frame->ack); ASSERT_BIN_ARRAYS_EQUALS( - opaque_data, AWS_H2_PING_DATA_SIZE, last_written_frame->ping_opaque_data, AWS_H2_PING_DATA_SIZE); + opaque_data, AWS_H2_PING_DATA_SIZE, latest_frame->ping_opaque_data, AWS_H2_PING_DATA_SIZE); return s_tester_clean_up(); } From 9eb6225390573ed9431d34ffbda5eecc5da4f4a2 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 10:47:23 -0700 Subject: [PATCH 30/35] clang format --- tests/test_h2_client.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 4b079bcc0..38c69da59 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -152,8 +152,7 @@ TEST_CASE(h2_client_ping_ack) { struct h2_decoded_frame *latest_frame = h2_decode_tester_latest_frame(&s_tester.peer.decode); ASSERT_UINT_EQUALS(AWS_H2_FRAME_T_PING, latest_frame->type); ASSERT_TRUE(latest_frame->ack); - ASSERT_BIN_ARRAYS_EQUALS( - opaque_data, AWS_H2_PING_DATA_SIZE, latest_frame->ping_opaque_data, AWS_H2_PING_DATA_SIZE); + ASSERT_BIN_ARRAYS_EQUALS(opaque_data, AWS_H2_PING_DATA_SIZE, latest_frame->ping_opaque_data, AWS_H2_PING_DATA_SIZE); return s_tester_clean_up(); } From 9fa2ee0ea976adac62da4b8b23148813aa2f222d Mon Sep 17 00:00:00 2001 From: Dengke 
Tang Date: Mon, 23 Mar 2020 12:52:49 -0700 Subject: [PATCH 31/35] Stop just the reading direction. --- source/h2_connection.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 3198eb98c..2bf5e4a8e 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -809,8 +809,8 @@ static int s_handler_process_read_message( if (message) { aws_mem_release(message->allocator, message); } - /* stop reading, stop writing, schedule sutdown */ - s_stop(connection, true, true, true, aws_last_error()); + /* Stop reading, because the reading error happens here */ + s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, aws_last_error()); return AWS_OP_SUCCESS; } From 41ee085bce5c1f4d9ea8563abc491f072f44a151 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 15:41:00 -0700 Subject: [PATCH 32/35] error check&update read window --- source/h2_connection.c | 28 ++++++++++++++++++++++++---- tests/test_h2_client.c | 3 +-- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 2bf5e4a8e..430077042 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -774,6 +774,7 @@ static int s_handler_process_read_message( struct aws_io_message *message) { (void)slot; struct aws_h2_connection *connection = handler->impl; + int err; CONNECTION_LOGF( TRACE, @@ -792,12 +793,31 @@ static int s_handler_process_read_message( aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); goto shutdown; } - /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, - * so we can just keep the aws_channel's read-window wide open */ + struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); + err = aws_h2_decode(connection->thread_data.decoder, &message_cursor); + if (err) { + CONNECTION_LOGF( + ERROR, + connection, + "id=%p: Decoding message failed, error %d
(%s). Closing connection", + (void *)&connection->base, + aws_last_error(), + aws_error_name(aws_last_error())); + } - /* #TODO update read window by however much we just read */ - aws_h2_decode(connection->thread_data.decoder, &message_cursor); + /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, + * so we can just keep the aws_channel's read-window wide open */ + err = aws_channel_slot_increment_read_window(slot, message->message_data.len); + if (err) { + CONNECTION_LOGF( + ERROR, + connection, + "id=%p: Incrementing read window failed, error %d (%s). Closing connection", + (void *)&connection->base, + aws_last_error(), + aws_error_name(aws_last_error())); + } /* release message */ if (message) { diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 38c69da59..4910389eb 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -22,8 +22,6 @@ AWS_TEST_CASE(NAME, s_test_##NAME); \ static int s_test_##NAME(struct aws_allocator *allocator, void *ctx) -#define MAX_FRAME_SIZE 16384 - /* Singleton used by tests in this file */ struct tester { struct aws_allocator *alloc; @@ -156,3 +154,4 @@ TEST_CASE(h2_client_ping_ack) { return s_tester_clean_up(); } +/* TODO: test that ping response is sent with higher priority than any other frame */ From 587a8f687950433cef061d256e20ef77096e7ebc Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 15:52:00 -0700 Subject: [PATCH 33/35] move comments --- source/h2_connection.c | 3 +-- source/h2_frames.c | 2 ++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 430077042..2b7a79a42 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -519,8 +519,7 @@ static int s_decoder_on_ping(uint8_t opaque_data[AWS_H2_PING_DATA_SIZE], void *u if (!ping_ack_frame) { goto error; } - /* PING responses SHOULD be given higher priority than any other frame, so it will be inserted at the head of the - * queue */ 
+ aws_h2_connection_enqueue_outgoing_frame(connection, ping_ack_frame); s_try_write_outgoing_frames(connection); return AWS_OP_SUCCESS; diff --git a/source/h2_frames.c b/source/h2_frames.c index 63dba1014..8d0157575 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -984,6 +984,8 @@ struct aws_h2_frame *aws_h2_frame_new_ping( bool writes_ok = true; writes_ok &= aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_H2_PING_DATA_SIZE); AWS_ASSERT(writes_ok); + + /* PING responses SHOULD be given higher priority than any other frame */ frame->base.high_priority = ack; return &frame->base; } From c8d716097192e997061d3777a301109ea4de9362 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 17:33:38 -0700 Subject: [PATCH 34/35] remove redundant connection id and style change --- source/h2_connection.c | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 2b7a79a42..53c5b07b8 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -773,47 +773,32 @@ static int s_handler_process_read_message( struct aws_io_message *message) { (void)slot; struct aws_h2_connection *connection = handler->impl; - int err; - CONNECTION_LOGF( - TRACE, - connection, - "id=%p: H2 connection Begin processing message of size %zu.", - (void *)&connection->base, - message->message_data.len); + CONNECTION_LOGF(TRACE, connection, "Begin processing message of size %zu.", message->message_data.len); if (connection->thread_data.is_reading_stopped) { - CONNECTION_LOGF( - ERROR, - connection, - "id=%p: Cannot process message because connection is shutting down.", - (void *)&connection->base); - + CONNECTION_LOGF(ERROR, connection, "Cannot process message because connection is shutting down."); aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); goto shutdown; } struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data); - err = 
aws_h2_decode(connection->thread_data.decoder, &message_cursor); - if (err) { + if (aws_h2_decode(connection->thread_data.decoder, &message_cursor)) { CONNECTION_LOGF( ERROR, connection, - "id=%p: Decoding message failed, error %d (%s). Closing connection", - (void *)&connection->base, + "Decoding message failed, error %d (%s). Closing connection", aws_last_error(), aws_error_name(aws_last_error())); } /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer, * so we can just keep the aws_channel's read-window wide open */ - err = aws_channel_slot_increment_read_window(slot, message->message_data.len); - if (err) { + if (aws_channel_slot_increment_read_window(slot, message->message_data.len)) { CONNECTION_LOGF( ERROR, connection, - "id=%p: Incrementing read window failed, error %d (%s). Closing connection", - (void *)&connection->base, + "Incrementing read window failed, error %d (%s). Closing connection", aws_last_error(), aws_error_name(aws_last_error())); } From 09e0d31c939e6d4733677252b0afa396deaa09bc Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 23 Mar 2020 17:50:14 -0700 Subject: [PATCH 35/35] LOGF->LOG --- source/h2_connection.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 53c5b07b8..bbdcd551c 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -777,7 +777,7 @@ static int s_handler_process_read_message( CONNECTION_LOGF(TRACE, connection, "Begin processing message of size %zu.", message->message_data.len); if (connection->thread_data.is_reading_stopped) { - CONNECTION_LOGF(ERROR, connection, "Cannot process message because connection is shutting down."); + CONNECTION_LOG(ERROR, connection, "Cannot process message because connection is shutting down."); aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED); goto shutdown; }