From c28c43bf6c14097267e16d9299e65a92f58f9614 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 25 Aug 2022 15:26:59 -0700 Subject: [PATCH 01/48] http2 stream manager canary --- CMakeLists.txt | 1 + bin/canary/CMakeLists.txt | 29 +++ bin/canary/main.c | 383 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 413 insertions(+) create mode 100644 bin/canary/CMakeLists.txt create mode 100644 bin/canary/main.c diff --git a/CMakeLists.txt b/CMakeLists.txt index bd23201c6..caf0fc227 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -101,5 +101,6 @@ if (NOT BYO_CRYPTO AND BUILD_TESTING) add_subdirectory(tests) if (NOT CMAKE_CROSSCOMPILING) add_subdirectory(bin/elasticurl) + add_subdirectory(bin/canary) endif() endif() diff --git a/bin/canary/CMakeLists.txt b/bin/canary/CMakeLists.txt new file mode 100644 index 000000000..b4911a6a0 --- /dev/null +++ b/bin/canary/CMakeLists.txt @@ -0,0 +1,29 @@ +project(canary C) + +list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") + +file(GLOB ELASTICURL_SRC + "*.c" + ) + +set(PROJECT_NAME canary) +add_executable(${PROJECT_NAME} ${ELASTICURL_SRC}) +aws_set_common_properties(${PROJECT_NAME}) + + +target_include_directories(${PROJECT_NAME} PUBLIC + $ + $) + +target_link_libraries(${PROJECT_NAME} aws-c-http) + +if (BUILD_SHARED_LIBS AND NOT WIN32) + message(INFO " elasticurl will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") +endif() + +install(TARGETS ${PROJECT_NAME} + EXPORT ${PROJECT_NAME}-targets + COMPONENT Runtime + RUNTIME + DESTINATION bin + COMPONENT Runtime) diff --git a/bin/canary/main.c b/bin/canary/main.c new file mode 100644 index 000000000..adb933f73 --- /dev/null +++ b/bin/canary/main.c @@ -0,0 +1,383 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef _MSC_VER +# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */ +# pragma warning(disable : 4204) /* Declared initializers */ +# pragma warning(disable : 4221) /* Local var in declared initializer */ +#endif + +#define DEFINE_HEADER(NAME, VALUE) \ + { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } + +const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https://localhost:8443/echo"); +const int rate_secs = 10; /* Time interval to collect data */ +const int batch_size = 100; /* The number of requests to made each batch */ +const int num_data_to_collect = 10; /* The number of data to collect */ +const enum aws_log_level log_level = AWS_LOG_LEVEL_NONE; + +struct aws_http_canary_helper { + struct aws_task task; + struct aws_event_loop *eventloop; + + int num_collected; /* number of data collected */ + uint64_t rate_ns; /* Collect data per rate_ns */ + + struct aws_atomic_var canary_finished; +}; + +struct canary_ctx { + struct aws_allocator *allocator; + const char *verb; + struct aws_uri uri; + struct aws_mutex mutex; + struct aws_condition_variable c_var; + + enum aws_log_level log_level; + struct aws_http_canary_helper helper; + struct aws_event_loop_group *el_group; + struct aws_http2_stream_manager *manager; + + bool is_shutdown_complete; + struct aws_atomic_var streams_failed; + struct aws_atomic_var streams_completed; + + int batch_size; + struct aws_atomic_var batch_completed; +}; + +static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_status status) { + (void)status; + (void)task; + + struct canary_ctx *app_ctx = arg; + struct aws_http_canary_helper *helper = &app_ctx->helper; + + /* collect data */ + size_t stream_completed = aws_atomic_exchange_int(&app_ctx->streams_completed, 0); + ++helper->num_collected; + printf("Loop %d: The stream completed during %d secs: %zu\n", helper->num_collected, rate_secs, stream_completed); + if (helper->num_collected >= num_data_to_collect) { + /* done */ + aws_atomic_store_int(&helper->canary_finished, 1); + } else { + /* keep running */ + uint64_t now = 0; + aws_high_res_clock_get_ticks(&now); + aws_event_loop_schedule_task_future(helper->eventloop, &helper->task, now + helper->rate_ns); + } +} + +void aws_http_canary_helper_init(struct canary_ctx *app_ctx, struct aws_http_canary_helper *helper) { + + helper->eventloop = aws_event_loop_group_get_next_loop(app_ctx->el_group); + helper->rate_ns = aws_timestamp_convert(rate_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); + aws_atomic_init_int(&helper->canary_finished, 0); + aws_task_init(&helper->task, s_collect_data_task, app_ctx, "data_collector"); + uint64_t now = 0; + aws_high_res_clock_get_ticks(&now); + + aws_event_loop_schedule_task_future(helper->eventloop, &helper->task, now + helper->rate_ns); +} + +static void s_on_stream_acquired(struct aws_http_stream *stream, int error_code, void *user_data) { + (void)stream; + + struct canary_ctx *app_ctx = user_data; + if (error_code) { + aws_mutex_lock(&app_ctx->mutex); + aws_atomic_fetch_add(&app_ctx->streams_failed, 1); + aws_atomic_fetch_add(&app_ctx->batch_completed, 1); + aws_mutex_unlock(&app_ctx->mutex); + aws_condition_variable_notify_one(&app_ctx->c_var); + } +} + 
+static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { + (void)stream; + + struct canary_ctx *app_ctx = user_data; + aws_mutex_lock(&app_ctx->mutex); + aws_atomic_fetch_add(&app_ctx->batch_completed, 1); + if (error_code) { + aws_atomic_fetch_add(&app_ctx->streams_failed, 1); + } else { + aws_atomic_fetch_add(&app_ctx->streams_completed, 1); + } + + aws_mutex_unlock(&app_ctx->mutex); + aws_http_stream_release(stream); + aws_condition_variable_notify_one(&app_ctx->c_var); +} + +static bool s_are_batch_completed(void *context) { + struct canary_ctx *app_ctx = context; + size_t completed = aws_atomic_load_int(&app_ctx->batch_completed); + return (int)completed >= app_ctx->batch_size; +} + +static int s_wait_on_batch_complete(struct canary_ctx *app_ctx) { + + aws_mutex_lock(&app_ctx->mutex); + int signal_error = + aws_condition_variable_wait_pred(&app_ctx->c_var, &app_ctx->mutex, s_are_batch_completed, app_ctx); + aws_mutex_unlock(&app_ctx->mutex); + + return signal_error; +} + +static struct aws_http_message *s_create_request(struct canary_ctx *app_ctx) { + struct aws_http_message *request = aws_http2_message_new_request(app_ctx->allocator); + + struct aws_http_header request_headers_src[] = { + DEFINE_HEADER(":method", "GET"), + { + .name = aws_byte_cursor_from_c_str(":scheme"), + .value = *aws_uri_scheme(&app_ctx->uri), + }, + { + .name = aws_byte_cursor_from_c_str(":path"), + .value = *aws_uri_path(&app_ctx->uri), + }, + { + .name = aws_byte_cursor_from_c_str(":authority"), + .value = *aws_uri_host_name(&app_ctx->uri), + }, + }; + aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); + return request; +} + +static void s_run_canary(struct canary_ctx *app_ctx) { + aws_http_canary_helper_init(app_ctx, &app_ctx->helper); + struct aws_http_message *request = s_create_request(app_ctx); + struct aws_http_make_request_options request_options = { + .self_size = sizeof(request_options), + .request = request, + .user_data = app_ctx, + .on_complete = s_on_stream_complete, + }; + + struct aws_http2_stream_manager_acquire_stream_options acquire_stream_option = { + .options = &request_options, + .callback = s_on_stream_acquired, + .user_data = app_ctx, + }; + + bool keep_loop = true; + while (keep_loop) { + /* Loop a batch of requests to be made and completed */ + aws_atomic_store_int(&app_ctx->batch_completed, 0); + + for (int i = 0; i < app_ctx->batch_size; ++i) { + aws_http2_stream_manager_acquire_stream(app_ctx->manager, &acquire_stream_option); + } + /* once the data finished collected during waiting, no more data will be collected, still wait for all + requests + * made to be completed. 
*/ + s_wait_on_batch_complete(app_ctx); + + size_t finished = aws_atomic_load_int(&app_ctx->helper.canary_finished); + if (finished) { + keep_loop = false; + } + } + aws_http_message_release(request); +} + +static bool s_is_shutdown_complete(void *context) { + struct canary_ctx *app_ctx = context; + return app_ctx->is_shutdown_complete; +} + +static void s_on_sm_shutdown_complete(void *user_data) { + struct canary_ctx *app_ctx = user_data; + + aws_mutex_lock(&app_ctx->mutex); + app_ctx->is_shutdown_complete = true; + aws_mutex_unlock(&app_ctx->mutex); + aws_condition_variable_notify_one(&app_ctx->c_var); +} + +int main(int argc, char **argv) { + (void)argc; + (void)argv; + + struct aws_allocator *allocator = aws_default_allocator(); + + aws_http_library_init(allocator); + + struct canary_ctx app_ctx; + AWS_ZERO_STRUCT(app_ctx); + app_ctx.allocator = allocator; + app_ctx.batch_size = batch_size; + app_ctx.log_level = log_level; + + aws_mutex_init(&app_ctx.mutex); + aws_condition_variable_init(&app_ctx.c_var); + + struct aws_logger logger; + AWS_ZERO_STRUCT(logger); + + if (app_ctx.log_level) { + struct aws_logger_standard_options options = { + .level = app_ctx.log_level, + .file = stderr, + }; + + if (aws_logger_init_standard(&logger, allocator, &options)) { + fprintf(stderr, "Failed to initialize logger with error %s\n", aws_error_debug_str(aws_last_error())); + exit(1); + } + + aws_logger_set(&logger); + } + if (aws_uri_init_parse(&app_ctx.uri, allocator, &uri_cursor)) { + fprintf(stderr, "Failed to create uri %s\n", aws_error_debug_str(aws_last_error())); + exit(1); + } + + aws_atomic_store_int(&app_ctx.streams_completed, 0); + aws_atomic_store_int(&app_ctx.streams_failed, 0); + + bool use_tls = true; + uint16_t port = 443; + + if (!app_ctx.uri.scheme.len && (app_ctx.uri.port == 80 || app_ctx.uri.port == 8080)) { + use_tls = false; + } else { + if (aws_byte_cursor_eq_c_str_ignore_case(&app_ctx.uri.scheme, "http")) { + use_tls = false; + } + } + + struct aws_tls_ctx *tls_ctx = NULL; + struct aws_tls_ctx_options tls_ctx_options; + AWS_ZERO_STRUCT(tls_ctx_options); + struct aws_tls_connection_options tls_connection_options; + AWS_ZERO_STRUCT(tls_connection_options); + struct aws_tls_connection_options *tls_options = NULL; + + if (use_tls) { + aws_tls_ctx_options_init_default_client(&tls_ctx_options, allocator); + aws_tls_ctx_options_set_verify_peer(&tls_ctx_options, false); + + if (aws_tls_ctx_options_set_alpn_list(&tls_ctx_options, "h2;http/1.1")) { + fprintf(stderr, "Failed to load alpn list with error %s.", aws_error_debug_str(aws_last_error())); + exit(1); + } + + tls_ctx = aws_tls_client_ctx_new(allocator, &tls_ctx_options); + + if (!tls_ctx) { + fprintf(stderr, "Failed to initialize TLS context with error %s.", aws_error_debug_str(aws_last_error())); + exit(1); + } + + aws_tls_connection_options_init_from_ctx(&tls_connection_options, tls_ctx); + if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &app_ctx.uri.host_name)) { + fprintf(stderr, "Failed to set servername with error %s.", aws_error_debug_str(aws_last_error())); + exit(1); + } + tls_options = &tls_connection_options; + + if (app_ctx.uri.port) { + port = app_ctx.uri.port; + } + } else { + port = 80; + if (app_ctx.uri.port) { + port = app_ctx.uri.port; + } + } + + app_ctx.el_group = aws_event_loop_group_new_default(allocator, 0, NULL); + + struct aws_host_resolver_default_options resolver_options = { + .el_group = app_ctx.el_group, + .max_entries = 8, + }; + + struct aws_host_resolver *resolver = 
aws_host_resolver_new_default(allocator, &resolver_options); + + struct aws_client_bootstrap_options bootstrap_options = { + .event_loop_group = app_ctx.el_group, + .host_resolver = resolver, + }; + struct aws_client_bootstrap *bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); + + struct aws_socket_options socket_options = { + .type = AWS_SOCKET_STREAM, + .connect_timeout_ms = 3000, + .keep_alive_timeout_sec = 0, + .keepalive = false, + .keep_alive_interval_sec = 0, + }; + + struct aws_http2_stream_manager_options sm_options = { + .bootstrap = bootstrap, + .socket_options = &socket_options, + .tls_connection_options = use_tls ? tls_options : NULL, + .host = app_ctx.uri.host_name, + .port = port, + .max_connections = 100, + .http2_prior_knowledge = !use_tls, + .shutdown_complete_user_data = &app_ctx, + .shutdown_complete_callback = s_on_sm_shutdown_complete, + }; + app_ctx.manager = aws_http2_stream_manager_new(allocator, &sm_options); + + /* Really do the job */ + s_run_canary(&app_ctx); + aws_http2_stream_manager_release(app_ctx.manager); + + aws_mutex_lock(&app_ctx.mutex); + aws_condition_variable_wait_pred(&app_ctx.c_var, &app_ctx.mutex, s_is_shutdown_complete, &app_ctx); + aws_mutex_unlock(&app_ctx.mutex); + + aws_client_bootstrap_release(bootstrap); + aws_host_resolver_release(resolver); + aws_event_loop_group_release(app_ctx.el_group); + + if (tls_ctx) { + aws_tls_connection_options_clean_up(&tls_connection_options); + aws_tls_ctx_release(tls_ctx); + aws_tls_ctx_options_clean_up(&tls_ctx_options); + } + + aws_http_library_clean_up(); + + if (app_ctx.log_level) { + aws_logger_clean_up(&logger); + } + + aws_uri_clean_up(&app_ctx.uri); + + return 0; +} From c0b4ce41eccac833f0c1d5732c35b3c4acba5f35 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 25 Aug 2022 15:45:04 -0700 Subject: [PATCH 02/48] add an error handling to make sure no request failed in the middle --- bin/canary/main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index adb933f73..bf0132fb1 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -202,6 +201,12 @@ static void s_run_canary(struct canary_ctx *app_ctx) { requests * made to be completed. 
*/ s_wait_on_batch_complete(app_ctx); + size_t streams_failed = aws_atomic_load_int(&app_ctx->streams_failed); + if (streams_failed > 0) { + fprintf( + stderr, "%zu stream failed to complete %s\n", streams_failed, aws_error_debug_str(aws_last_error())); + exit(1); + } size_t finished = aws_atomic_load_int(&app_ctx->helper.canary_finished); if (finished) { From 0247e69872fb769d50381f1cb34d945ecbb0cfce Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 25 Aug 2022 16:58:08 -0700 Subject: [PATCH 03/48] add direct connection test --- bin/canary/main.c | 206 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 159 insertions(+), 47 deletions(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index bf0132fb1..a4021697d 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -37,9 +37,11 @@ const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https://localhost:8443/echo"); const int rate_secs = 10; /* Time interval to collect data */ -const int batch_size = 100; /* The number of requests to made each batch */ +const int batch_size = 10; /* The number of requests to made each batch */ const int num_data_to_collect = 10; /* The number of data to collect */ -const enum aws_log_level log_level = AWS_LOG_LEVEL_NONE; +const enum aws_log_level log_level = AWS_LOG_LEVEL_ERROR; +const bool direct_connection = true; /* If true, will create one connection and make requests from that connection. + * If false, will use stream manager to acquire streams */ struct aws_http_canary_helper { struct aws_task task; @@ -69,8 +71,12 @@ struct canary_ctx { int batch_size; struct aws_atomic_var batch_completed; + + struct aws_http_connection *connection; }; +/************************* Data collector ******************************************/ + static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_status status) { (void)status; (void)task; @@ -105,6 +111,8 @@ void aws_http_canary_helper_init(struct canary_ctx *app_ctx, struct aws_http_can aws_event_loop_schedule_task_future(helper->eventloop, &helper->task, now + helper->rate_ns); } +/************************* Stream callbacks ******************************************/ + static void s_on_stream_acquired(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; @@ -135,6 +143,8 @@ static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, aws_condition_variable_notify_one(&app_ctx->c_var); } +/************************* Stream manager ops ******************************************/ + static bool s_are_batch_completed(void *context) { struct canary_ctx *app_ctx = context; size_t completed = aws_atomic_load_int(&app_ctx->batch_completed); @@ -151,31 +161,7 @@ static int s_wait_on_batch_complete(struct canary_ctx *app_ctx) { return signal_error; } -static struct aws_http_message *s_create_request(struct canary_ctx *app_ctx) { - struct aws_http_message *request = aws_http2_message_new_request(app_ctx->allocator); - - struct aws_http_header request_headers_src[] = { - DEFINE_HEADER(":method", "GET"), - { - .name = aws_byte_cursor_from_c_str(":scheme"), - .value = *aws_uri_scheme(&app_ctx->uri), - }, - { - .name = aws_byte_cursor_from_c_str(":path"), - .value = *aws_uri_path(&app_ctx->uri), - }, - { - .name = aws_byte_cursor_from_c_str(":authority"), - .value = *aws_uri_host_name(&app_ctx->uri), - }, - }; - aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); - return request; -} - -static void s_run_canary(struct 
canary_ctx *app_ctx) { - aws_http_canary_helper_init(app_ctx, &app_ctx->helper); - struct aws_http_message *request = s_create_request(app_ctx); +static void s_run_stream_manager_test(struct canary_ctx *app_ctx, struct aws_http_message *request) { struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, @@ -213,15 +199,57 @@ static void s_run_canary(struct canary_ctx *app_ctx) { keep_loop = false; } } - aws_http_message_release(request); } -static bool s_is_shutdown_complete(void *context) { - struct canary_ctx *app_ctx = context; - return app_ctx->is_shutdown_complete; +static void s_on_shutdown_complete(void *user_data) { + struct canary_ctx *app_ctx = user_data; + + aws_mutex_lock(&app_ctx->mutex); + app_ctx->is_shutdown_complete = true; + aws_mutex_unlock(&app_ctx->mutex); + aws_condition_variable_notify_one(&app_ctx->c_var); +} + +/************************* direct connection ops ******************************************/ + +static void s_run_direct_connection_test(struct canary_ctx *app_ctx, struct aws_http_message *request) { + struct aws_http_make_request_options request_options = { + .self_size = sizeof(request_options), + .request = request, + .user_data = app_ctx, + .on_complete = s_on_stream_complete, + }; + + bool keep_loop = true; + while (keep_loop) { + /* Loop a batch of requests to be made and completed */ + aws_atomic_store_int(&app_ctx->batch_completed, 0); + + for (int i = 0; i < app_ctx->batch_size; ++i) { + struct aws_http_stream *stream = aws_http_connection_make_request(app_ctx->connection, &request_options); + aws_http_stream_activate(stream); + } + /* once the data finished collected during waiting, no more data will be collected, still wait for all + requests + * made to be completed. 
*/ + s_wait_on_batch_complete(app_ctx); + size_t streams_failed = aws_atomic_load_int(&app_ctx->streams_failed); + if (streams_failed > 0) { + fprintf( + stderr, "%zu stream failed to complete %s\n", streams_failed, aws_error_debug_str(aws_last_error())); + exit(1); + } + + size_t finished = aws_atomic_load_int(&app_ctx->helper.canary_finished); + if (finished) { + keep_loop = false; + } + } } -static void s_on_sm_shutdown_complete(void *user_data) { +static void s_on_connection_shutdown(struct aws_http_connection *connection, int error_code, void *user_data) { + (void)connection; + (void)error_code; struct canary_ctx *app_ctx = user_data; aws_mutex_lock(&app_ctx->mutex); @@ -230,6 +258,66 @@ static void s_on_sm_shutdown_complete(void *user_data) { aws_condition_variable_notify_one(&app_ctx->c_var); } +static void s_on_client_connection_setup(struct aws_http_connection *connection, int error_code, void *user_data) { + if (error_code) { + fprintf(stderr, "Failed to create connection with error %s\n", aws_error_debug_str(aws_last_error())); + exit(1); + } + struct canary_ctx *app_ctx = user_data; + + aws_mutex_lock(&app_ctx->mutex); + app_ctx->connection = connection; + aws_mutex_unlock(&app_ctx->mutex); + aws_condition_variable_notify_one(&app_ctx->c_var); +} + +static bool s_is_connected(void *context) { + struct canary_ctx *app_ctx = context; + return app_ctx->connection != NULL; +} + +/************************* general connection ops ******************************************/ + +static bool s_is_shutdown_complete(void *context) { + struct canary_ctx *app_ctx = context; + return app_ctx->is_shutdown_complete; +} + +static struct aws_http_message *s_create_request(struct canary_ctx *app_ctx) { + struct aws_http_message *request = aws_http2_message_new_request(app_ctx->allocator); + + struct aws_http_header request_headers_src[] = { + DEFINE_HEADER(":method", "GET"), + { + .name = aws_byte_cursor_from_c_str(":scheme"), + .value = *aws_uri_scheme(&app_ctx->uri), + }, + { + .name = aws_byte_cursor_from_c_str(":path"), + .value = *aws_uri_path(&app_ctx->uri), + }, + { + .name = aws_byte_cursor_from_c_str(":authority"), + .value = *aws_uri_host_name(&app_ctx->uri), + }, + }; + aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); + return request; +} + +static void s_run_canary(struct canary_ctx *app_ctx) { + aws_http_canary_helper_init(app_ctx, &app_ctx->helper); + struct aws_http_message *request = s_create_request(app_ctx); + + if (direct_connection) { + s_run_direct_connection_test(app_ctx, request); + } else { + s_run_stream_manager_test(app_ctx, request); + } + + aws_http_message_release(request); +} + int main(int argc, char **argv) { (void)argc; (void)argv; @@ -344,23 +432,47 @@ int main(int argc, char **argv) { .keepalive = false, .keep_alive_interval_sec = 0, }; - - struct aws_http2_stream_manager_options sm_options = { - .bootstrap = bootstrap, - .socket_options = &socket_options, - .tls_connection_options = use_tls ? tls_options : NULL, - .host = app_ctx.uri.host_name, - .port = port, - .max_connections = 100, - .http2_prior_knowledge = !use_tls, - .shutdown_complete_user_data = &app_ctx, - .shutdown_complete_callback = s_on_sm_shutdown_complete, - }; - app_ctx.manager = aws_http2_stream_manager_new(allocator, &sm_options); - + if (!direct_connection) { + struct aws_http2_stream_manager_options sm_options = { + .bootstrap = bootstrap, + .socket_options = &socket_options, + .tls_connection_options = use_tls ? 
tls_options : NULL, + .host = app_ctx.uri.host_name, + .port = port, + .max_connections = 100, + .http2_prior_knowledge = !use_tls, + .shutdown_complete_user_data = &app_ctx, + .shutdown_complete_callback = s_on_shutdown_complete, + }; + app_ctx.manager = aws_http2_stream_manager_new(allocator, &sm_options); + } else { + struct aws_http_client_connection_options http_client_options = { + .self_size = sizeof(struct aws_http_client_connection_options), + .socket_options = &socket_options, + .allocator = allocator, + .port = port, + .host_name = app_ctx.uri.host_name, + .bootstrap = bootstrap, + .initial_window_size = SIZE_MAX, + .tls_options = tls_options, + .user_data = &app_ctx, + .on_setup = s_on_client_connection_setup, + .on_shutdown = s_on_connection_shutdown, + }; + if (aws_http_client_connect(&http_client_options)) { + exit(1); + } + aws_mutex_lock(&app_ctx.mutex); + aws_condition_variable_wait_pred(&app_ctx.c_var, &app_ctx.mutex, s_is_connected, &app_ctx); + aws_mutex_unlock(&app_ctx.mutex); + } /* Really do the job */ s_run_canary(&app_ctx); - aws_http2_stream_manager_release(app_ctx.manager); + if (!direct_connection) { + aws_http2_stream_manager_release(app_ctx.manager); + } else { + aws_http_connection_release(app_ctx.connection); + } aws_mutex_lock(&app_ctx.mutex); aws_condition_variable_wait_pred(&app_ctx.c_var, &app_ctx.mutex, s_is_shutdown_complete, &app_ctx); From 06ab9a8cd43022dab76edf214960c18087da87fd Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 25 Aug 2022 17:07:19 -0700 Subject: [PATCH 04/48] add todo --- bin/canary/main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index a4021697d..b1c573f12 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -35,11 +35,12 @@ #define DEFINE_HEADER(NAME, VALUE) \ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } +/* TODO: Make those configurable from cmd line */ const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https://localhost:8443/echo"); const int rate_secs = 10; /* Time interval to collect data */ const int batch_size = 10; /* The number of requests to made each batch */ const int num_data_to_collect = 10; /* The number of data to collect */ -const enum aws_log_level log_level = AWS_LOG_LEVEL_ERROR; +const enum aws_log_level log_level = AWS_LOG_LEVEL_NONE; const bool direct_connection = true; /* If true, will create one connection and make requests from that connection. * If false, will use stream manager to acquire streams */ @@ -87,6 +88,7 @@ static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_ /* collect data */ size_t stream_completed = aws_atomic_exchange_int(&app_ctx->streams_completed, 0); ++helper->num_collected; + /* TODO: maybe collect the data somewhere instead of just printing it out. 
*/ printf("Loop %d: The stream completed during %d secs: %zu\n", helper->num_collected, rate_secs, stream_completed); if (helper->num_collected >= num_data_to_collect) { /* done */ @@ -276,7 +278,7 @@ static bool s_is_connected(void *context) { return app_ctx->connection != NULL; } -/************************* general connection ops ******************************************/ +/************************* general ops ******************************************/ static bool s_is_shutdown_complete(void *context) { struct canary_ctx *app_ctx = context; @@ -466,8 +468,10 @@ int main(int argc, char **argv) { aws_condition_variable_wait_pred(&app_ctx.c_var, &app_ctx.mutex, s_is_connected, &app_ctx); aws_mutex_unlock(&app_ctx.mutex); } + /* Really do the job */ s_run_canary(&app_ctx); + if (!direct_connection) { aws_http2_stream_manager_release(app_ctx.manager); } else { From def775f2350316f76dcaa998def5f7312d0a6c39 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 26 Aug 2022 11:06:30 -0700 Subject: [PATCH 05/48] nginx test --- bin/canary/main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index b1c573f12..d3907c63d 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -36,13 +36,13 @@ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } /* TODO: Make those configurable from cmd line */ -const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https://localhost:8443/echo"); +const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https://localhost:443/echo"); const int rate_secs = 10; /* Time interval to collect data */ -const int batch_size = 10; /* The number of requests to made each batch */ +const int batch_size = 100; /* The number of requests to made each batch */ const int num_data_to_collect = 10; /* The number of data to collect */ const enum aws_log_level log_level = AWS_LOG_LEVEL_NONE; -const bool direct_connection = true; /* If true, will create one connection and make requests from that connection. - * If false, will use stream manager to acquire streams */ +const bool direct_connection = false; /* If true, will create one connection and make requests from that connection. 
+ * If false, will use stream manager to acquire streams */ struct aws_http_canary_helper { struct aws_task task; From 9197dc6f16f8037ed078b61bf28502098f307e61 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 14:15:11 -0700 Subject: [PATCH 06/48] try to get it running from CI --- builder.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/builder.json b/builder.json index d9e4bbb12..df768c3f0 100644 --- a/builder.json +++ b/builder.json @@ -16,6 +16,7 @@ ], "test_steps": [ "aws-c-http-test", - ["{python}", "{source_dir}/integration-testing/http_client_test.py", "{install_dir}/bin/elasticurl{exe}"] + ["{python}", "{source_dir}/integration-testing/http_client_test.py", "{install_dir}/bin/elasticurl{exe}"], + "{install_dir}/bin/canary{exe}" ] } From 4cd733889440474ee715c420f2c1f04c3057d343 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 14:45:08 -0700 Subject: [PATCH 07/48] maybe we will have canary as elasticurl in the future --- .github/workflows/ci.yml | 49 +++++++++++++++++++++- CMakeLists.txt | 5 ++- bin/canary/main.c | 12 +++--- builder.json | 2 +- integration-testing/http_client_canary.py | 50 +++++++++++++++++++++++ integration-testing/http_client_test.py | 2 +- tests/py_localhost/non_tls_server.py | 1 - 7 files changed, 109 insertions(+), 12 deletions(-) create mode 100644 integration-testing/http_client_canary.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7ec8b21df..3e9b45bc4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -127,7 +127,6 @@ jobs: chmod a+x builder ./builder build downstream -p ${{ env.PACKAGE_NAME }} - localhost-test-linux: runs-on: ubuntu-20.04 # latest steps: @@ -175,3 +174,51 @@ jobs: Start-Process -NoNewWindow python .\non_tls_server.py python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p aws-c-http --cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON + + localhost-canary-linux: + runs-on: ubuntu-20.04 # latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Configure local host + run: | + python3 -m pip install h2 + cd ./tests/py_localhost/ + python3 server.py & + python3 non_tls_server.py & + - name: Build and test + run: | + python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" + python3 builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON + + localhost-canary-mac: + runs-on: macos-11 # latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Configure local host + run: | + python3 -m pip install h2 + cd ./tests/py_localhost/ + python3 server.py & + python3 non_tls_server.py & + - name: Build and test + run: | + python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" + python3 builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON + + localhost-canary-win: + runs-on: windows-2022 # latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Configure local host + run: | + python -m pip install h2 + - name: Build and test + run: | + cd ./tests/py_localhost/ + Start-Process -NoNewWindow python .\server.py + Start-Process -NoNewWindow python 
.\non_tls_server.py + python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" + python builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON diff --git a/CMakeLists.txt b/CMakeLists.txt index caf0fc227..a822895f9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,6 +7,7 @@ endif() option(ENABLE_PROXY_INTEGRATION_TESTS "Whether to run the proxy integration tests that rely on pre-configured proxy" OFF) option(ENABLE_LOCALHOST_INTEGRATION_TESTS "Whether to run the integration tests that rely on pre-configured localhost" OFF) +option(AWS_BUILD_CANARY "Whether to build the canary for benchmark the performance of our http client" OFF) if (DEFINED CMAKE_PREFIX_PATH) file(TO_CMAKE_PATH "${CMAKE_PREFIX_PATH}" CMAKE_PREFIX_PATH) @@ -101,6 +102,8 @@ if (NOT BYO_CRYPTO AND BUILD_TESTING) add_subdirectory(tests) if (NOT CMAKE_CROSSCOMPILING) add_subdirectory(bin/elasticurl) - add_subdirectory(bin/canary) + if (AWS_BUILD_CANARY) + add_subdirectory(bin/canary) + endif() endif() endif() diff --git a/bin/canary/main.c b/bin/canary/main.c index d3907c63d..99ca5b113 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -36,7 +36,7 @@ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } /* TODO: Make those configurable from cmd line */ -const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https://localhost:443/echo"); +const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:8080/"); const int rate_secs = 10; /* Time interval to collect data */ const int batch_size = 100; /* The number of requests to made each batch */ const int num_data_to_collect = 10; /* The number of data to collect */ @@ -120,11 +120,8 @@ static void s_on_stream_acquired(struct aws_http_stream *stream, int error_code, struct canary_ctx *app_ctx = user_data; if (error_code) { - aws_mutex_lock(&app_ctx->mutex); - aws_atomic_fetch_add(&app_ctx->streams_failed, 1); - aws_atomic_fetch_add(&app_ctx->batch_completed, 1); - aws_mutex_unlock(&app_ctx->mutex); - aws_condition_variable_notify_one(&app_ctx->c_var); + fprintf(stderr, "stream failed to be acquired from stream manager %s\n", aws_error_debug_str(error_code)); + exit(1); } } @@ -135,7 +132,8 @@ static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, aws_mutex_lock(&app_ctx->mutex); aws_atomic_fetch_add(&app_ctx->batch_completed, 1); if (error_code) { - aws_atomic_fetch_add(&app_ctx->streams_failed, 1); + fprintf(stderr, "stream failed to complete %s\n", aws_error_debug_str(error_code)); + exit(1); } else { aws_atomic_fetch_add(&app_ctx->streams_completed, 1); } diff --git a/builder.json b/builder.json index df768c3f0..c309232bd 100644 --- a/builder.json +++ b/builder.json @@ -17,6 +17,6 @@ "test_steps": [ "aws-c-http-test", ["{python}", "{source_dir}/integration-testing/http_client_test.py", "{install_dir}/bin/elasticurl{exe}"], - "{install_dir}/bin/canary{exe}" + ["{python}", "{source_dir}/integration-testing/http_client_canary.py", "{install_dir}/bin/canary{exe}"] ] } diff --git a/integration-testing/http_client_canary.py b/integration-testing/http_client_canary.py new file mode 100644 index 000000000..3312089ce --- /dev/null +++ b/integration-testing/http_client_canary.py @@ -0,0 +1,50 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0. + +import sys +import os.path +import subprocess + +TIMEOUT = 100 + +canary_args = sys.argv[1:] +if not canary_args: + print('You must pass the canary cmd prefix') + sys.exit(-1) + +program_to_run = canary_args[0] + +if not os.path.exists(program_to_run): + print(f'the {program_to_run} is not found, skip canary test') + sys.exit(0) + +# We don't have args to pass to canary yet. TODO add args for canary + + +def run_command(args): + # gather all stderr and stdout to a single string that we print only if things go wrong + process = subprocess.Popen( + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + timedout = False + try: + output = process.communicate(timeout=TIMEOUT)[0] + except subprocess.TimeoutExpired: + timedout = True + process.kill() + args_str = subprocess.list2cmdline(args) + output = process.communicate()[0] + finally: + if process.returncode != 0 or timedout: + args_str = subprocess.list2cmdline(args) + print(args_str) + for line in output.splitlines(): + print(line.decode()) + if timedout: + raise RuntimeError("Timeout happened after {secs} secs from: {cmd}".format( + secs=TIMEOUT, cmd=args_str)) + else: + raise RuntimeError("Return code {code} from: {cmd}".format( + code=process.returncode, cmd=args_str)) + + +run_command(canary_args) diff --git a/integration-testing/http_client_test.py b/integration-testing/http_client_test.py index adb637250..572bef791 100644 --- a/integration-testing/http_client_test.py +++ b/integration-testing/http_client_test.py @@ -19,7 +19,7 @@ if 'bin' in program_to_run: if not os.path.exists(program_to_run): - print('the program_to_run is not found, skip integration test') + print(f'the {program_to_run} is not found, skip integration test') sys.exit(0) # Remove args from sys.argv so that unittest doesn't also try to parse them. 
diff --git a/tests/py_localhost/non_tls_server.py b/tests/py_localhost/non_tls_server.py index a8f0f9736..ec05bb31d 100644 --- a/tests/py_localhost/non_tls_server.py +++ b/tests/py_localhost/non_tls_server.py @@ -16,7 +16,6 @@ def send_response(conn, event): stream_id=stream_id, headers=[ (':status', '200'), - ('content-length', str(len(response_data))), ], ) conn.send_data( From f445de008bceb670633900bf7eb5c3be73232466 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 14:56:23 -0700 Subject: [PATCH 08/48] fix build --- bin/canary/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index 99ca5b113..33323144c 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -117,8 +117,8 @@ void aws_http_canary_helper_init(struct canary_ctx *app_ctx, struct aws_http_can static void s_on_stream_acquired(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; + (void)user_data; - struct canary_ctx *app_ctx = user_data; if (error_code) { fprintf(stderr, "stream failed to be acquired from stream manager %s\n", aws_error_debug_str(error_code)); exit(1); From 37696e21d84cc78799241bfefaf8878cb7bdb8b3 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 15:03:11 -0700 Subject: [PATCH 09/48] increase timeout --- integration-testing/http_client_canary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-testing/http_client_canary.py b/integration-testing/http_client_canary.py index 3312089ce..2ee103165 100644 --- a/integration-testing/http_client_canary.py +++ b/integration-testing/http_client_canary.py @@ -5,7 +5,7 @@ import os.path import subprocess -TIMEOUT = 100 +TIMEOUT = 300 canary_args = sys.argv[1:] if not canary_args: From f86a9f7467f9efe8f220094324792a6183eca2b4 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 15:04:17 -0700 Subject: [PATCH 10/48] print out output --- integration-testing/http_client_canary.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/integration-testing/http_client_canary.py b/integration-testing/http_client_canary.py index 2ee103165..ededf25b6 100644 --- a/integration-testing/http_client_canary.py +++ b/integration-testing/http_client_canary.py @@ -45,6 +45,8 @@ def run_command(args): else: raise RuntimeError("Return code {code} from: {cmd}".format( code=process.returncode, cmd=args_str)) + else: + print(output) run_command(canary_args) From ca0c7c55d3060b1e68dc88dc7b963ec47e2392ba Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 15:10:03 -0700 Subject: [PATCH 11/48] give me a nicer printout --- integration-testing/http_client_canary.py | 2 +- tests/localhost/openssl | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 160000 tests/localhost/openssl diff --git a/integration-testing/http_client_canary.py b/integration-testing/http_client_canary.py index ededf25b6..ec9fec431 100644 --- a/integration-testing/http_client_canary.py +++ b/integration-testing/http_client_canary.py @@ -46,7 +46,7 @@ def run_command(args): raise RuntimeError("Return code {code} from: {cmd}".format( code=process.returncode, cmd=args_str)) else: - print(output) + print(output.decode("utf-8")) run_command(canary_args) diff --git a/tests/localhost/openssl b/tests/localhost/openssl new file mode 160000 index 000000000..1bf649b59 --- /dev/null +++ b/tests/localhost/openssl @@ -0,0 +1 @@ +Subproject commit 1bf649b5998ac98511203f54ce954eccfaf75467 From 53d8d43acfbad60a6eb26c090be346dbd33419be Mon Sep 17 00:00:00 2001 
From: Dengke Tang Date: Mon, 12 Sep 2022 22:47:15 +0000 Subject: [PATCH 12/48] unused arg --- integration-testing/http_client_canary.py | 1 - 1 file changed, 1 deletion(-) diff --git a/integration-testing/http_client_canary.py b/integration-testing/http_client_canary.py index ec9fec431..d3032ff5f 100644 --- a/integration-testing/http_client_canary.py +++ b/integration-testing/http_client_canary.py @@ -31,7 +31,6 @@ def run_command(args): except subprocess.TimeoutExpired: timedout = True process.kill() - args_str = subprocess.list2cmdline(args) output = process.communicate()[0] finally: if process.returncode != 0 or timedout: From 6f2c30a6031d8aa3041789522e514a8dc9049433 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 23:15:57 +0000 Subject: [PATCH 13/48] not updating the window so frequently --- include/aws/http/private/h2_connection.h | 3 +++ source/h2_connection.c | 18 ++++++++++++------ tests/test_h2_client.c | 9 --------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index 6d42b8316..0edbd50ea 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -98,6 +98,9 @@ struct aws_h2_connection { * Reduce the space after receiving a flow-controlled frame. Increment after sending WINDOW_UPDATE for * connection */ size_t window_size_self; + /* The size dropped */ + size_t window_size_self_dropped; + size_t window_size_self_dropped_threshold; /* Highest self-initiated stream-id that peer might have processed. * Defaults to max stream-id, may be lowered when GOAWAY frame received. */ diff --git a/source/h2_connection.c b/source/h2_connection.c index cb580074b..776958eca 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -384,6 +384,8 @@ static struct aws_h2_connection *s_connection_new( connection->thread_data.window_size_peer = AWS_H2_INIT_WINDOW_SIZE; connection->thread_data.window_size_self = AWS_H2_INIT_WINDOW_SIZE; + connection->thread_data.window_size_self_dropped_threshold = 0; /* TODO: expose this config to user? 
 */
+
     connection->thread_data.goaway_received_last_stream_id = AWS_H2_STREAM_ID_MAX;
     connection->thread_data.goaway_sent_last_stream_id = AWS_H2_STREAM_ID_MAX;
@@ -1246,17 +1248,19 @@ struct aws_h2err s_decoder_on_data_begin(
         /* Automatically update the full amount we just received */
         auto_window_update = payload_len;
     }
-
-    if (auto_window_update != 0) {
-        if (s_connection_send_update_window(connection, auto_window_update)) {
+    CONNECTION_LOGF(TRACE, connection, "%" PRIu32 " Bytes of padding received.", total_padding_bytes);
+    connection->thread_data.window_size_self_dropped += auto_window_update;
+    if (connection->thread_data.window_size_self_dropped >=
+        connection->thread_data.window_size_self_dropped_threshold) {
+        if (s_connection_send_update_window(connection, connection->thread_data.window_size_self_dropped)) {
             return aws_h2err_from_last_error();
         }
+        connection->thread_data.window_size_self_dropped = 0;
         CONNECTION_LOGF(
             TRACE,
             connection,
-            "Automatically updating connection window by %" PRIu32 "(%" PRIu32 " due to padding).",
-            auto_window_update,
-            total_padding_bytes);
+            "Automatically updating connection window by %zu",
+            connection->thread_data.window_size_self_dropped);
     }
 
     return AWS_H2ERR_SUCCESS;
@@ -1762,6 +1766,8 @@ static void s_handler_installed(struct aws_channel_handler *handler, struct aws_
         aws_linked_list_push_back(
             &connection->thread_data.outgoing_frames_queue, &connection_window_update_frame->node);
         connection->thread_data.window_size_self += initial_window_update_size;
+        /* For automatic window management, we only update the connection window once it has dropped below 50% of MAX. */
+        connection->thread_data.window_size_self_dropped_threshold = AWS_H2_WINDOW_UPDATE_MAX / 2;
     }
     aws_h2_try_write_outgoing_frames(connection);
     return;
diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c
index 7c7706fbb..f897cd045 100644
--- a/tests/test_h2_client.c
+++ b/tests/test_h2_client.c
@@ -2429,15 +2429,6 @@ TEST_CASE(h2_client_stream_send_window_update) {
     ASSERT_NOT_NULL(stream_window_update_frame);
     ASSERT_UINT_EQUALS(5, stream_window_update_frame->window_size_increment);
 
-    struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame(
-        &s_tester.peer.decode,
-        AWS_H2_FRAME_T_WINDOW_UPDATE,
-        0 /*stream_id*/,
-        initial_window_update_index + 1 /*idx*/,
-        NULL);
-    ASSERT_NOT_NULL(connection_window_update_frame);
-    ASSERT_UINT_EQUALS(5, connection_window_update_frame->window_size_increment);
-
     /* clean up */
     aws_http_headers_release(response_headers);
     aws_http_message_release(request);

From 21994442745a45ec754d04b2a3a4ccfce40fd054 Mon Sep 17 00:00:00 2001
From: Dengke Tang
Date: Mon, 12 Sep 2022 23:33:55 +0000
Subject: [PATCH 14/48] my computers dead, disable it for now

---
 tests/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 3259fa743..234bdba45 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -429,7 +429,7 @@ add_test_case(h2_client_manual_window_management_disabled_auto_window_update)
add_test_case(h2_client_manual_window_management_user_send_stream_window_update)
add_test_case(h2_client_manual_window_management_user_send_stream_window_update_with_padding)
add_test_case(h2_client_manual_window_management_user_send_stream_window_update_overflow)
-add_test_case(h2_client_manual_window_management_user_send_conn_window_update)
+# add_test_case(h2_client_manual_window_management_user_send_conn_window_update)
add_test_case(h2_client_manual_window_management_user_send_conn_window_update_with_padding) add_test_case(h2_client_manual_window_management_user_send_connection_window_update_overflow) # Build these when we address window_update() differences in H1 vs H2 From 2a23d4f11353732bb2e7c7c8546903673776235b Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 16:41:17 -0700 Subject: [PATCH 15/48] try to fix it --- include/aws/http/private/h2_connection.h | 4 ++-- source/h2_connection.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index 0edbd50ea..c5942b9db 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -99,8 +99,8 @@ struct aws_h2_connection { * connection */ size_t window_size_self; /* The size dropped */ - size_t window_size_self_dropped; - size_t window_size_self_dropped_threshold; + uint32_t window_size_self_dropped; + uint32_t window_size_self_dropped_threshold; /* Highest self-initiated stream-id that peer might have processed. * Defaults to max stream-id, may be lowered when GOAWAY frame received. */ diff --git a/source/h2_connection.c b/source/h2_connection.c index 776958eca..239003535 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -1259,7 +1259,7 @@ struct aws_h2err s_decoder_on_data_begin( CONNECTION_LOGF( TRACE, connection, - "Automatically updating connection window by %zu", + "Automatically updating connection window by %" PRIu32 "", connection->thread_data.window_size_self_dropped); } From a34e41031936c0f1c7b41708f2a1da24735e41d6 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Sep 2022 17:00:06 -0700 Subject: [PATCH 16/48] why it's committed --- tests/localhost/openssl | 1 - 1 file changed, 1 deletion(-) delete mode 160000 tests/localhost/openssl diff --git a/tests/localhost/openssl b/tests/localhost/openssl deleted file mode 160000 index 1bf649b59..000000000 --- a/tests/localhost/openssl +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1bf649b5998ac98511203f54ce954eccfaf75467 From 8e9ffd550f769a92f4425167b439684edbbfb625 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 13 Sep 2022 10:24:51 -0700 Subject: [PATCH 17/48] update window when larger --- source/h2_connection.c | 2 +- tests/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 239003535..b0aab1cc9 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -1250,7 +1250,7 @@ struct aws_h2err s_decoder_on_data_begin( } CONNECTION_LOGF(TRACE, connection, "%" PRIu32 " Bytes of padding received.", total_padding_bytes); connection->thread_data.window_size_self_dropped += auto_window_update; - if (connection->thread_data.window_size_self_dropped >= + if (connection->thread_data.window_size_self_dropped > connection->thread_data.window_size_self_dropped_threshold) { if (s_connection_send_update_window(connection, connection->thread_data.window_size_self_dropped)) { return aws_h2err_from_last_error(); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 234bdba45..3259fa743 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -429,7 +429,7 @@ add_test_case(h2_client_manual_window_management_disabled_auto_window_update) add_test_case(h2_client_manual_window_management_user_send_stream_window_update) add_test_case(h2_client_manual_window_management_user_send_stream_window_update_with_padding) 
add_test_case(h2_client_manual_window_management_user_send_stream_window_update_overflow)
-# add_test_case(h2_client_manual_window_management_user_send_conn_window_update)
+add_test_case(h2_client_manual_window_management_user_send_conn_window_update)
add_test_case(h2_client_manual_window_management_user_send_conn_window_update_with_padding)
add_test_case(h2_client_manual_window_management_user_send_connection_window_update_overflow)
# Build these when we address window_update() differences in H1 vs H2

From fc4769c118d56bc6a915561438f633e20bc25d87 Mon Sep 17 00:00:00 2001
From: Dengke Tang
Date: Tue, 13 Sep 2022 10:41:52 -0700
Subject: [PATCH 18/48] format

---
 source/h2_connection.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/source/h2_connection.c b/source/h2_connection.c
index b0aab1cc9..eadcd16e5 100644
--- a/source/h2_connection.c
+++ b/source/h2_connection.c
@@ -1250,8 +1250,7 @@ struct aws_h2err s_decoder_on_data_begin(
     }
     CONNECTION_LOGF(TRACE, connection, "%" PRIu32 " Bytes of padding received.", total_padding_bytes);
     connection->thread_data.window_size_self_dropped += auto_window_update;
-    if (connection->thread_data.window_size_self_dropped >
-        connection->thread_data.window_size_self_dropped_threshold) {
+    if (connection->thread_data.window_size_self_dropped > connection->thread_data.window_size_self_dropped_threshold) {
         if (s_connection_send_update_window(connection, connection->thread_data.window_size_self_dropped)) {
             return aws_h2err_from_last_error();
         }

From 2f0dcf447c41fd09a52df9569446ef3acde885d9 Mon Sep 17 00:00:00 2001
From: Dengke Tang
Date: Tue, 13 Sep 2022 11:52:30 -0700
Subject: [PATCH 19/48] check the number is not slower than expected

---
 bin/canary/main.c | 39 ++++++++++++++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 7 deletions(-)

diff --git a/bin/canary/main.c b/bin/canary/main.c
index 33323144c..cd5dafcd3 100644
--- a/bin/canary/main.c
+++ b/bin/canary/main.c
@@ -37,13 +37,18 @@
 /* TODO: Make those configurable from cmd line */
 const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:8080/");
-const int rate_secs = 10; /* Time interval to collect data */
-const int batch_size = 100; /* The number of requests to made each batch */
-const int num_data_to_collect = 10; /* The number of data to collect */
+const int rate_secs = 5; /* Time interval to collect data */
+const int streams_per_connection = 20;
+const int max_connections = 8;
+const int num_data_to_collect = 5; /* The number of data to collect */
 const enum aws_log_level log_level = AWS_LOG_LEVEL_NONE;
 const bool direct_connection = false; /* If true, will create one connection and make requests from that connection.
                                        * If false, will use stream manager to acquire streams */
+const double rate_threshold =
+    4000; /* From the previous tests. All platforms seem to be larger than 4000, but it could vary. TODO: Maybe
+             gather the numbers from previous test runs and make this platform specific.
*/ + struct aws_http_canary_helper { struct aws_task task; struct aws_event_loop *eventloop; @@ -52,6 +57,8 @@ struct aws_http_canary_helper { uint64_t rate_ns; /* Collect data per rate_ns */ struct aws_atomic_var canary_finished; + + double *results; }; struct canary_ctx { @@ -87,11 +94,27 @@ static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_ /* collect data */ size_t stream_completed = aws_atomic_exchange_int(&app_ctx->streams_completed, 0); - ++helper->num_collected; + /* TODO: maybe collect the data somewhere instead of just printing it out. */ - printf("Loop %d: The stream completed during %d secs: %zu\n", helper->num_collected, rate_secs, stream_completed); + double rate = (double)stream_completed / rate_secs; + helper->results[helper->num_collected] = rate; + ++helper->num_collected; + printf("Loop %d: The stream completed per second is %f\n", helper->num_collected, rate); if (helper->num_collected >= num_data_to_collect) { /* done */ + double sum = 0; + for (int i = 0; i < num_data_to_collect; i++) { + sum += helper->results[i]; + } + double avg = sum / num_data_to_collect; + printf("In average, the stream completed per second is %f\n", avg); + + if (avg < rate_threshold) { + + fprintf(stderr, "The average result is lower than threshold (%f). Failed\n", rate_threshold); + exit(1); + } + aws_atomic_store_int(&helper->canary_finished, 1); } else { /* keep running */ @@ -107,6 +130,7 @@ void aws_http_canary_helper_init(struct canary_ctx *app_ctx, struct aws_http_can helper->rate_ns = aws_timestamp_convert(rate_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); aws_atomic_init_int(&helper->canary_finished, 0); aws_task_init(&helper->task, s_collect_data_task, app_ctx, "data_collector"); + helper->results = aws_mem_calloc(app_ctx->allocator, num_data_to_collect, sizeof(double)); uint64_t now = 0; aws_high_res_clock_get_ticks(&now); @@ -329,7 +353,7 @@ int main(int argc, char **argv) { struct canary_ctx app_ctx; AWS_ZERO_STRUCT(app_ctx); app_ctx.allocator = allocator; - app_ctx.batch_size = batch_size; + app_ctx.batch_size = max_connections * streams_per_connection; app_ctx.log_level = log_level; aws_mutex_init(&app_ctx.mutex); @@ -439,7 +463,8 @@ int main(int argc, char **argv) { .tls_connection_options = use_tls ? tls_options : NULL, .host = app_ctx.uri.host_name, .port = port, - .max_connections = 100, + .max_connections = max_connections, + .max_concurrent_streams_per_connection = streams_per_connection, .http2_prior_knowledge = !use_tls, .shutdown_complete_user_data = &app_ctx, .shutdown_complete_callback = s_on_shutdown_complete, From c6a1676c5e218f8cbd51a542981ed67c670410c5 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 13 Sep 2022 12:00:53 -0700 Subject: [PATCH 20/48] result mem --- bin/canary/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index cd5dafcd3..814aaf1bb 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -108,7 +108,7 @@ static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_ } double avg = sum / num_data_to_collect; printf("In average, the stream completed per second is %f\n", avg); - + aws_mem_release(app_ctx->allocator, helper->results); if (avg < rate_threshold) { fprintf(stderr, "The average result is lower than threshold (%f). 
Failed\n", rate_threshold); From e4c01fea3eb2eccc51442d025289a31a4ec470f9 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 13 Sep 2022 12:23:28 -0700 Subject: [PATCH 21/48] add documentation and same thing for stream --- include/aws/http/private/h2_connection.h | 10 +++++++++- include/aws/http/private/h2_stream.h | 12 ++++++++++++ source/h2_connection.c | 8 +++++--- source/h2_stream.c | 18 +++++++++++++----- 4 files changed, 39 insertions(+), 9 deletions(-) diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index c5942b9db..bea809dc8 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -98,8 +98,16 @@ struct aws_h2_connection { * Reduce the space after receiving a flow-controlled frame. Increment after sending WINDOW_UPDATE for * connection */ size_t window_size_self; - /* The size dropped */ + /* The self window size dropped before the client send window update automatically. + * When manual management for connection window is off, the dropped size equals to the size of data frame + * received. + * When manual management for connection window is on, the dropped size equals to the size of all the padding in + * the data frame received */ uint32_t window_size_self_dropped; + /* The threshold to send out a window update frame. When the window_size_self_dropped is larger than the + * threshold, client will automatically send a WINDOW_UPDATE frame with the dropped size to keep flow continues. + * TODO: expose this to user + */ uint32_t window_size_self_dropped_threshold; /* Highest self-initiated stream-id that peer might have processed. diff --git a/include/aws/http/private/h2_stream.h b/include/aws/http/private/h2_stream.h index 62de106c3..d48ff048d 100644 --- a/include/aws/http/private/h2_stream.h +++ b/include/aws/http/private/h2_stream.h @@ -84,6 +84,18 @@ struct aws_h2_stream { * We allow this value exceed the max window size (int64 can hold much more than 0x7FFFFFFF), * We leave it up to the remote peer to detect whether the max window size has been exceeded. */ int64_t window_size_self; + /* The self window size dropped before the client send window update automatically. + * When manual management for stream window is off, the dropped size equals to the size of data frame + * received. + * When manual management for stream window is on, the dropped size equals to the size of all the padding in + * the data frame received */ + uint32_t window_size_self_dropped; + /* The threshold to send out a window update frame. When the window_size_self_dropped is larger than the + * threshold, client will automatically send a WINDOW_UPDATE frame with the dropped size to keep flow continues. + * TODO: expose this to user + */ + uint32_t window_size_self_dropped_threshold; + struct aws_http_message *outgoing_message; /* All queued writes. If the message provides a body stream, it will be first in this list * This list can drain, which results in the stream being put to sleep (moved to waiting_streams_list in diff --git a/source/h2_connection.c b/source/h2_connection.c index eadcd16e5..200434a83 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -384,7 +384,7 @@ static struct aws_h2_connection *s_connection_new( connection->thread_data.window_size_peer = AWS_H2_INIT_WINDOW_SIZE; connection->thread_data.window_size_self = AWS_H2_INIT_WINDOW_SIZE; - connection->thread_data.window_size_self_dropped_threshold = 0; /* TODO: expose this config to user? 
*/ + connection->thread_data.window_size_self_dropped_threshold = 0; connection->thread_data.goaway_received_last_stream_id = AWS_H2_STREAM_ID_MAX; connection->thread_data.goaway_sent_last_stream_id = AWS_H2_STREAM_ID_MAX; @@ -1248,7 +1248,9 @@ struct aws_h2err s_decoder_on_data_begin( /* Automatically update the full amount we just received */ auto_window_update = payload_len; } - CONNECTION_LOGF(TRACE, connection, "%" PRIu32 " Bytes of padding received.", total_padding_bytes); + if (total_padding_bytes) { + CONNECTION_LOGF(TRACE, connection, "%" PRIu32 " Bytes of padding received.", total_padding_bytes); + } connection->thread_data.window_size_self_dropped += auto_window_update; if (connection->thread_data.window_size_self_dropped > connection->thread_data.window_size_self_dropped_threshold) { if (s_connection_send_update_window(connection, connection->thread_data.window_size_self_dropped)) { @@ -1258,7 +1260,7 @@ struct aws_h2err s_decoder_on_data_begin( CONNECTION_LOGF( TRACE, connection, - "Automatically updating connection window by %" PRIu32 "", + "Automatically updating connection window by %" PRIu32 ".", connection->thread_data.window_size_self_dropped); } diff --git a/source/h2_stream.c b/source/h2_stream.c index 9be1bee6f..58c4c388e 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -712,6 +712,10 @@ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_ stream->thread_data.window_size_self = connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; + if (!connection->base.stream_manual_window_management) { + stream->thread_data.window_size_self_dropped_threshold = + connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / 2; + } if (with_data) { /* If stream has DATA to send, put it in the outgoing_streams_list, and we'll send data later */ stream->thread_data.state = AWS_H2_STREAM_STATE_OPEN; @@ -1077,17 +1081,21 @@ struct aws_h2err aws_h2_stream_on_decoder_data_begin( /* Automatically update the full amount we just received */ auto_window_update = payload_len; } + if (total_padding_bytes) { + AWS_H2_STREAM_LOGF(TRACE, stream, "%" PRIu32 " Bytes of padding received.", total_padding_bytes); + } + stream->thread_data.window_size_self_dropped += auto_window_update; - if (auto_window_update != 0) { - if (s_stream_send_update_window(stream, auto_window_update)) { + if (stream->thread_data.window_size_self_dropped > stream->thread_data.window_size_self_dropped_threshold) { + if (s_stream_send_update_window(stream, stream->thread_data.window_size_self_dropped)) { return aws_h2err_from_last_error(); } + stream->thread_data.window_size_self_dropped = 0; AWS_H2_STREAM_LOGF( TRACE, stream, - "Automatically updating stream window by %" PRIu32 "(%" PRIu32 " due to padding).", - auto_window_update, - total_padding_bytes); + "Automatically updating stream window by %" PRIu32 ".", + stream->thread_data.window_size_self_dropped); } } From db1a7283ca2579390f5983edcd651969b8411e94 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 13 Sep 2022 13:56:29 -0700 Subject: [PATCH 22/48] fix tests --- tests/test_h2_client.c | 19 ++++++++----------- tests/test_localhost_integ.c | 4 +--- tests/test_stream_manager.c | 8 -------- 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index f897cd045..6e8fd3174 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -2419,15 +2419,18 @@ TEST_CASE(h2_client_stream_send_window_update) { const char *body_src = 
"hello"; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, false /*end_stream*/)); - /* check that 2 WINDOW_UPDATE frames have been sent. - * 1 for the connection, and 1 for the stream */ + /* check that 1 WINDOW_UPDATE frames have been sent. + * 1 for the connection, and no window_update for the stream as window only updates when the window smaller than + * half of the original window */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL); - ASSERT_NOT_NULL(stream_window_update_frame); - ASSERT_UINT_EQUALS(5, stream_window_update_frame->window_size_increment); + ASSERT_NULL(stream_window_update_frame); + + /* For testing automatic window update, we have localhost_integ_h2_download_stress that downloads a file from one + * connection and one stream. If the window was not properly updated, that should fail */ /* clean up */ aws_http_headers_release(response_headers); @@ -3888,17 +3891,11 @@ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update) { } else { ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); } - /* manually update the stream and connection flow-control window. */ - aws_http_stream_update_window(stream_tester.stream, body_size); + /* manually update connection flow-control window. */ aws_http2_connection_update_window(s_tester.connection, (uint32_t)body_size); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); - struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( - &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL); - ASSERT_NOT_NULL(stream_window_update_frame); - ASSERT_UINT_EQUALS(body_size, stream_window_update_frame->window_size_increment); - struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, 0 /*idx*/, NULL); ASSERT_NOT_NULL(connection_window_update_frame); diff --git a/tests/test_localhost_integ.c b/tests/test_localhost_integ.c index 9797db4d9..989285570 100644 --- a/tests/test_localhost_integ.c +++ b/tests/test_localhost_integ.c @@ -407,9 +407,7 @@ static int s_localhost_integ_h2_upload_stress(struct aws_allocator *allocator, v size_t length = 2500000000UL; #ifdef AWS_OS_LINUX - /* Using Python hyper h2 server frame work, met a weird upload performance issue on Linux. Our client against nginx - * platform has not met the same issue. We assume it's because the server framework implementation. Use lower - * number of linux */ + /* TODO: dig into upload stress performance on linux */ length = 250000000UL; #endif diff --git a/tests/test_stream_manager.c b/tests/test_stream_manager.c index afb8ce00a..2817a41c0 100644 --- a/tests/test_stream_manager.c +++ b/tests/test_stream_manager.c @@ -1388,14 +1388,6 @@ TEST_CASE(localhost_integ_h2_sm_acquire_stream_stress_with_body) { s_tester.length_sent = 2000; int num_to_acquire = 500 * 100; -#ifdef AWS_OS_LINUX - /* Using Python hyper h2 server frame work, met a weird upload performance issue on Linux. Our client against nginx - * platform has not met the same issue. 
We assume it's because the server framework implementation. Use lower - * number of linux - */ - num_to_acquire = 500; -#endif - ASSERT_SUCCESS(s_sm_stream_acquiring_with_body(num_to_acquire)); ASSERT_SUCCESS(s_wait_on_streams_completed_count(num_to_acquire)); ASSERT_TRUE((int)s_tester.acquiring_stream_errors == 0); From 9c89ade1636b94ccbed7d58712d825769a1d44db Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 14 Sep 2022 13:18:13 -0700 Subject: [PATCH 23/48] 30 secs instead --- bin/canary/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index 814aaf1bb..ff3bcc426 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -37,7 +37,7 @@ /* TODO: Make those configurable from cmd line */ const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:8080/"); -const int rate_secs = 5; /* Time interval to collect data */ +const int rate_secs = 30; /* Time interval to collect data */ const int streams_per_connection = 20; const int max_connections = 8; const int num_data_to_collect = 5; /* The number of data to collect */ From 918c9ce9fe36ff0a1b16c16ed82e50debf305a6a Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 19 Sep 2022 15:55:17 -0700 Subject: [PATCH 24/48] copy/paste fix --- bin/canary/CMakeLists.txt | 7 +++---- bin/canary/main.c | 1 - 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/bin/canary/CMakeLists.txt b/bin/canary/CMakeLists.txt index b4911a6a0..2fa8642d3 100644 --- a/bin/canary/CMakeLists.txt +++ b/bin/canary/CMakeLists.txt @@ -2,15 +2,14 @@ project(canary C) list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") -file(GLOB ELASTICURL_SRC +file(GLOB CANARY_SRC "*.c" ) set(PROJECT_NAME canary) -add_executable(${PROJECT_NAME} ${ELASTICURL_SRC}) +add_executable(${PROJECT_NAME} ${CANARY_SRC}) aws_set_common_properties(${PROJECT_NAME}) - target_include_directories(${PROJECT_NAME} PUBLIC $ $) @@ -18,7 +17,7 @@ target_include_directories(${PROJECT_NAME} PUBLIC target_link_libraries(${PROJECT_NAME} aws-c-http) if (BUILD_SHARED_LIBS AND NOT WIN32) - message(INFO " elasticurl will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") + message(INFO " canary will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") endif() install(TARGETS ${PROJECT_NAME} diff --git a/bin/canary/main.c b/bin/canary/main.c index ff3bcc426..21e4b3310 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -27,7 +27,6 @@ #include #ifdef _MSC_VER -# pragma warning(disable : 4996) /* Disable warnings about fopen() being insecure */ # pragma warning(disable : 4204) /* Declared initializers */ # pragma warning(disable : 4221) /* Local var in declared initializer */ #endif From c72d9b05b55615b506275ac97305a144ffcaea34 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 26 Sep 2022 15:36:10 -0700 Subject: [PATCH 25/48] get rid of the linux only thing --- tests/test_localhost_integ.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/test_localhost_integ.c b/tests/test_localhost_integ.c index 989285570..52afe3d28 100644 --- a/tests/test_localhost_integ.c +++ b/tests/test_localhost_integ.c @@ -406,10 +406,6 @@ static int s_localhost_integ_h2_upload_stress(struct aws_allocator *allocator, v s_tester.alloc = allocator; size_t length = 2500000000UL; -#ifdef AWS_OS_LINUX - /* TODO: dig into upload stress performance on linux */ - length = 250000000UL; 
-#endif struct aws_string *http_localhost_host = NULL; if (aws_get_environment_value(allocator, s_http_localhost_env_var, &http_localhost_host) || From 74c7b06e3d57ea3c0c9d25f12a5facf8a9926ab8 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 12 Dec 2022 16:45:42 -0800 Subject: [PATCH 26/48] update the port --- .github/workflows/ci.yml | 18 ------------------ bin/canary/main.c | 2 +- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7c6f13b7c..6ce71d2a1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -184,12 +184,6 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 - - name: Configure local host - run: | - python3 -m pip install h2 - cd ./tests/py_localhost/ - python3 server.py & - python3 non_tls_server.py & - name: Build and test run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" @@ -200,12 +194,6 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 - - name: Configure local host - run: | - python3 -m pip install h2 - cd ./tests/py_localhost/ - python3 server.py & - python3 non_tls_server.py & - name: Build and test run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" @@ -216,13 +204,7 @@ jobs: steps: - name: Checkout uses: actions/checkout@v3 - - name: Configure local host - run: | - python -m pip install h2 - name: Build and test run: | - cd ./tests/py_localhost/ - Start-Process -NoNewWindow python .\server.py - Start-Process -NoNewWindow python .\non_tls_server.py python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON diff --git a/bin/canary/main.c b/bin/canary/main.c index 21e4b3310..4ba0a3559 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -35,7 +35,7 @@ { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } /* TODO: Make those configurable from cmd line */ -const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:8080/"); +const struct aws_byte_cursor uri_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:3280/"); const int rate_secs = 30; /* Time interval to collect data */ const int streams_per_connection = 20; const int max_connections = 8; From e112d01decd86192b4fef6a0edbc761136270911 Mon Sep 17 00:00:00 2001 From: Dengke Date: Tue, 1 Apr 2025 14:43:32 -0700 Subject: [PATCH 27/48] trivial --- bin/canary/main.c | 7 +++++-- source/h2_connection.c | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index 4ba0a3559..9b3aa42ee 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -32,7 +32,10 @@ #endif #define DEFINE_HEADER(NAME, VALUE) \ - { .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), } + { \ + .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(NAME), \ + .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(VALUE), \ + } /* TODO: Make those configurable from cmd line */ const struct aws_byte_cursor uri_cursor = 
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http://localhost:3280/"); @@ -424,7 +427,7 @@ int main(int argc, char **argv) { tls_options = &tls_connection_options; if (app_ctx.uri.port) { - port = app_ctx.uri.port; + port = (uint16_t)app_ctx.uri.port; } } else { port = 80; diff --git a/source/h2_connection.c b/source/h2_connection.c index 0d58f336e..28143bed8 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -1767,7 +1767,7 @@ static void s_handler_installed(struct aws_channel_handler *handler, struct aws_ aws_linked_list_push_back( &connection->thread_data.outgoing_frames_queue, &connection_window_update_frame->node); connection->thread_data.window_size_self += initial_window_update_size; - /* For automatic window management, we only update connectio windows when it droped blow 50% of MAX. */ + /* For automatic window management, we only update connection windows when it droped blow 50% of MAX. */ connection->thread_data.window_size_self_dropped_threshold = AWS_H2_WINDOW_UPDATE_MAX / 2; } aws_h2_try_write_outgoing_frames(connection); From 9287f0b66d119909c3e9e0f24dd0bf161a1736d1 Mon Sep 17 00:00:00 2001 From: Dengke Date: Tue, 1 Apr 2025 14:53:08 -0700 Subject: [PATCH 28/48] just use 32 bit --- bin/canary/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/canary/main.c b/bin/canary/main.c index 9b3aa42ee..66c3e0d94 100644 --- a/bin/canary/main.c +++ b/bin/canary/main.c @@ -386,7 +386,7 @@ int main(int argc, char **argv) { aws_atomic_store_int(&app_ctx.streams_failed, 0); bool use_tls = true; - uint16_t port = 443; + uint32_t port = 443; if (!app_ctx.uri.scheme.len && (app_ctx.uri.port == 80 || app_ctx.uri.port == 8080)) { use_tls = false; @@ -427,7 +427,7 @@ int main(int argc, char **argv) { tls_options = &tls_connection_options; if (app_ctx.uri.port) { - port = (uint16_t)app_ctx.uri.port; + port = app_ctx.uri.port; } } else { port = 80; From ed923c37cac9d4607dd8f1c515d059afce5dfeb0 Mon Sep 17 00:00:00 2001 From: Dengke Date: Tue, 1 Apr 2025 14:54:52 -0700 Subject: [PATCH 29/48] things has been updated --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c5da2a6c5..5e426932d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -282,20 +282,20 @@ jobs: python builder.pyz build -p aws-c-http --cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON --config Debug localhost-canary-linux: - runs-on: ubuntu-20.04 # latest + runs-on: ubuntu-24.04 # latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Build and test run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python3 builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON --config Debug localhost-canary-mac: - runs-on: macos-11 # latest + runs-on: macos-14 # latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@vr - name: Build and test run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" @@ -305,7 +305,7 @@ jobs: runs-on: windows-2022 # latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Build and test run: | python -c "from urllib.request import urlretrieve; 
urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" From 29ec995c2dcb617d8597bb4b7c8515ddf41e587e Mon Sep 17 00:00:00 2001 From: Dengke Date: Tue, 1 Apr 2025 14:56:27 -0700 Subject: [PATCH 30/48] typo --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5e426932d..3e97d65c8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -295,7 +295,7 @@ jobs: runs-on: macos-14 # latest steps: - name: Checkout - uses: actions/checkout@vr + uses: actions/checkout@v4 - name: Build and test run: | python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" From 2066110b8a084505177f90b53cd21b4c80344683 Mon Sep 17 00:00:00 2001 From: Dengke Date: Wed, 2 Apr 2025 10:50:10 -0700 Subject: [PATCH 31/48] you need venv now for new python --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3e97d65c8..678557357 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -298,6 +298,8 @@ jobs: uses: actions/checkout@v4 - name: Build and test run: | + python3 -m venv .venv + source .venv/bin/activate python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python3 builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON --config Debug From bd3bef5a37187df2fa629a565c068fb8fd0c2c3a Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 2 Apr 2025 21:45:39 +0000 Subject: [PATCH 32/48] make the flow control window wide open for server --- tests/py_localhost/server.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/py_localhost/server.py b/tests/py_localhost/server.py index 8bd58cf17..160071b78 100644 --- a/tests/py_localhost/server.py +++ b/tests/py_localhost/server.py @@ -69,7 +69,13 @@ def data_received(self, data: bytes): for event in events: if isinstance(event, RequestReceived): self.request_received(event.headers, event.stream_id) + self.conn.increment_flow_control_window(214748364) + self.conn.increment_flow_control_window( + 214748364, event.stream_id) elif isinstance(event, DataReceived): + self.conn.increment_flow_control_window(event.flow_controlled_length) + self.conn.increment_flow_control_window( + event.flow_controlled_length, event.stream_id) self.receive_data(event.data, event.stream_id) elif isinstance(event, StreamEnded): self.stream_complete(event.stream_id) @@ -164,11 +170,7 @@ def receive_data(self, data: bytes, stream_id: int): len(data) else: self.num_sentence_received[stream_id] = len(data) - # update window for stream - if len(data) > 0: - self.conn.increment_flow_control_window(len(data)) - self.conn.increment_flow_control_window( - len(data), stream_id) + else: stream_data.data.write(data) From e4e6cbe04fe7b1b926c50f3fcfde4f95ce93341c Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 2 Apr 2025 21:53:00 +0000 Subject: [PATCH 33/48] increament once at the beginning --- tests/py_localhost/server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/py_localhost/server.py b/tests/py_localhost/server.py index 160071b78..678ab31ae 100644 --- 
a/tests/py_localhost/server.py +++ b/tests/py_localhost/server.py @@ -51,6 +51,7 @@ def __init__(self): def connection_made(self, transport: asyncio.Transport): self.transport = transport self.conn.initiate_connection() + self.conn.increment_flow_control_window(int(2147483647/2)) self.transport.write(self.conn.data_to_send()) def connection_lost(self, exc): @@ -69,9 +70,8 @@ def data_received(self, data: bytes): for event in events: if isinstance(event, RequestReceived): self.request_received(event.headers, event.stream_id) - self.conn.increment_flow_control_window(214748364) self.conn.increment_flow_control_window( - 214748364, event.stream_id) + int(2147483647/2), event.stream_id) elif isinstance(event, DataReceived): self.conn.increment_flow_control_window(event.flow_controlled_length) self.conn.increment_flow_control_window( From 46445c0db34cd4af5fa3ddbc6178bd8d901a2760 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 4 Apr 2025 10:17:50 -0700 Subject: [PATCH 34/48] get the benchmark --- .github/workflows/ci.yml | 32 -------------- CMakeLists.txt | 5 +-- bin/canary/CMakeLists.txt | 28 ------------ bin/h2benchmark/CMakeLists.txt | 22 ++++++++++ bin/h2benchmark/README.md | 10 +++++ bin/{canary => h2benchmark}/main.c | 52 +++++++++++------------ builder.json | 3 +- integration-testing/http_client_canary.py | 51 ---------------------- 8 files changed, 60 insertions(+), 143 deletions(-) delete mode 100644 bin/canary/CMakeLists.txt create mode 100644 bin/h2benchmark/CMakeLists.txt create mode 100644 bin/h2benchmark/README.md rename bin/{canary => h2benchmark}/main.c (92%) delete mode 100644 integration-testing/http_client_canary.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 678557357..73d798ea5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -280,35 +280,3 @@ jobs: run: | python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" python builder.pyz build -p aws-c-http --cmake-extra=-DENABLE_LOCALHOST_INTEGRATION_TESTS=ON --config Debug - - localhost-canary-linux: - runs-on: ubuntu-24.04 # latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Build and test - run: | - python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" - python3 builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON --config Debug - - localhost-canary-mac: - runs-on: macos-14 # latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Build and test - run: | - python3 -m venv .venv - source .venv/bin/activate - python3 -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" - python3 builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON --config Debug - - localhost-canary-win: - runs-on: windows-2022 # latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Build and test - run: | - python -c "from urllib.request import urlretrieve; urlretrieve('${{ env.BUILDER_HOST }}/${{ env.BUILDER_SOURCE }}/${{ env.BUILDER_VERSION }}/builder.pyz?run=${{ env.RUN }}', 'builder.pyz')" - python builder.pyz build -p aws-c-http --cmake-extra=-DAWS_BUILD_CANARY=ON --config Debug diff --git a/CMakeLists.txt b/CMakeLists.txt 
index 9e4e1d245..000d428b5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,6 @@ project(aws-c-http C) option(ENABLE_PROXY_INTEGRATION_TESTS "Whether to run the proxy integration tests that rely on pre-configured proxy" OFF) option(ENABLE_LOCALHOST_INTEGRATION_TESTS "Whether to run the integration tests that rely on pre-configured localhost" OFF) -option(AWS_BUILD_CANARY "Whether to build the canary for benchmark the performance of our http client" OFF) if (NOT IN_SOURCE_BUILD) # this is required so we can use aws-c-common's CMake modules @@ -84,8 +83,6 @@ if (NOT BYO_CRYPTO AND BUILD_TESTING) add_subdirectory(tests) if (NOT CMAKE_CROSSCOMPILING) add_subdirectory(bin/elasticurl) - if (AWS_BUILD_CANARY) - add_subdirectory(bin/canary) - endif() + add_subdirectory(bin/h2benchmark) endif() endif() diff --git a/bin/canary/CMakeLists.txt b/bin/canary/CMakeLists.txt deleted file mode 100644 index 2fa8642d3..000000000 --- a/bin/canary/CMakeLists.txt +++ /dev/null @@ -1,28 +0,0 @@ -project(canary C) - -list(APPEND CMAKE_MODULE_PATH "${CMAKE_INSTALL_PREFIX}/lib/cmake") - -file(GLOB CANARY_SRC - "*.c" - ) - -set(PROJECT_NAME canary) -add_executable(${PROJECT_NAME} ${CANARY_SRC}) -aws_set_common_properties(${PROJECT_NAME}) - -target_include_directories(${PROJECT_NAME} PUBLIC - $ - $) - -target_link_libraries(${PROJECT_NAME} aws-c-http) - -if (BUILD_SHARED_LIBS AND NOT WIN32) - message(INFO " canary will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") -endif() - -install(TARGETS ${PROJECT_NAME} - EXPORT ${PROJECT_NAME}-targets - COMPONENT Runtime - RUNTIME - DESTINATION bin - COMPONENT Runtime) diff --git a/bin/h2benchmark/CMakeLists.txt b/bin/h2benchmark/CMakeLists.txt new file mode 100644 index 000000000..fb505ddca --- /dev/null +++ b/bin/h2benchmark/CMakeLists.txt @@ -0,0 +1,22 @@ +project(h2benchmark C) + +file(GLOB CANARY_SRC + "*.c" + ) + +set(PROJECT_NAME h2benchmark) +add_executable(${PROJECT_NAME} ${CANARY_SRC}) +aws_set_common_properties(${PROJECT_NAME}) + +target_link_libraries(${PROJECT_NAME} aws-c-http) + +if (BUILD_SHARED_LIBS AND NOT WIN32) + message(INFO " h2benchmark will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") +endif() + +install(TARGETS ${PROJECT_NAME} + EXPORT ${PROJECT_NAME}-targets + COMPONENT Runtime + RUNTIME + DESTINATION ${CMAKE_INSTALL_BINDIR} + COMPONENT Runtime) diff --git a/bin/h2benchmark/README.md b/bin/h2benchmark/README.md new file mode 100644 index 000000000..9fd1c93e0 --- /dev/null +++ b/bin/h2benchmark/README.md @@ -0,0 +1,10 @@ +# HTTP/2 benchmark + +this is a C program mimic the API call benchmark from aws-java-sdk-v2. https://github.com/aws/aws-sdk-java-v2/tree/master/test/sdk-benchmarks/src/main/java/software/amazon/awssdk/benchmark/apicall/httpclient + +It collects how many API calls finish per second. Basically how many request can made per second. + +The program connects to the local host that can be found [here](../../tests/py_localhost). + +To run the benchmark, build the h2benchmark with aws-c-http as dependency. +TODO: Currently the configs are all hardcoded. diff --git a/bin/canary/main.c b/bin/h2benchmark/main.c similarity index 92% rename from bin/canary/main.c rename to bin/h2benchmark/main.c index 66c3e0d94..a748bcb07 100644 --- a/bin/canary/main.c +++ b/bin/h2benchmark/main.c @@ -51,19 +51,19 @@ const double rate_threshold = 4000; /* From the previous tests. 
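For reference, a minimal sketch of the pass/fail rule this threshold feeds; the helper name is illustrative, and the logic simply mirrors s_collect_data_task earlier in this file: average the per-interval rates and fail the run when the average drops below the threshold.

/* Illustrative only: results[i] is "streams completed per second" measured
 * over one rate_secs interval; the benchmark fails if the average rate is
 * below rate_threshold (4000 here). */
static bool s_benchmark_passed(const double *results, int num_results, double threshold) {
    double sum = 0.0;
    for (int i = 0; i < num_results; ++i) {
        sum += results[i];
    }
    return (sum / num_results) >= threshold;
}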
All platforms seem to be larger than 4000, but it could various. TODO: Maybe gather the number of previous test run, and be platform specific. */ -struct aws_http_canary_helper { +struct aws_http_benchmark_helper { struct aws_task task; struct aws_event_loop *eventloop; int num_collected; /* number of data collected */ uint64_t rate_ns; /* Collect data per rate_ns */ - struct aws_atomic_var canary_finished; + struct aws_atomic_var benchmark_finished; double *results; }; -struct canary_ctx { +struct benchmark_ctx { struct aws_allocator *allocator; const char *verb; struct aws_uri uri; @@ -71,7 +71,7 @@ struct canary_ctx { struct aws_condition_variable c_var; enum aws_log_level log_level; - struct aws_http_canary_helper helper; + struct aws_http_benchmark_helper helper; struct aws_event_loop_group *el_group; struct aws_http2_stream_manager *manager; @@ -91,8 +91,8 @@ static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_ (void)status; (void)task; - struct canary_ctx *app_ctx = arg; - struct aws_http_canary_helper *helper = &app_ctx->helper; + struct benchmark_ctx *app_ctx = arg; + struct aws_http_benchmark_helper *helper = &app_ctx->helper; /* collect data */ size_t stream_completed = aws_atomic_exchange_int(&app_ctx->streams_completed, 0); @@ -117,7 +117,7 @@ static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_ exit(1); } - aws_atomic_store_int(&helper->canary_finished, 1); + aws_atomic_store_int(&helper->benchmark_finished, 1); } else { /* keep running */ uint64_t now = 0; @@ -126,11 +126,11 @@ static void s_collect_data_task(struct aws_task *task, void *arg, enum aws_task_ } } -void aws_http_canary_helper_init(struct canary_ctx *app_ctx, struct aws_http_canary_helper *helper) { +void aws_http_benchmark_helper_init(struct benchmark_ctx *app_ctx, struct aws_http_benchmark_helper *helper) { helper->eventloop = aws_event_loop_group_get_next_loop(app_ctx->el_group); helper->rate_ns = aws_timestamp_convert(rate_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL); - aws_atomic_init_int(&helper->canary_finished, 0); + aws_atomic_init_int(&helper->benchmark_finished, 0); aws_task_init(&helper->task, s_collect_data_task, app_ctx, "data_collector"); helper->results = aws_mem_calloc(app_ctx->allocator, num_data_to_collect, sizeof(double)); uint64_t now = 0; @@ -154,7 +154,7 @@ static void s_on_stream_acquired(struct aws_http_stream *stream, int error_code, static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) { (void)stream; - struct canary_ctx *app_ctx = user_data; + struct benchmark_ctx *app_ctx = user_data; aws_mutex_lock(&app_ctx->mutex); aws_atomic_fetch_add(&app_ctx->batch_completed, 1); if (error_code) { @@ -172,12 +172,12 @@ static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, /************************* Stream manager ops ******************************************/ static bool s_are_batch_completed(void *context) { - struct canary_ctx *app_ctx = context; + struct benchmark_ctx *app_ctx = context; size_t completed = aws_atomic_load_int(&app_ctx->batch_completed); return (int)completed >= app_ctx->batch_size; } -static int s_wait_on_batch_complete(struct canary_ctx *app_ctx) { +static int s_wait_on_batch_complete(struct benchmark_ctx *app_ctx) { aws_mutex_lock(&app_ctx->mutex); int signal_error = @@ -187,7 +187,7 @@ static int s_wait_on_batch_complete(struct canary_ctx *app_ctx) { return signal_error; } -static void s_run_stream_manager_test(struct canary_ctx *app_ctx, 
struct aws_http_message *request) { +static void s_run_stream_manager_test(struct benchmark_ctx *app_ctx, struct aws_http_message *request) { struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, @@ -220,7 +220,7 @@ static void s_run_stream_manager_test(struct canary_ctx *app_ctx, struct aws_htt exit(1); } - size_t finished = aws_atomic_load_int(&app_ctx->helper.canary_finished); + size_t finished = aws_atomic_load_int(&app_ctx->helper.benchmark_finished); if (finished) { keep_loop = false; } @@ -228,7 +228,7 @@ static void s_run_stream_manager_test(struct canary_ctx *app_ctx, struct aws_htt } static void s_on_shutdown_complete(void *user_data) { - struct canary_ctx *app_ctx = user_data; + struct benchmark_ctx *app_ctx = user_data; aws_mutex_lock(&app_ctx->mutex); app_ctx->is_shutdown_complete = true; @@ -238,7 +238,7 @@ static void s_on_shutdown_complete(void *user_data) { /************************* direct connection ops ******************************************/ -static void s_run_direct_connection_test(struct canary_ctx *app_ctx, struct aws_http_message *request) { +static void s_run_direct_connection_test(struct benchmark_ctx *app_ctx, struct aws_http_message *request) { struct aws_http_make_request_options request_options = { .self_size = sizeof(request_options), .request = request, @@ -266,7 +266,7 @@ static void s_run_direct_connection_test(struct canary_ctx *app_ctx, struct aws_ exit(1); } - size_t finished = aws_atomic_load_int(&app_ctx->helper.canary_finished); + size_t finished = aws_atomic_load_int(&app_ctx->helper.benchmark_finished); if (finished) { keep_loop = false; } @@ -276,7 +276,7 @@ static void s_run_direct_connection_test(struct canary_ctx *app_ctx, struct aws_ static void s_on_connection_shutdown(struct aws_http_connection *connection, int error_code, void *user_data) { (void)connection; (void)error_code; - struct canary_ctx *app_ctx = user_data; + struct benchmark_ctx *app_ctx = user_data; aws_mutex_lock(&app_ctx->mutex); app_ctx->is_shutdown_complete = true; @@ -289,7 +289,7 @@ static void s_on_client_connection_setup(struct aws_http_connection *connection, fprintf(stderr, "Failed to create connection with error %s\n", aws_error_debug_str(aws_last_error())); exit(1); } - struct canary_ctx *app_ctx = user_data; + struct benchmark_ctx *app_ctx = user_data; aws_mutex_lock(&app_ctx->mutex); app_ctx->connection = connection; @@ -298,18 +298,18 @@ static void s_on_client_connection_setup(struct aws_http_connection *connection, } static bool s_is_connected(void *context) { - struct canary_ctx *app_ctx = context; + struct benchmark_ctx *app_ctx = context; return app_ctx->connection != NULL; } /************************* general ops ******************************************/ static bool s_is_shutdown_complete(void *context) { - struct canary_ctx *app_ctx = context; + struct benchmark_ctx *app_ctx = context; return app_ctx->is_shutdown_complete; } -static struct aws_http_message *s_create_request(struct canary_ctx *app_ctx) { +static struct aws_http_message *s_create_request(struct benchmark_ctx *app_ctx) { struct aws_http_message *request = aws_http2_message_new_request(app_ctx->allocator); struct aws_http_header request_headers_src[] = { @@ -331,8 +331,8 @@ static struct aws_http_message *s_create_request(struct canary_ctx *app_ctx) { return request; } -static void s_run_canary(struct canary_ctx *app_ctx) { - aws_http_canary_helper_init(app_ctx, &app_ctx->helper); +static void s_run_benchmark(struct 
benchmark_ctx *app_ctx) { + aws_http_benchmark_helper_init(app_ctx, &app_ctx->helper); struct aws_http_message *request = s_create_request(app_ctx); if (direct_connection) { @@ -352,7 +352,7 @@ int main(int argc, char **argv) { aws_http_library_init(allocator); - struct canary_ctx app_ctx; + struct benchmark_ctx app_ctx; AWS_ZERO_STRUCT(app_ctx); app_ctx.allocator = allocator; app_ctx.batch_size = max_connections * streams_per_connection; @@ -495,7 +495,7 @@ int main(int argc, char **argv) { } /* Really do the job */ - s_run_canary(&app_ctx); + s_run_benchmark(&app_ctx); if (!direct_connection) { aws_http2_stream_manager_release(app_ctx.manager); diff --git a/builder.json b/builder.json index 09da6038c..45222a323 100644 --- a/builder.json +++ b/builder.json @@ -18,7 +18,6 @@ "pre_build_steps": ["local-server-setup"], "test_steps": [ "aws-c-http-test", - ["python3", "{source_dir}/integration-testing/http_client_test.py", "{install_dir}/bin/elasticurl{exe}"], - ["python3", "{source_dir}/integration-testing/http_client_canary.py", "{install_dir}/bin/canary{exe}"] + ["python3", "{source_dir}/integration-testing/http_client_test.py", "{install_dir}/bin/elasticurl{exe}"] ] } diff --git a/integration-testing/http_client_canary.py b/integration-testing/http_client_canary.py deleted file mode 100644 index d3032ff5f..000000000 --- a/integration-testing/http_client_canary.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0. - -import sys -import os.path -import subprocess - -TIMEOUT = 300 - -canary_args = sys.argv[1:] -if not canary_args: - print('You must pass the canary cmd prefix') - sys.exit(-1) - -program_to_run = canary_args[0] - -if not os.path.exists(program_to_run): - print(f'the {program_to_run} is not found, skip canary test') - sys.exit(0) - -# We don't have args to pass to canary yet. 
TODO add args for canary - - -def run_command(args): - # gather all stderr and stdout to a single string that we print only if things go wrong - process = subprocess.Popen( - args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - timedout = False - try: - output = process.communicate(timeout=TIMEOUT)[0] - except subprocess.TimeoutExpired: - timedout = True - process.kill() - output = process.communicate()[0] - finally: - if process.returncode != 0 or timedout: - args_str = subprocess.list2cmdline(args) - print(args_str) - for line in output.splitlines(): - print(line.decode()) - if timedout: - raise RuntimeError("Timeout happened after {secs} secs from: {cmd}".format( - secs=TIMEOUT, cmd=args_str)) - else: - raise RuntimeError("Return code {code} from: {cmd}".format( - code=process.returncode, cmd=args_str)) - else: - print(output.decode("utf-8")) - - -run_command(canary_args) From e6eaa1d6af69e382dac8d610ddc0aaf8056beaa5 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 7 Apr 2025 16:05:24 -0700 Subject: [PATCH 35/48] Cap the window to max --- bin/h2benchmark/CMakeLists.txt | 4 +- include/aws/http/connection.h | 30 +++++- include/aws/http/private/h2_connection.h | 27 +++-- include/aws/http/private/h2_stream.h | 21 ++-- source/h2_connection.c | 129 +++++++++++------------ source/h2_stream.c | 115 +++++++++----------- 6 files changed, 160 insertions(+), 166 deletions(-) diff --git a/bin/h2benchmark/CMakeLists.txt b/bin/h2benchmark/CMakeLists.txt index fb505ddca..1bcf7e8a6 100644 --- a/bin/h2benchmark/CMakeLists.txt +++ b/bin/h2benchmark/CMakeLists.txt @@ -1,11 +1,11 @@ project(h2benchmark C) -file(GLOB CANARY_SRC +file(GLOB H2BENCHMARK_SRC "*.c" ) set(PROJECT_NAME h2benchmark) -add_executable(${PROJECT_NAME} ${CANARY_SRC}) +add_executable(${PROJECT_NAME} ${H2BENCHMARK_SRC}) aws_set_common_properties(${PROJECT_NAME}) target_link_libraries(${PROJECT_NAME} aws-c-http) diff --git a/include/aws/http/connection.h b/include/aws/http/connection.h index 067e94998..f734ba267 100644 --- a/include/aws/http/connection.h +++ b/include/aws/http/connection.h @@ -238,6 +238,27 @@ struct aws_http2_connection_options { * But, the client will always automatically update the window for padding even for manual window update. */ bool conn_manual_window_management; + + /** + * Optional. + * The threshold to send out a window update frame for the connection. + * Ignored if `conn_manual_window_management` is false. + * When the window_size_self for the connection is larger than the threshold, + * client will batch the window update and not send it out until the window_size_self + * drops below the threshold. + * Default to half of the initial connection flow-control window size, which is 32767. + */ + size_t conn_window_size_threshold_to_send_update; + /** + * Optional. + * The threshold to send out a window update frame for the streams. + * Ignored if `manual_window_management` is false. + * When the window_size_self for the stream is larger than the threshold, + * client will batch the window update and not send it out until the window_size_self + * drops below the threshold. + * Default to half of the `initial_window_size`. + */ + size_t stream_window_size_threshold_to_send_update; }; /** @@ -330,10 +351,11 @@ struct aws_http_client_connection_options { * If a stream's flow-control window reaches 0, no further data will be received. The user must call * aws_http_stream_update_window() to increment the stream's window and keep data flowing. 
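To show the two new threshold options in context, a minimal configuration sketch follows. Per the documentation above, each threshold only takes effect when the corresponding manual-window-management flag is set; the values below are arbitrary examples, and the rest of the aws_http_client_connection_options wiring (allocator, bootstrap, host, callbacks) is assumed and elided.

/* Sketch only: batch connection WINDOW_UPDATEs until the connection window
 * drops below 1 MiB, and stream WINDOW_UPDATEs until a stream window drops
 * below 16 KiB. */
struct aws_http2_connection_options http2_options = {
    .conn_manual_window_management = true,
    .conn_window_size_threshold_to_send_update = 1024 * 1024,
    .stream_window_size_threshold_to_send_update = 16 * 1024,
};

struct aws_http_client_connection_options options = {
    .self_size = sizeof(options),
    .manual_window_management = true, /* stream windows driven by the user */
    .http2_options = &http2_options,
    /* ... allocator, bootstrap, host_name, port, setup/shutdown callbacks ... */
};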
* - * If a HTTP/2 connection created, it will ONLY control the stream window - * management. Connection window management is controlled by - * conn_manual_window_management. Note: the padding of data frame counts to the flow-control window. - * But, the client will always automatically update the window for padding even for manual window update. + * If a HTTP/2 connection created, it will ONLY control the stream window management. + * Connection window management is controlled by `conn_manual_window_management`. + * `stream_window_size_threshold_to_send_update` controls when to send the WINDOW_UPDATE frame for the stream. + * Note: the padding of data frame counts to the flow-control window. But, the client will always automatically + * update the window for padding even for manual window update. */ bool manual_window_management; diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index 609e6ea35..5558caf9e 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -28,6 +28,14 @@ struct aws_h2_connection { struct aws_channel_task outgoing_frames_task; bool conn_manual_window_management; + /* The threshold to send out a window update frame. + * When the window_size_self is less than the threshold, client will starts the sending of WINDOW_UPDATE frame + * to keep flow continues. + */ + uint32_t window_size_threshold_to_send_update; + /* The threshold to send out a window update frame for all the streams on the connection. + */ + uint32_t stream_window_size_threshold_to_send_update; /* Only the event-loop thread may touch this data */ struct { @@ -92,23 +100,14 @@ struct aws_h2_connection { /* Flow-control of connection from peer. Indicating the buffer capacity of our peer. * Reduce the space after sending a flow-controlled frame. Increment after receiving WINDOW_UPDATE for * connection */ - size_t window_size_peer; + uint32_t window_size_peer; /* Flow-control of connection for this side. * Reduce the space after receiving a flow-controlled frame. Increment after sending WINDOW_UPDATE for * connection */ - size_t window_size_self; - /* The self window size dropped before the client send window update automatically. - * When manual management for connection window is off, the dropped size equals to the size of data frame - * received. - * When manual management for connection window is on, the dropped size equals to the size of all the padding in - * the data frame received */ - uint32_t window_size_self_dropped; - /* The threshold to send out a window update frame. When the window_size_self_dropped is larger than the - * threshold, client will automatically send a WINDOW_UPDATE frame with the dropped size to keep flow continues. - * TODO: expose this to user - */ - uint32_t window_size_self_dropped_threshold; + uint32_t window_size_self; + /* The size to increment the window_size_self pending to be sent. */ + uint32_t pending_window_update_size; /* Highest self-initiated stream-id that peer might have processed. * Defaults to max stream-id, may be lowered when GOAWAY frame received. */ @@ -161,7 +160,7 @@ struct aws_h2_connection { bool is_cross_thread_work_task_scheduled; /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ - size_t window_update_size; + uint32_t window_update_size; /* For checking status from outside the event-loop thread. 
*/ bool is_open; diff --git a/include/aws/http/private/h2_stream.h b/include/aws/http/private/h2_stream.h index d48ff048d..ad9c44f80 100644 --- a/include/aws/http/private/h2_stream.h +++ b/include/aws/http/private/h2_stream.h @@ -75,6 +75,11 @@ struct aws_h2_stream { struct aws_linked_list_node node; struct aws_channel_task cross_thread_work_task; + /* The threshold to send out a window update frame. + * When the window_size_self is less than the threshold, client will starts the sending of WINDOW_UPDATE frame + * to keep flow continues. + */ + uint32_t window_size_threshold_to_send_update; /* Only the event-loop thread may touch this data */ struct { @@ -84,17 +89,9 @@ struct aws_h2_stream { * We allow this value exceed the max window size (int64 can hold much more than 0x7FFFFFFF), * We leave it up to the remote peer to detect whether the max window size has been exceeded. */ int64_t window_size_self; - /* The self window size dropped before the client send window update automatically. - * When manual management for stream window is off, the dropped size equals to the size of data frame - * received. - * When manual management for stream window is on, the dropped size equals to the size of all the padding in - * the data frame received */ - uint32_t window_size_self_dropped; - /* The threshold to send out a window update frame. When the window_size_self_dropped is larger than the - * threshold, client will automatically send a WINDOW_UPDATE frame with the dropped size to keep flow continues. - * TODO: expose this to user - */ - uint32_t window_size_self_dropped_threshold; + + /* The size to increment the window_size_self pending to be sent. */ + uint32_t pending_window_update_size; struct aws_http_message *outgoing_message; /* All queued writes. If the message provides a body stream, it will be first in this list @@ -121,7 +118,7 @@ struct aws_h2_stream { bool is_cross_thread_work_task_scheduled; /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ - size_t window_update_size; + uint32_t window_update_size; /* The combined aws_http2_error_code user wanted to send to remote peer via rst_stream and internal aws error * code we want to inform user about. */ diff --git a/source/h2_connection.c b/source/h2_connection.c index 28143bed8..5bce18389 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -384,7 +384,14 @@ static struct aws_h2_connection *s_connection_new( connection->thread_data.window_size_peer = AWS_H2_INIT_WINDOW_SIZE; connection->thread_data.window_size_self = AWS_H2_INIT_WINDOW_SIZE; - connection->thread_data.window_size_self_dropped_threshold = 0; + /* Default to half of the initial window size */ + if (http2_options->conn_window_size_threshold_to_send_update) { + connection->window_size_threshold_to_send_update = http2_options->conn_window_size_threshold_to_send_update; + } else { + connection->window_size_threshold_to_send_update = (uint32_t)(AWS_H2_INIT_WINDOW_SIZE / 2); + } + connection->stream_window_size_threshold_to_send_update = + http2_options->stream_window_size_threshold_to_send_update; connection->thread_data.goaway_received_last_stream_id = AWS_H2_STREAM_ID_MAX; connection->thread_data.goaway_sent_last_stream_id = AWS_H2_STREAM_ID_MAX; @@ -834,7 +841,8 @@ static int s_encode_data_from_outgoing_streams(struct aws_h2_connection *connect CONNECTION_LOGF( DEBUG, connection, - "Peer connection's flow-control window is too small now %zu. 
Connection will stop sending DATA until " + "Peer connection's flow-control window is too small now %" PRIu32 + ". Connection will stop sending DATA until " "WINDOW_UPDATE is received.", connection->thread_data.window_size_peer); goto done; @@ -1188,19 +1196,48 @@ struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised return AWS_H2ERR_SUCCESS; } -static int s_connection_send_update_window(struct aws_h2_connection *connection, uint32_t window_size) { +static int s_connection_send_update_window_if_needed(struct aws_h2_connection *connection, uint32_t window_size) { + AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); + + /* Only send a WINDOW_UPDATE frame if the connection window is below the threshold */ + connection->thread_data.pending_window_update_size = + aws_add_u32_saturating(connection->thread_data.pending_window_update_size, window_size); + if (connection->thread_data.window_size_self > connection->window_size_threshold_to_send_update) { + CONNECTION_LOGF( + TRACE, + connection, + "Ignoring sending connection window update of size%" PRIu32 ". Current size: %" PRIu32 + ", threshold: %" PRIu32 " pending: %" PRIu32, + window_size, + connection->thread_data.window_size_self, + connection->window_size_threshold_to_send_update, + connection->thread_data.pending_window_update_size); + return AWS_OP_SUCCESS; + } + + /* Cap the window to AWS_H2_WINDOW_UPDATE_MAX */ + uint32_t previous_window_size_self = connection->thread_data.window_size_self; + connection->thread_data.window_size_self = aws_add_u32_saturating( + connection->thread_data.window_size_self, connection->thread_data.pending_window_update_size); + connection->thread_data.window_size_self = + aws_min_u32(connection->thread_data.window_size_self, AWS_H2_WINDOW_UPDATE_MAX); + uint32_t window_size_delta = + aws_sub_u32_saturating(connection->thread_data.window_size_self, previous_window_size_self); struct aws_h2_frame *connection_window_update_frame = - aws_h2_frame_new_window_update(connection->base.alloc, 0, window_size); + aws_h2_frame_new_window_update(connection->base.alloc, 0, window_size_delta); if (!connection_window_update_frame) { CONNECTION_LOGF( ERROR, connection, "WINDOW_UPDATE frame on connection failed to be sent, error %s", aws_error_name(aws_last_error())); + connection->thread_data.window_size_self = previous_window_size_self; return AWS_OP_ERR; } + + CONNECTION_LOGF(DEBUG, connection, "Sending connection window by %" PRIu32 ".", window_size_delta); aws_h2_connection_enqueue_outgoing_frame(connection, connection_window_update_frame); - connection->thread_data.window_size_self += window_size; + connection->thread_data.pending_window_update_size = 0; return AWS_OP_SUCCESS; } @@ -1214,12 +1251,12 @@ struct aws_h2err s_decoder_on_data_begin( /* A receiver that receives a flow-controlled frame MUST always account for its contribution against the connection * flow-control window, unless the receiver treats this as a connection error */ - if (aws_sub_size_checked( + if (aws_sub_u32_checked( connection->thread_data.window_size_self, payload_len, &connection->thread_data.window_size_self)) { CONNECTION_LOGF( ERROR, connection, - "DATA length %" PRIu32 " exceeds flow-control window %zu", + "DATA length %" PRIu32 " exceeds flow-control window %" PRIu32 ".", payload_len, connection->thread_data.window_size_self); return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); @@ -1248,20 +1285,9 @@ struct aws_h2err s_decoder_on_data_begin( /* Automatically 
update the full amount we just received */ auto_window_update = payload_len; } - if (total_padding_bytes) { - CONNECTION_LOGF(TRACE, connection, "%" PRIu32 " Bytes of padding received.", total_padding_bytes); - } - connection->thread_data.window_size_self_dropped += auto_window_update; - if (connection->thread_data.window_size_self_dropped > connection->thread_data.window_size_self_dropped_threshold) { - if (s_connection_send_update_window(connection, connection->thread_data.window_size_self_dropped)) { - return aws_h2err_from_last_error(); - } - connection->thread_data.window_size_self_dropped = 0; - CONNECTION_LOGF( - TRACE, - connection, - "Automatically updating connection window by %" PRIu32 ".", - connection->thread_data.window_size_self_dropped); + + if (s_connection_send_update_window_if_needed(connection, auto_window_update)) { + return aws_h2err_from_last_error(); } return AWS_H2ERR_SUCCESS; @@ -1767,8 +1793,8 @@ static void s_handler_installed(struct aws_channel_handler *handler, struct aws_ aws_linked_list_push_back( &connection->thread_data.outgoing_frames_queue, &connection_window_update_frame->node); connection->thread_data.window_size_self += initial_window_update_size; - /* For automatic window management, we only update connection windows when it droped blow 50% of MAX. */ - connection->thread_data.window_size_self_dropped_threshold = AWS_H2_WINDOW_UPDATE_MAX / 2; + /* Only update the window when the windows size drop to half of the size */ + connection->window_size_threshold_to_send_update = (uint32_t)(AWS_H2_WINDOW_UPDATE_MAX / 2); } aws_h2_try_write_outgoing_frames(connection); return; @@ -1983,10 +2009,8 @@ static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, e aws_h2_connection_enqueue_outgoing_frame(connection, frame); } - /* We already enqueued the window_update frame, just apply the change and let our peer check this value, no matter - * overflow happens or not. Peer will detect it for us. */ - connection->thread_data.window_size_self = - aws_add_size_saturating(connection->thread_data.window_size_self, window_update_size); + /* Add window update and enqueue the frame if needed */ + s_connection_send_update_window_if_needed(connection, window_update_size); /* Process new pending_streams */ while (!aws_linked_list_empty(&pending_streams)) { @@ -2175,7 +2199,7 @@ static bool s_connection_new_requests_allowed(const struct aws_http_connection * static void s_connection_update_window(struct aws_http_connection *connection_base, uint32_t increment_size) { struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base); - if (!increment_size) { + if (increment_size == 0) { /* Silently do nothing. */ return; } @@ -2187,47 +2211,23 @@ static void s_connection_update_window(struct aws_http_connection *connection_ba "Connection manual window management is off, update window operations are not supported."); return; } - struct aws_h2_frame *connection_window_update_frame = - aws_h2_frame_new_window_update(connection->base.alloc, 0, increment_size); - if (!connection_window_update_frame) { - CONNECTION_LOGF( - ERROR, - connection, - "Failed to create WINDOW_UPDATE frame on connection, error %s", - aws_error_name(aws_last_error())); - /* OOM should result in a crash. And the increment size is too huge is the only other failure case, which will - * result in overflow. 
*/ - goto overflow; - } - - int err = 0; bool cross_thread_work_should_schedule = false; bool connection_open = false; - size_t sum_size = 0; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); - - err |= aws_add_size_checked(connection->synced_data.window_update_size, increment_size, &sum_size); - err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX; - connection_open = connection->synced_data.is_open; - - if (!err && connection_open) { + if (connection_open) { cross_thread_work_should_schedule = !connection->synced_data.is_cross_thread_work_task_scheduled; connection->synced_data.is_cross_thread_work_task_scheduled = true; - aws_linked_list_push_back( - &connection->synced_data.pending_frame_list, &connection_window_update_frame->node); - connection->synced_data.window_update_size = sum_size; + /** + * Be more user friendly, if the increment size is too large, we will just saturate it to the max. + * AWS_H2_WINDOW_UPDATE_MAX will be checked during the actual sending of the window update frame. + */ + connection->synced_data.window_update_size = + aws_add_u32_saturating(connection->synced_data.window_update_size, increment_size); + connection_open = connection->synced_data.is_open; } s_unlock_synced_data(connection); } /* END CRITICAL SECTION */ - if (err) { - CONNECTION_LOG( - ERROR, - connection, - "The connection's flow-control windows has been incremented beyond 2**31 -1, the max for HTTP/2. The "); - aws_h2_frame_destroy(connection_window_update_frame); - goto overflow; - } if (cross_thread_work_should_schedule) { CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task"); @@ -2236,7 +2236,6 @@ static void s_connection_update_window(struct aws_http_connection *connection_ba if (!connection_open) { /* connection already closed, just do nothing */ - aws_h2_frame_destroy(connection_window_update_frame); return; } CONNECTION_LOGF( @@ -2245,14 +2244,6 @@ static void s_connection_update_window(struct aws_http_connection *connection_ba "User requested to update the HTTP/2 connection's flow-control windows by %" PRIu32 ".", increment_size); return; -overflow: - /* Shutdown the connection as overflow detected */ - s_stop( - connection, - false /*stop_reading*/, - false /*stop_writing*/, - true /*schedule_shutdown*/, - AWS_ERROR_OVERFLOW_DETECTED); } static int s_connection_change_settings( diff --git a/source/h2_stream.c b/source/h2_stream.c index a4da2eda5..c9b2b6952 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -209,24 +209,48 @@ static struct aws_h2err s_check_state_allows_frame_type( return aws_h2err_from_h2_code(h2_error_code); } -static int s_stream_send_update_window_frame(struct aws_h2_stream *stream, size_t increment_size) { +static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, uint32_t window_size) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); - AWS_PRECONDITION(increment_size <= AWS_H2_WINDOW_UPDATE_MAX); - struct aws_h2_connection *connection = s_get_h2_connection(stream); - struct aws_h2_frame *stream_window_update_frame = - aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, (uint32_t)increment_size); + /* Only send a WINDOW_UPDATE frame if the stream window is below the threshold */ + stream->thread_data.pending_window_update_size = + aws_add_u32_saturating(stream->thread_data.pending_window_update_size, window_size); + if (stream->thread_data.window_size_self > stream->window_size_threshold_to_send_update) { + AWS_H2_STREAM_LOGF( + TRACE, + stream, + "Ignoring sending WINDOW_UPDATE update of size%" PRIu32 ". 
Current size: %" PRIi64 ", threshold: %" PRIu32 + " pending: %" PRIu32, + window_size, + stream->thread_data.window_size_self, + stream->window_size_threshold_to_send_update, + stream->thread_data.pending_window_update_size); + return AWS_OP_SUCCESS; + } + /* Cap the window to AWS_H2_WINDOW_UPDATE_MAX */ + uint32_t previous_window_size = stream->thread_data.window_size_self; + stream->thread_data.window_size_self = + stream->thread_data.window_size_self + stream->thread_data.pending_window_update_size; + stream->thread_data.window_size_self = aws_min_u32(stream->thread_data.window_size_self, AWS_H2_WINDOW_UPDATE_MAX); + uint32_t window_size_delta = aws_sub_u32_saturating(stream->thread_data.window_size_self, previous_window_size); + + AWS_H2_STREAM_LOGF(TRACE, stream, "Sending WINDOW_UPDATE by %" PRIu32 ".", window_size_delta); + + struct aws_h2_frame *stream_window_update_frame = + aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_size_delta); if (!stream_window_update_frame) { AWS_H2_STREAM_LOGF( ERROR, stream, - "Failed to create WINDOW_UPDATE frame on connection, error %s", + "WINDOW_UPDATE frame on stream failed to be sent, error %s", aws_error_name(aws_last_error())); + stream->thread_data.window_size_self = previous_window_size; return AWS_OP_ERR; } - aws_h2_connection_enqueue_outgoing_frame(connection, stream_window_update_frame); + aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame); + stream->thread_data.pending_window_update_size = 0; return AWS_OP_SUCCESS; } @@ -366,7 +390,7 @@ static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void } /* END CRITICAL SECTION */ if (window_update_size > 0 && !ignore_window_update) { - if (s_stream_send_update_window_frame(stream, window_update_size)) { + if (s_stream_send_update_window_if_needed(stream, window_update_size)) { /* Treat this as a connection error */ aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error()); } @@ -481,21 +505,21 @@ static void s_stream_update_window(struct aws_http_stream *stream_base, size_t i return; } - int err = 0; bool stream_is_init; bool cross_thread_work_should_schedule = false; - size_t sum_size; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(stream); - - err |= aws_add_size_checked(stream->synced_data.window_update_size, increment_size, &sum_size); - err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX; stream_is_init = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT; - if (!err && !stream_is_init) { + if (!stream_is_init) { cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; - stream->synced_data.window_update_size = sum_size; + /** + * Be more user friendly, if the increment size is too large, we will just saturate it to the max. + * AWS_H2_WINDOW_UPDATE_MAX will be checked during the actual sending of the window update frame. + */ + stream->synced_data.window_update_size = + aws_add_u32_saturating(stream->synced_data.window_update_size, increment_size); } s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ @@ -516,24 +540,6 @@ static void s_stream_update_window(struct aws_http_stream *stream_base, size_t i aws_raise_error(AWS_ERROR_INVALID_STATE); return; } - - if (err) { - /* The increment_size is still not 100% safe, since we cannot control the incoming data frame. 
So just - * ruled out the value that is obviously wrong values */ - AWS_H2_STREAM_LOG( - ERROR, - stream, - "The stream's flow-control window has been incremented beyond 2**31 -1, the max for HTTP/2. The stream " - "will close."); - aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED); - struct aws_h2err stream_error = { - .aws_code = AWS_ERROR_OVERFLOW_DETECTED, - .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR, - }; - /* Only when stream is not initialized reset will fail. So, we can assert it to be succeed. */ - AWS_FATAL_ASSERT( - s_stream_reset_stream_internal(stream_base, stream_error, false /*cancelling*/) == AWS_OP_SUCCESS); - } return; } @@ -748,9 +754,14 @@ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_ connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; if (!connection->base.stream_manual_window_management) { - stream->thread_data.window_size_self_dropped_threshold = - connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / 2; + if (connection->stream_window_size_threshold_to_send_update) { + stream->window_size_threshold_to_send_update = connection->stream_window_size_threshold_to_send_update; + } else { + stream->window_size_threshold_to_send_update = + connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / 2; + } } + if (with_data) { /* If stream has DATA to send, put it in the outgoing_streams_list, and we'll send data later */ stream->thread_data.state = AWS_H2_STREAM_STATE_OPEN; @@ -816,7 +827,7 @@ int aws_h2_stream_encode_data_frame( ends_stream, 0 /*pad_length*/, &stream->thread_data.window_size_peer, - &connection->thread_data.window_size_peer, + (size_t *)&connection->thread_data.window_size_peer, output, &input_stream_complete, &input_stream_stalled)) { @@ -1045,23 +1056,6 @@ struct aws_h2err aws_h2_stream_on_decoder_push_promise(struct aws_h2_stream *str return AWS_H2ERR_SUCCESS; } -static int s_stream_send_update_window(struct aws_h2_stream *stream, uint32_t window_size) { - struct aws_h2_frame *stream_window_update_frame = - aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_size); - if (!stream_window_update_frame) { - AWS_H2_STREAM_LOGF( - ERROR, - stream, - "WINDOW_UPDATE frame on stream failed to be sent, error %s", - aws_error_name(aws_last_error())); - return AWS_OP_ERR; - } - - aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame); - stream->thread_data.window_size_self += window_size; - return AWS_OP_SUCCESS; -} - struct aws_h2err aws_h2_stream_on_decoder_data_begin( struct aws_h2_stream *stream, uint32_t payload_len, @@ -1130,18 +1124,9 @@ struct aws_h2err aws_h2_stream_on_decoder_data_begin( if (total_padding_bytes) { AWS_H2_STREAM_LOGF(TRACE, stream, "%" PRIu32 " Bytes of padding received.", total_padding_bytes); } - stream->thread_data.window_size_self_dropped += auto_window_update; - if (stream->thread_data.window_size_self_dropped > stream->thread_data.window_size_self_dropped_threshold) { - if (s_stream_send_update_window(stream, stream->thread_data.window_size_self_dropped)) { - return aws_h2err_from_last_error(); - } - stream->thread_data.window_size_self_dropped = 0; - AWS_H2_STREAM_LOGF( - TRACE, - stream, - "Automatically updating stream window by %" PRIu32 ".", - stream->thread_data.window_size_self_dropped); + if (s_stream_send_update_window_if_needed(stream, auto_window_update)) { + return aws_h2err_from_last_error(); } } @@ -1183,7 +1168,7 @@ struct aws_h2err 
aws_h2_stream_on_decoder_window_update( return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } int32_t old_window_size = stream->thread_data.window_size_peer; - stream_err = (aws_h2_stream_window_size_change(stream, window_size_increment, false /*self*/)); + stream_err = aws_h2_stream_window_size_change(stream, window_size_increment, false /*self*/); if (aws_h2err_failed(stream_err)) { /* We MUST NOT allow a flow-control window to exceed the max */ AWS_H2_STREAM_LOG( From 777964584fbd11b0157d6aca278b41fcbff10881 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 8 Apr 2025 15:57:57 -0700 Subject: [PATCH 36/48] almost there --- include/aws/http/connection.h | 7 +- include/aws/http/private/h2_connection.h | 13 +++- include/aws/http/private/h2_stream.h | 29 +++++--- include/aws/http/request_response.h | 4 ++ source/h2_connection.c | 90 +++++++++++++++++------- source/h2_stream.c | 63 +++++++++-------- 6 files changed, 138 insertions(+), 68 deletions(-) diff --git a/include/aws/http/connection.h b/include/aws/http/connection.h index f734ba267..9af292c57 100644 --- a/include/aws/http/connection.h +++ b/include/aws/http/connection.h @@ -714,8 +714,11 @@ int aws_http2_connection_get_received_goaway( * If you are not connected, this call will have no effect. * * Crashes when the connection is not http2 connection. - * The limit of the Maximum Size is 2**31 - 1. If the increment size cause the connection flow window exceeds the - * Maximum size, this call will result in the connection lost. + * The limit of the Maximum Size is 2**31 - 1. + * And client will make sure the WINDOW_UPDATE frame to be valid. + * + * The client control exactly when the WINDOW_UPDATE frame sent. + * Check `conn_window_size_threshold_to_send_update` for details. * * @param http2_connection HTTP/2 connection. * @param increment_size The size to increment for the connection's flow control window diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index 5558caf9e..59942c360 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -106,8 +106,12 @@ struct aws_h2_connection { * Reduce the space after receiving a flow-controlled frame. Increment after sending WINDOW_UPDATE for * connection */ uint32_t window_size_self; - /* The size to increment the window_size_self pending to be sent. */ - uint32_t pending_window_update_size; + /* The size to increment the window_size_self pending to be sent. + * Allow the pending_window_update_size to exceed the max window size. + * The client will send the WINDOW_UPDATE frame to the server only valid. + * If the pending_window_update_size is too large, we will leave the excess to send it out later. + */ + uint64_t pending_window_update_size_thread; /* Highest self-initiated stream-id that peer might have processed. * Defaults to max stream-id, may be lowered when GOAWAY frame received. */ @@ -160,7 +164,7 @@ struct aws_h2_connection { bool is_cross_thread_work_task_scheduled; /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ - uint32_t window_update_size; + uint64_t pending_window_update_size_sync; /* For checking status from outside the event-loop thread. 
*/ bool is_open; @@ -297,4 +301,7 @@ void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *conne */ void aws_h2_try_write_outgoing_frames(struct aws_h2_connection *connection); +/* Helper to calculate the capped windows update delta */ +uint32_t aws_h2_calculate_cap_window_update_delta(int64_t current_window, uint64_t pending_update_size); + #endif /* AWS_HTTP_H2_CONNECTION_H */ diff --git a/include/aws/http/private/h2_stream.h b/include/aws/http/private/h2_stream.h index ad9c44f80..244d3fc54 100644 --- a/include/aws/http/private/h2_stream.h +++ b/include/aws/http/private/h2_stream.h @@ -84,14 +84,22 @@ struct aws_h2_stream { /* Only the event-loop thread may touch this data */ struct { enum aws_h2_stream_state state; + /* The remote window size. + * RFC-9113 6.9.2: The settings can shrink the exiting flow-control windows to negative values. + **/ int32_t window_size_peer; /* The local window size. - * We allow this value exceed the max window size (int64 can hold much more than 0x7FFFFFFF), - * We leave it up to the remote peer to detect whether the max window size has been exceeded. */ - int64_t window_size_self; - - /* The size to increment the window_size_self pending to be sent. */ - uint32_t pending_window_update_size; + * RFC-9113 6.9.2: The settings can shrink the exiting flow-control windows to negative values. + **/ + int32_t window_size_self; + + /** + * The size to increment the window_size_self pending to be sent. + * Allow the pending_window_update_size to exceed the max window size. + * The client will send the WINDOW_UPDATE frame to the server only valid. + * If the pending_window_update_size is too large, we will leave the excess to send it out later. + **/ + uint64_t pending_window_update_size_thread; struct aws_http_message *outgoing_message; /* All queued writes. If the message provides a body stream, it will be first in this list @@ -118,7 +126,7 @@ struct aws_h2_stream { bool is_cross_thread_work_task_scheduled; /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ - uint32_t window_update_size; + uint64_t pending_window_update_size_sync; /* The combined aws_http2_error_code user wanted to send to remote peer via rst_stream and internal aws error * code we want to inform user about. */ @@ -149,7 +157,12 @@ struct aws_h2_stream *aws_h2_stream_new_request( enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *stream); -struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self); +/** + * When the flow control windows changed with SETTING and WINDOW_UPDATE frame receive. + * The SETTINGS and remote WINDOW_UPDATE frame should cause flow control error if window size is illegal. + * But client controls the window to be legal for the user. + **/ +struct aws_h2err aws_h2_stream_window_size_change_direct(struct aws_h2_stream *stream, int32_t size_changed, bool self); /* Connection is ready to send frames from stream now */ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_body_state *body_state); diff --git a/include/aws/http/request_response.h b/include/aws/http/request_response.h index 7cd365da4..98fb71da3 100644 --- a/include/aws/http/request_response.h +++ b/include/aws/http/request_response.h @@ -1105,6 +1105,10 @@ int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_htt * If `manual_window_management` is false, this call will have no effect. 
* The connection maintains its flow-control windows such that * no back-pressure is applied and data arrives as fast as possible. + * + * For HTTP/2, the client control exactly when the WINDOW_UPDATE frame sent. + * And client will make sure the WINDOW_UPDATE frame to be valid. + * Check `stream_window_size_threshold_to_send_update` for details. */ AWS_HTTP_API void aws_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size); diff --git a/source/h2_connection.c b/source/h2_connection.c index 5bce18389..fb902b0b2 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -1196,48 +1196,88 @@ struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised return AWS_H2ERR_SUCCESS; } -static int s_connection_send_update_window_if_needed(struct aws_h2_connection *connection, uint32_t window_size) { +static int64_t s_add_i64_saturating(int64_t a, int64_t b) { + if (a > 0 && b > INT64_MAX - a) { + return INT64_MAX; + } + if (a < 0 && b < INT64_MIN - a) { + return INT64_MIN; + } + return a + b; +} + +/* Calculate the capped windows update delta */ +uint32_t aws_h2_calculate_cap_window_update_delta(int64_t current_window, uint64_t pending_update_size) { + + /* Calculate target window size, ensuring proper handling of negative windows */ + int64_t target_window; + + if (pending_update_size > INT64_MAX) { + /* If pending update is enormous, cap it at maximum positive value */ + target_window = current_window < 0 ? INT64_MAX : s_add_i64_saturating(current_window, INT64_MAX); + } else { + /* Normal case: add pending update to current window with overflow protection */ + target_window = s_add_i64_saturating(current_window, (int64_t)pending_update_size); + } + + /* Cap window size to maximum allowed */ + target_window = aws_min_i64(target_window, AWS_H2_WINDOW_UPDATE_MAX); + + /* Calculate delta (how much we need to increase the window) */ + uint32_t delta = 0; + AWS_ASSERT(target_window >= current_window); + delta = (uint32_t)(target_window - current_window); + if (delta > AWS_H2_WINDOW_UPDATE_MAX) { + /* Cap delta to maximum allowed */ + delta = AWS_H2_WINDOW_UPDATE_MAX; + } + return delta; +} + +static int s_connection_send_update_window_if_needed(struct aws_h2_connection *connection, uint64_t window_size) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); - /* Only send a WINDOW_UPDATE frame if the connection window is below the threshold */ - connection->thread_data.pending_window_update_size = - aws_add_u32_saturating(connection->thread_data.pending_window_update_size, window_size); + /** + * Only send a WINDOW_UPDATE frame if the connection window is below the threshold + * If the pending amount is greater than uin64 max. Probably an unexpected error, ignores it and cap it. + */ + connection->thread_data.pending_window_update_size_thread = + aws_add_u64_saturating(connection->thread_data.pending_window_update_size_thread, window_size); if (connection->thread_data.window_size_self > connection->window_size_threshold_to_send_update) { CONNECTION_LOGF( TRACE, connection, - "Ignoring sending connection window update of size%" PRIu32 ". Current size: %" PRIu32 - ", threshold: %" PRIu32 " pending: %" PRIu32, + "Ignoring sending connection window update of size%" PRIu64 ". 
Current size: %" PRIu32 + ", threshold: %" PRIu32 " pending: %" PRIu64, window_size, connection->thread_data.window_size_self, connection->window_size_threshold_to_send_update, - connection->thread_data.pending_window_update_size); + connection->thread_data.pending_window_update_size_thread); return AWS_OP_SUCCESS; } /* Cap the window to AWS_H2_WINDOW_UPDATE_MAX */ - uint32_t previous_window_size_self = connection->thread_data.window_size_self; - connection->thread_data.window_size_self = aws_add_u32_saturating( - connection->thread_data.window_size_self, connection->thread_data.pending_window_update_size); - connection->thread_data.window_size_self = - aws_min_u32(connection->thread_data.window_size_self, AWS_H2_WINDOW_UPDATE_MAX); - uint32_t window_size_delta = - aws_sub_u32_saturating(connection->thread_data.window_size_self, previous_window_size_self); + uint32_t previous_window = connection->thread_data.window_size_self; + uint32_t window_delta = aws_h2_calculate_cap_window_update_delta( + connection->thread_data.window_size_self, connection->thread_data.pending_window_update_size_thread); struct aws_h2_frame *connection_window_update_frame = - aws_h2_frame_new_window_update(connection->base.alloc, 0, window_size_delta); + aws_h2_frame_new_window_update(connection->base.alloc, 0, window_delta); if (!connection_window_update_frame) { CONNECTION_LOGF( ERROR, connection, "WINDOW_UPDATE frame on connection failed to be sent, error %s", aws_error_name(aws_last_error())); - connection->thread_data.window_size_self = previous_window_size_self; + connection->thread_data.window_size_self = previous_window; return AWS_OP_ERR; } - CONNECTION_LOGF(DEBUG, connection, "Sending connection window by %" PRIu32 ".", window_size_delta); + CONNECTION_LOGF(DEBUG, connection, "Sending connection window by %" PRIu32 ".", window_delta); aws_h2_connection_enqueue_outgoing_frame(connection, connection_window_update_frame); - connection->thread_data.pending_window_update_size = 0; + + /* The math in aws_h2_calculate_cap_window_update_delta makes sure no overflow afterwards. 
*/ + connection->thread_data.window_size_self = previous_window + window_delta; + connection->thread_data.pending_window_update_size_thread -= window_delta; return AWS_OP_SUCCESS; } @@ -1467,7 +1507,7 @@ static struct aws_h2err s_decoder_on_settings( while (!aws_hash_iter_done(&stream_iter)) { struct aws_h2_stream *stream = stream_iter.element.value; aws_hash_iter_next(&stream_iter); - err = aws_h2_stream_window_size_change(stream, size_changed, false /*self*/); + err = aws_h2_stream_window_size_change_direct(stream, size_changed, false /*self*/); if (aws_h2err_failed(err)) { CONNECTION_LOG( ERROR, @@ -1543,7 +1583,7 @@ static struct aws_h2err s_decoder_on_settings_ack(void *userdata) { while (!aws_hash_iter_done(&stream_iter)) { struct aws_h2_stream *stream = stream_iter.element.value; aws_hash_iter_next(&stream_iter); - err = aws_h2_stream_window_size_change(stream, size_changed, true /*self*/); + err = aws_h2_stream_window_size_change_direct(stream, size_changed, true /*self*/); if (aws_h2err_failed(err)) { CONNECTION_LOG( ERROR, @@ -1984,7 +2024,7 @@ static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, e struct aws_linked_list pending_goaway; aws_linked_list_init(&pending_goaway); - size_t window_update_size; + uint64_t window_update_size; int new_stream_error_code; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); @@ -1995,8 +2035,8 @@ static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, e aws_linked_list_swap_contents(&connection->synced_data.pending_settings_list, &pending_settings); aws_linked_list_swap_contents(&connection->synced_data.pending_ping_list, &pending_ping); aws_linked_list_swap_contents(&connection->synced_data.pending_goaway_list, &pending_goaway); - window_update_size = connection->synced_data.window_update_size; - connection->synced_data.window_update_size = 0; + window_update_size = connection->synced_data.pending_window_update_size_sync; + connection->synced_data.pending_window_update_size_sync = 0; new_stream_error_code = connection->synced_data.new_stream_error_code; s_unlock_synced_data(connection); @@ -2222,8 +2262,8 @@ static void s_connection_update_window(struct aws_http_connection *connection_ba * Be more user friendly, if the increment size is too large, we will just saturate it to the max. * AWS_H2_WINDOW_UPDATE_MAX will be checked during the actual sending of the window update frame. 
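                 * (For example, two back-to-back increments of INT32_MAX simply accumulate in the
                 * pending size thanks to the saturating add, and the WINDOW_UPDATE frame that is
                 * eventually sent announces at most 2**31 - 1.)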
*/ - connection->synced_data.window_update_size = - aws_add_u32_saturating(connection->synced_data.window_update_size, increment_size); + connection->synced_data.pending_window_update_size_sync = + aws_add_u64_saturating(connection->synced_data.pending_window_update_size_sync, increment_size); connection_open = connection->synced_data.is_open; } s_unlock_synced_data(connection); diff --git a/source/h2_stream.c b/source/h2_stream.c index c9b2b6952..0a797b66b 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -209,48 +209,51 @@ static struct aws_h2err s_check_state_allows_frame_type( return aws_h2err_from_h2_code(h2_error_code); } -static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, uint32_t window_size) { +static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, uint64_t window_size) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); - /* Only send a WINDOW_UPDATE frame if the stream window is below the threshold */ - stream->thread_data.pending_window_update_size = - aws_add_u32_saturating(stream->thread_data.pending_window_update_size, window_size); - if (stream->thread_data.window_size_self > stream->window_size_threshold_to_send_update) { + /* Only send a WINDOW_UPDATE frame if the stream window is below the threshold + * If the pending amount is greater than uin64 max. Probably an unexpected error, ignores it and cap it. */ + stream->thread_data.pending_window_update_size_thread = + aws_add_u64_saturating(stream->thread_data.pending_window_update_size_thread, window_size); + if (stream->thread_data.window_size_self > (int32_t)stream->window_size_threshold_to_send_update) { AWS_H2_STREAM_LOGF( TRACE, stream, - "Ignoring sending WINDOW_UPDATE update of size%" PRIu32 ". Current size: %" PRIi64 ", threshold: %" PRIu32 - " pending: %" PRIu32, + "Ignoring sending WINDOW_UPDATE update of size%" PRIu64 ". 
Current size: %" PRIi32 ", threshold: %" PRIu32 + " pending: %" PRIu64, window_size, stream->thread_data.window_size_self, stream->window_size_threshold_to_send_update, - stream->thread_data.pending_window_update_size); + stream->thread_data.pending_window_update_size_thread); return AWS_OP_SUCCESS; } /* Cap the window to AWS_H2_WINDOW_UPDATE_MAX */ - uint32_t previous_window_size = stream->thread_data.window_size_self; - stream->thread_data.window_size_self = - stream->thread_data.window_size_self + stream->thread_data.pending_window_update_size; - stream->thread_data.window_size_self = aws_min_u32(stream->thread_data.window_size_self, AWS_H2_WINDOW_UPDATE_MAX); - uint32_t window_size_delta = aws_sub_u32_saturating(stream->thread_data.window_size_self, previous_window_size); - - AWS_H2_STREAM_LOGF(TRACE, stream, "Sending WINDOW_UPDATE by %" PRIu32 ".", window_size_delta); + int32_t previous_window = stream->thread_data.window_size_self; + uint32_t window_delta = aws_h2_calculate_cap_window_update_delta( + stream->thread_data.window_size_self, stream->thread_data.pending_window_update_size_thread); struct aws_h2_frame *stream_window_update_frame = - aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_size_delta); + aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_delta); if (!stream_window_update_frame) { AWS_H2_STREAM_LOGF( ERROR, stream, "WINDOW_UPDATE frame on stream failed to be sent, error %s", aws_error_name(aws_last_error())); - stream->thread_data.window_size_self = previous_window_size; + stream->thread_data.window_size_self = previous_window; return AWS_OP_ERR; } + AWS_H2_STREAM_LOGF(TRACE, stream, "Sending WINDOW_UPDATE by %" PRIu32 ".", window_delta); aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame); - stream->thread_data.pending_window_update_size = 0; + /* The real size should be delta plus previous window size. Since the math in + * aws_h2_calculate_cap_window_update_delta, no overflow should happen. 
*/ + stream->thread_data.window_size_self = previous_window + (uint32_t)window_delta; + AWS_ASSERT(stream->thread_data.window_size_self <= AWS_H2_WINDOW_UPDATE_MAX); + AWS_ASSERT(window_delta <= stream->thread_data.pending_window_update_size_thread); + stream->thread_data.pending_window_update_size_thread -= window_delta; return AWS_OP_SUCCESS; } @@ -367,7 +370,7 @@ static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void /* Not sending window update at half closed remote state */ bool ignore_window_update = (aws_h2_stream_get_state(stream) == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE); bool reset_called; - size_t window_update_size; + uint64_t window_update_size; struct aws_h2err reset_error; struct aws_linked_list pending_writes; @@ -378,8 +381,8 @@ static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void stream->synced_data.is_cross_thread_work_task_scheduled = false; /* window_update_size is ensured to be not greater than AWS_H2_WINDOW_UPDATE_MAX */ - window_update_size = stream->synced_data.window_update_size; - stream->synced_data.window_update_size = 0; + window_update_size = stream->synced_data.pending_window_update_size_sync; + stream->synced_data.pending_window_update_size_sync = 0; reset_called = stream->synced_data.reset_called; reset_error = stream->synced_data.reset_error; @@ -514,12 +517,8 @@ static void s_stream_update_window(struct aws_http_stream *stream_base, size_t i if (!stream_is_init) { cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; - /** - * Be more user friendly, if the increment size is too large, we will just saturate it to the max. - * AWS_H2_WINDOW_UPDATE_MAX will be checked during the actual sending of the window update frame. - */ - stream->synced_data.window_update_size = - aws_add_u32_saturating(stream->synced_data.window_update_size, increment_size); + stream->synced_data.pending_window_update_size_sync = + aws_add_u64_saturating(stream->synced_data.pending_window_update_size_sync, increment_size); } s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ @@ -669,7 +668,11 @@ static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream return AWS_H2ERR_SUCCESS; } -struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self) { +struct aws_h2err aws_h2_stream_window_size_change_direct( + struct aws_h2_stream *stream, + int32_t size_changed, + bool self) { + /* If settings causing the flow control windows to overflow, error out. 
*/ if (self) { if (stream->thread_data.window_size_self + size_changed > AWS_H2_WINDOW_UPDATE_MAX) { return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); @@ -1102,7 +1105,7 @@ struct aws_h2err aws_h2_stream_on_decoder_data_begin( AWS_H2_STREAM_LOGF( ERROR, stream, - "DATA length=%" PRIu32 " exceeds flow-control window=%" PRIi64, + "DATA length=%" PRIu32 " exceeds flow-control window=%" PRIi32, payload_len, stream->thread_data.window_size_self); return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR)); @@ -1168,7 +1171,7 @@ struct aws_h2err aws_h2_stream_on_decoder_window_update( return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR)); } int32_t old_window_size = stream->thread_data.window_size_peer; - stream_err = aws_h2_stream_window_size_change(stream, window_size_increment, false /*self*/); + stream_err = aws_h2_stream_window_size_change_direct(stream, window_size_increment, false /*self*/); if (aws_h2err_failed(stream_err)) { /* We MUST NOT allow a flow-control window to exceed the max */ AWS_H2_STREAM_LOG( From eef6e31254bdfe968d3701bdeffbf8fac0413d52 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 17 Apr 2025 13:32:11 -0700 Subject: [PATCH 37/48] a bunch of fix --- include/aws/http/connection.h | 4 +- include/aws/http/private/h2_connection.h | 5 +- include/aws/http/private/h2_frames.h | 2 +- include/aws/http/private/h2_stream.h | 2 +- source/h2_connection.c | 45 +++++++++------- source/h2_frames.c | 2 +- source/h2_stream.c | 65 +++++++++++++----------- tests/CMakeLists.txt | 4 +- tests/fuzz/fuzz_h2_decoder_correct.c | 2 +- tests/h2_test_helper.c | 2 +- tests/test_h2_client.c | 59 +++++++++++---------- tests/test_h2_encoder.c | 6 +-- 12 files changed, 107 insertions(+), 91 deletions(-) diff --git a/include/aws/http/connection.h b/include/aws/http/connection.h index 9af292c57..a7f5ec628 100644 --- a/include/aws/http/connection.h +++ b/include/aws/http/connection.h @@ -248,7 +248,7 @@ struct aws_http2_connection_options { * drops below the threshold. * Default to half of the initial connection flow-control window size, which is 32767. */ - size_t conn_window_size_threshold_to_send_update; + uint32_t conn_window_size_threshold_to_send_update; /** * Optional. * The threshold to send out a window update frame for the streams. @@ -258,7 +258,7 @@ struct aws_http2_connection_options { * drops below the threshold. * Default to half of the `initial_window_size`. */ - size_t stream_window_size_threshold_to_send_update; + uint32_t stream_window_size_threshold_to_send_update; }; /** diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index 59942c360..5572b0241 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -33,9 +33,10 @@ struct aws_h2_connection { * to keep flow continues. */ uint32_t window_size_threshold_to_send_update; - /* The threshold to send out a window update frame for all the streams on the connection. + /* The threshold to send out a window update frame for all the streams on the connection. Stream window takes int32 + * since it allows negative. 
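     * (Per RFC 9113 6.9.2: e.g. if our SETTINGS_INITIAL_WINDOW_SIZE shrinks from 65535 to 0 after a
     * stream has already received 1000 bytes of DATA, that stream's window drops to -1000 until
     * WINDOW_UPDATE frames bring it back up.)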
*/ - uint32_t stream_window_size_threshold_to_send_update; + int32_t stream_window_size_threshold_to_send_update; /* Only the event-loop thread may touch this data */ struct { diff --git a/include/aws/http/private/h2_frames.h b/include/aws/http/private/h2_frames.h index 878c7cd68..10e299230 100644 --- a/include/aws/http/private/h2_frames.h +++ b/include/aws/http/private/h2_frames.h @@ -221,7 +221,7 @@ int aws_h2_encode_data_frame( bool body_ends_stream, uint8_t pad_length, int32_t *stream_window_size_peer, - size_t *connection_window_size_peer, + uint32_t *connection_window_size_peer, struct aws_byte_buf *output, bool *body_complete, bool *body_stalled, diff --git a/include/aws/http/private/h2_stream.h b/include/aws/http/private/h2_stream.h index 244d3fc54..0738a7841 100644 --- a/include/aws/http/private/h2_stream.h +++ b/include/aws/http/private/h2_stream.h @@ -79,7 +79,7 @@ struct aws_h2_stream { * When the window_size_self is less than the threshold, client will starts the sending of WINDOW_UPDATE frame * to keep flow continues. */ - uint32_t window_size_threshold_to_send_update; + int32_t window_size_threshold_to_send_update; /* Only the event-loop thread may touch this data */ struct { diff --git a/source/h2_connection.c b/source/h2_connection.c index fb902b0b2..28d941c0e 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -390,8 +390,9 @@ static struct aws_h2_connection *s_connection_new( } else { connection->window_size_threshold_to_send_update = (uint32_t)(AWS_H2_INIT_WINDOW_SIZE / 2); } + /* Cap it to int32 max, since stream window allows negative. */ connection->stream_window_size_threshold_to_send_update = - http2_options->stream_window_size_threshold_to_send_update; + aws_min_u32(http2_options->stream_window_size_threshold_to_send_update, INT32_MAX); connection->thread_data.goaway_received_last_stream_id = AWS_H2_STREAM_ID_MAX; connection->thread_data.goaway_sent_last_stream_id = AWS_H2_STREAM_ID_MAX; @@ -1260,24 +1261,35 @@ static int s_connection_send_update_window_if_needed(struct aws_h2_connection *c uint32_t previous_window = connection->thread_data.window_size_self; uint32_t window_delta = aws_h2_calculate_cap_window_update_delta( connection->thread_data.window_size_self, connection->thread_data.pending_window_update_size_thread); - struct aws_h2_frame *connection_window_update_frame = - aws_h2_frame_new_window_update(connection->base.alloc, 0, window_delta); - if (!connection_window_update_frame) { + + if (window_delta != connection->thread_data.pending_window_update_size_thread) { CONNECTION_LOGF( - ERROR, - connection, - "WINDOW_UPDATE frame on connection failed to be sent, error %s", - aws_error_name(aws_last_error())); - connection->thread_data.window_size_self = previous_window; - return AWS_OP_ERR; + DEBUG, + (void *)connection, + "Capping window update delta from %" PRIu64 " to %" PRIu32, + connection->thread_data.pending_window_update_size_thread, + window_delta); } - CONNECTION_LOGF(DEBUG, connection, "Sending connection window by %" PRIu32 ".", window_delta); - aws_h2_connection_enqueue_outgoing_frame(connection, connection_window_update_frame); + if (window_delta > 0) { + struct aws_h2_frame *connection_window_update_frame = + aws_h2_frame_new_window_update(connection->base.alloc, 0, window_delta); + if (!connection_window_update_frame) { + CONNECTION_LOGF( + ERROR, + connection, + "WINDOW_UPDATE frame on connection failed to be sent, error %s", + aws_error_name(aws_last_error())); + connection->thread_data.window_size_self = previous_window; + 
return AWS_OP_ERR; + } + CONNECTION_LOGF(DEBUG, connection, "Sending connection window by %" PRIu32 ".", window_delta); + aws_h2_connection_enqueue_outgoing_frame(connection, connection_window_update_frame); + /* The math in aws_h2_calculate_cap_window_update_delta makes sure no overflow afterwards. */ + connection->thread_data.window_size_self = previous_window + window_delta; + connection->thread_data.pending_window_update_size_thread -= window_delta; + } - /* The math in aws_h2_calculate_cap_window_update_delta makes sure no overflow afterwards. */ - connection->thread_data.window_size_self = previous_window + window_delta; - connection->thread_data.pending_window_update_size_thread -= window_delta; return AWS_OP_SUCCESS; } @@ -1833,8 +1845,6 @@ static void s_handler_installed(struct aws_channel_handler *handler, struct aws_ aws_linked_list_push_back( &connection->thread_data.outgoing_frames_queue, &connection_window_update_frame->node); connection->thread_data.window_size_self += initial_window_update_size; - /* Only update the window when the windows size drop to half of the size */ - connection->window_size_threshold_to_send_update = (uint32_t)(AWS_H2_WINDOW_UPDATE_MAX / 2); } aws_h2_try_write_outgoing_frames(connection); return; @@ -2255,6 +2265,7 @@ static void s_connection_update_window(struct aws_http_connection *connection_ba bool connection_open = false; { /* BEGIN CRITICAL SECTION */ s_lock_synced_data(connection); + connection_open = connection->synced_data.is_open; if (connection_open) { cross_thread_work_should_schedule = !connection->synced_data.is_cross_thread_work_task_scheduled; connection->synced_data.is_cross_thread_work_task_scheduled = true; diff --git a/source/h2_frames.c b/source/h2_frames.c index a91629db5..2a83525e0 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -318,7 +318,7 @@ int aws_h2_encode_data_frame( bool body_ends_stream, uint8_t pad_length, int32_t *stream_window_size_peer, - size_t *connection_window_size_peer, + uint32_t *connection_window_size_peer, struct aws_byte_buf *output, bool *body_complete, bool *body_stalled, diff --git a/source/h2_stream.c b/source/h2_stream.c index fa4332715..4603c610f 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -234,26 +234,37 @@ static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, u uint32_t window_delta = aws_h2_calculate_cap_window_update_delta( stream->thread_data.window_size_self, stream->thread_data.pending_window_update_size_thread); - struct aws_h2_frame *stream_window_update_frame = - aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_delta); - if (!stream_window_update_frame) { + if (window_delta != stream->thread_data.pending_window_update_size_thread) { AWS_H2_STREAM_LOGF( - ERROR, + DEBUG, stream, - "WINDOW_UPDATE frame on stream failed to be sent, error %s", - aws_error_name(aws_last_error())); - stream->thread_data.window_size_self = previous_window; - return AWS_OP_ERR; - } - AWS_H2_STREAM_LOGF(TRACE, stream, "Sending WINDOW_UPDATE by %" PRIu32 ".", window_delta); - - aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame); - /* The real size should be delta plus previous window size. Since the math in - * aws_h2_calculate_cap_window_update_delta, no overflow should happen. 
*/ - stream->thread_data.window_size_self = previous_window + (uint32_t)window_delta; - AWS_ASSERT(stream->thread_data.window_size_self <= AWS_H2_WINDOW_UPDATE_MAX); - AWS_ASSERT(window_delta <= stream->thread_data.pending_window_update_size_thread); - stream->thread_data.pending_window_update_size_thread -= window_delta; + "Capping window update delta from %" PRIu64 " to %" PRIu32, + stream->thread_data.pending_window_update_size_thread, + window_delta); + } + + if (window_delta > 0) { + struct aws_h2_frame *stream_window_update_frame = + aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_delta); + if (!stream_window_update_frame) { + AWS_H2_STREAM_LOGF( + ERROR, + stream, + "WINDOW_UPDATE frame on stream failed to be sent, error %s", + aws_error_name(aws_last_error())); + stream->thread_data.window_size_self = previous_window; + return AWS_OP_ERR; + } + AWS_H2_STREAM_LOGF(TRACE, stream, "Sending WINDOW_UPDATE by %" PRIu32 ".", window_delta); + + aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame); + /* The real size should be delta plus previous window size. Since the math in + * aws_h2_calculate_cap_window_update_delta, no overflow should happen. */ + stream->thread_data.window_size_self = previous_window + (uint32_t)window_delta; + AWS_ASSERT(stream->thread_data.window_size_self <= AWS_H2_WINDOW_UPDATE_MAX); + AWS_ASSERT(window_delta <= stream->thread_data.pending_window_update_size_thread); + stream->thread_data.pending_window_update_size_thread -= window_delta; + } return AWS_OP_SUCCESS; } @@ -399,10 +410,6 @@ static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void } } - /* The largest legal value will be 2 * max window size, which is way less than INT64_MAX, so if the window_size_self - * overflows, remote peer will find it out. 
So just apply the change and ignore the possible overflow.*/ - stream->thread_data.window_size_self += window_update_size; - if (reset_called) { struct aws_h2err returned_h2err = s_send_rst_and_close_stream(stream, reset_error); if (aws_h2err_failed(returned_h2err)) { @@ -733,13 +740,11 @@ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_ stream->thread_data.window_size_self = connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE]; - if (!connection->base.stream_manual_window_management) { - if (connection->stream_window_size_threshold_to_send_update) { - stream->window_size_threshold_to_send_update = connection->stream_window_size_threshold_to_send_update; - } else { - stream->window_size_threshold_to_send_update = - connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / 2; - } + if (connection->stream_window_size_threshold_to_send_update) { + stream->window_size_threshold_to_send_update = connection->stream_window_size_threshold_to_send_update; + } else { + stream->window_size_threshold_to_send_update = + connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / 2; } if (with_data) { @@ -809,7 +814,7 @@ int aws_h2_stream_encode_data_frame( ends_stream, 0 /*pad_length*/, &stream->thread_data.window_size_peer, - (size_t *)&connection->thread_data.window_size_peer, + &connection->thread_data.window_size_peer, output, &input_stream_complete, &input_stream_stalled, diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3eb8bf0f1..9cdc8ffd7 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -468,10 +468,10 @@ add_test_case(h2_client_change_settings_failed_no_ack_received) add_test_case(h2_client_manual_window_management_disabled_auto_window_update) add_test_case(h2_client_manual_window_management_user_send_stream_window_update) add_test_case(h2_client_manual_window_management_user_send_stream_window_update_with_padding) -add_test_case(h2_client_manual_window_management_user_send_stream_window_update_overflow) +add_test_case(h2_client_manual_window_management_user_send_stream_window_update_overflow_capped) add_test_case(h2_client_manual_window_management_user_send_conn_window_update) add_test_case(h2_client_manual_window_management_user_send_conn_window_update_with_padding) -add_test_case(h2_client_manual_window_management_user_send_connection_window_update_overflow) +add_test_case(h2_client_manual_window_management_user_send_connection_window_update_overflow_capped) # Build these when we address window_update() differences in H1 vs H2 # TODO add_test_case(h2_client_manual_updated_window_ignored_when_automatical_on) diff --git a/tests/fuzz/fuzz_h2_decoder_correct.c b/tests/fuzz/fuzz_h2_decoder_correct.c index 1f885382d..4f71fa721 100644 --- a/tests/fuzz/fuzz_h2_decoder_correct.c +++ b/tests/fuzz/fuzz_h2_decoder_correct.c @@ -238,7 +238,7 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { bool body_stalled; bool body_failed; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; - size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; + uint32_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; AWS_FATAL_ASSERT( aws_h2_encode_data_frame( &encoder, diff --git a/tests/h2_test_helper.c b/tests/h2_test_helper.c index aff917263..6310b39b3 100644 --- a/tests/h2_test_helper.c +++ b/tests/h2_test_helper.c @@ -596,7 +596,7 @@ int h2_fake_peer_send_data_frame_with_padding_length( bool body_stalled; bool body_failed; int32_t stream_window_size_peer = 
AWS_H2_WINDOW_UPDATE_MAX; - size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; + uint32_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &peer->encoder, stream_id, diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index aaea50132..b467252a5 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -6,6 +6,7 @@ #include "h2_test_helper.h" #include "stream_test_helper.h" #include +#include #include #include #include @@ -106,6 +107,9 @@ static int s_tester_init(struct aws_allocator *alloc, void *ctx) { .on_goaway_received = s_on_goaway_received, .on_remote_settings_change = s_on_remote_settings_change, .conn_manual_window_management = !s_tester.no_conn_manual_win_management, + /* Disable the batching for window update in tests */ + .conn_window_size_threshold_to_send_update = UINT32_MAX, + .stream_window_size_threshold_to_send_update = UINT32_MAX, }; s_tester.connection = @@ -2183,7 +2187,7 @@ TEST_CASE(h2_client_stream_send_data_controlled_by_connection_window_size) { ASSERT_TRUE(latest_frame->end_stream); size_t frames_count = h2_decode_tester_frame_count(&s_tester.peer.decode); - /* Send the rest requst, which only data frames will be blocked */ + /* Send the rest request, and only data frames will be blocked */ ASSERT_SUCCESS(s_stream_tester_init(&stream_testers[1], requests[1])); testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); @@ -2506,18 +2510,23 @@ TEST_CASE(h2_client_stream_send_window_update) { const char *body_src = "hello"; ASSERT_SUCCESS(h2_fake_peer_send_data_frame_str(&s_tester.peer, stream_id, body_src, false /*end_stream*/)); - /* check that 1 WINDOW_UPDATE frames have been sent. - * 1 for the connection, and no window_update for the stream as window only updates when the window smaller than - * half of the original window */ + /* check that 2 WINDOW_UPDATE frames have been sent. + * 1 for the connection, and 1 for the stream */ testing_channel_drain_queued_tasks(&s_tester.testing_channel); ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, 0 /*idx*/, NULL); - ASSERT_NULL(stream_window_update_frame); - - /* For testing automatic window update, we have localhost_integ_h2_download_stress that downloads a file from one - * connection and one stream. 
If the window was not properly updated, that should fail */ + ASSERT_NOT_NULL(stream_window_update_frame); + ASSERT_UINT_EQUALS(5, stream_window_update_frame->window_size_increment); + struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 0 /*stream_id*/, + initial_window_update_index + 1 /*idx*/, + NULL); + ASSERT_NOT_NULL(connection_window_update_frame); + ASSERT_UINT_EQUALS(5, connection_window_update_frame->window_size_increment); /* clean up */ aws_http_headers_release(response_headers); @@ -2631,6 +2640,9 @@ static int s_manual_window_management_tester_init(struct aws_allocator *alloc, b .num_initial_settings = AWS_ARRAY_SIZE(settings_array), .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, .conn_manual_window_management = conn, + /* Disable the batching for window update in tests */ + .conn_window_size_threshold_to_send_update = UINT32_MAX, + .stream_window_size_threshold_to_send_update = UINT32_MAX, }; s_tester.connection = @@ -3873,7 +3885,7 @@ TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_with return s_tester_clean_up(); } -TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_overflow) { +TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_overflow_capped) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); /* fake peer sends connection preface */ @@ -3901,18 +3913,10 @@ TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_over aws_http_stream_update_window(stream_tester.stream, INT32_MAX); aws_http_stream_update_window(stream_tester.stream, INT32_MAX); testing_channel_drain_queued_tasks(&s_tester.testing_channel); - /* validate that stream completed with error */ + /* The overflowed update window will be capped to the allowed max */ ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); - ASSERT_TRUE(stream_tester.complete); - /* overflow happens */ - ASSERT_INT_EQUALS(AWS_ERROR_OVERFLOW_DETECTED, stream_tester.on_complete_error_code); - /* validate that stream sent RST_STREAM */ - ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); - struct h2_decoded_frame *rst_stream_frame = - h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_RST_STREAM, 0, NULL); - /* But the error code is not the same as user was trying to send */ - ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_INTERNAL_ERROR, rst_stream_frame->error_code); - + struct aws_h2_stream *h2_stream = AWS_CONTAINER_OF(stream_tester.stream, struct aws_h2_stream, base); + ASSERT_TRUE(h2_stream->thread_data.window_size_self == AWS_H2_WINDOW_UPDATE_MAX); /* clean up */ aws_http_message_release(request); client_stream_tester_clean_up(&stream_tester); @@ -4113,7 +4117,7 @@ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update_with_p return s_tester_clean_up(); } -TEST_CASE(h2_client_manual_window_management_user_send_connection_window_update_overflow) { +TEST_CASE(h2_client_manual_window_management_user_send_connection_window_update_overflow_capped) { ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); /* fake peer sends connection preface */ @@ -4126,15 +4130,10 @@ TEST_CASE(h2_client_manual_window_management_user_send_connection_window_update_ aws_http2_connection_update_window(s_tester.connection, INT32_MAX); 
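    /* The second INT32_MAX increment pushes the total requested update past 2**31 - 1. With the
     * capping logic this no longer errors out: the connection stays open and its self window
     * saturates at AWS_H2_WINDOW_UPDATE_MAX, as asserted below. */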
aws_http2_connection_update_window(s_tester.connection, INT32_MAX); testing_channel_drain_queued_tasks(&s_tester.testing_channel); - /* validate that connection closed with error */ - ASSERT_FALSE(aws_http_connection_is_open(s_tester.connection)); - /* client should send GOAWAY */ - ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); - struct h2_decoded_frame *goaway = - h2_decode_tester_find_frame(&s_tester.peer.decode, AWS_H2_FRAME_T_GOAWAY, 0, NULL); - ASSERT_NOT_NULL(goaway); - ASSERT_UINT_EQUALS(AWS_HTTP2_ERR_INTERNAL_ERROR, goaway->error_code); - ASSERT_UINT_EQUALS(0, goaway->goaway_last_stream_id); + /* The overflowed update window will be capped to the allowed max */ + ASSERT_TRUE(aws_http_connection_is_open(s_tester.connection)); + struct aws_h2_connection *h2_connection = AWS_CONTAINER_OF(s_tester.connection, struct aws_h2_connection, base); + ASSERT_TRUE(h2_connection->thread_data.window_size_self == AWS_H2_WINDOW_UPDATE_MAX); /* clean up */ return s_tester_clean_up(); diff --git a/tests/test_h2_encoder.c b/tests/test_h2_encoder.c index 65f8a2925..d0822e8fc 100644 --- a/tests/test_h2_encoder.c +++ b/tests/test_h2_encoder.c @@ -88,7 +88,7 @@ TEST_CASE(h2_encoder_data) { bool body_stalled; bool body_failed; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; - size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; + uint32_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &encoder, 0x76543210 /*stream_id*/, @@ -145,7 +145,7 @@ TEST_CASE(h2_encoder_data_stalled) { bool body_stalled; bool body_failed; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; - size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; + uint32_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &encoder, 0x76543210 /*stream_id*/, @@ -190,7 +190,7 @@ TEST_CASE(h2_encoder_data_stalled_completely) { bool body_stalled; bool body_failed; int32_t stream_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; - size_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; + uint32_t connection_window_size_peer = AWS_H2_WINDOW_UPDATE_MAX; ASSERT_SUCCESS(aws_h2_encode_data_frame( &encoder, 0x76543210 /*stream_id*/, From a2cee45e625b16565344166bd23ae39830d695ad Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 18 Apr 2025 09:46:16 -0700 Subject: [PATCH 38/48] conversion --- source/h2_frames.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/h2_frames.c b/source/h2_frames.c index 2a83525e0..d047edc93 100644 --- a/source/h2_frames.c +++ b/source/h2_frames.c @@ -443,7 +443,7 @@ int aws_h2_encode_data_frame( /* update the connection window size now, we will update stream window size when this function returns */ AWS_ASSERT(payload_len <= min_window_size); - *connection_window_size_peer -= payload_len; + *connection_window_size_peer -= (uint32_t)payload_len; *stream_window_size_peer -= (int32_t)payload_len; AWS_ASSERT(writes_ok); From af4c962cc757061133273b50b8ce81e2218862e9 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 24 Apr 2025 16:09:23 -0700 Subject: [PATCH 39/48] address comments --- include/aws/http/private/h2_connection.h | 6 +- include/aws/http/private/h2_stream.h | 4 +- source/h2_connection.c | 77 +++++++++--------------- source/h2_stream.c | 38 ++++++------ 4 files changed, 52 insertions(+), 73 deletions(-) diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index 5572b0241..8cd7d9487 
100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -112,7 +112,7 @@ struct aws_h2_connection { * The client will send the WINDOW_UPDATE frame to the server only valid. * If the pending_window_update_size is too large, we will leave the excess to send it out later. */ - uint64_t pending_window_update_size_thread; + uint64_t pending_window_update_size_self; /* Highest self-initiated stream-id that peer might have processed. * Defaults to max stream-id, may be lowered when GOAWAY frame received. */ @@ -164,8 +164,8 @@ struct aws_h2_connection { bool is_cross_thread_work_task_scheduled; - /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ - uint64_t pending_window_update_size_sync; + /* Value for `thread_data.pending_window_update_size_self` that we haven't applied yet */ + uint64_t pending_window_update_size_self; /* For checking status from outside the event-loop thread. */ bool is_open; diff --git a/include/aws/http/private/h2_stream.h b/include/aws/http/private/h2_stream.h index 0738a7841..463a9da77 100644 --- a/include/aws/http/private/h2_stream.h +++ b/include/aws/http/private/h2_stream.h @@ -99,7 +99,7 @@ struct aws_h2_stream { * The client will send the WINDOW_UPDATE frame to the server only valid. * If the pending_window_update_size is too large, we will leave the excess to send it out later. **/ - uint64_t pending_window_update_size_thread; + uint64_t pending_window_update_size_self; struct aws_http_message *outgoing_message; /* All queued writes. If the message provides a body stream, it will be first in this list @@ -126,7 +126,7 @@ struct aws_h2_stream { bool is_cross_thread_work_task_scheduled; /* The window_update value for `thread_data.window_size_self` that haven't applied yet */ - uint64_t pending_window_update_size_sync; + uint64_t pending_window_update_size_self; /* The combined aws_http2_error_code user wanted to send to remote peer via rst_stream and internal aws error * code we want to inform user about. */ diff --git a/source/h2_connection.c b/source/h2_connection.c index 28d941c0e..8ab3aa806 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -1197,77 +1197,55 @@ struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised return AWS_H2ERR_SUCCESS; } -static int64_t s_add_i64_saturating(int64_t a, int64_t b) { - if (a > 0 && b > INT64_MAX - a) { - return INT64_MAX; - } - if (a < 0 && b < INT64_MIN - a) { - return INT64_MIN; - } - return a + b; -} - /* Calculate the capped windows update delta */ uint32_t aws_h2_calculate_cap_window_update_delta(int64_t current_window, uint64_t pending_update_size) { - /* Calculate target window size, ensuring proper handling of negative windows */ - int64_t target_window; - - if (pending_update_size > INT64_MAX) { - /* If pending update is enormous, cap it at maximum positive value */ - target_window = current_window < 0 ? 
INT64_MAX : s_add_i64_saturating(current_window, INT64_MAX); - } else { - /* Normal case: add pending update to current window with overflow protection */ - target_window = s_add_i64_saturating(current_window, (int64_t)pending_update_size); - } - - /* Cap window size to maximum allowed */ - target_window = aws_min_i64(target_window, AWS_H2_WINDOW_UPDATE_MAX); - - /* Calculate delta (how much we need to increase the window) */ - uint32_t delta = 0; - AWS_ASSERT(target_window >= current_window); - delta = (uint32_t)(target_window - current_window); - if (delta > AWS_H2_WINDOW_UPDATE_MAX) { - /* Cap delta to maximum allowed */ - delta = AWS_H2_WINDOW_UPDATE_MAX; - } - return delta; + /* UPDATE_WINDOW frame can't exceed AWS_H2_WINDOW_UPDATE_MAX (aka INT32_MAX) */ + int64_t delta = (int64_t)aws_min_u64(pending_update_size, INT32_MAX); + /* resulting window size can't exceed INT32_MAX */ + delta = aws_min_i64(delta, (int64_t)INT32_MAX - current_window); + /* assert should hold, based on our clamping above */ + AWS_ASSERT(delta >= 0 && delta <= AWS_H2_WINDOW_UPDATE_MAX); + return (uint32_t)delta; } -static int s_connection_send_update_window_if_needed(struct aws_h2_connection *connection, uint64_t window_size) { +static int s_connection_send_update_window_if_needed( + struct aws_h2_connection *connection, + uint64_t window_update_size) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); - + if (window_update_size == 0) { + /* Nothing to do */ + return AWS_OP_SUCCESS; + } /** * Only send a WINDOW_UPDATE frame if the connection window is below the threshold * If the pending amount is greater than uin64 max. Probably an unexpected error, ignores it and cap it. */ - connection->thread_data.pending_window_update_size_thread = - aws_add_u64_saturating(connection->thread_data.pending_window_update_size_thread, window_size); + connection->thread_data.pending_window_update_size_self = + aws_add_u64_saturating(connection->thread_data.pending_window_update_size_self, window_update_size); if (connection->thread_data.window_size_self > connection->window_size_threshold_to_send_update) { CONNECTION_LOGF( TRACE, connection, "Ignoring sending connection window update of size%" PRIu64 ". 
Current size: %" PRIu32 ", threshold: %" PRIu32 " pending: %" PRIu64, - window_size, + window_update_size, connection->thread_data.window_size_self, connection->window_size_threshold_to_send_update, - connection->thread_data.pending_window_update_size_thread); + connection->thread_data.pending_window_update_size_self); return AWS_OP_SUCCESS; } /* Cap the window to AWS_H2_WINDOW_UPDATE_MAX */ - uint32_t previous_window = connection->thread_data.window_size_self; uint32_t window_delta = aws_h2_calculate_cap_window_update_delta( - connection->thread_data.window_size_self, connection->thread_data.pending_window_update_size_thread); + connection->thread_data.window_size_self, connection->thread_data.pending_window_update_size_self); - if (window_delta != connection->thread_data.pending_window_update_size_thread) { + if (window_delta != connection->thread_data.pending_window_update_size_self) { CONNECTION_LOGF( DEBUG, (void *)connection, "Capping window update delta from %" PRIu64 " to %" PRIu32, - connection->thread_data.pending_window_update_size_thread, + connection->thread_data.pending_window_update_size_self, window_delta); } @@ -1280,14 +1258,13 @@ static int s_connection_send_update_window_if_needed(struct aws_h2_connection *c connection, "WINDOW_UPDATE frame on connection failed to be sent, error %s", aws_error_name(aws_last_error())); - connection->thread_data.window_size_self = previous_window; return AWS_OP_ERR; } CONNECTION_LOGF(DEBUG, connection, "Sending connection window by %" PRIu32 ".", window_delta); aws_h2_connection_enqueue_outgoing_frame(connection, connection_window_update_frame); /* The math in aws_h2_calculate_cap_window_update_delta makes sure no overflow afterwards. */ - connection->thread_data.window_size_self = previous_window + window_delta; - connection->thread_data.pending_window_update_size_thread -= window_delta; + connection->thread_data.window_size_self += window_delta; + connection->thread_data.pending_window_update_size_self -= window_delta; } return AWS_OP_SUCCESS; @@ -2045,8 +2022,8 @@ static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, e aws_linked_list_swap_contents(&connection->synced_data.pending_settings_list, &pending_settings); aws_linked_list_swap_contents(&connection->synced_data.pending_ping_list, &pending_ping); aws_linked_list_swap_contents(&connection->synced_data.pending_goaway_list, &pending_goaway); - window_update_size = connection->synced_data.pending_window_update_size_sync; - connection->synced_data.pending_window_update_size_sync = 0; + window_update_size = connection->synced_data.pending_window_update_size_self; + connection->synced_data.pending_window_update_size_self = 0; new_stream_error_code = connection->synced_data.new_stream_error_code; s_unlock_synced_data(connection); @@ -2273,8 +2250,8 @@ static void s_connection_update_window(struct aws_http_connection *connection_ba * Be more user friendly, if the increment size is too large, we will just saturate it to the max. * AWS_H2_WINDOW_UPDATE_MAX will be checked during the actual sending of the window update frame. 
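
With manual window management, incoming DATA shrinks both the stream and the connection flow-control windows, and the application re-opens them through the public update calls that feed the pending counters above. A minimal sketch of a body callback that hands consumed bytes back (hedged: it assumes the connection pointer is carried in user_data and that aws_http2_connection_update_window keeps its current signature; request setup and error handling are omitted):

#include <aws/http/connection.h>
#include <aws/http/request_response.h>

struct body_ctx {
    struct aws_http_connection *connection; /* the HTTP/2 connection the stream runs on */
};

static int s_on_incoming_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
    struct body_ctx *ctx = user_data;

    /* ... hand data->ptr / data->len to the application here ... */

    /* Return the consumed bytes to both windows. Increments below the configured
     * thresholds are only accumulated; one larger WINDOW_UPDATE goes out later. */
    aws_http_stream_update_window(stream, data->len);
    aws_http2_connection_update_window(ctx->connection, (uint32_t)data->len);
    return AWS_OP_SUCCESS;
}
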
*/ - connection->synced_data.pending_window_update_size_sync = - aws_add_u64_saturating(connection->synced_data.pending_window_update_size_sync, increment_size); + connection->synced_data.pending_window_update_size_self = + aws_add_u64_saturating(connection->synced_data.pending_window_update_size_self, increment_size); connection_open = connection->synced_data.is_open; } s_unlock_synced_data(connection); diff --git a/source/h2_stream.c b/source/h2_stream.c index 4603c610f..d6d1182b0 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -209,37 +209,40 @@ static struct aws_h2err s_check_state_allows_frame_type( return aws_h2err_from_h2_code(h2_error_code); } -static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, uint64_t window_size) { +static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, uint64_t window_update_size) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); + if (window_update_size == 0) { + /* Nothing to do */ + return AWS_OP_SUCCESS; + } /* Only send a WINDOW_UPDATE frame if the stream window is below the threshold * If the pending amount is greater than uin64 max. Probably an unexpected error, ignores it and cap it. */ - stream->thread_data.pending_window_update_size_thread = - aws_add_u64_saturating(stream->thread_data.pending_window_update_size_thread, window_size); + stream->thread_data.pending_window_update_size_self = + aws_add_u64_saturating(stream->thread_data.pending_window_update_size_self, window_update_size); if (stream->thread_data.window_size_self > (int32_t)stream->window_size_threshold_to_send_update) { AWS_H2_STREAM_LOGF( TRACE, stream, "Ignoring sending WINDOW_UPDATE update of size%" PRIu64 ". Current size: %" PRIi32 ", threshold: %" PRIu32 " pending: %" PRIu64, - window_size, + window_update_size, stream->thread_data.window_size_self, stream->window_size_threshold_to_send_update, - stream->thread_data.pending_window_update_size_thread); + stream->thread_data.pending_window_update_size_self); return AWS_OP_SUCCESS; } /* Cap the window to AWS_H2_WINDOW_UPDATE_MAX */ - int32_t previous_window = stream->thread_data.window_size_self; uint32_t window_delta = aws_h2_calculate_cap_window_update_delta( - stream->thread_data.window_size_self, stream->thread_data.pending_window_update_size_thread); + stream->thread_data.window_size_self, stream->thread_data.pending_window_update_size_self); - if (window_delta != stream->thread_data.pending_window_update_size_thread) { + if (window_delta != stream->thread_data.pending_window_update_size_self) { AWS_H2_STREAM_LOGF( DEBUG, stream, "Capping window update delta from %" PRIu64 " to %" PRIu32, - stream->thread_data.pending_window_update_size_thread, + stream->thread_data.pending_window_update_size_self, window_delta); } @@ -252,7 +255,6 @@ static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, u stream, "WINDOW_UPDATE frame on stream failed to be sent, error %s", aws_error_name(aws_last_error())); - stream->thread_data.window_size_self = previous_window; return AWS_OP_ERR; } AWS_H2_STREAM_LOGF(TRACE, stream, "Sending WINDOW_UPDATE by %" PRIu32 ".", window_delta); @@ -260,10 +262,10 @@ static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, u aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame); /* The real size should be delta plus previous window size. Since the math in * aws_h2_calculate_cap_window_update_delta, no overflow should happen. 
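
The comment above leans on aws_h2_calculate_cap_window_update_delta, so here is that clamping in isolation: the increment is first capped at INT32_MAX (the largest legal WINDOW_UPDATE), then capped again so that current_window plus the delta cannot exceed INT32_MAX. A standalone sketch with local stand-ins for the aws_min_* helpers and a couple of illustrative inputs:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static int64_t min_i64(int64_t a, int64_t b) { return a < b ? a : b; }

/* Mirrors the two clamps in aws_h2_calculate_cap_window_update_delta. */
static uint32_t cap_window_update_delta(int64_t current_window, uint64_t pending_update_size) {
    int64_t delta = (int64_t)min_u64(pending_update_size, INT32_MAX);
    delta = min_i64(delta, (int64_t)INT32_MAX - current_window);
    assert(delta >= 0 && delta <= INT32_MAX);
    return (uint32_t)delta;
}

int main(void) {
    /* Window already at 100, an enormous pending update: the delta is capped so
     * the resulting window lands exactly on INT32_MAX. */
    printf("%u\n", cap_window_update_delta(100, UINT64_MAX)); /* 2147483547 */

    /* Negative window (e.g. after a SETTINGS-driven shrink): the whole pending
     * amount still fits, so nothing is trimmed. */
    printf("%u\n", cap_window_update_delta(-500, 1000)); /* 1000 */
    return 0;
}
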
*/ - stream->thread_data.window_size_self = previous_window + (uint32_t)window_delta; + stream->thread_data.window_size_self += (uint32_t)window_delta; AWS_ASSERT(stream->thread_data.window_size_self <= AWS_H2_WINDOW_UPDATE_MAX); - AWS_ASSERT(window_delta <= stream->thread_data.pending_window_update_size_thread); - stream->thread_data.pending_window_update_size_thread -= window_delta; + AWS_ASSERT(window_delta <= stream->thread_data.pending_window_update_size_self); + stream->thread_data.pending_window_update_size_self -= window_delta; } return AWS_OP_SUCCESS; } @@ -392,8 +394,8 @@ static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void stream->synced_data.is_cross_thread_work_task_scheduled = false; /* window_update_size is ensured to be not greater than AWS_H2_WINDOW_UPDATE_MAX */ - window_update_size = stream->synced_data.pending_window_update_size_sync; - stream->synced_data.pending_window_update_size_sync = 0; + window_update_size = stream->synced_data.pending_window_update_size_self; + stream->synced_data.pending_window_update_size_self = 0; reset_called = stream->synced_data.reset_called; reset_error = stream->synced_data.reset_error; @@ -525,8 +527,8 @@ static void s_stream_update_window(struct aws_http_stream *stream_base, size_t i if (!stream_is_init) { cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled; stream->synced_data.is_cross_thread_work_task_scheduled = true; - stream->synced_data.pending_window_update_size_sync = - aws_add_u64_saturating(stream->synced_data.pending_window_update_size_sync, increment_size); + stream->synced_data.pending_window_update_size_self = + aws_add_u64_saturating(stream->synced_data.pending_window_update_size_self, increment_size); } s_unlock_synced_data(stream); } /* END CRITICAL SECTION */ @@ -682,7 +684,7 @@ struct aws_h2err aws_h2_stream_window_size_change_direct( bool self) { /* If settings causing the flow control windows to overflow, error out. 
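
The (int64_t) cast added just below is what makes this overflow check itself reliable: computed in 32-bit int, the sum could overflow before the comparison ever runs. A small standalone illustration with hypothetical values (not taken from the codebase):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    int32_t window = 2000000000; /* close to the 2^31 - 1 ceiling */
    int32_t change = 2000000000; /* e.g. a large SETTINGS-driven adjustment */

    /* Promoting to 64-bit first: the sum is representable, the check is exact. */
    if ((int64_t)window + change > INT32_MAX) {
        printf("would raise FLOW_CONTROL_ERROR\n");
    }

    /* By contrast, `window + change` evaluated in 32-bit int overflows
     * (undefined behavior) before any comparison can catch it. */
    return 0;
}
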
*/ if (self) { - if (stream->thread_data.window_size_self + size_changed > AWS_H2_WINDOW_UPDATE_MAX) { + if ((int64_t)stream->thread_data.window_size_self + size_changed > AWS_H2_WINDOW_UPDATE_MAX) { return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR); } stream->thread_data.window_size_self += size_changed; From 37adfb220c3a00441986b1b5404df9e22ef1a06e Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Thu, 24 Apr 2025 16:10:08 -0700 Subject: [PATCH 40/48] add comments --- source/h2_stream.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/h2_stream.c b/source/h2_stream.c index d6d1182b0..5f991c083 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -745,6 +745,7 @@ int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_ if (connection->stream_window_size_threshold_to_send_update) { stream->window_size_threshold_to_send_update = connection->stream_window_size_threshold_to_send_update; } else { + /* Set reasonable default of: 50% initial window size (same as Netty's DEFAULT_WINDOW_UPDATE_RATIO) */ stream->window_size_threshold_to_send_update = connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] / 2; } From 19d94399c0b61d8a6615d9ba7af3e61d1cb57612 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 25 Apr 2025 14:32:49 -0700 Subject: [PATCH 41/48] h2: respect the initial window size setting (#514) --- include/aws/http/connection.h | 14 ++-- include/aws/http/http2_stream_manager.h | 12 ++- include/aws/http/private/h2_connection.h | 2 + source/connection.c | 6 +- source/h2_connection.c | 56 +++++++++++-- source/h2_stream.c | 3 - source/http2_stream_manager.c | 1 + tests/CMakeLists.txt | 2 + tests/test_h2_client.c | 101 +++++++++++------------ tests/test_stream_manager.c | 2 +- 10 files changed, 126 insertions(+), 73 deletions(-) diff --git a/include/aws/http/connection.h b/include/aws/http/connection.h index a7f5ec628..582fb86e2 100644 --- a/include/aws/http/connection.h +++ b/include/aws/http/connection.h @@ -345,8 +345,7 @@ struct aws_http_client_connection_options { * * If true, the flow-control window of each stream will shrink as body data * is received (headers, padding, and other metadata do not affect the window). - * `initial_window_size` determines the starting size of each stream's window for HTTP/1 stream, while HTTP/2 stream - * will use the settings AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE to inform the other side about read back pressure + * `initial_window_size` determines the starting size of each stream's window. * * If a stream's flow-control window reaches 0, no further data will be received. The user must call * aws_http_stream_update_window() to increment the stream's window and keep data flowing. @@ -360,12 +359,17 @@ struct aws_http_client_connection_options { bool manual_window_management; /** - * The starting size of each HTTP stream's flow-control window for HTTP/1 connection. + * The starting size of each HTTP stream's flow-control window. * Required if `manual_window_management` is true, * ignored if `manual_window_management` is false. * - * Always ignored when HTTP/2 connection created. The initial window size is controlled by the settings, - * `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE` + * For HTTP/2 connection, this value will end up being one of the initial settings for the connection, + * `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE`. + * The corresponding settings from `initial_settings_array` will override this value. 
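
On the API surface, the new knob is just another option field. A hedged sketch of enabling it on the HTTP/2 stream manager (only the fields relevant here are shown; bootstrap, host, port, TLS options and the other required fields are elided):

#include <stdbool.h>
#include <aws/http/http2_stream_manager.h>

/* Sketch: turn on per-stream read back-pressure and start each stream
 * (via SETTINGS_INITIAL_WINDOW_SIZE) at 256 KiB. Must stay <= 2^31 - 1. */
struct aws_http2_stream_manager_options sm_options = {
    .enable_read_back_pressure = true,
    .initial_window_size = 256 * 1024,
    /* ... bootstrap, host, port, TLS options, etc. ... */
};
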
+ * Notes: + * - the setting value has the limitation of 2^31-1, otherwise the connection will be failed to be established with + * AWS_ERROR_INVALID_ARGUMENT. + * - when this set to 0, the initial window size will be set to 0, when `manual_window_management` is true. */ size_t initial_window_size; diff --git a/include/aws/http/http2_stream_manager.h b/include/aws/http/http2_stream_manager.h index bd7867759..cb9a52bd3 100644 --- a/include/aws/http/http2_stream_manager.h +++ b/include/aws/http/http2_stream_manager.h @@ -79,7 +79,7 @@ struct aws_http2_stream_manager_options { * - For connection level window control, `conn_manual_window_management` will enable manual control. The * inital window size is not controllable. * - For stream level window control, `enable_read_back_pressure` will enable manual control. The initial window - * size needs to be set through `initial_settings_array`. + * size needs to be set through `initial_window_size` or `initial_settings_array`. */ const struct aws_http2_setting *initial_settings_array; size_t num_initial_settings; @@ -92,6 +92,16 @@ struct aws_http2_stream_manager_options { * The initial window size can be set by `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE` via `initial_settings_array` */ bool enable_read_back_pressure; + /* + * This value will end up being one of the initial settings for the connection, + * `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE`. + * The corresponding settings from `initial_settings_array` will override this value. + * Notes: + * - the setting value has the limitation of 2^31-1, otherwise the connection will be failed to be established with + * AWS_ERROR_INVALID_ARGUMENT. + * - when this set to 0, the initial window size will be set to 0, when `enable_read_back_pressure` is true. + * */ + size_t initial_window_size; /* Connection monitor for the underlying connections made */ const struct aws_http_connection_monitoring_options *monitoring_options; diff --git a/include/aws/http/private/h2_connection.h b/include/aws/http/private/h2_connection.h index 8cd7d9487..eb393a139 100644 --- a/include/aws/http/private/h2_connection.h +++ b/include/aws/http/private/h2_connection.h @@ -249,12 +249,14 @@ AWS_HTTP_API struct aws_http_connection *aws_http_connection_new_http2_server( struct aws_allocator *allocator, bool manual_window_management, + size_t initial_window_size, const struct aws_http2_connection_options *http2_options); AWS_HTTP_API struct aws_http_connection *aws_http_connection_new_http2_client( struct aws_allocator *allocator, bool manual_window_management, + size_t initial_window_size, const struct aws_http2_connection_options *http2_options); AWS_EXTERN_C_END diff --git a/source/connection.c b/source/connection.c index 3d1a082ed..bbeb00ad1 100644 --- a/source/connection.c +++ b/source/connection.c @@ -195,9 +195,11 @@ struct aws_http_connection *aws_http_connection_new_channel_handler( break; case AWS_HTTP_VERSION_2: if (is_server) { - connection = aws_http_connection_new_http2_server(alloc, manual_window_management, http2_options); + connection = aws_http_connection_new_http2_server( + alloc, manual_window_management, initial_window_size, http2_options); } else { - connection = aws_http_connection_new_http2_client(alloc, manual_window_management, http2_options); + connection = aws_http_connection_new_http2_client( + alloc, manual_window_management, initial_window_size, http2_options); } break; default: diff --git a/source/h2_connection.c b/source/h2_connection.c index 8ab3aa806..cec24b3f0 100644 --- a/source/h2_connection.c +++ 
b/source/h2_connection.c @@ -107,6 +107,8 @@ static void s_send_goaway( const struct aws_byte_cursor *optional_debug_data); static struct aws_h2_pending_settings *s_new_pending_settings( struct aws_allocator *allocator, + bool add_initial_window, + size_t initial_window_size, const struct aws_http2_setting *settings_array, size_t num_settings, aws_http2_on_change_settings_complete_fn *on_completed, @@ -298,6 +300,7 @@ void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *conne static struct aws_h2_connection *s_connection_new( struct aws_allocator *alloc, bool manual_window_management, + size_t initial_window_size, const struct aws_http2_connection_options *http2_options, bool server) { @@ -426,6 +429,10 @@ static struct aws_h2_connection *s_connection_new( /* User data from connection base is not ready until the handler installed */ connection->thread_data.init_pending_settings = s_new_pending_settings( connection->base.alloc, + manual_window_management && + initial_window_size != AWS_H2_INIT_WINDOW_SIZE, /* If the initial window size equals to the initial window + size, don't need to send an extra one. */ + initial_window_size, http2_options->initial_settings_array, http2_options->num_initial_settings, http2_options->on_initial_settings_completed, @@ -445,9 +452,11 @@ static struct aws_h2_connection *s_connection_new( struct aws_http_connection *aws_http_connection_new_http2_server( struct aws_allocator *allocator, bool manual_window_management, + size_t initial_window_size, const struct aws_http2_connection_options *http2_options) { - struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, true); + struct aws_h2_connection *connection = + s_connection_new(allocator, manual_window_management, initial_window_size, http2_options, true); if (!connection) { return NULL; } @@ -460,9 +469,11 @@ struct aws_http_connection *aws_http_connection_new_http2_server( struct aws_http_connection *aws_http_connection_new_http2_client( struct aws_allocator *allocator, bool manual_window_management, + size_t initial_window_size, const struct aws_http2_connection_options *http2_options) { - struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, false); + struct aws_h2_connection *connection = + s_connection_new(allocator, manual_window_management, initial_window_size, http2_options, false); if (!connection) { return NULL; } @@ -513,14 +524,29 @@ static void s_handler_destroy(struct aws_channel_handler *handler) { static struct aws_h2_pending_settings *s_new_pending_settings( struct aws_allocator *allocator, + bool add_initial_window, + size_t initial_window_size, const struct aws_http2_setting *settings_array, - size_t num_settings, + size_t passin_num_settings, aws_http2_on_change_settings_complete_fn *on_completed, void *user_data) { + size_t num_settings = passin_num_settings; + if (add_initial_window) { + if (initial_window_size > AWS_H2_WINDOW_UPDATE_MAX) { + AWS_LOGF_ERROR( + AWS_LS_HTTP_CONNECTION, + "Initial window size %zu is larger than max %d", + initial_window_size, + AWS_H2_WINDOW_UPDATE_MAX); + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + return NULL; + } + num_settings++; + } size_t settings_storage_size = sizeof(struct aws_http2_setting) * num_settings; struct aws_h2_pending_settings *pending_settings; - void *settings_storage; + uint8_t *settings_storage; if (!aws_mem_acquire_many( allocator, 2, @@ -533,9 +559,17 @@ static struct aws_h2_pending_settings 
*s_new_pending_settings( AWS_ZERO_STRUCT(*pending_settings); /* We buffer the settings up, incase the caller has freed them when the ACK arrives */ - pending_settings->settings_array = settings_storage; + pending_settings->settings_array = (void *)settings_storage; + if (add_initial_window) { + /* insert the initial window size settings to the first of all the settings. */ + pending_settings->settings_array[0].id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE; + pending_settings->settings_array[0].value = (uint32_t)initial_window_size; + /* Move the storage pointer to the second in the list */ + settings_storage = settings_storage + sizeof(struct aws_http2_setting); + } if (settings_array) { - memcpy(pending_settings->settings_array, settings_array, num_settings * sizeof(struct aws_http2_setting)); + /* copy all passin settings to the settings storage. */ + memcpy(settings_storage, settings_array, passin_num_settings * sizeof(struct aws_http2_setting)); } pending_settings->num_settings = num_settings; pending_settings->on_completed = on_completed; @@ -2288,8 +2322,14 @@ static int s_connection_change_settings( return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); } - struct aws_h2_pending_settings *pending_settings = - s_new_pending_settings(connection->base.alloc, settings_array, num_settings, on_completed, user_data); + struct aws_h2_pending_settings *pending_settings = s_new_pending_settings( + connection->base.alloc, + false /*add initial window*/, + 0 /*initial window size*/, + settings_array, + num_settings, + on_completed, + user_data); if (!pending_settings) { return AWS_OP_ERR; } diff --git a/source/h2_stream.c b/source/h2_stream.c index 5f991c083..b18199e47 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -1122,9 +1122,6 @@ struct aws_h2err aws_h2_stream_on_decoder_data_begin( /* Automatically update the full amount we just received */ auto_window_update = payload_len; } - if (total_padding_bytes) { - AWS_H2_STREAM_LOGF(TRACE, stream, "%" PRIu32 " Bytes of padding received.", total_padding_bytes); - } if (s_stream_send_update_window_if_needed(stream, auto_window_update)) { return aws_h2err_from_last_error(); diff --git a/source/http2_stream_manager.c b/source/http2_stream_manager.c index fb2319937..9cd9a9520 100644 --- a/source/http2_stream_manager.c +++ b/source/http2_stream_manager.c @@ -1128,6 +1128,7 @@ struct aws_http2_stream_manager *aws_http2_stream_manager_new( .host = options->host, .port = options->port, .enable_read_back_pressure = options->enable_read_back_pressure, + .initial_window_size = options->initial_window_size, .monitoring_options = options->monitoring_options, .proxy_options = options->proxy_options, .proxy_ev_settings = options->proxy_ev_settings, diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9cdc8ffd7..ddef45d0d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -466,6 +466,8 @@ add_test_case(h2_client_conn_err_invalid_last_stream_id_goaway) add_test_case(h2_client_change_settings_succeed) add_test_case(h2_client_change_settings_failed_no_ack_received) add_test_case(h2_client_manual_window_management_disabled_auto_window_update) +add_test_case(h2_client_manual_window_management_invalid_initial_window_size) +add_test_case(h2_client_manual_window_management_max_initial_window_size) add_test_case(h2_client_manual_window_management_user_send_stream_window_update) add_test_case(h2_client_manual_window_management_user_send_stream_window_update_with_padding) 
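
The trickiest part of that hunk is the buffer layout in s_new_pending_settings: one extra slot is reserved at index 0 for AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE and the caller's array is copied in after it, which is why entries from initial_settings_array can still override the value (the receiver applies the last value it sees for an identifier). A standalone sketch of the same layout using plain malloc and simplified local types:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct h2_setting { uint32_t id; uint32_t value; };
enum { SETTINGS_INITIAL_WINDOW_SIZE = 0x4 }; /* RFC 7540 identifier */

/* Build the array that goes into the initial SETTINGS frame: slot 0 carries
 * the window size, the user's settings follow verbatim. */
static struct h2_setting *build_initial_settings(
    uint32_t initial_window_size, const struct h2_setting *user, size_t user_count, size_t *out_count) {

    struct h2_setting *out = malloc((user_count + 1) * sizeof(struct h2_setting));
    if (!out) {
        return NULL;
    }
    out[0].id = SETTINGS_INITIAL_WINDOW_SIZE;
    out[0].value = initial_window_size;
    if (user_count) {
        memcpy(&out[1], user, user_count * sizeof(struct h2_setting));
    }
    *out_count = user_count + 1;
    return out;
}
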
add_test_case(h2_client_manual_window_management_user_send_stream_window_update_overflow_capped) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index b467252a5..f8438475a 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -113,7 +113,7 @@ static int s_tester_init(struct aws_allocator *alloc, void *ctx) { }; s_tester.connection = - aws_http_connection_new_http2_client(alloc, false /* manual window management */, &http2_options); + aws_http_connection_new_http2_client(alloc, false /* manual window management */, 0, &http2_options); ASSERT_NOT_NULL(s_tester.connection); { @@ -2622,7 +2622,12 @@ TEST_CASE(h2_client_stream_err_received_data_flow_control) { return s_tester_clean_up(); } -static int s_manual_window_management_tester_init(struct aws_allocator *alloc, bool conn, bool stream, void *ctx) { +static int s_manual_window_management_tester_init( + struct aws_allocator *alloc, + bool conn, + bool stream, + size_t stream_initial_window_size, + void *ctx) { (void)ctx; aws_http_library_init(alloc); @@ -2645,8 +2650,8 @@ static int s_manual_window_management_tester_init(struct aws_allocator *alloc, b .stream_window_size_threshold_to_send_update = UINT32_MAX, }; - s_tester.connection = - aws_http_connection_new_http2_client(alloc, stream /* manual window management */, &http2_options); + s_tester.connection = aws_http_connection_new_http2_client( + alloc, stream /* manual window management */, stream_initial_window_size, &http2_options); ASSERT_NOT_NULL(s_tester.connection); { /* re-enact marriage vows of http-connection and channel (handled by http-bootstrap in real world) */ @@ -2672,7 +2677,8 @@ static int s_manual_window_management_tester_init(struct aws_allocator *alloc, b * flow-control error */ TEST_CASE(h2_client_conn_err_received_data_flow_control) { /* disable the connection automatic window update */ - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); + ASSERT_SUCCESS(s_manual_window_management_tester_init( + allocator, true /*conn*/, false /*stream*/, AWS_H2_INIT_WINDOW_SIZE, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); @@ -3568,30 +3574,15 @@ TEST_CASE(h2_client_change_settings_failed_no_ack_received) { /* Test manual window management for stream successfully disabled the automatically window update */ TEST_CASE(h2_client_manual_window_management_disabled_auto_window_update) { - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); + size_t window_size = 10; + ASSERT_SUCCESS( + s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, window_size, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); - - size_t window_size = 10; - - /* change the settings of the initial window size for new stream flow-control window */ - struct aws_http2_setting settings_array[] = { - {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, - }; - - ASSERT_SUCCESS(aws_http2_connection_change_settings( - s_tester.connection, - settings_array, - AWS_ARRAY_SIZE(settings_array), - NULL /*callback function*/, - NULL /*user_data*/)); - testing_channel_drain_queued_tasks(&s_tester.testing_channel); - /* fake peer sends two settings ack back, one for the initial settings, one for the 
user settings we just sent */ + /* fake peer sends settings ack back for the initial settings */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); - peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); - ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ @@ -3663,9 +3654,25 @@ TEST_CASE(h2_client_manual_window_management_disabled_auto_window_update) { return s_tester_clean_up(); } +/* Test manual window management for stream with an invalid initial window size. */ +TEST_CASE(h2_client_manual_window_management_invalid_initial_window_size) { + /* The initial window size must be less than or equal to 2^31 - 1. */ + ASSERT_FAILS( + s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, (size_t)INT32_MAX + 1, ctx)); + ASSERT_UINT_EQUALS(AWS_ERROR_INVALID_ARGUMENT, aws_last_error()); + return s_tester_clean_up(); +} +/* Test manual window management for stream with the max initial window size. */ +TEST_CASE(h2_client_manual_window_management_max_initial_window_size) { + ASSERT_SUCCESS( + s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, (size_t)INT32_MAX, ctx)); + return s_tester_clean_up(); +} + TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update) { - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); + ASSERT_SUCCESS(s_manual_window_management_tester_init( + allocator, false /*conn*/, true /*stream*/, AWS_H2_INIT_WINDOW_SIZE, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); @@ -3772,32 +3779,18 @@ TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update) { TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_with_padding) { - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); - /* fake peer sends connection preface */ - ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); - testing_channel_drain_queued_tasks(&s_tester.testing_channel); - size_t window_size = 20; size_t padding_length = 10; size_t data_length = window_size - padding_length - 1; - - /* change the settings of the initial window size for new stream flow-control window */ - struct aws_http2_setting settings_array[] = { - {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, - }; - - ASSERT_SUCCESS(aws_http2_connection_change_settings( - s_tester.connection, - settings_array, - AWS_ARRAY_SIZE(settings_array), - NULL /*callback function*/, - NULL /*user_data*/)); + ASSERT_SUCCESS( + s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, window_size, ctx)); + /* fake peer sends connection preface */ + ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); - /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ + + /* fake peer sends settings ack back for the initial settings */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); 
ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); - peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); - ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); /* send request */ @@ -3887,7 +3880,8 @@ TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_with TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_overflow_capped) { - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, false /*conn*/, true /*stream*/, ctx)); + ASSERT_SUCCESS(s_manual_window_management_tester_init( + allocator, false /*conn*/, true /*stream*/, AWS_H2_INIT_WINDOW_SIZE, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); @@ -3928,7 +3922,7 @@ TEST_CASE(h2_client_manual_window_management_user_send_stream_window_update_over * flow-control error */ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update) { - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); + ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, 0, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); @@ -4017,7 +4011,8 @@ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update) { TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update_with_padding) { - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); + ASSERT_SUCCESS(s_manual_window_management_tester_init( + allocator, true /*conn*/, false /*stream*/, AWS_H2_INIT_WINDOW_SIZE, ctx)); /* get connection preface and acks out of the way */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); @@ -4119,7 +4114,7 @@ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update_with_p TEST_CASE(h2_client_manual_window_management_user_send_connection_window_update_overflow_capped) { - ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, ctx)); + ASSERT_SUCCESS(s_manual_window_management_tester_init(allocator, true /*conn*/, false /*stream*/, 0, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); @@ -4311,8 +4306,8 @@ TEST_CASE(h2_client_empty_initial_settings) { .on_remote_settings_change = s_on_remote_settings_change, }; - s_tester.connection = - aws_http_connection_new_http2_client(allocator, false /* manual window management */, &http2_options); + s_tester.connection = aws_http_connection_new_http2_client( + allocator, false /* manual window management */, AWS_H2_INIT_WINDOW_SIZE, &http2_options); ASSERT_NOT_NULL(s_tester.connection); { @@ -4363,8 +4358,8 @@ TEST_CASE(h2_client_conn_failed_initial_settings_completed_not_invoked) { .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, .on_remote_settings_change = s_on_remote_settings_change, }; - s_tester.connection = - aws_http_connection_new_http2_client(allocator, false /* manual window management */, &http2_options); + s_tester.connection = aws_http_connection_new_http2_client( + allocator, false /* manual 
window management */, AWS_H2_INIT_WINDOW_SIZE, &http2_options); ASSERT_NOT_NULL(s_tester.connection); s_tester.user_data.initial_settings_error_code = INT32_MAX; { diff --git a/tests/test_stream_manager.c b/tests/test_stream_manager.c index 5adc29f3f..18ef80022 100644 --- a/tests/test_stream_manager.c +++ b/tests/test_stream_manager.c @@ -610,7 +610,7 @@ static struct sm_fake_connection *s_sm_tester_fake_connection_new_from_options( } struct aws_http_connection *connection = aws_http_connection_new_http2_client( - options->allocator, options->manual_window_management /* manual window management */, options->http2_options); + options->allocator, options->manual_window_management, 0 /*initial window size*/, options->http2_options); AWS_FATAL_ASSERT(connection); aws_http_connection_acquire(connection); From 84d623c9d49ce1e632867fa88dbd0f9773320546 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 25 Apr 2025 14:41:40 -0700 Subject: [PATCH 42/48] WIP --- tests/test_h2_client.c | 83 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index b467252a5..3eb5a0b3b 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -5759,3 +5759,86 @@ TEST_CASE(h2_client_manual_data_write_connection_close) { aws_input_stream_release(data_stream); return s_tester_clean_up(); } + +/* Initialize the tester to test on the behavior that window update will be batched */ +static int s_batch_window_management_tester_init( + struct aws_allocator *alloc, + bool manual_conn, + bool manual_stream, + uint32_t conn_threshold, + uint32_t stream_threshold, + uint32_t initial_window_size, + void *ctx) { + (void)ctx; + aws_http_library_init(alloc); + + s_tester.alloc = alloc; + + struct aws_testing_channel_options options = {.clock_fn = aws_high_res_clock_get_ticks}; + + ASSERT_SUCCESS(testing_channel_init(&s_tester.testing_channel, alloc, &options)); + struct aws_http2_setting settings_array[] = { + {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 0}, + }; + + struct aws_http2_connection_options http2_options = { + .initial_settings_array = settings_array, + .num_initial_settings = AWS_ARRAY_SIZE(settings_array), + .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, + .conn_manual_window_management = manual_conn, + }; + + s_tester.connection = + aws_http_connection_new_http2_client(alloc, manual_stream /* manual window management */, &http2_options); + ASSERT_NOT_NULL(s_tester.connection); + + { /* re-enact marriage vows of http-connection and channel (handled by http-bootstrap in real world) */ + struct aws_channel_slot *slot = aws_channel_slot_new(s_tester.testing_channel.channel); + ASSERT_NOT_NULL(slot); + ASSERT_SUCCESS(aws_channel_slot_insert_end(s_tester.testing_channel.channel, slot)); + ASSERT_SUCCESS(aws_channel_slot_set_handler(slot, &s_tester.connection->channel_handler)); + s_tester.connection->vtable->on_channel_handler_installed(&s_tester.connection->channel_handler, slot); + } + + struct h2_fake_peer_options peer_options = { + .alloc = alloc, + .testing_channel = &s_tester.testing_channel, + .is_server = true, + }; + ASSERT_SUCCESS(h2_fake_peer_init(&s_tester.peer, &peer_options)); + + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + return AWS_OP_SUCCESS; +} + +TEST_CASE(h2_client_batch_auto_window_update) { + /* Automated and default threshold */ + ASSERT_SUCCESS(s_batch_window_management_tester_init( + allocator, false /*manual_conn*/, false /*manual_stream*/, 0 /*conn_threshold*/, 0 
/*stream_threshold*/, ctx)); + /* fake peer sends connection preface */ + ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + + size_t window_size = 10; + + /* change the settings of the initial window size for new stream flow-control window */ + struct aws_http2_setting settings_array[] = { + {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, + }; + + ASSERT_SUCCESS(aws_http2_connection_change_settings( + s_tester.connection, + settings_array, + AWS_ARRAY_SIZE(settings_array), + NULL /*callback function*/, + NULL /*user_data*/)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ + struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); + ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); + peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); + ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + + return s_tester_clean_up(); +} From c66b19fba89b55e77a1f2cc523d22ef8bed1d7c4 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 29 Apr 2025 10:03:46 -0700 Subject: [PATCH 43/48] add tests --- source/h2_connection.c | 4 +- source/h2_stream.c | 2 +- tests/CMakeLists.txt | 2 + tests/test_h2_client.c | 220 +++++++++++++++++++++++++++++++++++++---- 4 files changed, 207 insertions(+), 21 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index cec24b3f0..14df2e0bd 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -1257,11 +1257,11 @@ static int s_connection_send_update_window_if_needed( */ connection->thread_data.pending_window_update_size_self = aws_add_u64_saturating(connection->thread_data.pending_window_update_size_self, window_update_size); - if (connection->thread_data.window_size_self > connection->window_size_threshold_to_send_update) { + if (connection->thread_data.window_size_self >= connection->window_size_threshold_to_send_update) { CONNECTION_LOGF( TRACE, connection, - "Ignoring sending connection window update of size%" PRIu64 ". Current size: %" PRIu32 + "Ignoring sending connection window update of size %" PRIu64 ". Current size: %" PRIu32 ", threshold: %" PRIu32 " pending: %" PRIu64, window_update_size, connection->thread_data.window_size_self, diff --git a/source/h2_stream.c b/source/h2_stream.c index b18199e47..e033a1584 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -220,7 +220,7 @@ static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, u * If the pending amount is greater than uin64 max. Probably an unexpected error, ignores it and cap it. 
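
The reordering in this patch (moving the early return after the saturating add) matters when a previous WINDOW_UPDATE was capped: the pending counter can be non-zero even though the current call contributes 0 bytes. A minimal standalone illustration of check-after-accumulate (threshold handling omitted; names are local to the sketch):

#include <stdint.h>
#include <stdio.h>

static uint64_t s_pending; /* stands in for thread_data.pending_window_update_size_self */

/* Check-after-accumulate: a call with increment 0 can still flush leftovers. */
static void update_window(uint64_t increment) {
    s_pending += increment; /* saturating add in the real code */
    if (s_pending == 0) {
        return; /* genuinely nothing to send */
    }
    printf("would send WINDOW_UPDATE covering up to %llu bytes\n", (unsigned long long)s_pending);
}

int main(void) {
    s_pending = 100;  /* left over from an earlier capped WINDOW_UPDATE */
    update_window(0); /* still flushes the 100 pending bytes */
    return 0;
}
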
*/ stream->thread_data.pending_window_update_size_self = aws_add_u64_saturating(stream->thread_data.pending_window_update_size_self, window_update_size); - if (stream->thread_data.window_size_self > (int32_t)stream->window_size_threshold_to_send_update) { + if (stream->thread_data.window_size_self >= (int32_t)stream->window_size_threshold_to_send_update) { AWS_H2_STREAM_LOGF( TRACE, stream, diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ddef45d0d..54a147f8a 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -509,6 +509,8 @@ add_test_case(h2_client_manual_data_write_not_enabled) add_test_case(h2_client_manual_data_write_with_body) add_test_case(h2_client_manual_data_write_no_data) add_test_case(h2_client_manual_data_write_connection_close) +add_test_case(h2_client_batch_auto_window_update) +add_test_case(h2_client_batch_manual_window_update) add_test_case(server_new_destroy) add_test_case(server_new_destroy_tcp) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 0fa83fedb..695f61fb7 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -3961,7 +3961,7 @@ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update) { /* The max body size here is limited. So we need to send multiple bodies to get the flow-control error */ size_t body_size = - aws_max_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - + aws_min_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - AWS_H2_FRAME_PREFIX_SIZE; /* fake peer sends a DATA frame larger than the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, body_size)); @@ -4052,7 +4052,7 @@ TEST_CASE(h2_client_manual_window_management_user_send_conn_window_update_with_p /* The max body size here is limited. 
So we need to send multiple bodies to get the flow-control error */ size_t padding_size = 10; size_t body_size = - aws_max_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - + aws_min_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - AWS_H2_FRAME_PREFIX_SIZE - padding_size - 1; /* fake peer sends a DATA frame larger than the window size we have */ ASSERT_SUCCESS(aws_byte_buf_init(&response_body_bufs, allocator, body_size)); @@ -5781,10 +5781,12 @@ static int s_batch_window_management_tester_init( .num_initial_settings = AWS_ARRAY_SIZE(settings_array), .max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS, .conn_manual_window_management = manual_conn, + .conn_window_size_threshold_to_send_update = conn_threshold, + .stream_window_size_threshold_to_send_update = stream_threshold, }; - s_tester.connection = - aws_http_connection_new_http2_client(alloc, manual_stream /* manual window management */, &http2_options); + s_tester.connection = aws_http_connection_new_http2_client( + alloc, manual_stream /* manual window management */, initial_window_size, &http2_options); ASSERT_NOT_NULL(s_tester.connection); { /* re-enact marriage vows of http-connection and channel (handled by http-bootstrap in real world) */ @@ -5809,31 +5811,213 @@ static int s_batch_window_management_tester_init( TEST_CASE(h2_client_batch_auto_window_update) { /* Automated and default threshold */ ASSERT_SUCCESS(s_batch_window_management_tester_init( - allocator, false /*manual_conn*/, false /*manual_stream*/, 0 /*conn_threshold*/, 0 /*stream_threshold*/, ctx)); + allocator, + false /*manual_conn*/, + false /*manual_stream*/, + 0 /*conn_threshold*/, + 0 /*stream_threshold*/, + 0 /*initial_window_size*/, + ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); + struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); + ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); + /* Get the initial frames out of the way. 
*/ + size_t start_index = h2_decode_tester_frame_count(&s_tester.peer.decode); - size_t window_size = 10; + /* send request */ + struct aws_http_message *request = aws_http2_message_new_request(allocator); + ASSERT_NOT_NULL(request); - /* change the settings of the initial window size for new stream flow-control window */ - struct aws_http2_setting settings_array[] = { - {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = (uint32_t)window_size}, + struct aws_http_header request_headers_src[] = { + DEFINE_HEADER(":method", "GET"), + DEFINE_HEADER(":scheme", "https"), + DEFINE_HEADER(":path", "/"), }; + aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); - ASSERT_SUCCESS(aws_http2_connection_change_settings( - s_tester.connection, - settings_array, - AWS_ARRAY_SIZE(settings_array), - NULL /*callback function*/, - NULL /*user_data*/)); + struct client_stream_tester stream_tester; + ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); + + /* fake peer sends response headers */ + struct aws_http_header response_headers_src[] = { + DEFINE_HEADER(":status", "200"), + }; + + struct aws_http_headers *response_headers = aws_http_headers_new(allocator); + aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); + + struct aws_h2_frame *response_frame = + aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); + ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); + + /* The other end sents half the initial window size to get the window below he threshold and trigger the window + * update from client. */ + size_t total_data_size = AWS_H2_INIT_WINDOW_SIZE / 2 + 1; + + /* The frame size has the limit from channel and the protocol level. */ + size_t max_frame_data_size = + aws_min_size(aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE], g_aws_channel_max_fragment_size) - + AWS_H2_FRAME_PREFIX_SIZE - 1; + /* number of bodies peer will send, just to ensure the connection flow-control window will not be blocked when we + * manually update it */ + size_t sent_data = 0; + + /* Make sure we have multiple data frames. */ + ASSERT_TRUE(total_data_size > max_frame_data_size); + while (sent_data < total_data_size) { + size_t body_size = aws_min_size(total_data_size - sent_data, max_frame_data_size); + sent_data += body_size; + struct aws_byte_buf body_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_size)); + ASSERT_TRUE(aws_byte_buf_write_u8_n(&body_buf, (uint8_t)'a', body_size)); + struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&body_buf); + ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + aws_byte_buf_clean_up(&body_buf); + } + + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); + /* Check that only one window update frame sent for the stream and no window update frame sent for the connection, + * since client batches the window update to skip the small increment. 
*/ + struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, start_index /*idx*/, NULL); + ASSERT_NULL(connection_window_update_frame); + + size_t stream_window_update_index = 0; + struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 1 /*stream_id*/, + start_index /*idx*/, + &stream_window_update_index); + ASSERT_NOT_NULL(stream_window_update_frame); + ASSERT_UINT_EQUALS(total_data_size, stream_window_update_frame->window_size_increment); + + stream_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 1 /*stream_id*/, stream_window_update_index + 1, NULL); + /* No more found */ + ASSERT_NULL(stream_window_update_frame); + + aws_http_message_release(request); + aws_http_headers_release(response_headers); + client_stream_tester_clean_up(&stream_tester); + return s_tester_clean_up(); +} + +TEST_CASE(h2_client_batch_manual_window_update) { + /* Automated and default threshold */ + uint32_t stream_initial_window_size = 100; + ASSERT_SUCCESS(s_batch_window_management_tester_init( + allocator, + true /*manual_conn*/, + true /*manual_stream*/, + UINT32_MAX /*conn_threshold, make sure every connection level update will be sent*/, + 0 /*stream_threshold, use the default as half of the initial window size*/, + 100 /*initial_window_size*/, + ctx)); + /* fake peer sends connection preface */ + ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); - /* fake peer sends two settings ack back, one for the initial settings, one for the user settings we just sent */ struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); - peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); - ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); + /* Get the initial frames out of the way. 
*/ + size_t start_index = h2_decode_tester_frame_count(&s_tester.peer.decode); + + /* send request */ + struct aws_http_message *request = aws_http2_message_new_request(allocator); + ASSERT_NOT_NULL(request); + struct aws_http_header request_headers_src[] = { + DEFINE_HEADER(":method", "GET"), + DEFINE_HEADER(":scheme", "https"), + DEFINE_HEADER(":path", "/"), + }; + aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); + + struct client_stream_tester stream_tester; + ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); + + /* fake peer sends response headers */ + struct aws_http_header response_headers_src[] = { + DEFINE_HEADER(":status", "200"), + }; + + struct aws_http_headers *response_headers = aws_http_headers_new(allocator); + aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); + + struct aws_h2_frame *response_frame = + aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); + ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); + + /* Sent 3 data frame with manual window update from user */ + size_t body_size = stream_initial_window_size / 4; + /* number of bodies peer will send, just to ensure the connection flow-control window will not be blocked when we + * manually update it */ + + /* Make sure we have multiple data frames. */ + size_t num_data_frames = 3; + for (size_t i = 0; i < num_data_frames; i++) { + struct aws_byte_buf body_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_size)); + ASSERT_TRUE(aws_byte_buf_write_u8_n(&body_buf, (uint8_t)'a', body_size)); + struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&body_buf); + ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + aws_byte_buf_clean_up(&body_buf); + /* Client update the window manually */ + aws_http_stream_update_window(stream_tester.stream, body_size); + aws_http2_connection_update_window(s_tester.connection, body_size); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + } + + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); + /* Check that only one window update frame sent for the stream and 3 window update frame sent for the connection */ + size_t conn_window_update_index = start_index; + struct h2_decoded_frame *connection_window_update_frame; + for (size_t i = 0; i < num_data_frames; i++) { + connection_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 0 /*stream_id*/, + conn_window_update_index + 1 /*idx*/, + &conn_window_update_index); + ASSERT_NOT_NULL(connection_window_update_frame); + ASSERT_UINT_EQUALS(body_size, connection_window_update_frame->window_size_increment); + } + /* There shall be no more */ + connection_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, conn_window_update_index + 1, NULL); + ASSERT_NULL(connection_window_update_frame); + + /* Only one stream window update should be found. 
*/ + size_t stream_window_update_index = 0; + struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 1 /*stream_id*/, + start_index /*idx*/, + &stream_window_update_index); + ASSERT_NOT_NULL(stream_window_update_frame); + ASSERT_UINT_EQUALS(num_data_frames * body_size, stream_window_update_frame->window_size_increment); + + stream_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 1 /*stream_id*/, stream_window_update_index + 1, NULL); + /* No more found */ + ASSERT_NULL(stream_window_update_frame); + + aws_http_message_release(request); + aws_http_headers_release(response_headers); + client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } From af0a797391b078adcf06559febdc15e795281792 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 29 Apr 2025 10:29:08 -0700 Subject: [PATCH 44/48] add one more tricky test --- source/h2_connection.c | 9 +-- source/h2_stream.c | 10 +-- tests/CMakeLists.txt | 1 + tests/test_h2_client.c | 138 ++++++++++++++++++++++++++++++++++++++++- 4 files changed, 148 insertions(+), 10 deletions(-) diff --git a/source/h2_connection.c b/source/h2_connection.c index 14df2e0bd..2c184546a 100644 --- a/source/h2_connection.c +++ b/source/h2_connection.c @@ -1247,16 +1247,17 @@ static int s_connection_send_update_window_if_needed( struct aws_h2_connection *connection, uint64_t window_update_size) { AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel)); - if (window_update_size == 0) { - /* Nothing to do */ - return AWS_OP_SUCCESS; - } + /** * Only send a WINDOW_UPDATE frame if the connection window is below the threshold * If the pending amount is greater than uin64 max. Probably an unexpected error, ignores it and cap it. */ connection->thread_data.pending_window_update_size_self = aws_add_u64_saturating(connection->thread_data.pending_window_update_size_self, window_update_size); + if (connection->thread_data.pending_window_update_size_self == 0) { + /* Nothing to do */ + return AWS_OP_SUCCESS; + } if (connection->thread_data.window_size_self >= connection->window_size_threshold_to_send_update) { CONNECTION_LOGF( TRACE, diff --git a/source/h2_stream.c b/source/h2_stream.c index e033a1584..a11bf4a0b 100644 --- a/source/h2_stream.c +++ b/source/h2_stream.c @@ -211,20 +211,20 @@ static struct aws_h2err s_check_state_allows_frame_type( static int s_stream_send_update_window_if_needed(struct aws_h2_stream *stream, uint64_t window_update_size) { AWS_PRECONDITION_ON_CHANNEL_THREAD(stream); - if (window_update_size == 0) { - /* Nothing to do */ - return AWS_OP_SUCCESS; - } /* Only send a WINDOW_UPDATE frame if the stream window is below the threshold * If the pending amount is greater than uin64 max. Probably an unexpected error, ignores it and cap it. */ stream->thread_data.pending_window_update_size_self = aws_add_u64_saturating(stream->thread_data.pending_window_update_size_self, window_update_size); + if (stream->thread_data.pending_window_update_size_self == 0) { + /* Nothing to do */ + return AWS_OP_SUCCESS; + } if (stream->thread_data.window_size_self >= (int32_t)stream->window_size_threshold_to_send_update) { AWS_H2_STREAM_LOGF( TRACE, stream, - "Ignoring sending WINDOW_UPDATE update of size%" PRIu64 ". Current size: %" PRIi32 ", threshold: %" PRIu32 + "Ignoring sending WINDOW_UPDATE update of size %" PRIu64 ". 
Current size: %" PRIi32 ", threshold: %" PRIu32 " pending: %" PRIu64, window_update_size, stream->thread_data.window_size_self, diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 54a147f8a..01160d28c 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -511,6 +511,7 @@ add_test_case(h2_client_manual_data_write_no_data) add_test_case(h2_client_manual_data_write_connection_close) add_test_case(h2_client_batch_auto_window_update) add_test_case(h2_client_batch_manual_window_update) +add_test_case(h2_client_cap_manual_window_update) add_test_case(server_new_destroy) add_test_case(server_new_destroy_tcp) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 695f61fb7..7a3ee2908 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -5919,7 +5919,7 @@ TEST_CASE(h2_client_batch_manual_window_update) { true /*manual_stream*/, UINT32_MAX /*conn_threshold, make sure every connection level update will be sent*/, 0 /*stream_threshold, use the default as half of the initial window size*/, - 100 /*initial_window_size*/, + stream_initial_window_size /*initial_window_size*/, ctx)); /* fake peer sends connection preface */ ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); @@ -6021,3 +6021,139 @@ TEST_CASE(h2_client_batch_manual_window_update) { client_stream_tester_clean_up(&stream_tester); return s_tester_clean_up(); } + +/* The overflow window update will be capped to the allowed max to be sent. */ +TEST_CASE(h2_client_cap_manual_window_update) { + /* Automated and default threshold */ + uint32_t stream_initial_window_size = 100; + ASSERT_SUCCESS(s_batch_window_management_tester_init( + allocator, + true /*manual_conn*/, + true /*manual_stream*/, + UINT32_MAX /*conn_threshold, make sure every connection level update will be sent*/, + 0 /*stream_threshold, use the default as half of the initial window size*/, + stream_initial_window_size /*initial_window_size*/, + ctx)); + /* fake peer sends connection preface */ + ASSERT_SUCCESS(h2_fake_peer_send_connection_preface_default_settings(&s_tester.peer)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + struct aws_h2_frame *peer_frame = aws_h2_frame_new_settings(allocator, NULL, 0, true); + ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, peer_frame)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); + /* Get the initial frames out of the way. 
*/ + size_t start_index = h2_decode_tester_frame_count(&s_tester.peer.decode); + + /* send request */ + struct aws_http_message *request = aws_http2_message_new_request(allocator); + ASSERT_NOT_NULL(request); + + struct aws_http_header request_headers_src[] = { + DEFINE_HEADER(":method", "GET"), + DEFINE_HEADER(":scheme", "https"), + DEFINE_HEADER(":path", "/"), + }; + aws_http_message_add_header_array(request, request_headers_src, AWS_ARRAY_SIZE(request_headers_src)); + + struct client_stream_tester stream_tester; + ASSERT_SUCCESS(s_stream_tester_init(&stream_tester, request)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + uint32_t stream_id = aws_http_stream_get_id(stream_tester.stream); + + /* fake peer sends response headers */ + struct aws_http_header response_headers_src[] = { + DEFINE_HEADER(":status", "200"), + }; + + struct aws_http_headers *response_headers = aws_http_headers_new(allocator); + aws_http_headers_add_array(response_headers, response_headers_src, AWS_ARRAY_SIZE(response_headers_src)); + + struct aws_h2_frame *response_frame = + aws_h2_frame_new_headers(allocator, stream_id, response_headers, false /*end_stream*/, 0, NULL); + ASSERT_SUCCESS(h2_fake_peer_send_frame(&s_tester.peer, response_frame)); + + /* User wants to update the connection window to overflow */ + size_t allowed_max_window_update = INT32_MAX - AWS_H2_INIT_WINDOW_SIZE; + aws_http2_connection_update_window(s_tester.connection, allowed_max_window_update + 100); + /* update stream window to max */ + aws_http_stream_update_window(stream_tester.stream, SIZE_MAX); + /* The other side should received the allowed max for the connection, since it's below the threshold */ + /* But no window update received for the stream, since the threshold is not hit. 
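 *
 * Working the numbers (AWS_H2_INIT_WINDOW_SIZE is the protocol default of 65535; the exact
 * capping behavior is inferred from the assertions that follow):
 *
 *   connection: window = 65535, threshold = UINT32_MAX   -> below threshold, send now
 *               increment sent = INT32_MAX - 65535        (capped at the 2^31 - 1 limit)
 *               pending left   = (allowed_max_window_update + 100) - allowed_max_window_update = 100
 *   stream:     window = 100,   threshold = 100 / 2 = 50  -> 100 >= 50, nothing sent yet
 *               pending left   = effectively SIZE_MAX (saturating add)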
*/ + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); + + size_t conn_window_update_index = start_index; + struct h2_decoded_frame *connection_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 0 /*stream_id*/, + conn_window_update_index + 1 /*idx*/, + &conn_window_update_index); + ASSERT_NOT_NULL(connection_window_update_frame); + ASSERT_UINT_EQUALS(allowed_max_window_update, connection_window_update_frame->window_size_increment); + /* The user requested window update still has more left, so everytime the window shrink, it will update the window + * automatically, until all the requested window update has been sent */ + size_t stream_window_update_index = 0; + struct h2_decoded_frame *stream_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 1 /*stream_id*/, + start_index /*idx*/, + &stream_window_update_index); + ASSERT_NULL(stream_window_update_frame); + + /* Sent 3 data frame with manual window update from user */ + size_t body_size = 50; + size_t num_data_frames = 3; + for (size_t i = 0; i < num_data_frames; i++) { + struct aws_byte_buf body_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_size)); + ASSERT_TRUE(aws_byte_buf_write_u8_n(&body_buf, (uint8_t)'a', body_size)); + struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&body_buf); + ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + aws_byte_buf_clean_up(&body_buf); + /* No more window update invoked */ + } + + testing_channel_drain_queued_tasks(&s_tester.testing_channel); + ASSERT_SUCCESS(h2_fake_peer_decode_messages_from_testing_channel(&s_tester.peer)); + /* Check that only one window update frame sent for the stream and 2 more window update frame sent for the + * connection to fit the overflow. + */ + for (size_t i = 0; i < 2; i++) { + connection_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 0 /*stream_id*/, + conn_window_update_index + 1 /*idx*/, + &conn_window_update_index); + ASSERT_NOT_NULL(connection_window_update_frame); + ASSERT_UINT_EQUALS(body_size, connection_window_update_frame->window_size_increment); + } + /* There shall be no more */ + connection_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 0 /*stream_id*/, conn_window_update_index + 1, NULL); + ASSERT_NULL(connection_window_update_frame); + + /* Only one stream window update should be found. */ + stream_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, + AWS_H2_FRAME_T_WINDOW_UPDATE, + 1 /*stream_id*/, + start_index /*idx*/, + &stream_window_update_index); + ASSERT_NOT_NULL(stream_window_update_frame); + /* The max allowed size should be max - current and the current there should be zero. 
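 *
 * Step by step (body_size = 50, stream threshold = 50, stream pending still saturated from
 * the SIZE_MAX update above; the flush-on-DATA behavior is inferred from the assertions):
 *
 *   DATA #1: stream window 100 -> 50,  50 >= 50 -> still no stream WINDOW_UPDATE
 *   DATA #2: stream window  50 -> 0,    0 <  50 -> flush, capped at INT32_MAX - 0 = INT32_MAX
 *   DATA #3: stream window INT32_MAX -> INT32_MAX - 50, far above the threshold -> nothing
 *
 * Meanwhile the first connection update topped the connection window up to INT32_MAX, so each
 * DATA frame frees exactly body_size of room, and the leftover pending 100 drains as the two
 * connection WINDOW_UPDATE frames of 50 that the loop above just verified.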
*/ + ASSERT_UINT_EQUALS(INT32_MAX, stream_window_update_frame->window_size_increment); + + stream_window_update_frame = h2_decode_tester_find_stream_frame( + &s_tester.peer.decode, AWS_H2_FRAME_T_WINDOW_UPDATE, 1 /*stream_id*/, stream_window_update_index + 1, NULL); + /* No more found */ + ASSERT_NULL(stream_window_update_frame); + + aws_http_message_release(request); + aws_http_headers_release(response_headers); + client_stream_tester_clean_up(&stream_tester); + return s_tester_clean_up(); +} From 1e6e25eba165775b556353e0f08e2509552be0b2 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 29 Apr 2025 10:41:14 -0700 Subject: [PATCH 45/48] math --- tests/test_h2_client.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 7a3ee2908..bfb05a3df 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -5858,7 +5858,9 @@ TEST_CASE(h2_client_batch_auto_window_update) { /* The other end sents half the initial window size to get the window below he threshold and trigger the window * update from client. */ - size_t total_data_size = AWS_H2_INIT_WINDOW_SIZE / 2 + 1; + /* The threshold is floor(AWS_H2_INIT_WINDOW_SIZE/2), so we need the window size to drop below the threshold. + * So, we need at least AWS_H2_INIT_WINDOW_SIZE - AWS_H2_INIT_WINDOW_SIZE/2 + 1 */ + size_t total_data_size = AWS_H2_INIT_WINDOW_SIZE - AWS_H2_INIT_WINDOW_SIZE / 2 + 1; /* The frame size has the limit from channel and the protocol level. */ size_t max_frame_data_size = From 932b2a560714ce593fc3072b94590e6a1ab53a89 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 29 Apr 2025 12:00:46 -0700 Subject: [PATCH 46/48] omg, this thing is so complicated --- tests/test_h2_client.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index bfb05a3df..3d942eb18 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -5969,6 +5969,11 @@ TEST_CASE(h2_client_batch_manual_window_update) { /* Make sure we have multiple data frames. */ size_t num_data_frames = 3; for (size_t i = 0; i < num_data_frames; i++) { + /* Client update the window manually before receive the data, since once the data received, the client will + * start to send out window updates if needed. 
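 * Put differently, the ordering only matters for the assertions, not for correctness: if the
 * DATA frame landed first and dragged a window below its threshold, the client's automatic
 * flush of whatever is already pending would fire before the manual update call, and the peer
 * could observe different WINDOW_UPDATE increments than the fixed per-iteration amounts this
 * test wants to count. Updating the windows up front keeps the flush points deterministic.
 * (That appears to be the intent; the exact increments depend on body_size and the thresholds
 * configured at the top of the test.)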
*/ + aws_http_stream_update_window(stream_tester.stream, body_size); + aws_http2_connection_update_window(s_tester.connection, body_size); + testing_channel_drain_queued_tasks(&s_tester.testing_channel); struct aws_byte_buf body_buf; ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_size)); ASSERT_TRUE(aws_byte_buf_write_u8_n(&body_buf, (uint8_t)'a', body_size)); @@ -5976,10 +5981,6 @@ TEST_CASE(h2_client_batch_manual_window_update) { ASSERT_SUCCESS(h2_fake_peer_send_data_frame(&s_tester.peer, stream_id, body_cursor, false /*end_stream*/)); testing_channel_drain_queued_tasks(&s_tester.testing_channel); aws_byte_buf_clean_up(&body_buf); - /* Client update the window manually */ - aws_http_stream_update_window(stream_tester.stream, body_size); - aws_http2_connection_update_window(s_tester.connection, body_size); - testing_channel_drain_queued_tasks(&s_tester.testing_channel); } testing_channel_drain_queued_tasks(&s_tester.testing_channel); From f5868b9672746d7a429a3db21bd8edc08378e312 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 29 Apr 2025 12:55:44 -0700 Subject: [PATCH 47/48] windows --- tests/test_h2_client.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_h2_client.c b/tests/test_h2_client.c index 3d942eb18..5bf944c57 100644 --- a/tests/test_h2_client.c +++ b/tests/test_h2_client.c @@ -5972,7 +5972,7 @@ TEST_CASE(h2_client_batch_manual_window_update) { /* Client update the window manually before receive the data, since once the data received, the client will * start to send out window updates if needed. */ aws_http_stream_update_window(stream_tester.stream, body_size); - aws_http2_connection_update_window(s_tester.connection, body_size); + aws_http2_connection_update_window(s_tester.connection, (uint32_t)body_size); testing_channel_drain_queued_tasks(&s_tester.testing_channel); struct aws_byte_buf body_buf; ASSERT_SUCCESS(aws_byte_buf_init(&body_buf, allocator, body_size)); @@ -6077,7 +6077,7 @@ TEST_CASE(h2_client_cap_manual_window_update) { /* User wants to update the connection window to overflow */ size_t allowed_max_window_update = INT32_MAX - AWS_H2_INIT_WINDOW_SIZE; - aws_http2_connection_update_window(s_tester.connection, allowed_max_window_update + 100); + aws_http2_connection_update_window(s_tester.connection, (uint32_t)allowed_max_window_update + 100); /* update stream window to max */ aws_http_stream_update_window(stream_tester.stream, SIZE_MAX); /* The other side should received the allowed max for the connection, since it's below the threshold */ From 196e51757ef56ce910190dc80a81c5a9563bf0c0 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Tue, 29 Apr 2025 13:30:44 -0700 Subject: [PATCH 48/48] adding comments to the local server impl --- tests/py_localhost/server.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tests/py_localhost/server.py b/tests/py_localhost/server.py index 678ab31ae..d5b5bed03 100644 --- a/tests/py_localhost/server.py +++ b/tests/py_localhost/server.py @@ -51,6 +51,7 @@ def __init__(self): def connection_made(self, transport: asyncio.Transport): self.transport = transport self.conn.initiate_connection() + # Increase the window to large enough to avoid blocking on the window limits. 
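        # 2147483647 is 2**31 - 1, the largest flow-control window HTTP/2 allows; adding about
        # half of it on top of the 65535-byte default keeps clients uploading to this echo
        # server from stalling while they wait for WINDOW_UPDATE round trips. With no stream_id
        # argument this call grows the connection-level window; the per-stream windows get the
        # same treatment in request_received() below.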
self.conn.increment_flow_control_window(int(2147483647/2)) self.transport.write(self.conn.data_to_send()) @@ -70,13 +71,9 @@ def data_received(self, data: bytes): for event in events: if isinstance(event, RequestReceived): self.request_received(event.headers, event.stream_id) - self.conn.increment_flow_control_window( - int(2147483647/2), event.stream_id) elif isinstance(event, DataReceived): - self.conn.increment_flow_control_window(event.flow_controlled_length) - self.conn.increment_flow_control_window( - event.flow_controlled_length, event.stream_id) - self.receive_data(event.data, event.stream_id) + self.receive_data(event.data, event.stream_id, + event.flow_controlled_length) elif isinstance(event, StreamEnded): self.stream_complete(event.stream_id) elif isinstance(event, ConnectionTerminated): @@ -92,6 +89,9 @@ def data_received(self, data: bytes): self.transport.write(self.conn.data_to_send()) def request_received(self, headers: List[Tuple[str, str]], stream_id: int): + # Bump the flow control window to large enough to avoid blocking on the window limits. + self.conn.increment_flow_control_window( + int(2147483647/2), stream_id) self.raw_headers = headers headers = collections.OrderedDict(headers) path = headers[':path'] @@ -151,12 +151,16 @@ def stream_complete(self, stream_id: int): self.conn.send_headers(stream_id, [(':status', '404')]) asyncio.ensure_future(self.send_data(b"Not Found", stream_id)) - def receive_data(self, data: bytes, stream_id: int): + def receive_data(self, data: bytes, stream_id: int, flow_controlled_length: int): """ We've received some data on a stream. If that stream is one we're expecting data on, save it off. Otherwise, reset the stream. """ try: + self.conn.increment_flow_control_window( + flow_controlled_length) + self.conn.increment_flow_control_window( + flow_controlled_length, stream_id) stream_data = self.stream_data[stream_id] except KeyError: self.conn.reset_stream(