@@ -122,8 +122,43 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
         Netty4Utils.setup();
     }
 
+    /*
+     * Size in bytes of an individual message received by io.netty.handler.codec.MessageAggregator, which accumulates the content of an
+     * HTTP request. This number is used to estimate the maximum number of allowed buffers before the MessageAggregator's internal
+     * collection of buffers is resized.
+     *
+     * By default we assume the Ethernet MTU (1500 bytes) but users can override it with a system property.
+     */
+    private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("es.net.mtu", "1500")));
+
+    private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components";
+
     public static Setting<Integer> SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS =
-        Setting.intSetting("http.netty.max_composite_buffer_components", -1, Property.NodeScope);
+        new Setting<>(SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS, (s) -> {
+            ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(s);
+            /*
+             * Netty accumulates buffers containing data from all incoming network packets that make up one HTTP request in an instance of
+             * io.netty.buffer.CompositeByteBuf (think of it as a buffer of buffers). Once its capacity is reached, the buffer will iterate
+             * over its individual entries and put them into larger buffers (see io.netty.buffer.CompositeByteBuf#consolidateIfNeeded()
+             * for implementation details). We do not want that buffer to be resized because this leads to additional garbage on the heap
+             * and also increases the application's native memory footprint (as direct byte buffers hold their contents off-heap).
+             *
+             * With this setting we control the CompositeByteBuf's capacity (which is by default 1024, see
+             * io.netty.handler.codec.MessageAggregator#DEFAULT_MAX_COMPOSITEBUFFER_COMPONENTS). To determine a proper default capacity for
+             * that buffer, we need to consider that the upper bound for the size of HTTP requests is determined by `maxContentLength`. The
+             * number of buffers that are needed depends on how often Netty reads network packets, which depends on the network type (MTU).
+             * We assume here that Elasticsearch receives HTTP requests via an Ethernet connection, which has an MTU of 1500 bytes.
+             *
+             * Note that we are *not* pre-allocating any memory based on this setting but rather determining the CompositeByteBuf's capacity.
+             * The tradeoff is between fewer (but larger) buffers contained in the CompositeByteBuf and more (but smaller) buffers.
+             * With the default max content length of 100 MB and an MTU of 1500 bytes we would allow 69905 entries.
+             */
+            long maxBufferComponentsEstimate = Math.round((double) maxContentLength.getBytes() / MTU.getBytes());
+            // Clamp the value to the allowed range. Note that Netty's CompositeByteBuf implementation
+            // does not allow fewer than two components.
+            long maxBufferComponents = Math.max(2, Math.min(maxBufferComponentsEstimate, Integer.MAX_VALUE));
+            return String.valueOf(maxBufferComponents);
+        }, s -> Setting.parseInt(s, 2, Integer.MAX_VALUE, SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS), Property.NodeScope);
 
     public static final Setting<Integer> SETTING_HTTP_WORKER_COUNT = new Setting<>("http.netty.worker_count",
         (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2),
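The default computed above is plain arithmetic: divide the configured http.max_content_length by the assumed MTU and clamp the result to the range Netty accepts. The standalone sketch below is not part of the patch; the class and variable names are invented for illustration, and it only reproduces the estimate for the default 100 MB content length.

    public final class CompositeBufferComponentsEstimate {

        public static void main(String[] args) {
            // default http.max_content_length (100mb)
            final long maxContentLengthBytes = 100L * 1024 * 1024;
            // assumed Ethernet MTU, overridable via the same system property the patch introduces
            final long mtuBytes = Long.parseLong(System.getProperty("es.net.mtu", "1500"));

            // one composite buffer component per network packet, rounded
            final long estimate = Math.round((double) maxContentLengthBytes / mtuBytes);

            // CompositeByteBuf needs at least two components; the setting's parser also caps at Integer.MAX_VALUE
            final long clamped = Math.max(2, Math.min(estimate, Integer.MAX_VALUE));

            System.out.println(clamped); // prints 69905 for the values above
        }
    }

A deployment on a network with jumbo frames could, for example, start the node with -Des.net.mtu=9000 (an illustrative value) to lower the computed default, or set http.netty.max_composite_buffer_components explicitly to bypass the estimate altogether.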
@@ -236,8 +271,9 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic
         this.maxContentLength = maxContentLength;
 
         logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " +
239- "receive_predictor[{}], pipelining[{}], pipelining_max_events[{}]" ,
240- maxChunkSize , maxHeaderSize , maxInitialLineLength , this .maxContentLength , receivePredictor , pipelining , pipeliningMaxEvents );
274+ "receive_predictor[{}], max_composite_buffer_components[{}], pipelining[{}], pipelining_max_events[{}]" ,
275+ maxChunkSize , maxHeaderSize , maxInitialLineLength , this .maxContentLength , receivePredictor , maxCompositeBufferComponents ,
276+ pipelining , pipeliningMaxEvents );
     }
 
     public Settings settings() {
@@ -532,9 +568,7 @@ protected void initChannel(Channel ch) throws Exception {
             ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor());
             ch.pipeline().addLast("encoder", new HttpResponseEncoder());
             final HttpObjectAggregator aggregator = new HttpObjectAggregator(Math.toIntExact(transport.maxContentLength.getBytes()));
-            if (transport.maxCompositeBufferComponents != -1) {
-                aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
-            }
+            aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
             ch.pipeline().addLast("aggregator", aggregator);
             if (transport.compression) {
                 ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));
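Because the setting now always yields a value of at least two, the old -1 sentinel check is gone and the cap is applied unconditionally before the aggregator joins the pipeline. The following is a minimal, self-contained sketch of a Netty channel initializer that applies the same cap; the handler names and constants are illustrative assumptions, not the actual Elasticsearch pipeline.

    import io.netty.channel.ChannelInitializer;
    import io.netty.channel.socket.SocketChannel;
    import io.netty.handler.codec.http.HttpObjectAggregator;
    import io.netty.handler.codec.http.HttpServerCodec;

    public class SketchHttpChannelInitializer extends ChannelInitializer<SocketChannel> {

        private static final int MAX_CONTENT_LENGTH = 100 * 1024 * 1024; // bytes, illustrative
        private static final int MAX_COMPOSITE_BUFFER_COMPONENTS = 69905; // illustrative, matches the 100 MB / 1500 B estimate

        @Override
        protected void initChannel(SocketChannel ch) {
            ch.pipeline().addLast("codec", new HttpServerCodec());
            final HttpObjectAggregator aggregator = new HttpObjectAggregator(MAX_CONTENT_LENGTH);
            // Netty requires this to be configured before the aggregator is added to the pipeline,
            // and rejects values below two (CompositeByteBuf cannot hold fewer than two components).
            aggregator.setMaxCumulationBufferComponents(MAX_COMPOSITE_BUFFER_COMPONENTS);
            ch.pipeline().addLast("aggregator", aggregator);
        }
    }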