1111import org .apache .lucene .util .BytesRef ;
1212import org .elasticsearch .common .Table ;
1313import org .elasticsearch .common .recycler .Recycler ;
14+ import org .elasticsearch .common .unit .ByteSizeUnit ;
1415import org .elasticsearch .common .xcontent .XContentHelper ;
1516import org .elasticsearch .rest .AbstractRestChannel ;
1617import org .elasticsearch .rest .RestResponse ;
2930
3031import static org .elasticsearch .rest .action .cat .RestTable .buildDisplayHeaders ;
3132import static org .elasticsearch .rest .action .cat .RestTable .buildResponse ;
32- import static org .hamcrest .Matchers .allOf ;
3333import static org .hamcrest .Matchers .contains ;
3434import static org .hamcrest .Matchers .equalTo ;
3535import static org .hamcrest .Matchers .equalToIgnoringCase ;
@@ -259,22 +259,28 @@ public void testMultiSort() {
259259 assertEquals (Arrays .asList (1 , 0 , 2 ), rowOrder );
260260 }
261261
262- @ AwaitsFix (bugUrl = "https://github.com/elastic/elasticsearch/issues/92181" )
263262 public void testPlainTextChunking () throws Exception {
263+ final var cells = randomArray (8 , 8 , String []::new , () -> randomAlphaOfLengthBetween (1 , 5 ));
264+ final var expectedRow = String .join (" " , cells ) + "\n " ;
265+
266+ // OutputStreamWriter has an 8kiB buffer so all chunks are at least that big
267+ final var bufferSize = ByteSizeUnit .KB .toIntBytes (8 );
268+ final var rowLength = expectedRow .length ();
269+ final var expectedRowsPerChunk = 1 + bufferSize / rowLength ; // end chunk after first row which overflows the buffer
270+ final var expectedChunkSize = expectedRowsPerChunk * rowLength ;
271+
272+ final var rowCount = between (expectedRowsPerChunk + 1 , expectedRowsPerChunk * 10 );
273+ final var expectedChunkCount = 1 + (rowCount * rowLength - 1 ) / expectedChunkSize ; // ceil(rowCount * rowLength / expectedChunkSize)
274+ assertThat (expectedChunkCount , greaterThan (1 ));
275+
264276 final var expectedBody = new StringBuilder ();
265- final var rowCount = between (10000 , 20000 );
266277 for (int i = 0 ; i < rowCount ; i ++) {
267278 table .startRow ();
268- table .addCell ("foo" );
269- table .addCell ("foo" );
270- table .addCell ("foo" );
271- table .addCell ("foo" );
272- table .addCell ("foo" );
273- table .addCell ("foo" );
274- table .addCell ("foo" );
275- table .addCell ("foo" );
279+ for (final var cell : cells ) {
280+ table .addCell (cell );
281+ }
276282 table .endRow ();
277- expectedBody .append (TEXT_TABLE_BODY );
283+ expectedBody .append (expectedRow );
278284 }
279285
280286 final var request = new FakeRestRequest .Builder (xContentRegistry ()).withHeaders (
@@ -286,13 +292,10 @@ public void testPlainTextChunking() throws Exception {
286292 public void sendResponse (RestResponse response ) {}
287293 });
288294
289- // OutputStreamWriter has an 8kiB buffer so all chunks are at least that big anyway
290- final var bodyChunks = getBodyChunks (response , 8192 );
291- final var rowLength = TEXT_TABLE_BODY .length ();
292- final var expectedChunkSize = ((8193 + rowLength ) / rowLength ) * rowLength ; // ceil(8193/rowLength) * rowLength
293- assertThat (bodyChunks .size (), allOf (greaterThan (1 ), equalTo ((rowCount * rowLength + expectedChunkSize ) / expectedChunkSize )));
294- assertThat (bodyChunks .get (0 ).length (), equalTo (expectedChunkSize ));
295- assertEquals (expectedBody .toString (), String .join ("" , bodyChunks ));
295+ final var bodyChunks = getBodyChunks (response , bufferSize );
296+ assertEquals ("chunk count" , expectedChunkCount , bodyChunks .size ());
297+ assertEquals ("first chunk size" , expectedChunkSize , bodyChunks .get (0 ).length ());
298+ assertEquals ("body contents" , expectedBody .toString (), String .join ("" , bodyChunks ));
296299 }
297300
298301 public void testEmptyTable () throws Exception {