From 33a85bb14c0a918e1ddccfc204b24817eb53c961 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Mon, 21 Jun 2021 16:26:05 -0600
Subject: [PATCH 01/29] Initial

---
 .../action/bulk/BulkShardRequest.java         |  3 +-
 .../RecoveryTranslogOperationsRequest.java    |  3 +-
 .../transport/ConnectionProfile.java          | 31 +++++++++++++++++--
 .../transport/RawDataTransportRequest.java    | 12 +++++++
 .../transport/RemoteClusterService.java       |  6 ++++
 .../transport/RemoteConnectionStrategy.java   |  3 +-
 .../elasticsearch/transport/TcpTransport.java |  5 ++-
 .../transport/TransportSettings.java          |  2 ++
 .../xpack/ccr/action/ShardChangesAction.java  |  3 +-
 9 files changed, 60 insertions(+), 8 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java

diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
index 9ecac5d377da6..3984c03b1cdeb 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java
@@ -18,13 +18,14 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.transport.RawDataTransportRequest;
 
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 import java.util.stream.Stream;
 
-public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> implements Accountable {
+public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> implements Accountable, RawDataTransportRequest {
 
     private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class);
 
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java
index 8cbd3e221fac7..6a6c2a29a7356 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java
@@ -13,11 +13,12 @@
 import org.elasticsearch.index.seqno.RetentionLeases;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.transport.RawDataTransportRequest;
 
 import java.io.IOException;
 import java.util.List;
 
-public class RecoveryTranslogOperationsRequest extends RecoveryTransportRequest {
+public class RecoveryTranslogOperationsRequest extends RecoveryTransportRequest implements RawDataTransportRequest {
 
     private final long recoveryId;
     private final ShardId shardId;
diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java
index 2a691536b846c..9ffa8adee2a12 100644
--- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java
+++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java
@@ -35,7 +35,8 @@ public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionPro
         if (profile == null) {
             return fallbackProfile;
         } else if (profile.getConnectTimeout() != null && profile.getHandshakeTimeout() != null
-            && profile.getPingInterval() != null && profile.getCompressionEnabled() != null) {
+            && profile.getPingInterval() != null && profile.getCompressionEnabled() != null
+            && profile.getRawDataCompressionEnabled() != null) {
             return profile;
         } else {
             ConnectionProfile.Builder builder = new ConnectionProfile.Builder(profile);
@@ -51,6 +52,9 @@ public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionPro
             if (profile.getCompressionEnabled() == null) {
                 builder.setCompressionEnabled(fallbackProfile.getCompressionEnabled());
             }
+            if (profile.getRawDataCompressionEnabled() == null) {
+                builder.setRawDataCompressionEnabled(fallbackProfile.getRawDataCompressionEnabled());
+            }
             return builder.build();
         }
     }
@@ -116,15 +120,18 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption
     private final TimeValue handshakeTimeout;
     private final TimeValue pingInterval;
     private final Boolean compressionEnabled;
+    private final Boolean rawDataCompressionEnabled;
 
     private ConnectionProfile(List<ConnectionTypeHandle> handles, int numConnections, TimeValue connectTimeout,
-                              TimeValue handshakeTimeout, TimeValue pingInterval, Boolean compressionEnabled) {
+                              TimeValue handshakeTimeout, TimeValue pingInterval, Boolean compressionEnabled,
+                              Boolean rawDataCompressionEnabled) {
         this.handles = handles;
         this.numConnections = numConnections;
         this.connectTimeout = connectTimeout;
         this.handshakeTimeout = handshakeTimeout;
         this.pingInterval = pingInterval;
         this.compressionEnabled = compressionEnabled;
+        this.rawDataCompressionEnabled = rawDataCompressionEnabled;
     }
 
     /**
@@ -137,6 +144,7 @@ public static class Builder {
         private TimeValue connectTimeout;
         private TimeValue handshakeTimeout;
         private Boolean compressionEnabled;
+        private Boolean rawDataCompressionEnabled;
         private TimeValue pingInterval;
 
         /** create an empty builder */
@@ -151,6 +159,7 @@ public Builder(ConnectionProfile source) {
             connectTimeout = source.getConnectTimeout();
             handshakeTimeout = source.getHandshakeTimeout();
             compressionEnabled = source.getCompressionEnabled();
+            rawDataCompressionEnabled = source.getRawDataCompressionEnabled();
             pingInterval = source.getPingInterval();
         }
         /**
@@ -191,6 +200,14 @@ public Builder setCompressionEnabled(boolean compressionEnabled) {
             return this;
         }
 
+        /**
+         * Sets raw data compression enabled for this connection profile
+         */
+        public Builder setRawDataCompressionEnabled(boolean rawDataCompressionEnabled) {
+            this.rawDataCompressionEnabled = rawDataCompressionEnabled;
+            return this;
+        }
+
         /**
          * Adds a number of connections for one or more types. Each type can only be added once.
          * @param numConnections the number of connections to use in the pool for the given connection types
@@ -222,7 +239,7 @@ public ConnectionProfile build() {
                 throw new IllegalStateException("not all types are added for this connection profile - missing types: " + types);
             }
             return new ConnectionProfile(Collections.unmodifiableList(handles), numConnections, connectTimeout, handshakeTimeout,
-                pingInterval, compressionEnabled);
+                pingInterval, compressionEnabled, rawDataCompressionEnabled);
         }
     }
 
@@ -256,6 +273,14 @@ public Boolean getCompressionEnabled() {
         return compressionEnabled;
     }
 
+    /**
+     * Returns boolean indicating if raw data compression is enabled or null if no explicit raw data compression
+     * is set on this profile.
+     */
+    public Boolean getRawDataCompressionEnabled() {
+        return rawDataCompressionEnabled;
+    }
+
     /**
      * Returns the total number of connections for this profile
      */
diff --git a/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java b/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java
new file mode 100644
index 0000000000000..bf87290fdd3e6
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java
@@ -0,0 +1,12 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.transport;
+
+public interface RawDataTransportRequest {
+}
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
index 4f9aefc7808c0..3866f8ee4c440 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
@@ -95,6 +95,12 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
         (ns, key) -> boolSetting(key, TransportSettings.TRANSPORT_COMPRESS,
             new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope));
 
+    public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_COMPRESS_RAW_DATA = Setting.affixKeySetting(
+        "cluster.remote.",
+        "transport.compress_raw_data",
+        (ns, key) -> boolSetting(key, TransportSettings.TRANSPORT_COMPRESS_RAW_DATA,
+            new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope));
+
     private final boolean enabled;
 
     public boolean isEnabled() {
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java
index 981e7dc144a63..53d25ffb7896f 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java
@@ -354,7 +354,8 @@ private List<ActionListener<Void>> getAndClearListeners() {
     private boolean connectionProfileChanged(ConnectionProfile oldProfile, ConnectionProfile newProfile) {
         return Objects.equals(oldProfile.getCompressionEnabled(), newProfile.getCompressionEnabled()) == false
-            || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false;
+            || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false
+            || Objects.equals(oldProfile.getRawDataCompressionEnabled(), newProfile.getRawDataCompressionEnabled()) == false;
     }
 
     static class StrategyValidator<T> implements Setting.Validator<T> {
diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
index 636b01d43a65e..cc2bc961dd4ae 100644
--- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
+++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -184,6 +184,7 @@ public final class NodeChannels extends CloseableConnection {
         private final DiscoveryNode node;
         private final Version version;
         private final boolean compress;
+        private final boolean rawDataCompress;
         private final AtomicBoolean isClosing = new AtomicBoolean(false);
 
         NodeChannels(DiscoveryNode node, List<TcpChannel> channels, ConnectionProfile connectionProfile, Version handshakeVersion) {
@@ -198,6 +199,7 @@ public final class NodeChannels extends CloseableConnection {
             }
             version = handshakeVersion;
             compress = connectionProfile.getCompressionEnabled();
+            rawDataCompress = connectionProfile.getRawDataCompressionEnabled();
         }
 
         @Override
@@ -242,7 +244,8 @@ public void sendRequest(long requestId, String action, TransportRequest request,
                 throw new NodeNotConnectedException(node, "connection already closed");
             }
             TcpChannel channel = channel(options.type());
-            outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), compress, false);
+            boolean shouldCompress = compress || (rawDataCompress && request instanceof RawDataTransportRequest);
+            outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), shouldCompress, false);
         }
 
         @Override
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
index f0938b47a65a2..37e24719c1c4c 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
@@ -51,6 +51,8 @@ public final class TransportSettings {
         key -> intSetting(key, -1, -1, Setting.Property.NodeScope));
     public static final Setting<Boolean> TRANSPORT_COMPRESS =
         boolSetting("transport.compress", false, Setting.Property.NodeScope);
+    public static final Setting<Boolean> TRANSPORT_COMPRESS_RAW_DATA =
+        boolSetting("transport.compress_raw_data", false, Setting.Property.NodeScope);
     // the scheduled internal ping interval setting, defaults to disabled (-1)
     public static final Setting<TimeValue> PING_SCHEDULE =
         timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Setting.Property.NodeScope);
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java
index 41a5b37004fba..0eace656f304b 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java
@@ -41,6 +41,7 @@
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.RawDataTransportRequest;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.xpack.ccr.Ccr;
 
@@ -66,7 +67,7 @@ private ShardChangesAction() {
         super(NAME, ShardChangesAction.Response::new);
     }
 
-    public static class Request extends SingleShardRequest<Request> {
+    public static class Request extends SingleShardRequest<Request> implements RawDataTransportRequest {
 
         private long fromSeqNo;
         private int maxOperationCount;
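
[editor's note] Patch 01 adds RawDataTransportRequest as a pure marker interface: NodeChannels#sendRequest
compresses a request when transport.compress is set, or when transport.compress_raw_data is set and the
request implements the marker. A request type opts in simply by declaring the interface. A minimal sketch,
using a hypothetical request class (the real adopters in this patch are BulkShardRequest,
RecoveryTranslogOperationsRequest and ShardChangesAction.Request):

    // hypothetical illustration only; not part of the patch
    public class BigPayloadRequest extends TransportRequest implements RawDataTransportRequest {
        // nothing to implement: the marker only tags the request as carrying bulk raw data,
        // which sendRequest checks via `request instanceof RawDataTransportRequest`
    }
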
From a31922bfca5f10536dc070f5fa0b57d5d88deb90 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Mon, 21 Jun 2021 16:41:43 -0600
Subject: [PATCH 02/29] Changes

---
 .../HandshakingTransportAddressConnector.java |  2 +-
 .../transport/ConnectionProfile.java          |  6 +++++-
 .../transport/RemoteConnectionStrategy.java   |  2 ++
 .../ClusterConnectionManagerTests.java        |  2 +-
 .../transport/ConnectionProfileTests.java     | 18 ++++++++++++++++++
 .../transport/RemoteClusterServiceTests.java  | 16 +++++++++++++---
 .../RemoteConnectionStrategyTests.java        | 14 ++++++++++++--
 .../elasticsearch/transport/TestProfiles.java |  1 +
 .../transport/nio/MockNioTransport.java       |  1 +
 9 files changed, 54 insertions(+), 8 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java
index 2270656124d88..c7a4b51c5a9ec 100644
--- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java
+++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java
@@ -74,7 +74,7 @@ protected void doRun() {
                 logger.trace("[{}] opening probe connection", thisConnectionAttempt);
                 transportService.openConnection(targetNode,
                     ConnectionProfile.buildSingleChannelProfile(Type.REG, probeConnectTimeout, probeHandshakeTimeout,
-                        TimeValue.MINUS_ONE, null), listener.delegateFailure((l, connection) -> {
+                        TimeValue.MINUS_ONE, null, null), listener.delegateFailure((l, connection) -> {
                         logger.trace("[{}] opened probe connection", thisConnectionAttempt);
 
                         // use NotifyOnceListener to make sure the following line does not result in onFailure being called when
diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java
index 9ffa8adee2a12..9b00ac784a91f 100644
--- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java
+++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java
@@ -76,6 +76,7 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings)
         builder.setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings));
         builder.setPingInterval(TransportSettings.PING_SCHEDULE.get(settings));
         builder.setCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS.get(settings));
+        builder.setRawDataCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS_RAW_DATA.get(settings));
         builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK);
         builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING);
         // if we are not master eligible we don't need a dedicated channel to publish the state
@@ -93,7 +94,7 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings)
      */
     public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, @Nullable TimeValue connectTimeout,
                                                               @Nullable TimeValue handshakeTimeout, @Nullable TimeValue pingInterval,
-                                                              @Nullable Boolean compressionEnabled) {
+                                                              @Nullable Boolean compressionEnabled, @Nullable Boolean rawDataCompressionEnabled) {
         Builder builder = new Builder();
         builder.addConnections(1, channelType);
         final EnumSet<TransportRequestOptions.Type> otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class);
@@ -111,6 +112,9 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption
         if (compressionEnabled != null) {
             builder.setCompressionEnabled(compressionEnabled);
         }
+        if (rawDataCompressionEnabled != null) {
+            builder.setRawDataCompressionEnabled(rawDataCompressionEnabled);
+        }
         return builder.build();
     }
 
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java
index 53d25ffb7896f..8f7a1530a393b 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java
@@ -125,6 +125,8 @@ static ConnectionProfile buildConnectionProfile(String clusterAlias, Settings se
             .setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings))
             .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings))
             .setCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings))
+            .setRawDataCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS_RAW_DATA
+                .getConcreteSettingForNamespace(clusterAlias).get(settings))
             .setPingInterval(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterAlias).get(settings))
             .addConnections(0, TransportRequestOptions.Type.BULK, TransportRequestOptions.Type.STATE,
                 TransportRequestOptions.Type.RECOVERY, TransportRequestOptions.Type.PING)
diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java
index 4976183220870..9f1543e21d10f 100644
--- a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java
@@ -56,7 +56,7 @@ public void createConnectionManager() {
         TimeValue oneSecond = new TimeValue(1000);
         TimeValue oneMinute = TimeValue.timeValueMinutes(1);
         connectionProfile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, oneSecond, oneSecond,
-            oneMinute, false);
+            oneMinute, false, false);
     }
 
     @After
diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java
index dfb5232d9a7d6..df70b54262899 100644
--- a/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java
@@ -32,6 +32,7 @@ public void testBuildConnectionProfile() {
         TimeValue handshakeTimeout = TimeValue.timeValueMillis(randomIntBetween(1, 10));
         TimeValue pingInterval = TimeValue.timeValueMillis(randomIntBetween(1, 10));
         boolean compressionEnabled = randomBoolean();
+        boolean rawDataCompressionEnabled = randomBoolean();
         final boolean setConnectTimeout = randomBoolean();
         if (setConnectTimeout) {
             builder.setConnectTimeout(connectTimeout);
@@ -44,6 +45,10 @@ public void testBuildConnectionProfile() {
         if (setCompress) {
             builder.setCompressionEnabled(compressionEnabled);
         }
+        final boolean setRawDataCompress = randomBoolean();
+        if (setRawDataCompress) {
+            builder.setRawDataCompressionEnabled(rawDataCompressionEnabled);
+        }
         final boolean setPingInterval = randomBoolean();
         if (setPingInterval) {
             builder.setPingInterval(pingInterval);
@@ -81,6 +86,12 @@ public void testBuildConnectionProfile() {
             assertNull(build.getCompressionEnabled());
         }
 
+        if (setRawDataCompress) {
+            assertEquals(rawDataCompressionEnabled, build.getRawDataCompressionEnabled());
+        } else {
+            assertNull(build.getRawDataCompressionEnabled());
+        }
+
         if (setPingInterval) {
             assertEquals(pingInterval, build.getPingInterval());
         } else {
@@ -173,6 +184,10 @@ public void testConnectionProfileResolve() {
         if (connectionCompressSet) {
             builder.setCompressionEnabled(randomBoolean());
         }
+        final boolean connectionRawDataCompressSet = randomBoolean();
+        if (connectionRawDataCompressSet) {
+            builder.setRawDataCompressionEnabled(randomBoolean());
+        }
 
         final ConnectionProfile profile = builder.build();
         final ConnectionProfile resolved = ConnectionProfile.resolveConnectionProfile(profile, defaultProfile);
@@ -188,6 +203,8 @@ public void testConnectionProfileResolve() {
             equalTo(pingIntervalSet ? profile.getPingInterval() : defaultProfile.getPingInterval()));
         assertThat(resolved.getCompressionEnabled(),
             equalTo(connectionCompressSet ? profile.getCompressionEnabled() : defaultProfile.getCompressionEnabled()));
+        assertThat(resolved.getRawDataCompressionEnabled(),
+            equalTo(connectionRawDataCompressSet ? profile.getRawDataCompressionEnabled() : defaultProfile.getRawDataCompressionEnabled()));
     }
 
     public void testDefaultConnectionProfile() {
@@ -201,6 +218,7 @@ public void testDefaultConnectionProfile() {
         assertEquals(TransportSettings.CONNECT_TIMEOUT.get(Settings.EMPTY), profile.getConnectTimeout());
         assertEquals(TransportSettings.CONNECT_TIMEOUT.get(Settings.EMPTY), profile.getHandshakeTimeout());
         assertEquals(TransportSettings.TRANSPORT_COMPRESS.get(Settings.EMPTY), profile.getCompressionEnabled());
+        assertEquals(TransportSettings.TRANSPORT_COMPRESS_RAW_DATA.get(Settings.EMPTY), profile.getRawDataCompressionEnabled());
         assertEquals(TransportSettings.PING_SCHEDULE.get(Settings.EMPTY), profile.getPingInterval());
 
         profile = ConnectionProfile.buildDefaultConnectionProfile(nonMasterNode());
diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
index 3cf8671701da0..eb9dbcdd9ae42 100644
--- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java
@@ -369,8 +369,12 @@ public void testChangeSettings() throws Exception {
                 Settings.Builder settingsChange = Settings.builder();
                 TimeValue pingSchedule = TimeValue.timeValueSeconds(randomIntBetween(6, 8));
                 settingsChange.put("cluster.remote.cluster_1.transport.ping_schedule", pingSchedule);
-                boolean compressionEnabled = true;
-                settingsChange.put("cluster.remote.cluster_1.transport.compress", compressionEnabled);
+                boolean rawDataOption = randomBoolean();
+                if (rawDataOption) {
+                    settingsChange.put("cluster.remote.cluster_1.transport.compress_raw_data", true);
+                } else {
+                    settingsChange.put("cluster.remote.cluster_1.transport.compress", true);
+                }
                 settingsChange.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString());
                 service.validateAndUpdateRemoteCluster("cluster_1", settingsChange.build());
                 assertBusy(remoteClusterConnection::isClosed);
@@ -378,7 +382,13 @@
                 remoteClusterConnection = service.getRemoteClusterConnection("cluster_1");
                 ConnectionProfile connectionProfile = remoteClusterConnection.getConnectionManager().getConnectionProfile();
                 assertEquals(pingSchedule, connectionProfile.getPingInterval());
-                assertEquals(compressionEnabled, connectionProfile.getCompressionEnabled());
+                if (rawDataOption) {
+                    assertEquals(false, connectionProfile.getCompressionEnabled());
+                    assertEquals(true, connectionProfile.getRawDataCompressionEnabled());
+                } else {
+                    assertEquals(true, connectionProfile.getCompressionEnabled());
+                    assertEquals(false, connectionProfile.getRawDataCompressionEnabled());
+                }
             }
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java
index 7b48c29eb97df..0f6092c68fb54 100644
--- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java
@@ -46,6 +46,7 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() {
         ClusterConnectionManager connectionManager = new ClusterConnectionManager(TestProfiles.LIGHT_PROFILE, mock(Transport.class));
         assertEquals(TimeValue.MINUS_ONE, connectionManager.getConnectionProfile().getPingInterval());
         assertEquals(false, connectionManager.getConnectionProfile().getCompressionEnabled());
+        assertEquals(false, connectionManager.getConnectionProfile().getRawDataCompressionEnabled());
         RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager);
         FakeConnectionStrategy first = new FakeConnectionStrategy("cluster-alias", mock(TransportService.class), remoteConnectionManager,
             RemoteConnectionStrategy.ConnectionStrategy.PROXY);
@@ -53,11 +54,20 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() {
         Settings.Builder newBuilder = Settings.builder();
         newBuilder.put(RemoteConnectionStrategy.REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace("cluster-alias").getKey(), "proxy");
         newBuilder.put(ProxyConnectionStrategy.PROXY_ADDRESS.getConcreteSettingForNamespace("cluster-alias").getKey(), "127.0.0.1:9300");
-        if (randomBoolean()) {
+        String ping = "ping";
+        String compress = "compress";
+        String rawDataCompress = "raw_data_compress";
+        String change = randomFrom(ping, compress, rawDataCompress);
+        if (change.equals(ping)) {
             newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace("cluster-alias").getKey(),
                 TimeValue.timeValueSeconds(5));
-        } else {
+        } else if (change.equals(compress)) {
             newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("cluster-alias").getKey(), true);
+        } else if (change.equals(rawDataCompress)) {
+            newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_COMPRESS_RAW_DATA.getConcreteSettingForNamespace("cluster-alias").getKey(),
+                true);
+        } else {
+            throw new AssertionError("Unexpected option: " + change);
+        }
         assertTrue(first.shouldRebuildConnection(newBuilder.build()));
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java b/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java
index 432578b3cef28..4a28c39d92e18 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java
@@ -26,6 +26,7 @@ private TestProfiles() {}
         builder.setConnectTimeout(source.getConnectTimeout());
         builder.setHandshakeTimeout(source.getHandshakeTimeout());
         builder.setCompressionEnabled(source.getCompressionEnabled());
+        builder.setRawDataCompressionEnabled(source.getRawDataCompressionEnabled());
         builder.setPingInterval(source.getPingInterval());
         builder.addConnections(1,
             TransportRequestOptions.Type.BULK,
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java
index 0c603bcc512f1..46bf48ad0b3bc 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java
@@ -171,6 +171,7 @@ protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile con
         builder.setConnectTimeout(connectionProfile.getConnectTimeout());
         builder.setPingInterval(connectionProfile.getPingInterval());
         builder.setCompressionEnabled(connectionProfile.getCompressionEnabled());
+        builder.setRawDataCompressionEnabled(connectionProfile.getRawDataCompressionEnabled());
         return builder.build();
     }
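
[editor's note] Patch 02 mirrors transport.compress_raw_data as a dynamic per-remote-cluster setting.
A sketch of how the new keys could be set through the usual Settings API (the cluster alias and seed
address are invented for illustration):

    // compress only raw-data requests (bulk, recovery, CCR) toward cluster_1, not every message
    Settings settings = Settings.builder()
        .putList("cluster.remote.cluster_1.seeds", "127.0.0.1:9300")
        .put("cluster.remote.cluster_1.transport.compress", false)
        .put("cluster.remote.cluster_1.transport.compress_raw_data", true)
        .build();
    // applied dynamically, e.g. through a cluster update settings request
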
From 8bc7cbf8a2a05efb5bb1f8e76d39dd9cb1f22ce2 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Thu, 24 Jun 2021 16:32:52 -0600
Subject: [PATCH 03/29] Changes

---
 server/build.gradle                           |   3 +
 .../common/compress/DeflateCompressor.java    |   2 +-
 .../elasticsearch/common/compress/ESLZ4.java  | 462 ++++++++++++++++++
 .../DeflateTransportDecompressor.java         | 125 +++++
 .../transport/InboundDecoder.java             |  17 +-
 .../transport/Lz4TransportDecompressor.java   | 136 ++++++
 .../transport/OutboundMessage.java            |   9 +-
 .../transport/TransportDecompressor.java      | 144 ++----
 ...=> DeflateTransportDecompressorTests.java} |  27 +-
 9 files changed, 799 insertions(+), 126 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java
 create mode 100644 server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java
 create mode 100644 server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
 rename server/src/test/java/org/elasticsearch/transport/{TransportDecompressorTests.java => DeflateTransportDecompressorTests.java} (86%)

diff --git a/server/build.gradle b/server/build.gradle
index 7688bcb61a61d..eddd17202f263 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -50,6 +50,9 @@ dependencies {
   api project(":libs:elasticsearch-cli")
   api 'com.carrotsearch:hppc:0.8.1'
 
+  // LZ4
+  api 'org.lz4:lz4-java:1.8.0'
+
   // time handling, remove with java 8 time
   api "joda-time:joda-time:${versions.joda}"
 
diff --git a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java
index da94385118155..c44ce128d7dd3 100644
--- a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java
+++ b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java
@@ -34,7 +34,7 @@ public class DeflateCompressor implements Compressor {
     // It needs to be different from other compressors and to not be specific
     // enough so that no stream starting with these bytes could be detected as
     // a XContent
-    private static final byte[] HEADER = new byte[]{'D', 'F', 'L', '\0'};
+    public static final byte[] HEADER = new byte[]{'D', 'F', 'L', '\0'};
     // 3 is a good trade-off between speed and compression ratio
     private static final int LEVEL = 3;
     // We use buffering on the input and output of in/def-laters in order to
diff --git a/server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java b/server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java
new file mode 100644
index 0000000000000..aac8dc76cd91f
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java
@@ -0,0 +1,462 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.util.FutureArrays;
+import org.apache.lucene.util.FutureObjects;
+import org.apache.lucene.util.compress.LZ4;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+public class ESLZ4 {
+
+    static final int MEMORY_USAGE = 14;
+    static final int MIN_MATCH = 4; // minimum length of a match
+    static final int MAX_DISTANCE = 1 << 16; // maximum distance of a reference
+    static final int LAST_LITERALS = 5; // the last 5 bytes must be encoded as literals
+    static final int HASH_LOG_HC = 15; // log size of the dictionary for compressHC
+    static final int HASH_TABLE_SIZE_HC = 1 << HASH_LOG_HC;
+
+    private static int hash(int i, int hashBits) {
+        return (i * -1640531535) >>> (32 - hashBits);
+    }
+
+    private static int hashHC(int i) {
+        return hash(i, HASH_LOG_HC);
+    }
+
+    private static int readInt(byte[] buf, int i) {
+        return ((buf[i] & 0xFF) << 24) | ((buf[i+1] & 0xFF) << 16) | ((buf[i+2] & 0xFF) << 8) | (buf[i+3] & 0xFF);
+    }
+
+    private static int commonBytes(byte[] b, int o1, int o2, int limit) {
+        assert o1 < o2;
+        // never -1 because lengths always differ
+        return FutureArrays.mismatch(b, o1, limit, b, o2, limit);
+    }
+
+    /**
+     * Decompress at least {@code decompressedLen} bytes into
+     * {@code dest[dOff:]}. Please note that {@code dest} must be large
+     * enough to be able to hold all decompressed data (meaning that you
+     * need to know the total decompressed length).
+     * If the given bytes were compressed using a preset dictionary then the same
+     * dictionary must be provided in {@code dest[dOff-dictLen:dOff]}.
+     */
+    public static int decompress(DataInput compressed, int decompressedLen, byte[] dest, int dOff) throws IOException {
+        final int destEnd = dOff + decompressedLen;
+
+        do {
+            // literals
+            final int token = compressed.readByte() & 0xFF;
+            int literalLen = token >>> 4;
+
+            if (literalLen != 0) {
+                if (literalLen == 0x0F) {
+                    byte len;
+                    while ((len = compressed.readByte()) == (byte) 0xFF) {
+                        literalLen += 0xFF;
+                    }
+                    literalLen += len & 0xFF;
+                }
+                compressed.readBytes(dest, dOff, literalLen);
+                dOff += literalLen;
+            }
+
+            if (dOff >= destEnd) {
+                break;
+            }
+
+            // matches
+            final int matchDec = (compressed.readByte() & 0xFF) | ((compressed.readByte() & 0xFF) << 8);
+            assert matchDec > 0;
+
+            int matchLen = token & 0x0F;
+            if (matchLen == 0x0F) {
+                int len;
+                while ((len = compressed.readByte()) == (byte) 0xFF) {
+                    matchLen += 0xFF;
+                }
+                matchLen += len & 0xFF;
+            }
+            matchLen += MIN_MATCH;
+
+            // copying a multiple of 8 bytes can make decompression from 5% to 10% faster
+            final int fastLen = (matchLen + 7) & 0xFFFFFFF8;
+            if (matchDec < matchLen || dOff + fastLen > destEnd) {
+                // overlap -> naive incremental copy
+                for (int ref = dOff - matchDec, end = dOff + matchLen; dOff < end; ++ref, ++dOff) {
+                    dest[dOff] = dest[ref];
+                }
+            } else {
+                // no overlap -> arraycopy
+                System.arraycopy(dest, dOff - matchDec, dest, dOff, fastLen);
+                dOff += matchLen;
+            }
+        } while (dOff < destEnd);
+
+        return dOff;
+    }
+
+    private static void encodeLen(int l, BytesStreamOutput out) throws IOException {
+        while (l >= 0xFF) {
+            out.writeByte((byte) 0xFF);
+            l -= 0xFF;
+        }
+        out.writeByte((byte) l);
+    }
+
+    private static void encodeLiterals(byte[] bytes, int token, int anchor, int literalLen, BytesStreamOutput out) throws IOException {
+        out.writeByte((byte) token);
+
+        // encode literal length
+        if (literalLen >= 0x0F) {
+            encodeLen(literalLen - 0x0F, out);
+        }
+
+        // encode literals
+        out.writeBytes(bytes, anchor, literalLen);
+    }
+
+    private static void encodeLastLiterals(byte[] bytes, int anchor, int literalLen, BytesStreamOutput out) throws IOException {
+        final int token = Math.min(literalLen, 0x0F) << 4;
+        encodeLiterals(bytes, token, anchor, literalLen, out);
+    }
+
+    private static void encodeSequence(byte[] bytes, int anchor, int matchRef, int matchOff, int matchLen, BytesStreamOutput out)
+        throws IOException {
+        final int literalLen = matchOff - anchor;
+        assert matchLen >= 4;
+        // encode token
+        final int token = (Math.min(literalLen, 0x0F) << 4) | Math.min(matchLen - 4, 0x0F);
+        encodeLiterals(bytes, token, anchor, literalLen, out);
+
+        // encode match dec
+        final int matchDec = matchOff - matchRef;
+        assert matchDec > 0 && matchDec < 1 << 16;
+        out.writeByte((byte) matchDec);
+        out.writeByte((byte) (matchDec >>> 8));
+
+        // encode match len
+        if (matchLen >= MIN_MATCH + 0x0F) {
+            encodeLen(matchLen - 0x0F - MIN_MATCH, out);
+        }
+    }
+
+    /**
+     * A record of previous occurrences of sequences of 4 bytes.
+     */
+    static abstract class HashTable {
+
+        /** Reset this hash table in order to compress the given content. */
+        abstract void reset(byte[] b, int off, int len);
+
+        /** Init {@code dictLen} bytes to be used as a dictionary. */
+        abstract void initDictionary(int dictLen);
+
+        /**
+         * Advance the cursor to {@code off} and return an index that stored the same
+         * 4 bytes as {@code b[o:o+4)}. This may only be called on strictly
+         * increasing sequences of offsets. A return value of {@code -1} indicates
+         * that no other index could be found. */
+        abstract int get(int off);
+
+        /**
+         * Return an index that is less than {@code off} and stores the same 4
+         * bytes. Unlike {@link #get}, it doesn't need to be called on increasing
+         * offsets. A return value of {@code -1} indicates that no other index could
+         * be found. */
+        abstract int previous(int off);
+
+        // For testing
+        abstract boolean assertReset();
+    }
+
+    /**
+     * Simple lossy {@link HashTable} that only stores the last occurrence for
+     * each hash on {@code 2^14} bytes of memory.
+     */
+    public static final class FastCompressionHashTable extends HashTable {
+
+        private byte[] bytes;
+        private int base;
+        private int lastOff;
+        private int end;
+        private int hashLog;
+        private PackedInts.Mutable hashTable;
+
+        /** Sole constructor */
+        public FastCompressionHashTable() {}
+
+        @Override
+        void reset(byte[] bytes, int off, int len) {
+            FutureObjects.checkFromIndexSize(off, len, bytes.length);
+            this.bytes = bytes;
+            this.base = off;
+            this.end = off + len;
+            final int bitsPerOffset = PackedInts.bitsRequired(len - LAST_LITERALS);
+            final int bitsPerOffsetLog = 32 - Integer.numberOfLeadingZeros(bitsPerOffset - 1);
+            hashLog = MEMORY_USAGE + 3 - bitsPerOffsetLog;
+            if (hashTable == null || hashTable.size() < 1 << hashLog || hashTable.getBitsPerValue() < bitsPerOffset) {
+                hashTable = PackedInts.getMutable(1 << hashLog, bitsPerOffset, PackedInts.DEFAULT);
+            } else {
+                // Avoid calling hashTable.clear(), this makes it costly to compress many short sequences otherwise.
+                // Instead, get() checks that references are less than the current offset.
+            }
+            this.lastOff = off - 1;
+        }
+
+        @Override
+        void initDictionary(int dictLen) {
+            for (int i = 0; i < dictLen; ++i) {
+                final int v = readInt(bytes, base + i);
+                final int h = hash(v, hashLog);
+                hashTable.set(h, i);
+            }
+            lastOff += dictLen;
+        }
+
+        @Override
+        int get(int off) {
+            assert off > lastOff;
+            assert off < end;
+
+            final int v = readInt(bytes, off);
+            final int h = hash(v, hashLog);
+
+            final int ref = base + (int) hashTable.get(h);
+            hashTable.set(h, off - base);
+            lastOff = off;
+
+            if (ref < off && off - ref < MAX_DISTANCE && readInt(bytes, ref) == v) {
+                return ref;
+            } else {
+                return -1;
+            }
+        }
+
+        @Override
+        public int previous(int off) {
+            return -1;
+        }
+
+        @Override
+        boolean assertReset() {
+            return true;
+        }
+
+    }
+
+    /**
+     * A higher-precision {@link HashTable}. It stores up to 256 occurrences of
+     * 4-bytes sequences in the last {@code 2^16} bytes, which makes it much more
+     * likely to find matches than {@link LZ4.FastCompressionHashTable}.
+     */
+    public static final class HighCompressionHashTable extends HashTable {
+        private static final int MAX_ATTEMPTS = 256;
+        private static final int MASK = MAX_DISTANCE - 1;
+
+        private byte[] bytes;
+        private int base;
+        private int next;
+        private int end;
+        private final int[] hashTable;
+        private final short[] chainTable;
+        private int attempts = 0;
+
+        /** Sole constructor */
+        public HighCompressionHashTable() {
+            hashTable = new int[HASH_TABLE_SIZE_HC];
+            Arrays.fill(hashTable, -1);
+            chainTable = new short[MAX_DISTANCE];
+            Arrays.fill(chainTable, (short) 0xFFFF);
+        }
+
+        @Override
+        void reset(byte[] bytes, int off, int len) {
+            FutureObjects.checkFromIndexSize(off, len, bytes.length);
+            if (end - base < chainTable.length) {
+                // The last call to compress was done on less than 64kB, let's not reset
+                // the hashTable and only reset the relevant parts of the chainTable.
+                // This helps avoid slowing down calling compress() many times on short
+                // inputs.
+                int startOffset = base & MASK;
+                int endOffset = end == 0 ? 0 : ((end - 1) & MASK) + 1;
+                if (startOffset < endOffset) {
+                    Arrays.fill(chainTable, startOffset, endOffset, (short) 0xFFFF);
+                } else {
+                    Arrays.fill(chainTable, 0, endOffset, (short) 0xFFFF);
+                    Arrays.fill(chainTable, startOffset, chainTable.length, (short) 0xFFFF);
+                }
+            } else {
+                // The last call to compress was done on a large enough amount of data
+                // that it's fine to reset both tables
+                Arrays.fill(hashTable, -1);
+                Arrays.fill(chainTable, (short) 0xFFFF);
+            }
+            this.bytes = bytes;
+            this.base = off;
+            this.next = off;
+            this.end = off + len;
+        }
+
+        @Override
+        void initDictionary(int dictLen) {
+            assert next == base;
+            for (int i = 0; i < dictLen; ++i) {
+                addHash(base + i);
+            }
+            next += dictLen;
+        }
+
+        @Override
+        int get(int off) {
+            assert off >= next;
+            assert off < end;
+
+            for (; next < off; next++) {
+                addHash(next);
+            }
+
+            final int v = readInt(bytes, off);
+            final int h = hashHC(v);
+
+            attempts = 0;
+            int ref = hashTable[h];
+            if (ref >= off) {
+                // remainder from a previous call to compress()
+                return -1;
+            }
+            for (int min = Math.max(base, off - MAX_DISTANCE + 1);
+                 ref >= min && attempts < MAX_ATTEMPTS;
+                 ref -= chainTable[ref & MASK] & 0xFFFF, attempts++) {
+                if (readInt(bytes, ref) == v) {
+                    return ref;
+                }
+            }
+            return -1;
+        }
+
+        private void addHash(int off) {
+            final int v = readInt(bytes, off);
+            final int h = hashHC(v);
+            int delta = off - hashTable[h];
+            if (delta <= 0 || delta >= MAX_DISTANCE) {
+                delta = MAX_DISTANCE - 1;
+            }
+            chainTable[off & MASK] = (short) delta;
+            hashTable[h] = off;
+        }
+
+        @Override
+        int previous(int off) {
+            final int v = readInt(bytes, off);
+            for (int ref = off - (chainTable[off & MASK] & 0xFFFF);
+                 ref >= base && attempts < MAX_ATTEMPTS;
+                 ref -= chainTable[ref & MASK] & 0xFFFF, attempts++) {
+                if (readInt(bytes, ref) == v) {
+                    return ref;
+                }
+            }
+            return -1;
+        }
+
+        @Override
+        boolean assertReset() {
+            for (int i = 0; i < chainTable.length; ++i) {
+                assert chainTable[i] == (short) 0xFFFF : i;
+            }
+            return true;
+        }
+    }
+
+    /**
+     * Compress {@code bytes[off:off+len]} into {@code out} using at most 16kB of
+     * memory. {@code ht} shouldn't be shared across threads but can safely be
+     * reused.
+     */
+    public static void compress(byte[] bytes, int off, int len, BytesStreamOutput out, HashTable ht) throws IOException {
+        compressWithDictionary(bytes, off, 0, len, out, ht);
+    }
+
+    /**
+     * Compress {@code bytes[dictOff+dictLen:dictOff+dictLen+len]} into
+     * {@code out} using at most 16kB of memory.
+     * {@code bytes[dictOff:dictOff+dictLen]} will be used as a dictionary.
+     * {@code dictLen} must not be greater than 64kB, the maximum window size.
+     *
+     * {@code ht} shouldn't be shared across threads but can safely be reused.
+     */
+    public static void compressWithDictionary(byte[] bytes, int dictOff, int dictLen, int len, BytesStreamOutput out, HashTable ht)
+        throws IOException {
+        FutureObjects.checkFromIndexSize(dictOff, dictLen, bytes.length);
+        FutureObjects.checkFromIndexSize(dictOff + dictLen, len, bytes.length);
+        if (dictLen > MAX_DISTANCE) {
+            throw new IllegalArgumentException("dictLen must not be greater than 64kB, but got " + dictLen);
+        }
+
+        final int end = dictOff + dictLen + len;
+
+        int off = dictOff + dictLen;
+        int anchor = off;
+
+        if (len > LAST_LITERALS + MIN_MATCH) {
+
+            final int limit = end - LAST_LITERALS;
+            final int matchLimit = limit - MIN_MATCH;
+            ht.reset(bytes, dictOff, dictLen + len);
+            ht.initDictionary(dictLen);
+
+            main:
+            while (off <= limit) {
+                // find a match
+                int ref;
+                while (true) {
+                    if (off >= matchLimit) {
+                        break main;
+                    }
+                    ref = ht.get(off);
+                    if (ref != -1) {
+                        assert ref >= dictOff && ref < off;
+                        assert readInt(bytes, ref) == readInt(bytes, off);
+                        break;
+                    }
+                    ++off;
+                }
+
+                // compute match length
+                int matchLen = MIN_MATCH + commonBytes(bytes, ref + MIN_MATCH, off + MIN_MATCH, limit);
+
+                // try to find a better match
+                for (int r = ht.previous(ref), min = Math.max(off - MAX_DISTANCE + 1, dictOff); r >= min; r = ht.previous(r)) {
+                    assert readInt(bytes, r) == readInt(bytes, off);
+                    int rMatchLen = MIN_MATCH + commonBytes(bytes, r + MIN_MATCH, off + MIN_MATCH, limit);
+                    if (rMatchLen > matchLen) {
+                        ref = r;
+                        matchLen = rMatchLen;
+                    }
+                }
+
+                encodeSequence(bytes, anchor, ref, off, matchLen, out);
+                off += matchLen;
+                anchor = off;
+            }
+        }
+
+        // last literals
+        final int literalLen = end - anchor;
+        assert literalLen >= LAST_LITERALS || literalLen == len;
+        encodeLastLiterals(bytes, anchor, end - anchor, out);
+    }
+}
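
[editor's note] ESLZ4 is Lucene's block-format LZ4 adapted to write into a BytesStreamOutput; as the
javadoc above notes, the caller must know the decompressed length up front. A minimal round-trip sketch
using Lucene's ByteArrayDataInput (only the sample data is invented; HighCompressionHashTable can be
swapped in for a better ratio at more CPU cost):

    byte[] data = "some bytes, some bytes, some bytes".getBytes(StandardCharsets.UTF_8);
    BytesStreamOutput out = new BytesStreamOutput();
    ESLZ4.compress(data, 0, data.length, out, new ESLZ4.FastCompressionHashTable());
    BytesRef compressed = out.bytes().toBytesRef();

    byte[] restored = new byte[data.length];
    ESLZ4.decompress(new ByteArrayDataInput(compressed.bytes, compressed.offset, compressed.length),
        data.length, restored, 0);
    assert Arrays.equals(data, restored);
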
diff --git a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java
new file mode 100644
index 0000000000000..b66efb73876a7
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.transport;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefIterator;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.util.PageCacheRecycler;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.zip.DataFormatException;
+import java.util.zip.Inflater;
+
+public class DeflateTransportDecompressor implements TransportDecompressor {
+
+    private final Inflater inflater;
+    private final PageCacheRecycler recycler;
+    private final ArrayDeque<Recycler.V<byte[]>> pages;
+    private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE;
+    private boolean hasSkippedHeader = false;
+
+    public DeflateTransportDecompressor(PageCacheRecycler recycler) {
+        this.recycler = recycler;
+        inflater = new Inflater(true);
+        pages = new ArrayDeque<>(4);
+    }
+
+    @Override
+    public int decompress(BytesReference bytesReference) throws IOException {
+        int bytesConsumed = 0;
+        if (hasSkippedHeader == false) {
+            hasSkippedHeader = true;
+            int headerLength = TransportDecompressor.HEADER_LENGTH;
+            bytesReference = bytesReference.slice(headerLength, bytesReference.length() - headerLength);
+            bytesConsumed += headerLength;
+        }
+
+        BytesRefIterator refIterator = bytesReference.iterator();
+        BytesRef ref;
+        while ((ref = refIterator.next()) != null) {
+            inflater.setInput(ref.bytes, ref.offset, ref.length);
+            bytesConsumed += ref.length;
+            boolean continueInflating = true;
+            while (continueInflating) {
+                final Recycler.V<byte[]> page;
+                final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE;
+                if (isNewPage) {
+                    pageOffset = 0;
+                    page = recycler.bytePage(false);
+                } else {
+                    page = pages.getLast();
+                }
+                byte[] output = page.v();
+                try {
+                    int bytesInflated = inflater.inflate(output, pageOffset, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset);
+                    pageOffset += bytesInflated;
+                    if (isNewPage) {
+                        if (bytesInflated == 0) {
+                            page.close();
+                            pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE;
+                        } else {
+                            pages.add(page);
+                        }
+                    }
+                } catch (DataFormatException e) {
+                    throw new IOException("Exception while inflating bytes", e);
+                }
+                if (inflater.needsInput()) {
+                    continueInflating = false;
+                }
+                if (inflater.finished()) {
+                    bytesConsumed -= inflater.getRemaining();
+                    continueInflating = false;
+                }
+                assert inflater.needsDictionary() == false;
+            }
+        }
+
+        return bytesConsumed;
+    }
+
+    public boolean isEOS() {
+        return inflater.finished();
+    }
+
+    @Override
+    public ReleasableBytesReference pollDecompressedPage(boolean isEOS) {
+        if (pages.isEmpty()) {
+            return null;
+        } else if (pages.size() == 1) {
+            if (isEOS) {
+                assert isEOS();
+                Recycler.V<byte[]> page = pages.pollFirst();
+                ReleasableBytesReference reference = new ReleasableBytesReference(new BytesArray(page.v(), 0, pageOffset), page);
+                pageOffset = 0;
+                return reference;
+            } else {
+                return null;
+            }
+        } else {
+            Recycler.V<byte[]> page = pages.pollFirst();
+            return new ReleasableBytesReference(new BytesArray(page.v()), page);
+        }
+    }
+
+    @Override
+    public void close() {
+        inflater.end();
+        for (Recycler.V<byte[]> page : pages) {
+            page.close();
+        }
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java
index ddd8b8f0a7d8b..fb619d583902a 100644
--- a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java
+++ b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java
@@ -29,6 +29,7 @@ public class InboundDecoder implements Releasable {
     private TransportDecompressor decompressor;
     private int totalNetworkSize = -1;
     private int bytesConsumed = 0;
+    private boolean isCompressed = false;
     private boolean isClosed = false;
 
     public InboundDecoder(Version version, PageCacheRecycler recycler) {
@@ -64,7 +65,7 @@ public int internalDecode(ReleasableBytesReference reference, Consumer f
                 Header header = readHeader(version, messageLength, reference);
                 bytesConsumed += headerBytesToRead;
                 if (header.isCompressed()) {
-                    decompressor = new TransportDecompressor(recycler);
+                    isCompressed = true;
                 }
                 fragmentConsumer.accept(header);
 
@@ -75,9 +76,14 @@ public int internalDecode(ReleasableBytesReference reference, Consumer f
                 }
             }
         } else {
-            // There are a minimum number of bytes required to start decompression
-            if (decompressor != null && decompressor.canDecompress(reference.length()) == false) {
-                return 0;
+            if (isCompressed && decompressor == null) {
+                // Attempt to initialize decompressor
+                TransportDecompressor decompressor = TransportDecompressor.getDecompressor(recycler, reference);
+                if (decompressor == null) {
+                    return 0;
+                } else {
+                    this.decompressor = decompressor;
+                }
             }
             int bytesToConsume = Math.min(reference.length(), totalNetworkSize - bytesConsumed);
             bytesConsumed += bytesToConsume;
@@ -90,7 +96,7 @@ public int internalDecode(ReleasableBytesReference reference, Consumer f
             if (decompressor != null) {
                 decompress(retainedContent);
                 ReleasableBytesReference decompressed;
-                while ((decompressed = decompressor.pollDecompressedPage()) != null) {
+                while ((decompressed = decompressor.pollDecompressedPage(isDone())) != null) {
                     fragmentConsumer.accept(decompressed);
                 }
             } else {
@@ -119,6 +125,7 @@ private void cleanDecodeState() {
         try {
             Releasables.closeExpectNoException(decompressor);
         } finally {
+            isCompressed = false;
            decompressor = null;
             totalNetworkSize = -1;
             bytesConsumed = 0;
diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
new file mode 100644
index 0000000000000..db1040e9000ea
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.transport;
+
+import net.jpountz.lz4.LZ4FrameInputStream;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.recycler.Recycler;
+import org.elasticsearch.common.util.PageCacheRecycler;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayDeque;
+
+public class Lz4TransportDecompressor implements TransportDecompressor {
+
+    private final LZ4FrameInputStream inputStream;
+    private final ExpandableStream expandableStream;
+    private final PageCacheRecycler recycler;
+    private final ArrayDeque<Recycler.V<byte[]>> pages;
+    private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE;
+    private boolean hasSkippedHeader = false;
+
+    public Lz4TransportDecompressor(PageCacheRecycler recycler) throws IOException {
+        this.recycler = recycler;
+        expandableStream = new ExpandableStream();
+        inputStream = new LZ4FrameInputStream(expandableStream);
+        pages = new ArrayDeque<>(4);
+    }
+
+    @Override
+    public int decompress(BytesReference bytesReference) throws IOException {
+        int headerBytes = 0;
+        if (hasSkippedHeader == false) {
+            hasSkippedHeader = true;
+            headerBytes = TransportDecompressor.HEADER_LENGTH;
+            bytesReference = bytesReference.slice(headerBytes, bytesReference.length() - headerBytes);
+        }
+        // create the stream only after skipping the header so the LZ4 frame reader never sees the 'LZ4\0' marker
+        final StreamInput underlyingStream = bytesReference.streamInput();
+        this.expandableStream.nextStream(underlyingStream);
+        boolean continueDecompressing = true;
+        while (continueDecompressing) {
+            final Recycler.V<byte[]> page;
+            final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE;
+            if (isNewPage) {
+                pageOffset = 0;
+                page = recycler.bytePage(false);
+            } else {
+                page = pages.getLast();
+            }
+            byte[] output = page.v();
+            int bytesDecompressed;
+            try {
+                bytesDecompressed = inputStream.read(output, pageOffset, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset);
+                pageOffset += Math.max(bytesDecompressed, 0); // read() returns -1 once the frame is finished
+                if (isNewPage) {
+                    if (bytesDecompressed <= 0) {
+                        page.close();
+                        pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE;
+                    } else {
+                        pages.add(page);
+                    }
+                }
+            } catch (IOException e) {
+                throw new IOException("Exception while LZ4 decompressing bytes", e);
+            }
+            if (bytesDecompressed <= 0) {
+                continueDecompressing = false;
+            }
+        }
+
+        assert underlyingStream.available() == 0;
+
+        return headerBytes + bytesReference.length();
+    }
+
+    @Override
+    public ReleasableBytesReference pollDecompressedPage(boolean isEOS) {
+        if (pages.isEmpty()) {
+            return null;
+        } else if (pages.size() == 1) {
+            if (isEOS) {
+                Recycler.V<byte[]> page = pages.pollFirst();
+                ReleasableBytesReference reference = new ReleasableBytesReference(new BytesArray(page.v(), 0, pageOffset), page);
+                pageOffset = 0;
+                return reference;
+            } else {
+                return null;
+            }
+        } else {
+            Recycler.V<byte[]> page = pages.pollFirst();
+            return new ReleasableBytesReference(new BytesArray(page.v()), page);
+        }
+    }
+
+    @Override
+    public void close() {
+        try {
+            inputStream.close();
+        } catch (IOException e) {
+            assert false : "Exception should not be thrown.";
+        }
+        for (Recycler.V<byte[]> page : pages) {
+            page.close();
+        }
+    }
+
+    private static class ExpandableStream extends InputStream {
+
+        private StreamInput current;
+
+        private void nextStream(StreamInput next) {
+            current = next;
+        }
+
+        @Override
+        public int read() throws IOException {
+            return Math.max(0, current.read());
+        }
+
+        @Override
+        public int read(byte[] b, int off, int len) throws IOException {
+            return Math.max(0, current.read(b, off, len));
+        }
+    }
+}
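
[editor's note] Both decompressors above are page-streaming: InboundDecoder feeds each network slice
into decompress() and then drains full 16kB pages, passing isEOS so the final partial page is only
surrendered once the whole message has been consumed. A sketch of that driver loop, mirroring
InboundDecoder's usage (forwardFragment is a hypothetical consumer that must eventually close each
reference):

    int consumed = decompressor.decompress(networkSlice);
    boolean messageComplete = isDone(); // InboundDecoder's own byte bookkeeping
    ReleasableBytesReference page;
    while ((page = decompressor.pollDecompressedPage(messageComplete)) != null) {
        forwardFragment(page);
    }

Note also that the OutboundMessage change below keeps the LZ4 write path behind `if (true)`, so this
commit still always emits deflate on the wire; presumably a later patch in the series makes the scheme
selectable.
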
IOException { + return Math.max(0, current.read(b, off, len)); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java index cf241c1866e82..86b3c2b715a1c 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -7,6 +7,8 @@ */ package org.elasticsearch.transport; +import net.jpountz.lz4.LZ4FrameOutputStream; + import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -87,7 +89,12 @@ BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { // compressed stream wrapped bytes must be no-close wrapped since we need to close the compressed wrapper below to release // resources and write EOS marker bytes but must not yet release the bytes themselves private OutputStreamStreamOutput wrapCompressed(BytesStreamOutput bytesStream) throws IOException { - return new OutputStreamStreamOutput(CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.noCloseStream(bytesStream))); + if (true) { + return new OutputStreamStreamOutput(CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.noCloseStream(bytesStream))); + } else { + return new OutputStreamStreamOutput(new LZ4FrameOutputStream(Streams.noCloseStream(bytesStream), + LZ4FrameOutputStream.BLOCKSIZE.SIZE_64KB)); + } } protected void writeVariableHeader(StreamOutput stream) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java index 58df5c632731d..cb62b0dc47325 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java @@ -8,130 +8,64 @@ package org.elasticsearch.transport; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefIterator; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.compress.DeflateCompressor; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.Releasable; import java.io.IOException; -import java.util.ArrayDeque; -import java.util.zip.DataFormatException; -import java.util.zip.Inflater; -public class TransportDecompressor implements Releasable { +public interface TransportDecompressor extends Releasable { - private final Inflater inflater; - private final PageCacheRecycler recycler; - private final ArrayDeque> pages; - private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - private boolean hasReadHeader = false; + int decompress(BytesReference bytesReference) throws IOException; - public TransportDecompressor(PageCacheRecycler recycler) { - this.recycler = recycler; - inflater = new Inflater(true); - pages = new ArrayDeque<>(4); - } + ReleasableBytesReference pollDecompressedPage(boolean isEOS); - public int decompress(BytesReference bytesReference) throws IOException { - int bytesConsumed = 0; - if (hasReadHeader == false) { - if (CompressorFactory.COMPRESSOR.isCompressed(bytesReference) == false) { - int maxToRead 
= Math.min(bytesReference.length(), 10); - StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [") - .append(maxToRead).append("] content bytes out of [").append(bytesReference.length()) - .append("] readable bytes with message size [").append(bytesReference.length()).append("] ").append("] are ["); - for (int i = 0; i < maxToRead; i++) { - sb.append(bytesReference.get(i)).append(","); - } - sb.append("]"); - throw new IllegalStateException(sb.toString()); - } - hasReadHeader = true; - int headerLength = CompressorFactory.COMPRESSOR.headerLength(); - bytesReference = bytesReference.slice(headerLength, bytesReference.length() - headerLength); - bytesConsumed += headerLength; + @Override + void close(); + + byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; + byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; + int HEADER_LENGTH = 4; + + static TransportDecompressor getDecompressor(PageCacheRecycler recycler, BytesReference bytes) throws IOException { + if (bytes.length() < DeflateCompressor.HEADER.length) { + return null; + } + byte firstByte = bytes.get(0); + byte[] header; + if (firstByte == DEFLATE_HEADER[0]) { + header = DEFLATE_HEADER; + } else if (firstByte == LZ4_HEADER[0]) { + header = LZ4_HEADER; + } else { + throw createIllegalState(bytes); } - BytesRefIterator refIterator = bytesReference.iterator(); - BytesRef ref; - while ((ref = refIterator.next()) != null) { - inflater.setInput(ref.bytes, ref.offset, ref.length); - bytesConsumed += ref.length; - boolean continueInflating = true; - while (continueInflating) { - final Recycler.V page; - final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; - if (isNewPage) { - pageOffset = 0; - page = recycler.bytePage(false); - } else { - page = pages.getLast(); - } - byte[] output = page.v(); - try { - int bytesInflated = inflater.inflate(output, pageOffset, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); - pageOffset += bytesInflated; - if (isNewPage) { - if (bytesInflated == 0) { - page.close(); - pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - } else { - pages.add(page); - } - } - } catch (DataFormatException e) { - throw new IOException("Exception while inflating bytes", e); - } - if (inflater.needsInput()) { - continueInflating = false; - } - if (inflater.finished()) { - bytesConsumed -= inflater.getRemaining(); - continueInflating = false; - } - assert inflater.needsDictionary() == false; + for (int i = 1; i < HEADER_LENGTH; ++i) { + if (bytes.get(i) != header[i]) { + throw createIllegalState(bytes); } } - return bytesConsumed; - } - - public boolean canDecompress(int bytesAvailable) { - return hasReadHeader || bytesAvailable >= CompressorFactory.COMPRESSOR.headerLength(); - } - - public boolean isEOS() { - return inflater.finished(); - } - - public ReleasableBytesReference pollDecompressedPage() { - if (pages.isEmpty()) { - return null; - } else if (pages.size() == 1) { - if (isEOS()) { - Recycler.V page = pages.pollFirst(); - ReleasableBytesReference reference = new ReleasableBytesReference(new BytesArray(page.v(), 0, pageOffset), page); - pageOffset = 0; - return reference; - } else { - return null; - } + if (header == DEFLATE_HEADER) { + return new DeflateTransportDecompressor(recycler); } else { - Recycler.V page = pages.pollFirst(); - return new ReleasableBytesReference(new BytesArray(page.v()), page); + return new Lz4TransportDecompressor(recycler); } } - @Override - public void close() { - inflater.end(); - for (Recycler.V page : pages) { - page.close(); + 
private static IllegalStateException createIllegalState(BytesReference bytes) { + int maxToRead = Math.min(bytes.length(), 10); + StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [") + .append(maxToRead).append("] content bytes out of [").append(bytes.length()) + .append("] readable bytes with message size [").append(bytes.length()).append("] are ["); + for (int i = 0; i < maxToRead; i++) { + sb.append(bytes.get(i)).append(","); + } + sb.append("]"); + return new IllegalStateException(sb.toString()); } } diff --git a/server/src/test/java/org/elasticsearch/transport/TransportDecompressorTests.java b/server/src/test/java/org/elasticsearch/transport/DeflateTransportDecompressorTests.java similarity index 86% rename from server/src/test/java/org/elasticsearch/transport/TransportDecompressorTests.java rename to server/src/test/java/org/elasticsearch/transport/DeflateTransportDecompressorTests.java index 60e06ceef2945..f7eddbe185d8d 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportDecompressorTests.java +++ b/server/src/test/java/org/elasticsearch/transport/DeflateTransportDecompressorTests.java @@ -24,7 +24,7 @@ import java.io.IOException; import java.io.OutputStream; -public class TransportDecompressorTests extends ESTestCase { +public class DeflateTransportDecompressorTests extends ESTestCase { public void testSimpleCompression() throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { @@ -35,11 +35,11 @@ public void testSimpleCompression() throws IOException { BytesReference bytes = output.bytes(); - TransportDecompressor decompressor = new TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); + DeflateTransportDecompressor decompressor = new DeflateTransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); int bytesConsumed = decompressor.decompress(bytes); assertEquals(bytes.length(), bytesConsumed); assertTrue(decompressor.isEOS()); - ReleasableBytesReference releasableBytesReference = decompressor.pollDecompressedPage(); + ReleasableBytesReference releasableBytesReference = decompressor.pollDecompressedPage(true); assertEquals(randomByte, releasableBytesReference.get(0)); releasableBytesReference.close(); @@ -57,14 +57,14 @@ public void testMultiPageCompression() throws IOException { BytesReference bytes = output.bytes(); - TransportDecompressor decompressor = new TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); + DeflateTransportDecompressor decompressor = new DeflateTransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); int bytesConsumed = decompressor.decompress(bytes); assertEquals(bytes.length(), bytesConsumed); assertTrue(decompressor.isEOS()); - ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(); - ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(); - ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(); - assertNull(decompressor.pollDecompressedPage()); + ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(true); + assertNull(decompressor.pollDecompressedPage(true)); BytesReference composite = CompositeBytesReference.of(reference1, reference2, reference3); assertEquals(4 * 10000, composite.length()); StreamInput streamInput = composite.streamInput(); @@ -86,7 +86,7 @@ public void
testIncrementalMultiPageCompression() throws IOException { BytesReference bytes = output.bytes(); - TransportDecompressor decompressor = new TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); + DeflateTransportDecompressor decompressor = new DeflateTransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); int split1 = (int) (bytes.length() * 0.3); int split2 = (int) (bytes.length() * 0.65); @@ -103,10 +103,10 @@ public void testIncrementalMultiPageCompression() throws IOException { int bytesConsumed3 = decompressor.decompress(inbound3); assertEquals(inbound3.length(), bytesConsumed3); assertTrue(decompressor.isEOS()); - ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(); - ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(); - ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(); - assertNull(decompressor.pollDecompressedPage()); + ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(true); + assertNull(decompressor.pollDecompressedPage(false)); BytesReference composite = CompositeBytesReference.of(reference1, reference2, reference3); assertEquals(4 * 10000, composite.length()); StreamInput streamInput = composite.streamInput(); @@ -117,5 +117,4 @@ public void testIncrementalMultiPageCompression() throws IOException { } } - } From 0e67dc4a375ea036bcc57172fecee8adf6576d7e Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 24 Jun 2021 18:37:48 -0600 Subject: [PATCH 04/29] Changes --- .../elasticsearch/common/compress/ESLZ4.java | 462 ------------------ .../transport/CompressionScheme.java | 30 ++ .../transport/ConnectionProfile.java | 3 +- .../DeflateTransportDecompressor.java | 4 +- .../transport/Lz4TransportDecompressor.java | 2 +- .../transport/NetworkMessage.java | 4 +- .../transport/OutboundHandler.java | 21 +- .../transport/OutboundMessage.java | 38 +- .../elasticsearch/transport/TcpTransport.java | 15 +- .../transport/TransportDecompressor.java | 17 +- .../transport/TransportSettings.java | 4 + .../transport/InboundDecoderTests.java | 25 +- .../transport/InboundHandlerTests.java | 4 +- .../transport/InboundPipelineTests.java | 28 +- .../transport/OutboundHandlerTests.java | 7 +- .../transport/TcpTransportTests.java | 2 +- .../transport/TransportLoggerTests.java | 2 +- .../test/InternalTestCluster.java | 7 + .../AbstractSimpleTransportTestCase.java | 5 +- .../transport/TestTransportChannels.java | 5 +- 20 files changed, 150 insertions(+), 535 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java create mode 100644 server/src/main/java/org/elasticsearch/transport/CompressionScheme.java diff --git a/server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java b/server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java deleted file mode 100644 index aac8dc76cd91f..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/compress/ESLZ4.java +++ /dev/null @@ -1,462 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.common.compress; - -import org.apache.lucene.store.DataInput; -import org.apache.lucene.util.FutureArrays; -import org.apache.lucene.util.FutureObjects; -import org.apache.lucene.util.compress.LZ4; -import org.apache.lucene.util.packed.PackedInts; -import org.elasticsearch.common.io.stream.BytesStreamOutput; - -import java.io.IOException; -import java.util.Arrays; - -public class ESLZ4 { - - static final int MEMORY_USAGE = 14; - static final int MIN_MATCH = 4; // minimum length of a match - static final int MAX_DISTANCE = 1 << 16; // maximum distance of a reference - static final int LAST_LITERALS = 5; // the last 5 bytes must be encoded as literals - static final int HASH_LOG_HC = 15; // log size of the dictionary for compressHC - static final int HASH_TABLE_SIZE_HC = 1 << HASH_LOG_HC; - - - private static int hash(int i, int hashBits) { - return (i * -1640531535) >>> (32 - hashBits); - } - - private static int hashHC(int i) { - return hash(i, HASH_LOG_HC); - } - - private static int readInt(byte[] buf, int i) { - return ((buf[i] & 0xFF) << 24) | ((buf[i+1] & 0xFF) << 16) | ((buf[i+2] & 0xFF) << 8) | (buf[i+3] & 0xFF); - } - - private static int commonBytes(byte[] b, int o1, int o2, int limit) { - assert o1 < o2; - // never -1 because lengths always differ - return FutureArrays.mismatch(b, o1, limit, b, o2, limit); - } - - /** - * Decompress at least {@code decompressedLen} bytes into - * {@code dest[dOff:]}. Please note that {@code dest} must be large - * enough to be able to hold all decompressed data (meaning that you - * need to know the total decompressed length). - * If the given bytes were compressed using a preset dictionary then the same - * dictionary must be provided in {@code dest[dOff-dictLen:dOff]}. - */ - public static int decompress(DataInput compressed, int decompressedLen, byte[] dest, int dOff) throws IOException { - final int destEnd = dOff + decompressedLen; - - do { - // literals - final int token = compressed.readByte() & 0xFF; - int literalLen = token >>> 4; - - if (literalLen != 0) { - if (literalLen == 0x0F) { - byte len; - while ((len = compressed.readByte()) == (byte) 0xFF) { - literalLen += 0xFF; - } - literalLen += len & 0xFF; - } - compressed.readBytes(dest, dOff, literalLen); - dOff += literalLen; - } - - if (dOff >= destEnd) { - break; - } - - // matchs - final int matchDec = (compressed.readByte() & 0xFF) | ((compressed.readByte() & 0xFF) << 8); - assert matchDec > 0; - - int matchLen = token & 0x0F; - if (matchLen == 0x0F) { - int len; - while ((len = compressed.readByte()) == (byte) 0xFF) { - matchLen += 0xFF; - } - matchLen += len & 0xFF; - } - matchLen += MIN_MATCH; - - // copying a multiple of 8 bytes can make decompression from 5% to 10% faster - final int fastLen = (matchLen + 7) & 0xFFFFFFF8; - if (matchDec < matchLen || dOff + fastLen > destEnd) { - // overlap -> naive incremental copy - for (int ref = dOff - matchDec, end = dOff + matchLen; dOff < end; ++ref, ++dOff) { - dest[dOff] = dest[ref]; - } - } else { - // no overlap -> arraycopy - System.arraycopy(dest, dOff - matchDec, dest, dOff, fastLen); - dOff += matchLen; - } - } while (dOff < destEnd); - - return dOff; - } - - private static void encodeLen(int l, BytesStreamOutput out) throws IOException { - while (l >= 0xFF) { - out.writeByte((byte) 0xFF); - l -= 0xFF; - } - out.writeByte((byte) l); - } - - private static void encodeLiterals(byte[] bytes, int token, int anchor, int literalLen, BytesStreamOutput out) throws IOException { - out.writeByte((byte) 
token); - - // encode literal length - if (literalLen >= 0x0F) { - encodeLen(literalLen - 0x0F, out); - } - - // encode literals - out.writeBytes(bytes, anchor, literalLen); - } - - private static void encodeLastLiterals(byte[] bytes, int anchor, int literalLen, BytesStreamOutput out) throws IOException { - final int token = Math.min(literalLen, 0x0F) << 4; - encodeLiterals(bytes, token, anchor, literalLen, out); - } - - private static void encodeSequence(byte[] bytes, int anchor, int matchRef, int matchOff, int matchLen, BytesStreamOutput out) - throws IOException { - final int literalLen = matchOff - anchor; - assert matchLen >= 4; - // encode token - final int token = (Math.min(literalLen, 0x0F) << 4) | Math.min(matchLen - 4, 0x0F); - encodeLiterals(bytes, token, anchor, literalLen, out); - - // encode match dec - final int matchDec = matchOff - matchRef; - assert matchDec > 0 && matchDec < 1 << 16; - out.writeByte((byte) matchDec); - out.writeByte((byte) (matchDec >>> 8)); - - // encode match len - if (matchLen >= MIN_MATCH + 0x0F) { - encodeLen(matchLen - 0x0F - MIN_MATCH, out); - } - } - - /** - * A record of previous occurrences of sequences of 4 bytes. - */ - static abstract class HashTable { - - /** Reset this hash table in order to compress the given content. */ - abstract void reset(byte[] b, int off, int len); - - /** Init {@code dictLen} bytes to be used as a dictionary. */ - abstract void initDictionary(int dictLen); - - /** - * Advance the cursor to {@off} and return an index that stored the same - * 4 bytes as {@code b[o:o+4)}. This may only be called on strictly - * increasing sequences of offsets. A return value of {@code -1} indicates - * that no other index could be found. */ - abstract int get(int off); - - /** - * Return an index that less than {@code off} and stores the same 4 - * bytes. Unlike {@link #get}, it doesn't need to be called on increasing - * offsets. A return value of {@code -1} indicates that no other index could - * be found. */ - abstract int previous(int off); - - // For testing - abstract boolean assertReset(); - } - - /** - * Simple lossy {@link HashTable} that only stores the last ocurrence for - * each hash on {@code 2^14} bytes of memory. - */ - public static final class FastCompressionHashTable extends HashTable { - - private byte[] bytes; - private int base; - private int lastOff; - private int end; - private int hashLog; - private PackedInts.Mutable hashTable; - - /** Sole constructor */ - public FastCompressionHashTable() {} - - @Override - void reset(byte[] bytes, int off, int len) { - FutureObjects.checkFromIndexSize(off, len, bytes.length); - this.bytes = bytes; - this.base = off; - this.end = off + len; - final int bitsPerOffset = PackedInts.bitsRequired(len - LAST_LITERALS); - final int bitsPerOffsetLog = 32 - Integer.numberOfLeadingZeros(bitsPerOffset - 1); - hashLog = MEMORY_USAGE + 3 - bitsPerOffsetLog; - if (hashTable == null || hashTable.size() < 1 << hashLog || hashTable.getBitsPerValue() < bitsPerOffset) { - hashTable = PackedInts.getMutable(1 << hashLog, bitsPerOffset, PackedInts.DEFAULT); - } else { - // Avoid calling hashTable.clear(), this makes it costly to compress many short sequences otherwise. - // Instead, get() checks that references are less than the current offset. 
- } - this.lastOff = off - 1; - } - - @Override - void initDictionary(int dictLen) { - for (int i = 0; i < dictLen; ++i) { - final int v = readInt(bytes, base + i); - final int h = hash(v, hashLog); - hashTable.set(h, i); - } - lastOff += dictLen; - } - - @Override - int get(int off) { - assert off > lastOff; - assert off < end; - - final int v = readInt(bytes, off); - final int h = hash(v, hashLog); - - final int ref = base + (int) hashTable.get(h); - hashTable.set(h, off - base); - lastOff = off; - - if (ref < off && off - ref < MAX_DISTANCE && readInt(bytes, ref) == v) { - return ref; - } else { - return -1; - } - } - - @Override - public int previous(int off) { - return -1; - } - - @Override - boolean assertReset() { - return true; - } - - } - - /** - * A higher-precision {@link HashTable}. It stores up to 256 occurrences of - * 4-bytes sequences in the last {@code 2^16} bytes, which makes it much more - * likely to find matches than {@link LZ4.FastCompressionHashTable}. - */ - public static final class HighCompressionHashTable extends HashTable { - private static final int MAX_ATTEMPTS = 256; - private static final int MASK = MAX_DISTANCE - 1; - - private byte[] bytes; - private int base; - private int next; - private int end; - private final int[] hashTable; - private final short[] chainTable; - private int attempts = 0; - - /** Sole constructor */ - public HighCompressionHashTable() { - hashTable = new int[HASH_TABLE_SIZE_HC]; - Arrays.fill(hashTable, -1); - chainTable = new short[MAX_DISTANCE]; - Arrays.fill(chainTable, (short) 0xFFFF); - } - - @Override - void reset(byte[] bytes, int off, int len) { - FutureObjects.checkFromIndexSize(off, len, bytes.length); - if (end - base < chainTable.length) { - // The last call to compress was done on less than 64kB, let's not reset - // the hashTable and only reset the relevant parts of the chainTable. - // This helps avoid slowing down calling compress() many times on short - // inputs. - int startOffset = base & MASK; - int endOffset = end == 0 ? 
0 : ((end - 1) & MASK) + 1; - if (startOffset < endOffset) { - Arrays.fill(chainTable, startOffset, endOffset, (short) 0xFFFF); - } else { - Arrays.fill(chainTable, 0, endOffset, (short) 0xFFFF); - Arrays.fill(chainTable, startOffset, chainTable.length, (short) 0xFFFF); - } - } else { - // The last call to compress was done on a large enough amount of data - // that it's fine to reset both tables - Arrays.fill(hashTable, -1); - Arrays.fill(chainTable, (short) 0xFFFF); - } - this.bytes = bytes; - this.base = off; - this.next = off; - this.end = off + len; - } - - @Override - void initDictionary(int dictLen) { - assert next == base; - for (int i = 0; i < dictLen; ++i) { - addHash(base + i); - } - next += dictLen; - } - - @Override - int get(int off) { - assert off >= next; - assert off < end; - - for (; next < off; next++) { - addHash(next); - } - - final int v = readInt(bytes, off); - final int h = hashHC(v); - - attempts = 0; - int ref = hashTable[h]; - if (ref >= off) { - // remainder from a previous call to compress() - return -1; - } - for (int min = Math.max(base, off - MAX_DISTANCE + 1); - ref >= min && attempts < MAX_ATTEMPTS; - ref -= chainTable[ref & MASK] & 0xFFFF, attempts++) { - if (readInt(bytes, ref) == v) { - return ref; - } - } - return -1; - } - - private void addHash(int off) { - final int v = readInt(bytes, off); - final int h = hashHC(v); - int delta = off - hashTable[h]; - if (delta <= 0 || delta >= MAX_DISTANCE) { - delta = MAX_DISTANCE - 1; - } - chainTable[off & MASK] = (short) delta; - hashTable[h] = off; - } - - @Override - int previous(int off) { - final int v = readInt(bytes, off); - for (int ref = off - (chainTable[off & MASK] & 0xFFFF); - ref >= base && attempts < MAX_ATTEMPTS; - ref -= chainTable[ref & MASK] & 0xFFFF, attempts++ ) { - if (readInt(bytes, ref) == v) { - return ref; - } - } - return -1; - } - - @Override - boolean assertReset() { - for (int i = 0; i < chainTable.length; ++i) { - assert chainTable[i] == (short) 0xFFFF : i; - } - return true; - } - } - - /** - * Compress {@code bytes[off:off+len]} into {@code out} using at most 16kB of - * memory. {@code ht} shouldn't be shared across threads but can safely be - * reused. - */ - public static void compress(byte[] bytes, int off, int len, BytesStreamOutput out, HashTable ht) throws IOException { - compressWithDictionary(bytes, off, 0, len, out, ht); - } - - /** - * Compress {@code bytes[dictOff+dictLen:dictOff+dictLen+len]} into - * {@code out} using at most 16kB of memory. - * {@code bytes[dictOff:dictOff+dictLen]} will be used as a dictionary. - * {@code dictLen} must not be greater than 64kB, the maximum window size. - * - * {@code ht} shouldn't be shared across threads but can safely be reused. 
- */ - public static void compressWithDictionary(byte[] bytes, int dictOff, int dictLen, int len, BytesStreamOutput out, HashTable ht) - throws IOException { - FutureObjects.checkFromIndexSize(dictOff, dictLen, bytes.length); - FutureObjects.checkFromIndexSize(dictOff + dictLen, len, bytes.length); - if (dictLen > MAX_DISTANCE) { - throw new IllegalArgumentException("dictLen must not be greater than 64kB, but got " + dictLen); - } - - final int end = dictOff + dictLen + len; - - int off = dictOff + dictLen; - int anchor = off; - - if (len > LAST_LITERALS + MIN_MATCH) { - - final int limit = end - LAST_LITERALS; - final int matchLimit = limit - MIN_MATCH; - ht.reset(bytes, dictOff, dictLen + len); - ht.initDictionary(dictLen); - - main: - while (off <= limit) { - // find a match - int ref; - while (true) { - if (off >= matchLimit) { - break main; - } - ref = ht.get(off); - if (ref != -1) { - assert ref >= dictOff && ref < off; - assert readInt(bytes, ref) == readInt(bytes, off); - break; - } - ++off; - } - - // compute match length - int matchLen = MIN_MATCH + commonBytes(bytes, ref + MIN_MATCH, off + MIN_MATCH, limit); - - // try to find a better match - for (int r = ht.previous(ref), min = Math.max(off - MAX_DISTANCE + 1, dictOff); r >= min; r = ht.previous(r)) { - assert readInt(bytes, r) == readInt(bytes, off); - int rMatchLen = MIN_MATCH + commonBytes(bytes, r + MIN_MATCH, off + MIN_MATCH, limit); - if (rMatchLen > matchLen) { - ref = r; - matchLen = rMatchLen; - } - } - - encodeSequence(bytes, anchor, ref, off, matchLen, out); - off += matchLen; - anchor = off; - } - } - - // last literals - final int literalLen = end - anchor; - assert literalLen >= LAST_LITERALS || literalLen == len; - encodeLastLiterals(bytes, anchor, end - anchor, out); - } -} diff --git a/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java b/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java new file mode 100644 index 0000000000000..a1a03603ca5ed --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.transport; + +import net.jpountz.lz4.LZ4FrameOutputStream; + +import org.elasticsearch.common.compress.DeflateCompressor; + +import java.io.IOException; +import java.io.OutputStream; + +public enum CompressionScheme { + LZ4, + DEFLATE; + + static byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; + static byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; + static int HEADER_LENGTH = 4; + + public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { + outputStream.write(LZ4_HEADER); + return new LZ4FrameOutputStream(outputStream, LZ4FrameOutputStream.BLOCKSIZE.SIZE_64KB); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index 9b00ac784a91f..df5bfd0a3e5bc 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -94,7 +94,8 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) */ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, @Nullable TimeValue connectTimeout, @Nullable TimeValue handshakeTimeout, @Nullable TimeValue pingInterval, - @Nullable Boolean compressionEnabled, @Nullable Boolean rawDataCompressionEnabled) { + @Nullable Boolean compressionEnabled, + @Nullable Boolean rawDataCompressionEnabled) { Builder builder = new Builder(); builder.addConnections(1, channelType); final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); diff --git a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java index b66efb73876a7..677f7f18b49a4 100644 --- a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java @@ -13,8 +13,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.core.Releasable; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.util.PageCacheRecycler; @@ -42,7 +40,7 @@ public int decompress(BytesReference bytesReference) throws IOException { int bytesConsumed = 0; if (hasSkippedHeader == false) { hasSkippedHeader = true; - int headerLength = TransportDecompressor.HEADER_LENGTH; + int headerLength = CompressionScheme.HEADER_LENGTH; bytesReference = bytesReference.slice(headerLength, bytesReference.length() - headerLength); bytesConsumed += headerLength; } diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index db1040e9000ea..605fed6f98c79 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -44,7 +44,7 @@ public int decompress(BytesReference bytesReference) throws IOException { if (hasSkippedHeader == false) { hasSkippedHeader = true; - int headerLength = TransportDecompressor.HEADER_LENGTH; + int headerLength = CompressionScheme.HEADER_LENGTH; bytesReference = bytesReference.slice(headerLength, 
bytesReference.length() - headerLength); } diff --git a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java index fa4320aa42b9a..dc3393eb57b6e 100644 --- a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java @@ -21,12 +21,14 @@ public abstract class NetworkMessage { protected final Writeable threadContext; protected final long requestId; protected final byte status; + protected final CompressionScheme compressionScheme; - NetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId) { + NetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId, CompressionScheme compressionScheme) { this.threadContext = threadContext.captureAsWriteable(); this.version = version; this.requestId = requestId; this.status = status; + this.compressionScheme = compressionScheme; } public Version getVersion() { diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java index 1108cb94f5c4c..08e2e17ef1f35 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -38,17 +38,20 @@ final class OutboundHandler { private final StatsTracker statsTracker; private final ThreadPool threadPool; private final BigArrays bigArrays; + private final CompressionScheme configuredCompressionScheme; private volatile long slowLogThresholdMs = Long.MAX_VALUE; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; - OutboundHandler(String nodeName, Version version, StatsTracker statsTracker, ThreadPool threadPool, BigArrays bigArrays) { + OutboundHandler(String nodeName, Version version, StatsTracker statsTracker, ThreadPool threadPool, BigArrays bigArrays, + CompressionScheme compressionScheme) { this.nodeName = nodeName; this.version = version; this.statsTracker = statsTracker; this.threadPool = threadPool; this.bigArrays = bigArrays; + this.configuredCompressionScheme = compressionScheme; } void setSlowLogThreshold(TimeValue slowLogThreshold) { @@ -65,10 +68,10 @@ void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener li */ void sendRequest(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, final TransportRequest request, final TransportRequestOptions options, final Version channelVersion, - final boolean compressRequest, final boolean isHandshake) throws IOException, TransportException { + final CompressionScheme compressionScheme, final boolean isHandshake) throws IOException, TransportException { Version version = Version.min(this.version, channelVersion); OutboundMessage.Request message = - new OutboundMessage.Request(threadPool.getThreadContext(), request, version, action, requestId, isHandshake, compressRequest); + new OutboundMessage.Request(threadPool.getThreadContext(), request, version, action, requestId, isHandshake, compressionScheme); if (request.tryIncRef() == false) { assert false : "request [" + request + "] has been released already"; throw new AlreadyClosedException("request [" + request + "] has been released already"); @@ -90,10 +93,16 @@ void sendRequest(final DiscoveryNode node, final TcpChannel channel, final long * @see #sendErrorResponse(Version, TcpChannel, long, String, 
Exception) for sending error responses */ void sendResponse(final Version nodeVersion, final TcpChannel channel, final long requestId, final String action, - final TransportResponse response, final boolean compress, final boolean isHandshake) throws IOException { + final TransportResponse response, final boolean compressResponse, final boolean isHandshake) throws IOException { Version version = Version.min(this.version, nodeVersion); + final CompressionScheme compressionScheme; + if (compressResponse) { + compressionScheme = configuredCompressionScheme; + } else { + compressionScheme = null; + } OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), response, version, - requestId, isHandshake, compress); + requestId, isHandshake, compressionScheme); ActionListener<Void> listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, response)); sendMessage(channel, message, listener); } @@ -107,7 +116,7 @@ void sendErrorResponse(final Version nodeVersion, final TcpChannel channel, fina TransportAddress address = new TransportAddress(channel.getLocalAddress()); RemoteTransportException tx = new RemoteTransportException(nodeName, address, action, error); OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), tx, version, requestId, - false, false); + false, null); ActionListener<Void> listener = ActionListener.wrap(() -> messageListener.onResponseSent(requestId, action, error)); sendMessage(channel, message, listener); } diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java index 86b3c2b715a1c..d2f566aa1ac3d 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.transport; -import net.jpountz.lz4.LZ4FrameOutputStream; - import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -28,8 +26,9 @@ abstract class OutboundMessage extends NetworkMessage { protected final Writeable message; - OutboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, Writeable message) { - super(threadContext, version, status, requestId); + OutboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, CompressionScheme compressionScheme, + Writeable message) { + super(threadContext, version, status, requestId, compressionScheme); this.message = message; } @@ -88,12 +87,18 @@ BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { // compressed stream wrapped bytes must be no-close wrapped since we need to close the compressed wrapper below to release // resources and write EOS marker bytes but must not yet release the bytes themselves - private OutputStreamStreamOutput wrapCompressed(BytesStreamOutput bytesStream) throws IOException { - if (true) { + private StreamOutput wrapCompressed(BytesStreamOutput bytesStream) throws IOException { + if (compressionScheme == CompressionScheme.DEFLATE) { return new OutputStreamStreamOutput(CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.noCloseStream(bytesStream))); + } else if (compressionScheme == CompressionScheme.LZ4) { + // TODO: Change after backport + if (version.onOrAfter(Version.V_8_0_0)) { + return new
OutputStreamStreamOutput(CompressionScheme.lz4OutputStream(Streams.noCloseStream(bytesStream))); + } else { + return bytesStream; + } } else { - return new OutputStreamStreamOutput(new LZ4FrameOutputStream(Streams.noCloseStream(bytesStream), - LZ4FrameOutputStream.BLOCKSIZE.SIZE_64KB)); + throw new IllegalArgumentException("Invalid compression scheme: " + compressionScheme); } } @@ -106,8 +111,8 @@ static class Request extends OutboundMessage { private final String action; Request(ThreadContext threadContext, Writeable message, Version version, String action, long requestId, - boolean isHandshake, boolean compress) { - super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + boolean isHandshake, CompressionScheme compressionScheme) { + super(threadContext, version, setStatus(compressionScheme, isHandshake, message), requestId, compressionScheme, message); this.action = action; } @@ -121,10 +126,10 @@ protected void writeVariableHeader(StreamOutput stream) throws IOException { stream.writeString(action); } - private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { + private static byte setStatus(CompressionScheme compressionScheme, boolean isHandshake, Writeable message) { byte status = 0; status = TransportStatus.setRequest(status); - if (compress && OutboundMessage.canCompress(message)) { + if (compressionScheme != null && OutboundMessage.canCompress(message)) { status = TransportStatus.setCompress(status); } if (isHandshake) { @@ -143,17 +148,18 @@ public String toString() { static class Response extends OutboundMessage { - Response(ThreadContext threadContext, Writeable message, Version version, long requestId, boolean isHandshake, boolean compress) { - super(threadContext, version, setStatus(compress, isHandshake, message), requestId, message); + Response(ThreadContext threadContext, Writeable message, Version version, long requestId, boolean isHandshake, + CompressionScheme compressionScheme) { + super(threadContext, version, setStatus(compressionScheme, isHandshake, message), requestId, compressionScheme, message); } - private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { + private static byte setStatus(CompressionScheme compressionScheme, boolean isHandshake, Writeable message) { byte status = 0; status = TransportStatus.setResponse(status); if (message instanceof RemoteTransportException) { status = TransportStatus.setError(status); } - if (compress) { + if (compressionScheme != null) { status = TransportStatus.setCompress(status); } if (isHandshake) { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index cc2bc961dd4ae..ee0dbf1147667 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -104,6 +104,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements protected final NetworkService networkService; protected final Set profileSettings; private final CircuitBreakerService circuitBreakerService; + private final CompressionScheme compressionScheme; private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); private final Map> serverChannels = newConcurrentMap(); @@ -133,14 +134,15 @@ public TcpTransport(Settings settings, Version version, ThreadPool threadPool, P this.pageCacheRecycler = pageCacheRecycler; this.circuitBreakerService = 
circuitBreakerService; this.networkService = networkService; + this.compressionScheme = TransportSettings.TRANSPORT_COMPRESSION_SCHEME.get(settings); String nodeName = Node.NODE_NAME_SETTING.get(settings); BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.IN_FLIGHT_REQUESTS); - this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, bigArrays); + this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, bigArrays, this.compressionScheme); this.handshaker = new TransportHandshaker(version, threadPool, (node, channel, requestId, v) -> outboundHandler.sendRequest(node, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, new TransportHandshaker.HandshakeRequest(version), - TransportRequestOptions.EMPTY, v, false, true)); + TransportRequestOptions.EMPTY, v, null, true)); this.keepAlive = new TransportKeepAlive(threadPool, this.outboundHandler::sendBytes); this.inboundHandler = new InboundHandler(threadPool, outboundHandler, namedWriteableRegistry, handshaker, keepAlive, requestHandlers, responseHandlers); @@ -244,8 +246,13 @@ public void sendRequest(long requestId, String action, TransportRequest request, throw new NodeNotConnectedException(node, "connection already closed"); } TcpChannel channel = channel(options.type()); - boolean shouldCompress = compress || (rawDataCompress && request instanceof RawDataTransportRequest); - outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), shouldCompress, false); + CompressionScheme compressionScheme; + if (compress || (rawDataCompress && request instanceof RawDataTransportRequest)) { + compressionScheme = TcpTransport.this.compressionScheme; + } else { + compressionScheme = null; + } + outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), compressionScheme, false); } @Override diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java index cb62b0dc47325..f208c02eedc20 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; -import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.compress.DeflateCompressor; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.Releasable; @@ -26,31 +25,27 @@ public interface TransportDecompressor extends Releasable { @Override void close(); - byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; - byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; - int HEADER_LENGTH = 4; - static TransportDecompressor getDecompressor(PageCacheRecycler recycler, BytesReference bytes) throws IOException { if (bytes.length() < DeflateCompressor.HEADER.length) { return null; } byte firstByte = bytes.get(0); byte[] header; - if (firstByte == DEFLATE_HEADER[0]) { - header = DEFLATE_HEADER; - } else if (firstByte == LZ4_HEADER[0]) { - header = LZ4_HEADER; + if (firstByte == CompressionScheme.DEFLATE_HEADER[0]) { + header = CompressionScheme.DEFLATE_HEADER; + } else if (firstByte == CompressionScheme.LZ4_HEADER[0]) { + header = CompressionScheme.LZ4_HEADER; } else { throw createIllegalState(bytes); } - for (int i = 1; i < 
HEADER_LENGTH; ++i) { + for (int i = 1; i < CompressionScheme.HEADER_LENGTH; ++i) { if (bytes.get(i) != header[i]) { throw createIllegalState(bytes); } } - if (header == DEFLATE_HEADER) { + if (header == CompressionScheme.DEFLATE_HEADER) { return new DeflateTransportDecompressor(recycler); } else { return new Lz4TransportDecompressor(recycler); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java index 37e24719c1c4c..6c48d24c7602e 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java @@ -21,6 +21,7 @@ import static java.util.Collections.emptyList; import static org.elasticsearch.common.settings.Setting.affixKeySetting; import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.enumSetting; import static org.elasticsearch.common.settings.Setting.intSetting; import static org.elasticsearch.common.settings.Setting.listSetting; import static org.elasticsearch.common.settings.Setting.timeSetting; @@ -53,6 +54,9 @@ public final class TransportSettings { boolSetting("transport.compress", false, Setting.Property.NodeScope); public static final Setting TRANSPORT_COMPRESS_RAW_DATA = boolSetting("transport.compress_raw_data", false, Setting.Property.NodeScope); + public static final Setting TRANSPORT_COMPRESSION_SCHEME = + enumSetting(CompressionScheme.class, "transport.compression_scheme", CompressionScheme.DEFLATE, + Setting.Property.NodeScope); // the scheduled internal ping interval setting, defaults to disabled (-1) public static final Setting PING_SCHEDULE = timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), Setting.Property.NodeScope); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index c4ac247399b89..cc96d0ebc42b7 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -47,10 +47,10 @@ public void testDecode() throws IOException { OutboundMessage message; if (isRequest) { message = new OutboundMessage.Request(threadContext, new TestRequest(randomAlphaOfLength(100)), - Version.CURRENT, action, requestId, false, false); + Version.CURRENT, action, requestId, false, null); } else { message = new OutboundMessage.Response(threadContext, new TestResponse(randomAlphaOfLength(100)), - Version.CURRENT, requestId, false, false); + Version.CURRENT, requestId, false, null); } final BytesReference totalBytes = message.serialize(new BytesStreamOutput()); @@ -96,14 +96,14 @@ public void testDecode() throws IOException { public void testDecodePreHeaderSizeVariableInt() throws IOException { // TODO: Can delete test on 9.0 - boolean isCompressed = randomBoolean(); + CompressionScheme compressionScheme = randomFrom(null, CompressionScheme.DEFLATE, CompressionScheme.DEFLATE); String action = "test-request"; long requestId = randomNonNegativeLong(); final Version preHeaderVariableInt = Version.V_7_5_0; final String contentValue = randomAlphaOfLength(100); // 8.0 is only compatible with handshakes on a pre-variable int version final OutboundMessage message = new OutboundMessage.Request(threadContext, new TestRequest(contentValue), - preHeaderVariableInt, action, requestId, true, 
isCompressed); + preHeaderVariableInt, action, requestId, true, compressionScheme); final BytesReference totalBytes = message.serialize(new BytesStreamOutput()); int partialHeaderSize = TcpHeader.headerSize(preHeaderVariableInt); @@ -118,7 +118,11 @@ public void testDecodePreHeaderSizeVariableInt() throws IOException { final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); assertEquals(preHeaderVariableInt, header.getVersion()); - assertEquals(isCompressed, header.isCompressed()); + if (compressionScheme == null) { + assertFalse(header.isCompressed()); + } else { + assertTrue(header.isCompressed()); + } assertTrue(header.isHandshake()); assertTrue(header.isRequest()); assertTrue(header.needsToReadVariableHeader()); @@ -140,7 +144,7 @@ public void testDecodeHandshakeCompatibility() throws IOException { threadContext.putHeader(headerKey, headerValue); Version handshakeCompat = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); OutboundMessage message = new OutboundMessage.Request(threadContext, new TestRequest(randomAlphaOfLength(100)), - handshakeCompat, action, requestId, true, false); + handshakeCompat, action, requestId, true, null); final BytesReference bytes = message.serialize(new BytesStreamOutput()); int totalHeaderSize = TcpHeader.headerSize(handshakeCompat); @@ -176,12 +180,13 @@ public void testCompressedDecode() throws IOException { } OutboundMessage message; TransportMessage transportMessage; + CompressionScheme scheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); if (isRequest) { transportMessage = new TestRequest(randomAlphaOfLength(100)); - message = new OutboundMessage.Request(threadContext, transportMessage, Version.CURRENT, action, requestId, false, true); + message = new OutboundMessage.Request(threadContext, transportMessage, Version.CURRENT, action, requestId, false, scheme); } else { transportMessage = new TestResponse(randomAlphaOfLength(100)); - message = new OutboundMessage.Response(threadContext, transportMessage, Version.CURRENT, requestId, false, true); + message = new OutboundMessage.Response(threadContext, transportMessage, Version.CURRENT, requestId, false, scheme); } final BytesReference totalBytes = message.serialize(new BytesStreamOutput()); @@ -235,7 +240,7 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { threadContext.putHeader(headerKey, headerValue); Version handshakeCompat = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); OutboundMessage message = new OutboundMessage.Request(threadContext, new TestRequest(randomAlphaOfLength(100)), - handshakeCompat, action, requestId, true, true); + handshakeCompat, action, requestId, true, CompressionScheme.DEFLATE); final BytesReference bytes = message.serialize(new BytesStreamOutput()); int totalHeaderSize = TcpHeader.headerSize(handshakeCompat); @@ -263,7 +268,7 @@ public void testVersionIncompatibilityDecodeException() throws IOException { long requestId = randomNonNegativeLong(); Version incompatibleVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); OutboundMessage message = new OutboundMessage.Request(threadContext, new TestRequest(randomAlphaOfLength(100)), - incompatibleVersion, action, requestId, false, true); + incompatibleVersion, action, requestId, false, CompressionScheme.DEFLATE); final BytesReference bytes = message.serialize(new BytesStreamOutput()); diff --git 
a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index 52a818384f322..0a9e526420191 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -65,7 +65,7 @@ public void setUp() throws Exception { TransportHandshaker handshaker = new TransportHandshaker(version, threadPool, (n, c, r, v) -> {}); TransportKeepAlive keepAlive = new TransportKeepAlive(threadPool, TcpChannel::sendMessage); OutboundHandler outboundHandler = new OutboundHandler("node", version, new StatsTracker(), threadPool, - BigArrays.NON_RECYCLING_INSTANCE); + BigArrays.NON_RECYCLING_INSTANCE, randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4)); requestHandlers = new Transport.RequestHandlers(); responseHandlers = new Transport.ResponseHandlers(); handler = new InboundHandler(threadPool, outboundHandler, namedWriteableRegistry, handshaker, keepAlive, requestHandlers, @@ -125,7 +125,7 @@ public TestResponse read(StreamInput in) throws IOException { requestHandlers.registerHandler(registry); String requestValue = randomAlphaOfLength(10); OutboundMessage.Request request = new OutboundMessage.Request(threadPool.getThreadContext(), - new TestRequest(requestValue), version, action, requestId, false, false); + new TestRequest(requestValue), version, action, requestId, false, null); BytesReference fullRequestBytes = request.serialize(new BytesStreamOutput()); BytesReference requestContent = fullRequestBytes.slice(headerSize, fullRequestBytes.length() - headerSize); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index a2472059d3b6a..fbe415fddded0 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -95,7 +95,13 @@ public void testPipelineHandling() throws IOException { final Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); final String value = randomAlphaOfLength(randomIntBetween(10, 200)); final boolean isRequest = randomBoolean(); - final boolean isCompressed = randomBoolean(); + + CompressionScheme scheme; + if (randomBoolean()) { + scheme = null; + } else { + scheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); + } final long requestId = totalMessages++; final MessageData messageData; @@ -104,19 +110,19 @@ public void testPipelineHandling() throws IOException { OutboundMessage message; if (isRequest) { if (rarely()) { - messageData = new MessageData(version, requestId, true, isCompressed, breakThisAction, null); + messageData = new MessageData(version, requestId, true, scheme != null, breakThisAction, null); message = new OutboundMessage.Request(threadContext, new TestRequest(value), - version, breakThisAction, requestId, false, isCompressed); + version, breakThisAction, requestId, false, scheme); expectedExceptionClass = new CircuitBreakingException("", CircuitBreaker.Durability.PERMANENT); } else { - messageData = new MessageData(version, requestId, true, isCompressed, actionName, value); + messageData = new MessageData(version, requestId, true, scheme != null, actionName, value); message = new OutboundMessage.Request(threadContext, new TestRequest(value), - version, actionName, requestId, false, isCompressed); + 
version, actionName, requestId, false, scheme); } } else { - messageData = new MessageData(version, requestId, false, isCompressed, null, value); + messageData = new MessageData(version, requestId, false, scheme != null, null, value); message = new OutboundMessage.Response(threadContext, new TestResponse(value), - version, requestId, false, isCompressed); + version, requestId, false, scheme); } expected.add(new Tuple<>(messageData, expectedExceptionClass)); @@ -184,10 +190,10 @@ public void testDecodeExceptionIsPropagated() throws IOException { OutboundMessage message; if (isRequest) { message = new OutboundMessage.Request(threadContext, new TestRequest(value), - invalidVersion, actionName, requestId, false, false); + invalidVersion, actionName, requestId, false, null); } else { message = new OutboundMessage.Response(threadContext, new TestResponse(value), - invalidVersion, requestId, false, false); + invalidVersion, requestId, false, null); } final BytesReference reference = message.serialize(streamOutput); @@ -221,10 +227,10 @@ public void testEnsureBodyIsNotPrematurelyReleased() throws IOException { OutboundMessage message; if (isRequest) { message = new OutboundMessage.Request(threadContext, new TestRequest(value), - version, actionName, requestId, false, false); + version, actionName, requestId, false, null); } else { message = new OutboundMessage.Response(threadContext, new TestResponse(value), - version, requestId, false, false); + version, requestId, false, null); } final BytesReference reference = message.serialize(streamOutput); diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index b0355ee391fe2..660026c6c4482 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -66,7 +66,8 @@ public void setUp() throws Exception { TransportAddress transportAddress = buildNewFakeTransportAddress(); node = new DiscoveryNode("", transportAddress, Version.CURRENT); StatsTracker statsTracker = new StatsTracker(); - handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, BigArrays.NON_RECYCLING_INSTANCE); + handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4)); final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime()); final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE); @@ -119,7 +120,7 @@ public void testSendRequest() throws IOException { String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - boolean compress = randomBoolean(); + CompressionScheme compress = randomFrom(null, CompressionScheme.DEFLATE, CompressionScheme.LZ4); String value = "message"; threadContext.putHeader("header", "header_value"); TestRequest request = new TestRequest(value); @@ -166,7 +167,7 @@ public void onRequestSent(DiscoveryNode node, long requestId, String action, Tra } else { assertFalse(header.isHandshake()); } - if (compress) { + if (compress != null) { assertTrue(header.isCompressed()); } else { assertFalse(header.isCompressed()); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 
2ae5c670c7ada..df822be122a73 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -403,7 +403,7 @@ private void testExceptionHandling(boolean startTransport, Exception exception, TcpTransport.handleException(channel, exception, lifecycle, new OutboundHandler(randomAlphaOfLength(10), Version.CURRENT, new StatsTracker(), testThreadPool, - BigArrays.NON_RECYCLING_INSTANCE)); + BigArrays.NON_RECYCLING_INSTANCE, randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4))); if (expectClosed) { assertTrue(listener.isDone()); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index 2cd47c0a9b42d..8a90384e7d2a4 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -78,7 +78,7 @@ public void testLoggingHandler() throws IOException { } private BytesReference buildRequest() throws IOException { - boolean compress = randomBoolean(); + CompressionScheme compress = randomFrom(null, CompressionScheme.DEFLATE, CompressionScheme.LZ4); try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { OutboundMessage.Request request = new OutboundMessage.Request(new ThreadContext(Settings.EMPTY), new ClusterStatsRequest(), Version.CURRENT, ClusterStatsAction.NAME, randomInt(30), false, compress); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 6f96cf48789eb..3d8cf5bba5030 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -97,6 +97,7 @@ import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.CompressionScheme; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; @@ -440,6 +441,12 @@ private static Settings getRandomNodeSettings(long seed) { Random random = new Random(seed); Builder builder = Settings.builder(); builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), rarely(random)); + builder.put(TransportSettings.TRANSPORT_COMPRESS_RAW_DATA.getKey(), random.nextBoolean()); + if (random.nextBoolean()) { + builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), CompressionScheme.DEFLATE); + } else { + builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), CompressionScheme.LZ4); + } if (random.nextBoolean()) { builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values())); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index b6d463f40b032..7a79b8c21e9b8 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -555,7 +555,10 @@ public void testVoidMessageCompressed() throws Exception { } }); - Settings settingsWithCompress = 
Settings.builder().put(TransportSettings.TRANSPORT_COMPRESS.getKey(), true).build(); + Settings settingsWithCompress = Settings.builder() + .put(TransportSettings.TRANSPORT_COMPRESS.getKey(), true) + .put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4)) + .build(); ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); connectToNode(serviceC, serviceA.getLocalDiscoNode(), connectionProfile); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java index c204ece09a425..228bc47ae4819 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java @@ -12,12 +12,15 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.threadpool.ThreadPool; +import static org.elasticsearch.test.ESTestCase.randomFrom; + public class TestTransportChannels { public static TcpTransportChannel newFakeTcpTransportChannel(String nodeName, TcpChannel channel, ThreadPool threadPool, String action, long requestId, Version version) { return new TcpTransportChannel( - new OutboundHandler(nodeName, version, new StatsTracker(), threadPool, BigArrays.NON_RECYCLING_INSTANCE), + new OutboundHandler(nodeName, version, new StatsTracker(), threadPool, BigArrays.NON_RECYCLING_INSTANCE, + randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4)), channel, action, requestId, version, false, false, () -> {}); } } From 62da09a32b782626579b668aef296c622e3e0293 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 24 Jun 2021 19:00:45 -0600 Subject: [PATCH 05/29] Fix --- .../org/elasticsearch/common/settings/ClusterSettings.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 0861e483d52b7..c2b7ebce86847 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -296,6 +296,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterService.REMOTE_NODE_ATTRIBUTE, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, + RemoteClusterService.REMOTE_CLUSTER_COMPRESS_RAW_DATA, RemoteConnectionStrategy.REMOTE_CONNECTION_MODE, ProxyConnectionStrategy.PROXY_ADDRESS, ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS, @@ -320,6 +321,8 @@ public void apply(Settings value, Settings current, Settings previous) { TransportSettings.PUBLISH_PORT, TransportSettings.PUBLISH_PORT_PROFILE, TransportSettings.TRANSPORT_COMPRESS, + TransportSettings.TRANSPORT_COMPRESS_RAW_DATA, + TransportSettings.TRANSPORT_COMPRESSION_SCHEME, TransportSettings.PING_SCHEDULE, TransportSettings.CONNECT_TIMEOUT, TransportSettings.DEFAULT_FEATURES_SETTING, From 6e8bef46ab32bbf0a9ac656bd1388252408bdc13 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 24 Jun 2021 19:06:09 -0600 Subject: [PATCH 06/29] Fix --- .../org/elasticsearch/transport/Lz4TransportDecompressor.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java 
b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 605fed6f98c79..0a8b9a5e4990d 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -62,7 +62,7 @@ public int decompress(BytesReference bytesReference) throws IOException { int bytesDecompressed; try { bytesDecompressed = inputStream.read(output, pageOffset, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); - pageOffset += bytesDecompressed; + pageOffset += Math.max(bytesDecompressed, 0); if (isNewPage) { if (bytesDecompressed == 0) { page.close(); @@ -74,7 +74,7 @@ public int decompress(BytesReference bytesReference) throws IOException { } catch (IOException e) { throw new IOException("Exception while LZ4 decompressing bytes", e); } - if (bytesDecompressed == 0) { + if (bytesDecompressed <= 0) { continueDecompressing = false; } } From 0ffd172a3ff8a1f7187fa6f9d77342873ee73cec Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 24 Jun 2021 19:06:59 -0600 Subject: [PATCH 07/29] Fix --- .../org/elasticsearch/transport/Lz4TransportDecompressor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 0a8b9a5e4990d..82b33a24957d2 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -64,7 +64,7 @@ public int decompress(BytesReference bytesReference) throws IOException { bytesDecompressed = inputStream.read(output, pageOffset, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); pageOffset += Math.max(bytesDecompressed, 0); if (isNewPage) { - if (bytesDecompressed == 0) { + if (bytesDecompressed <= 0) { page.close(); pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; } else { From 723d7b60e9214b901cabe54ff7b773d81422ca03 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Thu, 24 Jun 2021 23:51:59 -0600 Subject: [PATCH 08/29] Changes --- .../transport/CompressionScheme.java | 4 +- .../transport/InboundDecoder.java | 18 +- .../elasticsearch/transport/LZ4Inflater.java | 320 ++++++++++++++++++ .../transport/Lz4TransportDecompressor.java | 10 +- .../transport/OutboundHandler.java | 8 +- .../transport/OutboundMessage.java | 18 +- .../elasticsearch/transport/TcpTransport.java | 11 +- .../transport/TransportDecompressor.java | 2 +- .../transport/InboundDecoderTests.java | 2 +- .../transport/InboundPipelineTests.java | 7 +- .../transport/OutboundHandlerTests.java | 4 +- 11 files changed, 368 insertions(+), 36 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java diff --git a/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java b/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java index a1a03603ca5ed..6bdb8524a4f71 100644 --- a/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java +++ b/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import net.jpountz.lz4.LZ4FrameOutputStream; +import net.jpountz.lz4.LZ4BlockOutputStream; import org.elasticsearch.common.compress.DeflateCompressor; @@ -25,6 +25,6 @@ public enum CompressionScheme { public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { 
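The two decompressor fixes above address InputStream.read returning -1 at end of stream: clamping with Math.max keeps a -1 return from walking pageOffset backwards, and treating any non-positive return as "no progress" both releases an untouched fresh page and ends the read loop. A compact restatement of that guard, as a sketch with plain arrays standing in for recycler pages:

    import java.io.IOException;
    import java.io.InputStream;

    final class ReadLoopGuard {
        // Fills as much of the page as the stream can provide, never regressing the offset.
        static int fill(InputStream in, byte[] page, int pageOffset) throws IOException {
            while (pageOffset < page.length) {
                int n = in.read(page, pageOffset, page.length - pageOffset);
                if (n <= 0) {
                    break; // -1 (end of stream) and 0 both mean no progress was made
                }
                pageOffset += n; // advance only by bytes actually read
            }
            return pageOffset;
        }
    }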
outputStream.write(LZ4_HEADER); - return new LZ4FrameOutputStream(outputStream, LZ4FrameOutputStream.BLOCKSIZE.SIZE_64KB); + return new LZ4BlockOutputStream(outputStream, 64 * 1024); } } diff --git a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java index fb619d583902a..a074264737883 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java @@ -85,28 +85,31 @@ public int internalDecode(ReleasableBytesReference reference, Consumer f this.decompressor = decompressor; } } - int bytesToConsume = Math.min(reference.length(), totalNetworkSize - bytesConsumed); - bytesConsumed += bytesToConsume; + int maxBytesToConsume = Math.min(reference.length(), totalNetworkSize - bytesConsumed); + int bytesConsumedThisDecode = 0; ReleasableBytesReference retainedContent; if (isDone()) { - retainedContent = reference.retainedSlice(0, bytesToConsume); + retainedContent = reference.retainedSlice(0, maxBytesToConsume); } else { retainedContent = reference.retain(); } if (decompressor != null) { - decompress(retainedContent); + bytesConsumedThisDecode += decompress(retainedContent); + bytesConsumed += bytesConsumedThisDecode; ReleasableBytesReference decompressed; while ((decompressed = decompressor.pollDecompressedPage(isDone())) != null) { fragmentConsumer.accept(decompressed); } } else { + bytesConsumedThisDecode += maxBytesToConsume; + bytesConsumed += maxBytesToConsume; fragmentConsumer.accept(retainedContent); } if (isDone()) { finishMessage(fragmentConsumer); } - return bytesToConsume; + return bytesConsumedThisDecode; } } @@ -132,10 +135,9 @@ private void cleanDecodeState() { } } - private void decompress(ReleasableBytesReference content) throws IOException { + private int decompress(ReleasableBytesReference content) throws IOException { try (content) { - int consumed = decompressor.decompress(content); - assert consumed == content.length(); + return decompressor.decompress(content); } } diff --git a/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java b/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java new file mode 100644 index 0000000000000..b780fc2a5d99e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java @@ -0,0 +1,320 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
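The CompressionScheme change above swaps the LZ4 frame format for lz4-java's simpler block format: lz4OutputStream first writes the one-byte Elasticsearch scheme header, which the receiving decompressor strips, and then frames the payload as 64 KB LZ4 blocks. A minimal round trip through just the lz4-java layer (the Elasticsearch header is left out of this sketch):

    import net.jpountz.lz4.LZ4BlockInputStream;
    import net.jpountz.lz4.LZ4BlockOutputStream;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    final class Lz4BlockRoundTrip {
        static byte[] roundTrip(byte[] data) throws IOException {
            ByteArrayOutputStream sink = new ByteArrayOutputStream();
            // Same 64 KB block size the patch passes to LZ4BlockOutputStream.
            try (LZ4BlockOutputStream lz4 = new LZ4BlockOutputStream(sink, 64 * 1024)) {
                lz4.write(data);
            } // close() appends the zero-length terminator block
            try (LZ4BlockInputStream in = new LZ4BlockInputStream(new ByteArrayInputStream(sink.toByteArray()))) {
                return in.readAllBytes();
            }
        }

        public static void main(String[] args) throws IOException {
            byte[] payload = "hello lz4 block format".getBytes(StandardCharsets.UTF_8);
            System.out.println(Arrays.equals(payload, roundTrip(payload))); // true
        }
    }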
+ */ + +package org.elasticsearch.transport; + +import net.jpountz.lz4.LZ4Exception; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4FastDecompressor; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.util.PageCacheRecycler; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.zip.Checksum; + +public class LZ4Inflater implements TransportDecompressor { + + private final ThreadLocal uncompressed = ThreadLocal.withInitial(() -> new byte[64 * 1024]); + + /** + * Magic number of LZ4 block. + */ + static final long MAGIC_NUMBER = (long) 'L' << 56 | + (long) 'Z' << 48 | + (long) '4' << 40 | + (long) 'B' << 32 | + 'l' << 24 | + 'o' << 16 | + 'c' << 8 | + 'k'; + + static final int HEADER_LENGTH = 8 + // magic number + 1 + // token + 4 + // compressed length + 4 + // decompressed length + 4; // checksum + + + /** + * Base value for compression level. + */ + static final int COMPRESSION_LEVEL_BASE = 10; + + static final int MIN_BLOCK_SIZE = 64; + static final int MAX_BLOCK_SIZE = 1 << COMPRESSION_LEVEL_BASE + 0x0F; // 32 M + static final int DEFAULT_BLOCK_SIZE = 1 << 16; // 64 KB + + static final int BLOCK_TYPE_NON_COMPRESSED = 0x10; + static final int BLOCK_TYPE_COMPRESSED = 0x20; + + private enum State { + INIT_BLOCK, + DECOMPRESS_DATA, + FINISHED, + CORRUPTED + } + + private State currentState = State.INIT_BLOCK; + + /** + * Underlying decompressor in use. + */ + private LZ4FastDecompressor decompressor; + + /** + * Underlying checksum calculator in use. + */ + private Checksum checksum; + + /** + * Type of current block. + */ + private int blockType; + + /** + * Compressed length of current incoming block. + */ + private int compressedLength; + + /** + * Decompressed length of current incoming block. + */ + private int decompressedLength; + + /** + * Checksum value of current incoming block. 
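The constants above mirror lz4-java's block framing: an 8-byte "LZ4Block" magic, one token byte whose low nibble encodes the compression level and whose high nibble encodes the block type, then compressed length, decompressed length and checksum as little-endian ints, 21 bytes in total. StreamInput reads big-endian, hence the Integer.reverseBytes calls in the decoder; a standalone parse of one such header, sketched with DataInputStream (also big-endian):

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    final class Lz4BlockHeader {
        static final long MAGIC = 0x4c5a34426c6f636bL; // "LZ4Block"

        final int blockType;          // 0x10 = stored as-is, 0x20 = LZ4-compressed
        final int compressionLevel;   // decompressed length must fit in 1 << level
        final int compressedLength;
        final int decompressedLength;
        final int checksum;

        Lz4BlockHeader(byte[] header) throws IOException {
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(header));
            if (in.readLong() != MAGIC) {
                throw new IOException("unexpected block identifier");
            }
            int token = in.readByte();
            blockType = token & 0xF0;
            compressionLevel = (token & 0x0F) + 10;
            // lz4-java writes these three ints little-endian; this stream reads big-endian.
            compressedLength = Integer.reverseBytes(in.readInt());
            decompressedLength = Integer.reverseBytes(in.readInt());
            checksum = Integer.reverseBytes(in.readInt());
        }
    }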
+ */ + private int currentChecksum; + + private final PageCacheRecycler recycler; + private final ArrayDeque> pages; + private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; + private byte[] compressedBuffer = new byte[0]; + private boolean hasSkippedESHeader = false; + + public LZ4Inflater(PageCacheRecycler recycler) { + this.decompressor = LZ4Factory.fastestJavaInstance().fastDecompressor(); + this.recycler = recycler; + this.pages = new ArrayDeque<>(4); + this.checksum = null; + } + + @Override + public ReleasableBytesReference pollDecompressedPage(boolean isEOS) { + if (pages.isEmpty()) { + return null; + } else if (pages.size() == 1) { + if (isEOS) { + Recycler.V page = pages.pollFirst(); + ReleasableBytesReference reference = new ReleasableBytesReference(new BytesArray(page.v(), 0, pageOffset), page); + pageOffset = 0; + return reference; + } else { + return null; + } + } else { + Recycler.V page = pages.pollFirst(); + return new ReleasableBytesReference(new BytesArray(page.v()), page); + } + } + + @Override + public void close() { + for (Recycler.V page : pages) { + page.close(); + } + } + + @Override + public int decompress(BytesReference bytesReference) throws IOException { + int bytesConsumed = 0; + if (hasSkippedESHeader == false) { + hasSkippedESHeader = true; + int esHeaderLength = CompressionScheme.HEADER_LENGTH; + bytesReference = bytesReference.slice(esHeaderLength, bytesReference.length() - esHeaderLength); + bytesConsumed += esHeaderLength; + } + + while (true) { + int consumed = decodeBlock(bytesReference); + bytesConsumed += consumed; + int newLength = bytesReference.length() - consumed; + if (consumed > 0 && newLength > 0) { + bytesReference = bytesReference.slice(consumed, newLength); + } else { + break; + } + } + + return bytesConsumed; + } + + private int decodeBlock(BytesReference reference) throws IOException { + int bytesConsumed = 0; + try { + switch (currentState) { + case INIT_BLOCK: + if (reference.length() < HEADER_LENGTH) { + return bytesConsumed; + } + try (StreamInput in = reference.streamInput()) { + final long magic = in.readLong(); + if (magic != MAGIC_NUMBER) { + throw new IllegalStateException("unexpected block identifier"); + } + + final int token = in.readByte(); + final int compressionLevel = (token & 0x0F) + COMPRESSION_LEVEL_BASE; + int blockType = token & 0xF0; + + int compressedLength = Integer.reverseBytes(in.readInt()); + if (compressedLength < 0 || compressedLength > MAX_BLOCK_SIZE) { + throw new IllegalStateException(String.format( + "invalid compressedLength: %d (expected: 0-%d)", + compressedLength, MAX_BLOCK_SIZE)); + } + + int decompressedLength = Integer.reverseBytes(in.readInt()); + final int maxDecompressedLength = 1 << compressionLevel; + if (decompressedLength < 0 || decompressedLength > maxDecompressedLength) { + throw new IllegalStateException(String.format( + "invalid decompressedLength: %d (expected: 0-%d)", + decompressedLength, maxDecompressedLength)); + } + if (decompressedLength == 0 && compressedLength != 0 + || decompressedLength != 0 && compressedLength == 0 + || blockType == BLOCK_TYPE_NON_COMPRESSED && decompressedLength != compressedLength) { + throw new IllegalStateException(String.format( + "stream corrupted: compressedLength(%d) and decompressedLength(%d) mismatch", + compressedLength, decompressedLength)); + } + + int currentChecksum = Integer.reverseBytes(in.readInt()); + bytesConsumed += HEADER_LENGTH; + + if (decompressedLength == 0 && compressedLength == 0) { + if (currentChecksum != 0) { + throw new 
IllegalStateException("stream corrupted: checksum error"); + } + currentState = State.FINISHED; + decompressor = null; + checksum = null; + break; + } + + this.blockType = blockType; + this.compressedLength = compressedLength; + this.decompressedLength = decompressedLength; + this.currentChecksum = currentChecksum; + } + + currentState = State.DECOMPRESS_DATA; + break; + case DECOMPRESS_DATA: + if (reference.length() < compressedLength) { + break; + } + + final Checksum checksum = this.checksum; + byte[] uncompressed = this.uncompressed.get(); + if (decompressedLength > uncompressed.length) { + uncompressed = new byte[decompressedLength]; + this.uncompressed.set(uncompressed); + } + + try { + switch (blockType) { + case BLOCK_TYPE_NON_COMPRESSED: + try (StreamInput streamInput = reference.streamInput()) { + streamInput.readBytes(uncompressed, 0, decompressedLength); + } + break; + case BLOCK_TYPE_COMPRESSED: + BytesRef ref = reference.iterator().next(); + final byte[] compressed; + if (ref.length >= compressedLength) { + compressed = ref.bytes; + } else { + compressed = getCompressedBuffer(compressedLength); + try (StreamInput streamInput = reference.streamInput()) { + streamInput.readBytes(compressed, 0, compressedLength); + } + } + decompressor.decompress(compressed, 0, uncompressed, 0, decompressedLength); + break; + default: + throw new IllegalStateException(String.format( + "unexpected blockType: %d (expected: %d or %d)", + blockType, BLOCK_TYPE_NON_COMPRESSED, BLOCK_TYPE_COMPRESSED)); + } + // Skip inbound bytes after we processed them. + bytesConsumed += compressedLength; + + if (checksum != null) { +// CompressionUtil.checkChecksum(checksum, uncompressed, currentChecksum); + } + + int bytesToCopy = decompressedLength; + int bytesCopied = 0; + while (bytesCopied != bytesToCopy) { + final Recycler.V page; + final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; + if (isNewPage) { + pageOffset = 0; + pages.add(recycler.bytePage(false)); + } + page = pages.getLast(); + + int toCopy = Math.min(bytesToCopy, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); + System.arraycopy(uncompressed, bytesCopied, page.v(), pageOffset, toCopy); + pageOffset += toCopy; + bytesCopied += toCopy; + } + currentState = State.INIT_BLOCK; + } catch (LZ4Exception e) { + throw new IllegalStateException(e); + } + break; + case FINISHED: + break; + case CORRUPTED: + throw new IllegalStateException("LZ4 stream corrupted."); + default: + throw new IllegalStateException(); + } + } catch (IOException e) { + currentState = State.CORRUPTED; + throw e; + } + return bytesConsumed; + } + + private byte[] getCompressedBuffer(int requiredSize) { + if (compressedBuffer.length >= requiredSize) { + return compressedBuffer; + } else { + this.compressedBuffer = new byte[requiredSize]; + return compressedBuffer; + } + } + + /** + * Returns {@code true} if and only if the end of the compressed stream + * has been reached. 
+ */ + public boolean isClosed() { + return currentState == State.FINISHED; + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 82b33a24957d2..7bc860399530a 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -39,14 +39,15 @@ public Lz4TransportDecompressor(PageCacheRecycler recycler) throws IOException { @Override public int decompress(BytesReference bytesReference) throws IOException { - final StreamInput underlyingStream = bytesReference.streamInput(); - this.expandableStream.nextStream(underlyingStream); - + int bytesConsumed = 0; if (hasSkippedHeader == false) { hasSkippedHeader = true; int headerLength = CompressionScheme.HEADER_LENGTH; bytesReference = bytesReference.slice(headerLength, bytesReference.length() - headerLength); + bytesConsumed += headerLength; } + final StreamInput underlyingStream = bytesReference.streamInput(); + this.expandableStream.nextStream(underlyingStream); boolean continueDecompressing = true; while (continueDecompressing) { @@ -80,8 +81,9 @@ public int decompress(BytesReference bytesReference) throws IOException { } assert underlyingStream.available() == 0; + bytesConsumed += bytesReference.length(); - return bytesReference.length(); + return bytesConsumed; } @Override diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java index 08e2e17ef1f35..3d811ed7ca237 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -68,8 +68,14 @@ void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener li */ void sendRequest(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action, final TransportRequest request, final TransportRequestOptions options, final Version channelVersion, - final CompressionScheme compressionScheme, final boolean isHandshake) throws IOException, TransportException { + final boolean compressRequest, final boolean isHandshake) throws IOException, TransportException { Version version = Version.min(this.version, channelVersion); + final CompressionScheme compressionScheme; + if (compressRequest) { + compressionScheme = null; + } else { + compressionScheme = configuredCompressionScheme; + } OutboundMessage.Request message = new OutboundMessage.Request(threadPool.getThreadContext(), request, version, action, requestId, isHandshake, compressionScheme); if (request.tryIncRef() == false) { diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java index d2f566aa1ac3d..fdb97a63f9dee 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -91,12 +91,7 @@ private StreamOutput wrapCompressed(BytesStreamOutput bytesStream) throws IOExce if (compressionScheme == CompressionScheme.DEFLATE) { return new OutputStreamStreamOutput(CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.noCloseStream(bytesStream))); } else if (compressionScheme == CompressionScheme.LZ4) { - // TODO: Change after backport - if (version.onOrAfter(Version.V_8_0_0)) { - return 
new OutputStreamStreamOutput(CompressionScheme.lz4OutputStream(Streams.noCloseStream(bytesStream))); - } else { - return bytesStream; - } + return new OutputStreamStreamOutput(CompressionScheme.lz4OutputStream(Streams.noCloseStream(bytesStream))); } else { throw new IllegalArgumentException("Invalid compression scheme: " + compressionScheme); } @@ -112,7 +107,8 @@ static class Request extends OutboundMessage { Request(ThreadContext threadContext, Writeable message, Version version, String action, long requestId, boolean isHandshake, CompressionScheme compressionScheme) { - super(threadContext, version, setStatus(compressionScheme, isHandshake, message), requestId, compressionScheme, message); + super(threadContext, version, setStatus(adjustedScheme(version, compressionScheme), isHandshake, message), requestId, + adjustedScheme(version, compressionScheme), message); this.action = action; } @@ -150,7 +146,8 @@ static class Response extends OutboundMessage { Response(ThreadContext threadContext, Writeable message, Version version, long requestId, boolean isHandshake, CompressionScheme compressionScheme) { - super(threadContext, version, setStatus(compressionScheme, isHandshake, message), requestId, compressionScheme, message); + super(threadContext, version, setStatus(adjustedScheme(version, compressionScheme), isHandshake, message), requestId, + adjustedScheme(version, compressionScheme), message); } private static byte setStatus(CompressionScheme compressionScheme, boolean isHandshake, Writeable message) { @@ -176,6 +173,11 @@ public String toString() { } } + private static CompressionScheme adjustedScheme(Version version, CompressionScheme compressionScheme) { + // TODO: Change after backport + return compressionScheme == CompressionScheme.LZ4 && version.before(Version.V_8_0_0) ? 
null : compressionScheme; + } + private static boolean canCompress(Writeable message) { return message instanceof BytesTransportRequest == false; } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index ee0dbf1147667..ef2378ba67894 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -142,7 +142,7 @@ public TcpTransport(Settings settings, Version version, ThreadPool threadPool, P this.handshaker = new TransportHandshaker(version, threadPool, (node, channel, requestId, v) -> outboundHandler.sendRequest(node, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, new TransportHandshaker.HandshakeRequest(version), - TransportRequestOptions.EMPTY, v, null, true)); + TransportRequestOptions.EMPTY, v, false, true)); this.keepAlive = new TransportKeepAlive(threadPool, this.outboundHandler::sendBytes); this.inboundHandler = new InboundHandler(threadPool, outboundHandler, namedWriteableRegistry, handshaker, keepAlive, requestHandlers, responseHandlers); @@ -246,13 +246,8 @@ public void sendRequest(long requestId, String action, TransportRequest request, throw new NodeNotConnectedException(node, "connection already closed"); } TcpChannel channel = channel(options.type()); - CompressionScheme compressionScheme; - if (compress || (rawDataCompress && request instanceof RawDataTransportRequest)) { - compressionScheme = TcpTransport.this.compressionScheme; - } else { - compressionScheme = null; - } - outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), compressionScheme, false); + boolean shouldCompress = compress || (rawDataCompress && request instanceof RawDataTransportRequest); + outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), shouldCompress, false); } @Override diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java index f208c02eedc20..58fccc5dc60c9 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java @@ -48,7 +48,7 @@ static TransportDecompressor getDecompressor(PageCacheRecycler recycler, BytesRe if (header == CompressionScheme.DEFLATE_HEADER) { return new DeflateTransportDecompressor(recycler); } else { - return new Lz4TransportDecompressor(recycler); + return new LZ4Inflater(recycler); } } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index cc96d0ebc42b7..5440a76eb459d 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -96,7 +96,7 @@ public void testDecode() throws IOException { public void testDecodePreHeaderSizeVariableInt() throws IOException { // TODO: Can delete test on 9.0 - CompressionScheme compressionScheme = randomFrom(null, CompressionScheme.DEFLATE, CompressionScheme.DEFLATE); + CompressionScheme compressionScheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.DEFLATE, null); String action = "test-request"; long requestId = randomNonNegativeLong(); final Version preHeaderVariableInt = Version.V_7_5_0; diff --git 
a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index fbe415fddded0..e9ffa12956828 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -100,7 +100,12 @@ public void testPipelineHandling() throws IOException { if (randomBoolean()) { scheme = null; } else { - scheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); + // TODO: Change after backport + if (version.onOrAfter(Version.V_8_0_0)) { + scheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); + } else { + scheme = CompressionScheme.DEFLATE; + } } final long requestId = totalMessages++; diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 660026c6c4482..8e2bed35cbf86 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -120,7 +120,7 @@ public void testSendRequest() throws IOException { String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - CompressionScheme compress = randomFrom(null, CompressionScheme.DEFLATE, CompressionScheme.LZ4); + boolean compress = randomBoolean(); String value = "message"; threadContext.putHeader("header", "header_value"); TestRequest request = new TestRequest(value); @@ -167,7 +167,7 @@ public void onRequestSent(DiscoveryNode node, long requestId, String action, Tra } else { assertFalse(header.isHandshake()); } - if (compress != null) { + if (compress) { assertTrue(header.isCompressed()); } else { assertFalse(header.isCompressed()); From a4a9e1a1d3ec8159d9171f44ed224a1ab4589355 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Fri, 25 Jun 2021 00:01:51 -0600 Subject: [PATCH 09/29] Changes --- .../main/java/org/elasticsearch/transport/LZ4Inflater.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java b/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java index b780fc2a5d99e..24c220be46de2 100644 --- a/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java +++ b/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java @@ -244,15 +244,18 @@ private int decodeBlock(BytesReference reference) throws IOException { case BLOCK_TYPE_COMPRESSED: BytesRef ref = reference.iterator().next(); final byte[] compressed; + final int compressedOffset; if (ref.length >= compressedLength) { compressed = ref.bytes; + compressedOffset = ref.offset; } else { compressed = getCompressedBuffer(compressedLength); + compressedOffset = 0; try (StreamInput streamInput = reference.streamInput()) { streamInput.readBytes(compressed, 0, compressedLength); } } - decompressor.decompress(compressed, 0, uncompressed, 0, decompressedLength); + decompressor.decompress(compressed, compressedOffset, uncompressed, 0, decompressedLength); break; default: throw new IllegalStateException(String.format( From d26ec36651988eeaff34e5fbf36805641f37e509 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Fri, 25 Jun 2021 00:45:49 -0600 Subject: [PATCH 10/29] Fix --- .../java/org/elasticsearch/transport/LZ4Inflater.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git 
a/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java b/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java index 24c220be46de2..7e5c24fefc1fc 100644 --- a/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java +++ b/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java @@ -270,8 +270,8 @@ private int decodeBlock(BytesReference reference) throws IOException { } int bytesToCopy = decompressedLength; - int bytesCopied = 0; - while (bytesCopied != bytesToCopy) { + int uncompressedOffset = 0; + while (bytesToCopy > 0) { final Recycler.V page; final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; if (isNewPage) { @@ -281,9 +281,10 @@ private int decodeBlock(BytesReference reference) throws IOException { page = pages.getLast(); int toCopy = Math.min(bytesToCopy, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); - System.arraycopy(uncompressed, bytesCopied, page.v(), pageOffset, toCopy); + System.arraycopy(uncompressed, uncompressedOffset, page.v(), pageOffset, toCopy); pageOffset += toCopy; - bytesCopied += toCopy; + bytesToCopy -= toCopy; + uncompressedOffset += toCopy; } currentState = State.INIT_BLOCK; } catch (LZ4Exception e) { From 68d4a6dabeccd6750af7523bfca338a0f217d2ba Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Fri, 25 Jun 2021 00:53:13 -0600 Subject: [PATCH 11/29] Delete --- .../elasticsearch/transport/LZ4Inflater.java | 324 ----------------- .../transport/Lz4TransportDecompressor.java | 334 ++++++++++++++---- .../transport/TransportDecompressor.java | 2 +- 3 files changed, 261 insertions(+), 399 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java diff --git a/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java b/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java deleted file mode 100644 index 7e5c24fefc1fc..0000000000000 --- a/server/src/main/java/org/elasticsearch/transport/LZ4Inflater.java +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.transport; - -import net.jpountz.lz4.LZ4Exception; -import net.jpountz.lz4.LZ4Factory; -import net.jpountz.lz4.LZ4FastDecompressor; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.ReleasableBytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.recycler.Recycler; -import org.elasticsearch.common.util.PageCacheRecycler; - -import java.io.IOException; -import java.util.ArrayDeque; -import java.util.zip.Checksum; - -public class LZ4Inflater implements TransportDecompressor { - - private final ThreadLocal uncompressed = ThreadLocal.withInitial(() -> new byte[64 * 1024]); - - /** - * Magic number of LZ4 block. 
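The fix above tracks a separate source offset and decrements the remaining count, so the per-page copy size is bounded by what is actually left and a decompressed block larger than one recycler page is split across pages correctly. The same copy pattern in isolation, with plain byte arrays standing in for Recycler.V<byte[]> pages:

    import java.util.ArrayList;
    import java.util.List;

    final class PageCopy {
        static List<byte[]> copyIntoPages(byte[] src, int pageSize) {
            List<byte[]> pages = new ArrayList<>();
            int pageOffset = pageSize; // "page full" sentinel: forces a fresh page on entry
            int srcOffset = 0;
            int remaining = src.length;
            while (remaining > 0) {
                if (pageOffset == pageSize) {
                    pages.add(new byte[pageSize]); // current page exhausted, open a new one
                    pageOffset = 0;
                }
                byte[] page = pages.get(pages.size() - 1);
                int toCopy = Math.min(remaining, pageSize - pageOffset);
                System.arraycopy(src, srcOffset, page, pageOffset, toCopy);
                pageOffset += toCopy;
                srcOffset += toCopy;
                remaining -= toCopy;
            }
            return pages;
        }
    }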
- */ - static final long MAGIC_NUMBER = (long) 'L' << 56 | - (long) 'Z' << 48 | - (long) '4' << 40 | - (long) 'B' << 32 | - 'l' << 24 | - 'o' << 16 | - 'c' << 8 | - 'k'; - - static final int HEADER_LENGTH = 8 + // magic number - 1 + // token - 4 + // compressed length - 4 + // decompressed length - 4; // checksum - - - /** - * Base value for compression level. - */ - static final int COMPRESSION_LEVEL_BASE = 10; - - static final int MIN_BLOCK_SIZE = 64; - static final int MAX_BLOCK_SIZE = 1 << COMPRESSION_LEVEL_BASE + 0x0F; // 32 M - static final int DEFAULT_BLOCK_SIZE = 1 << 16; // 64 KB - - static final int BLOCK_TYPE_NON_COMPRESSED = 0x10; - static final int BLOCK_TYPE_COMPRESSED = 0x20; - - private enum State { - INIT_BLOCK, - DECOMPRESS_DATA, - FINISHED, - CORRUPTED - } - - private State currentState = State.INIT_BLOCK; - - /** - * Underlying decompressor in use. - */ - private LZ4FastDecompressor decompressor; - - /** - * Underlying checksum calculator in use. - */ - private Checksum checksum; - - /** - * Type of current block. - */ - private int blockType; - - /** - * Compressed length of current incoming block. - */ - private int compressedLength; - - /** - * Decompressed length of current incoming block. - */ - private int decompressedLength; - - /** - * Checksum value of current incoming block. - */ - private int currentChecksum; - - private final PageCacheRecycler recycler; - private final ArrayDeque> pages; - private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - private byte[] compressedBuffer = new byte[0]; - private boolean hasSkippedESHeader = false; - - public LZ4Inflater(PageCacheRecycler recycler) { - this.decompressor = LZ4Factory.fastestJavaInstance().fastDecompressor(); - this.recycler = recycler; - this.pages = new ArrayDeque<>(4); - this.checksum = null; - } - - @Override - public ReleasableBytesReference pollDecompressedPage(boolean isEOS) { - if (pages.isEmpty()) { - return null; - } else if (pages.size() == 1) { - if (isEOS) { - Recycler.V page = pages.pollFirst(); - ReleasableBytesReference reference = new ReleasableBytesReference(new BytesArray(page.v(), 0, pageOffset), page); - pageOffset = 0; - return reference; - } else { - return null; - } - } else { - Recycler.V page = pages.pollFirst(); - return new ReleasableBytesReference(new BytesArray(page.v()), page); - } - } - - @Override - public void close() { - for (Recycler.V page : pages) { - page.close(); - } - } - - @Override - public int decompress(BytesReference bytesReference) throws IOException { - int bytesConsumed = 0; - if (hasSkippedESHeader == false) { - hasSkippedESHeader = true; - int esHeaderLength = CompressionScheme.HEADER_LENGTH; - bytesReference = bytesReference.slice(esHeaderLength, bytesReference.length() - esHeaderLength); - bytesConsumed += esHeaderLength; - } - - while (true) { - int consumed = decodeBlock(bytesReference); - bytesConsumed += consumed; - int newLength = bytesReference.length() - consumed; - if (consumed > 0 && newLength > 0) { - bytesReference = bytesReference.slice(consumed, newLength); - } else { - break; - } - } - - return bytesConsumed; - } - - private int decodeBlock(BytesReference reference) throws IOException { - int bytesConsumed = 0; - try { - switch (currentState) { - case INIT_BLOCK: - if (reference.length() < HEADER_LENGTH) { - return bytesConsumed; - } - try (StreamInput in = reference.streamInput()) { - final long magic = in.readLong(); - if (magic != MAGIC_NUMBER) { - throw new IllegalStateException("unexpected block identifier"); - } - - final int 
token = in.readByte(); - final int compressionLevel = (token & 0x0F) + COMPRESSION_LEVEL_BASE; - int blockType = token & 0xF0; - - int compressedLength = Integer.reverseBytes(in.readInt()); - if (compressedLength < 0 || compressedLength > MAX_BLOCK_SIZE) { - throw new IllegalStateException(String.format( - "invalid compressedLength: %d (expected: 0-%d)", - compressedLength, MAX_BLOCK_SIZE)); - } - - int decompressedLength = Integer.reverseBytes(in.readInt()); - final int maxDecompressedLength = 1 << compressionLevel; - if (decompressedLength < 0 || decompressedLength > maxDecompressedLength) { - throw new IllegalStateException(String.format( - "invalid decompressedLength: %d (expected: 0-%d)", - decompressedLength, maxDecompressedLength)); - } - if (decompressedLength == 0 && compressedLength != 0 - || decompressedLength != 0 && compressedLength == 0 - || blockType == BLOCK_TYPE_NON_COMPRESSED && decompressedLength != compressedLength) { - throw new IllegalStateException(String.format( - "stream corrupted: compressedLength(%d) and decompressedLength(%d) mismatch", - compressedLength, decompressedLength)); - } - - int currentChecksum = Integer.reverseBytes(in.readInt()); - bytesConsumed += HEADER_LENGTH; - - if (decompressedLength == 0 && compressedLength == 0) { - if (currentChecksum != 0) { - throw new IllegalStateException("stream corrupted: checksum error"); - } - currentState = State.FINISHED; - decompressor = null; - checksum = null; - break; - } - - this.blockType = blockType; - this.compressedLength = compressedLength; - this.decompressedLength = decompressedLength; - this.currentChecksum = currentChecksum; - } - - currentState = State.DECOMPRESS_DATA; - break; - case DECOMPRESS_DATA: - if (reference.length() < compressedLength) { - break; - } - - final Checksum checksum = this.checksum; - byte[] uncompressed = this.uncompressed.get(); - if (decompressedLength > uncompressed.length) { - uncompressed = new byte[decompressedLength]; - this.uncompressed.set(uncompressed); - } - - try { - switch (blockType) { - case BLOCK_TYPE_NON_COMPRESSED: - try (StreamInput streamInput = reference.streamInput()) { - streamInput.readBytes(uncompressed, 0, decompressedLength); - } - break; - case BLOCK_TYPE_COMPRESSED: - BytesRef ref = reference.iterator().next(); - final byte[] compressed; - final int compressedOffset; - if (ref.length >= compressedLength) { - compressed = ref.bytes; - compressedOffset = ref.offset; - } else { - compressed = getCompressedBuffer(compressedLength); - compressedOffset = 0; - try (StreamInput streamInput = reference.streamInput()) { - streamInput.readBytes(compressed, 0, compressedLength); - } - } - decompressor.decompress(compressed, compressedOffset, uncompressed, 0, decompressedLength); - break; - default: - throw new IllegalStateException(String.format( - "unexpected blockType: %d (expected: %d or %d)", - blockType, BLOCK_TYPE_NON_COMPRESSED, BLOCK_TYPE_COMPRESSED)); - } - // Skip inbound bytes after we processed them. 
- bytesConsumed += compressedLength; - - if (checksum != null) { -// CompressionUtil.checkChecksum(checksum, uncompressed, currentChecksum); - } - - int bytesToCopy = decompressedLength; - int uncompressedOffset = 0; - while (bytesToCopy > 0) { - final Recycler.V page; - final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; - if (isNewPage) { - pageOffset = 0; - pages.add(recycler.bytePage(false)); - } - page = pages.getLast(); - - int toCopy = Math.min(bytesToCopy, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); - System.arraycopy(uncompressed, uncompressedOffset, page.v(), pageOffset, toCopy); - pageOffset += toCopy; - bytesToCopy -= toCopy; - uncompressedOffset += toCopy; - } - currentState = State.INIT_BLOCK; - } catch (LZ4Exception e) { - throw new IllegalStateException(e); - } - break; - case FINISHED: - break; - case CORRUPTED: - throw new IllegalStateException("LZ4 stream corrupted."); - default: - throw new IllegalStateException(); - } - } catch (IOException e) { - currentState = State.CORRUPTED; - throw e; - } - return bytesConsumed; - } - - private byte[] getCompressedBuffer(int requiredSize) { - if (compressedBuffer.length >= requiredSize) { - return compressedBuffer; - } else { - this.compressedBuffer = new byte[requiredSize]; - return compressedBuffer; - } - } - - /** - * Returns {@code true} if and only if the end of the compressed stream - * has been reached. - */ - public boolean isClosed() { - return currentState == State.FINISHED; - } -} diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 7bc860399530a..4cf9d8f911ec1 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -8,8 +8,11 @@ package org.elasticsearch.transport; -import net.jpountz.lz4.LZ4FrameInputStream; +import net.jpountz.lz4.LZ4Exception; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4FastDecompressor; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; @@ -18,72 +21,94 @@ import org.elasticsearch.common.util.PageCacheRecycler; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayDeque; +import java.util.zip.Checksum; public class Lz4TransportDecompressor implements TransportDecompressor { - private final LZ4FrameInputStream inputStream; - private final ExpandableStream expandableStream; - private final PageCacheRecycler recycler; - private final ArrayDeque> pages; - private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - private boolean hasSkippedHeader = false; + private final ThreadLocal uncompressed = ThreadLocal.withInitial(() -> new byte[64 * 1024]); - public Lz4TransportDecompressor(PageCacheRecycler recycler) throws IOException { - this.recycler = recycler; - expandableStream = new ExpandableStream(); - inputStream = new LZ4FrameInputStream(expandableStream); - pages = new ArrayDeque<>(4); + /** + * Magic number of LZ4 block. 
+ */ + static final long MAGIC_NUMBER = (long) 'L' << 56 | + (long) 'Z' << 48 | + (long) '4' << 40 | + (long) 'B' << 32 | + 'l' << 24 | + 'o' << 16 | + 'c' << 8 | + 'k'; + + static final int HEADER_LENGTH = 8 + // magic number + 1 + // token + 4 + // compressed length + 4 + // decompressed length + 4; // checksum + + + /** + * Base value for compression level. + */ + static final int COMPRESSION_LEVEL_BASE = 10; + + static final int MIN_BLOCK_SIZE = 64; + static final int MAX_BLOCK_SIZE = 1 << COMPRESSION_LEVEL_BASE + 0x0F; // 32 M + static final int DEFAULT_BLOCK_SIZE = 1 << 16; // 64 KB + + static final int BLOCK_TYPE_NON_COMPRESSED = 0x10; + static final int BLOCK_TYPE_COMPRESSED = 0x20; + + private enum State { + INIT_BLOCK, + DECOMPRESS_DATA, + FINISHED, + CORRUPTED } - @Override - public int decompress(BytesReference bytesReference) throws IOException { - int bytesConsumed = 0; - if (hasSkippedHeader == false) { - hasSkippedHeader = true; - int headerLength = CompressionScheme.HEADER_LENGTH; - bytesReference = bytesReference.slice(headerLength, bytesReference.length() - headerLength); - bytesConsumed += headerLength; - } - final StreamInput underlyingStream = bytesReference.streamInput(); - this.expandableStream.nextStream(underlyingStream); - - boolean continueDecompressing = true; - while (continueDecompressing) { - final Recycler.V page; - final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; - if (isNewPage) { - pageOffset = 0; - page = recycler.bytePage(false); - } else { - page = pages.getLast(); - } - byte[] output = page.v(); - int bytesDecompressed; - try { - bytesDecompressed = inputStream.read(output, pageOffset, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); - pageOffset += Math.max(bytesDecompressed, 0); - if (isNewPage) { - if (bytesDecompressed <= 0) { - page.close(); - pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - } else { - pages.add(page); - } - } - } catch (IOException e) { - throw new IOException("Exception while LZ4 decompressing bytes", e); - } - if (bytesDecompressed <= 0) { - continueDecompressing = false; - } - } + private State currentState = State.INIT_BLOCK; - assert underlyingStream.available() == 0; - bytesConsumed += bytesReference.length(); + /** + * Underlying decompressor in use. + */ + private LZ4FastDecompressor decompressor; - return bytesConsumed; + /** + * Underlying checksum calculator in use. + */ + private Checksum checksum; + + /** + * Type of current block. + */ + private int blockType; + + /** + * Compressed length of current incoming block. + */ + private int compressedLength; + + /** + * Decompressed length of current incoming block. + */ + private int decompressedLength; + + /** + * Checksum value of current incoming block. 
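One subtlety in the constants above: '+' binds tighter than '<<', so MAX_BLOCK_SIZE evaluates as 1 << (COMPRESSION_LEVEL_BASE + 0x0F) = 1 << 25 = 33,554,432 bytes, the 32 M the trailing comment promises. The same constants with explicit parentheses:

    final class BlockSizes {
        static final int COMPRESSION_LEVEL_BASE = 10;
        // Equivalent to the patch's "1 << COMPRESSION_LEVEL_BASE + 0x0F":
        static final int MAX_BLOCK_SIZE = 1 << (COMPRESSION_LEVEL_BASE + 0x0F); // 32 MiB
        static final int DEFAULT_BLOCK_SIZE = 1 << 16;                          // 64 KiB
        static final int MIN_BLOCK_SIZE = 64;
    }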
+ */ + private int currentChecksum; + + private final PageCacheRecycler recycler; + private final ArrayDeque> pages; + private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; + private byte[] compressedBuffer = new byte[0]; + private boolean hasSkippedESHeader = false; + + public Lz4TransportDecompressor(PageCacheRecycler recycler) { + this.decompressor = LZ4Factory.fastestJavaInstance().fastDecompressor(); + this.recycler = recycler; + this.pages = new ArrayDeque<>(4); + this.checksum = null; } @Override @@ -107,32 +132,193 @@ public ReleasableBytesReference pollDecompressedPage(boolean isEOS) { @Override public void close() { - try { - inputStream.close(); - } catch (IOException e) { - assert false : "Exception should not be thrown."; - } for (Recycler.V page : pages) { page.close(); } } - private static class ExpandableStream extends InputStream { - - private StreamInput current; + @Override + public int decompress(BytesReference bytesReference) throws IOException { + int bytesConsumed = 0; + if (hasSkippedESHeader == false) { + hasSkippedESHeader = true; + int esHeaderLength = CompressionScheme.HEADER_LENGTH; + bytesReference = bytesReference.slice(esHeaderLength, bytesReference.length() - esHeaderLength); + bytesConsumed += esHeaderLength; + } - private void nextStream(StreamInput next) { - current = next; + while (true) { + int consumed = decodeBlock(bytesReference); + bytesConsumed += consumed; + int newLength = bytesReference.length() - consumed; + if (consumed > 0 && newLength > 0) { + bytesReference = bytesReference.slice(consumed, newLength); + } else { + break; + } } - @Override - public int read() throws IOException { - return Math.max(0, current.read()); + return bytesConsumed; + } + + private int decodeBlock(BytesReference reference) throws IOException { + int bytesConsumed = 0; + try { + switch (currentState) { + case INIT_BLOCK: + if (reference.length() < HEADER_LENGTH) { + return bytesConsumed; + } + try (StreamInput in = reference.streamInput()) { + final long magic = in.readLong(); + if (magic != MAGIC_NUMBER) { + throw new IllegalStateException("unexpected block identifier"); + } + + final int token = in.readByte(); + final int compressionLevel = (token & 0x0F) + COMPRESSION_LEVEL_BASE; + int blockType = token & 0xF0; + + int compressedLength = Integer.reverseBytes(in.readInt()); + if (compressedLength < 0 || compressedLength > MAX_BLOCK_SIZE) { + throw new IllegalStateException(String.format( + "invalid compressedLength: %d (expected: 0-%d)", + compressedLength, MAX_BLOCK_SIZE)); + } + + int decompressedLength = Integer.reverseBytes(in.readInt()); + final int maxDecompressedLength = 1 << compressionLevel; + if (decompressedLength < 0 || decompressedLength > maxDecompressedLength) { + throw new IllegalStateException(String.format( + "invalid decompressedLength: %d (expected: 0-%d)", + decompressedLength, maxDecompressedLength)); + } + if (decompressedLength == 0 && compressedLength != 0 + || decompressedLength != 0 && compressedLength == 0 + || blockType == BLOCK_TYPE_NON_COMPRESSED && decompressedLength != compressedLength) { + throw new IllegalStateException(String.format( + "stream corrupted: compressedLength(%d) and decompressedLength(%d) mismatch", + compressedLength, decompressedLength)); + } + + int currentChecksum = Integer.reverseBytes(in.readInt()); + bytesConsumed += HEADER_LENGTH; + + if (decompressedLength == 0 && compressedLength == 0) { + if (currentChecksum != 0) { + throw new IllegalStateException("stream corrupted: checksum error"); + } + 
currentState = State.FINISHED; + decompressor = null; + checksum = null; + break; + } + + this.blockType = blockType; + this.compressedLength = compressedLength; + this.decompressedLength = decompressedLength; + this.currentChecksum = currentChecksum; + } + + currentState = State.DECOMPRESS_DATA; + break; + case DECOMPRESS_DATA: + if (reference.length() < compressedLength) { + break; + } + + final Checksum checksum = this.checksum; + byte[] uncompressed = this.uncompressed.get(); + if (decompressedLength > uncompressed.length) { + uncompressed = new byte[decompressedLength]; + this.uncompressed.set(uncompressed); + } + + try { + switch (blockType) { + case BLOCK_TYPE_NON_COMPRESSED: + try (StreamInput streamInput = reference.streamInput()) { + streamInput.readBytes(uncompressed, 0, decompressedLength); + } + break; + case BLOCK_TYPE_COMPRESSED: + BytesRef ref = reference.iterator().next(); + final byte[] compressed; + final int compressedOffset; + if (ref.length >= compressedLength) { + compressed = ref.bytes; + compressedOffset = ref.offset; + } else { + compressed = getCompressedBuffer(compressedLength); + compressedOffset = 0; + try (StreamInput streamInput = reference.streamInput()) { + streamInput.readBytes(compressed, 0, compressedLength); + } + } + decompressor.decompress(compressed, compressedOffset, uncompressed, 0, decompressedLength); + break; + default: + throw new IllegalStateException(String.format( + "unexpected blockType: %d (expected: %d or %d)", + blockType, BLOCK_TYPE_NON_COMPRESSED, BLOCK_TYPE_COMPRESSED)); + } + // Skip inbound bytes after we processed them. + bytesConsumed += compressedLength; + + if (checksum != null) { +// CompressionUtil.checkChecksum(checksum, uncompressed, currentChecksum); + } + + int bytesToCopy = decompressedLength; + int uncompressedOffset = 0; + while (bytesToCopy > 0) { + final Recycler.V page; + final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; + if (isNewPage) { + pageOffset = 0; + pages.add(recycler.bytePage(false)); + } + page = pages.getLast(); + + int toCopy = Math.min(bytesToCopy, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); + System.arraycopy(uncompressed, uncompressedOffset, page.v(), pageOffset, toCopy); + pageOffset += toCopy; + bytesToCopy -= toCopy; + uncompressedOffset += toCopy; + } + currentState = State.INIT_BLOCK; + } catch (LZ4Exception e) { + throw new IllegalStateException(e); + } + break; + case FINISHED: + break; + case CORRUPTED: + throw new IllegalStateException("LZ4 stream corrupted."); + default: + throw new IllegalStateException(); + } + } catch (IOException e) { + currentState = State.CORRUPTED; + throw e; } + return bytesConsumed; + } - @Override - public int read(byte[] b, int off, int len) throws IOException { - return Math.max(0, current.read(b, off, len)); + private byte[] getCompressedBuffer(int requiredSize) { + if (compressedBuffer.length >= requiredSize) { + return compressedBuffer; + } else { + this.compressedBuffer = new byte[requiredSize]; + return compressedBuffer; } } + + /** + * Returns {@code true} if and only if the end of the compressed stream + * has been reached. 
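In the compressed-block branch above, the decompressor borrows the backing array of the first BytesRef whenever that slice already spans the whole block, reading from ref.offset rather than 0, and only falls back to gathering the block into a grow-only scratch buffer. The shape of that fast-path/slow-path split, sketched with ByteBuffer standing in for BytesReference (scratch is assumed to hold at least compressedLength bytes):

    import net.jpountz.lz4.LZ4Factory;
    import net.jpountz.lz4.LZ4FastDecompressor;

    import java.nio.ByteBuffer;

    final class BlockDecompress {
        static byte[] decompress(ByteBuffer chunk, int compressedLength, int decompressedLength, byte[] scratch) {
            LZ4FastDecompressor d = LZ4Factory.fastestJavaInstance().fastDecompressor();
            final byte[] compressed;
            final int offset;
            if (chunk.hasArray() && chunk.remaining() >= compressedLength) {
                // Fast path: decompress straight out of the backing array, honoring its offset.
                compressed = chunk.array();
                offset = chunk.arrayOffset() + chunk.position();
            } else {
                // Slow path: gather the block into the reusable scratch buffer first.
                chunk.get(scratch, 0, compressedLength);
                compressed = scratch;
                offset = 0;
            }
            byte[] uncompressed = new byte[decompressedLength];
            d.decompress(compressed, offset, uncompressed, 0, decompressedLength);
            return uncompressed;
        }
    }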
+ */ + public boolean isClosed() { + return currentState == State.FINISHED; + } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java index 58fccc5dc60c9..f208c02eedc20 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java @@ -48,7 +48,7 @@ static TransportDecompressor getDecompressor(PageCacheRecycler recycler, BytesRe if (header == CompressionScheme.DEFLATE_HEADER) { return new DeflateTransportDecompressor(recycler); } else { - return new LZ4Inflater(recycler); + return new Lz4TransportDecompressor(recycler); } } From 9ab0faa0948d581eb532379373e250d06f1a4d65 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Sun, 27 Jun 2021 21:27:49 -0600 Subject: [PATCH 12/29] Finish --- .../java/org/elasticsearch/transport/InboundDecoder.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java index a074264737883..702dd532c0676 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java @@ -85,14 +85,16 @@ public int internalDecode(ReleasableBytesReference reference, Consumer f this.decompressor = decompressor; } } - int maxBytesToConsume = Math.min(reference.length(), totalNetworkSize - bytesConsumed); - int bytesConsumedThisDecode = 0; + int remainingToConsume = totalNetworkSize - bytesConsumed; + int maxBytesToConsume = Math.min(reference.length(), remainingToConsume); ReleasableBytesReference retainedContent; - if (isDone()) { + if (maxBytesToConsume == remainingToConsume) { retainedContent = reference.retainedSlice(0, maxBytesToConsume); } else { retainedContent = reference.retain(); } + + int bytesConsumedThisDecode = 0; if (decompressor != null) { bytesConsumedThisDecode += decompress(retainedContent); bytesConsumed += bytesConsumedThisDecode; From 7a91488333395c38c57f48cf3b6bc3860021b6fe Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Sun, 27 Jun 2021 21:44:18 -0600 Subject: [PATCH 13/29] Changes --- .../transport/Lz4TransportDecompressor.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 4cf9d8f911ec1..83b3e91430ed8 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.ArrayDeque; +import java.util.Locale; import java.util.zip.Checksum; public class Lz4TransportDecompressor implements TransportDecompressor { @@ -181,7 +182,7 @@ private int decodeBlock(BytesReference reference) throws IOException { int compressedLength = Integer.reverseBytes(in.readInt()); if (compressedLength < 0 || compressedLength > MAX_BLOCK_SIZE) { - throw new IllegalStateException(String.format( + throw new IllegalStateException(String.format(Locale.ROOT, "invalid compressedLength: %d (expected: 0-%d)", compressedLength, MAX_BLOCK_SIZE)); } @@ -189,14 +190,14 @@ private int decodeBlock(BytesReference reference) throws IOException { int decompressedLength = 
Integer.reverseBytes(in.readInt()); final int maxDecompressedLength = 1 << compressionLevel; if (decompressedLength < 0 || decompressedLength > maxDecompressedLength) { - throw new IllegalStateException(String.format( + throw new IllegalStateException(String.format(Locale.ROOT, "invalid decompressedLength: %d (expected: 0-%d)", decompressedLength, maxDecompressedLength)); } if (decompressedLength == 0 && compressedLength != 0 || decompressedLength != 0 && compressedLength == 0 || blockType == BLOCK_TYPE_NON_COMPRESSED && decompressedLength != compressedLength) { - throw new IllegalStateException(String.format( + throw new IllegalStateException(String.format(Locale.ROOT, "stream corrupted: compressedLength(%d) and decompressedLength(%d) mismatch", compressedLength, decompressedLength)); } @@ -258,7 +259,7 @@ private int decodeBlock(BytesReference reference) throws IOException { decompressor.decompress(compressed, compressedOffset, uncompressed, 0, decompressedLength); break; default: - throw new IllegalStateException(String.format( + throw new IllegalStateException(String.format(Locale.ROOT, "unexpected blockType: %d (expected: %d or %d)", blockType, BLOCK_TYPE_NON_COMPRESSED, BLOCK_TYPE_COMPRESSED)); } From dcf3e3cc18e2f41ce47528157a94fa31339c63b7 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Sun, 27 Jun 2021 22:08:58 -0600 Subject: [PATCH 14/29] Fix --- .../java/org/elasticsearch/transport/OutboundHandler.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java index 3d811ed7ca237..64a5d03b8c595 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -72,9 +72,9 @@ void sendRequest(final DiscoveryNode node, final TcpChannel channel, final long Version version = Version.min(this.version, channelVersion); final CompressionScheme compressionScheme; if (compressRequest) { - compressionScheme = null; - } else { compressionScheme = configuredCompressionScheme; + } else { + compressionScheme = null; } OutboundMessage.Request message = new OutboundMessage.Request(threadPool.getThreadContext(), request, version, action, requestId, isHandshake, compressionScheme); @@ -103,9 +103,9 @@ void sendResponse(final Version nodeVersion, final TcpChannel channel, final lon Version version = Version.min(this.version, nodeVersion); final CompressionScheme compressionScheme; if (compressResponse) { - compressionScheme = null; - } else { compressionScheme = configuredCompressionScheme; + } else { + compressionScheme = null; } OutboundMessage.Response message = new OutboundMessage.Response(threadPool.getThreadContext(), response, version, requestId, isHandshake, compressionScheme); From 4371da618b3d15bd91786c323aa43a89e99a2400 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Sun, 27 Jun 2021 22:27:44 -0600 Subject: [PATCH 15/29] Deps --- server/licenses/lz4-java-1.8.0.jar.sha1 | 1 + server/licenses/lz4-java-LICENSE.txt | 202 ++++++++++++++++++++++++ server/licenses/lz4-java-NOTICE.txt | 0 3 files changed, 203 insertions(+) create mode 100644 server/licenses/lz4-java-1.8.0.jar.sha1 create mode 100644 server/licenses/lz4-java-LICENSE.txt create mode 100644 server/licenses/lz4-java-NOTICE.txt diff --git a/server/licenses/lz4-java-1.8.0.jar.sha1 b/server/licenses/lz4-java-1.8.0.jar.sha1 new file mode 100644 index 0000000000000..5e3536d1b7d29 --- 
/dev/null +++ b/server/licenses/lz4-java-1.8.0.jar.sha1 @@ -0,0 +1 @@ +4b986a99445e49ea5fbf5d149c4b63f6ed6c6780 \ No newline at end of file diff --git a/server/licenses/lz4-java-LICENSE.txt b/server/licenses/lz4-java-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/server/licenses/lz4-java-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/server/licenses/lz4-java-NOTICE.txt b/server/licenses/lz4-java-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d

From cc7588ff0a2ecf301247a10e33b482c6029a9c96 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Sun, 27 Jun 2021 22:31:04 -0600
Subject: [PATCH 16/29] License

---
 .../transport/Lz4TransportDecompressor.java | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
index 83b3e91430ed8..62822a910238d 100644
--- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
+++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
@@ -5,6 +5,21 @@
  * in compliance with, at your election, the Elastic License 2.0 or the Server
  * Side Public License, v 1.
  */
+/*
+ * Copyright 2014 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License, version
+ * 2.0 (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at:
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
 
 package org.elasticsearch.transport;
 
@@ -25,6 +40,14 @@
 import java.util.Locale;
 import java.util.zip.Checksum;
 
+/**
+ * This file is forked from the https://netty.io project. In particular it forks the following file
+ * io.netty.handler.codec.compression.Lz4FrameDecoder.
+ *
+ * It modifies the original netty code to operate on byte arrays as opposed to ByteBufs.
+ * Additionally, it integrates the decompression code to work in the Elasticsearch transport
+ * pipeline. Finally, it replaces the custom Netty decoder exceptions.
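+ *
+ * Each block handled below carries a block type, little-endian compressed and decompressed lengths
+ * (read via Integer.reverseBytes), and a checksum value for the decompressed bytes.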
+ */
 public class Lz4TransportDecompressor implements TransportDecompressor {
 
     private final ThreadLocal<byte[]> uncompressed = ThreadLocal.withInitial(() -> new byte[64 * 1024]);

From 56e6359790a345b5fb9fdb20fee2f0283022ac31 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Sun, 27 Jun 2021 22:49:17 -0600
Subject: [PATCH 17/29] Fix violation

---
 server/build.gradle | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/server/build.gradle b/server/build.gradle
index eddd17202f263..748c0c7706018 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -269,6 +269,11 @@ tasks.named("thirdPartyAudit").configure {
     'com.google.common.geometry.S2LatLng'
   )
   ignoreMissingClasses 'javax.xml.bind.DatatypeConverter'
+
+  ignoreViolations(
+    // from lz4-java
+    'net.jpountz.util.UnsafeUtils'
+  )
 }
 
 tasks.named("dependencyLicenses").configure {

From 9e1fca9e11e016304ee5df6690f1d1a4ad83dc05 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Sun, 27 Jun 2021 23:08:34 -0600
Subject: [PATCH 18/29] Fix

---
 .../transport/Lz4TransportDecompressor.java           | 9 ++++++++-
 .../elasticsearch/transport/TransportLoggerTests.java | 2 +-
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
index 62822a910238d..c4e24d0a5152a 100644
--- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
+++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
@@ -290,7 +290,14 @@ private int decodeBlock(BytesReference reference) throws IOException {
             bytesConsumed += compressedLength;
 
             if (checksum != null) {
-//                CompressionUtil.checkChecksum(checksum, uncompressed, currentChecksum);
+                checksum.reset();
+                checksum.update(uncompressed, 0, decompressedLength);
+                final int checksumResult = (int) checksum.getValue();
+                if (checksumResult != currentChecksum) {
+                    throw new IllegalStateException(String.format(Locale.ROOT,
+                        "stream corrupted: mismatching checksum: %d (expected: %d)",
+                        checksumResult, currentChecksum));
+                }
             }
 
             int bytesToCopy = decompressedLength;
diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java
index 8a90384e7d2a4..7f0f4576dc5a8 100644
--- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java
@@ -78,7 +78,7 @@ public void testLoggingHandler() throws IOException {
     }
 
     private BytesReference buildRequest() throws IOException {
-        CompressionScheme compress = randomFrom(null, CompressionScheme.DEFLATE, CompressionScheme.LZ4);
+        CompressionScheme compress = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4, null);
         try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
             OutboundMessage.Request request = new OutboundMessage.Request(new ThreadContext(Settings.EMPTY),
                 new ClusterStatsRequest(), Version.CURRENT, ClusterStatsAction.NAME, randomInt(30), false, compress);

From 76158a0ef6288a2629632333737cee05c99c41be Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Sun, 27 Jun 2021 23:30:00 -0600
Subject: [PATCH 19/29] Fix test

---
 .../transport/OutboundHandlerTests.java | 20 ++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java
b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 8e2bed35cbf86..f6b8a675448b6 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -58,6 +58,7 @@ public class OutboundHandlerTests extends ESTestCase { private OutboundHandler handler; private FakeTcpChannel channel; private DiscoveryNode node; + private CompressionScheme compressionScheme; @Before public void setUp() throws Exception { @@ -66,8 +67,9 @@ public void setUp() throws Exception { TransportAddress transportAddress = buildNewFakeTransportAddress(); node = new DiscoveryNode("", transportAddress, Version.CURRENT); StatsTracker statsTracker = new StatsTracker(); + compressionScheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4)); + compressionScheme); final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime()); final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE); @@ -120,7 +122,13 @@ public void testSendRequest() throws IOException { String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - boolean compress = randomBoolean(); + boolean compress; + // TODO: Change after backport + if (compressionScheme == CompressionScheme.LZ4 && version.before(Version.V_8_0_0)) { + compress = false; + } else { + compress = randomBoolean(); + } String value = "message"; threadContext.putHeader("header", "header_value"); TestRequest request = new TestRequest(value); @@ -183,7 +191,13 @@ public void testSendResponse() throws IOException { String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - boolean compress = randomBoolean(); + boolean compress; + // TODO: Change after backport + if (compressionScheme == CompressionScheme.LZ4 && version.before(Version.V_8_0_0)) { + compress = false; + } else { + compress = randomBoolean(); + } String value = "message"; threadContext.putHeader("header", "header_value"); TestResponse response = new TestResponse(value); From 23b3f88237a2c0144a24fdd1ede92955a198e71c Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 11:39:30 -0600 Subject: [PATCH 20/29] Changes --- .../common/settings/ClusterSettings.java | 4 +-- .../transport/CompressionScheme.java | 8 ++++- .../transport/ConnectionProfile.java | 36 +++++++++---------- .../transport/Lz4TransportDecompressor.java | 2 +- .../transport/NetworkMessage.java | 12 +++++-- .../transport/OutboundMessage.java | 34 +++++++----------- .../transport/RemoteClusterService.java | 6 ++-- .../transport/RemoteConnectionStrategy.java | 4 +-- .../elasticsearch/transport/TcpTransport.java | 2 +- .../transport/TransportSettings.java | 4 +-- .../transport/TransportStatus.java | 5 +++ .../transport/ConnectionProfileTests.java | 15 ++++---- .../transport/InboundPipelineTests.java | 3 +- .../transport/OutboundHandlerTests.java | 6 ++-- .../transport/RemoteClusterServiceTests.java | 12 +++---- .../RemoteConnectionStrategyTests.java | 8 +++-- .../test/InternalTestCluster.java | 2 +- .../elasticsearch/transport/TestProfiles.java | 2 +- .../transport/nio/MockNioTransport.java | 2 +- 19 files changed, 89 insertions(+), 78 deletions(-) diff --git 
a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index c2b7ebce86847..b184bcf2cb3d3 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -296,7 +296,7 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterService.REMOTE_NODE_ATTRIBUTE, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, RemoteClusterService.REMOTE_CLUSTER_COMPRESS, - RemoteClusterService.REMOTE_CLUSTER_COMPRESS_RAW_DATA, + RemoteClusterService.REMOTE_CLUSTER_COMPRESS_INDEXING_DATA, RemoteConnectionStrategy.REMOTE_CONNECTION_MODE, ProxyConnectionStrategy.PROXY_ADDRESS, ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS, @@ -321,7 +321,7 @@ public void apply(Settings value, Settings current, Settings previous) { TransportSettings.PUBLISH_PORT, TransportSettings.PUBLISH_PORT_PROFILE, TransportSettings.TRANSPORT_COMPRESS, - TransportSettings.TRANSPORT_COMPRESS_RAW_DATA, + TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA, TransportSettings.TRANSPORT_COMPRESSION_SCHEME, TransportSettings.PING_SCHEDULE, TransportSettings.CONNECT_TIMEOUT, diff --git a/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java b/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java index 6bdb8524a4f71..d45f9be906258 100644 --- a/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java +++ b/server/src/main/java/org/elasticsearch/transport/CompressionScheme.java @@ -10,6 +10,9 @@ import net.jpountz.lz4.LZ4BlockOutputStream; +import net.jpountz.lz4.LZ4Factory; + +import org.elasticsearch.Version; import org.elasticsearch.common.compress.DeflateCompressor; import java.io.IOException; @@ -19,12 +22,15 @@ public enum CompressionScheme { LZ4, DEFLATE; + // TODO: Change after backport + static Version LZ4_VERSION = Version.V_8_0_0; static byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; static byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; static int HEADER_LENGTH = 4; public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { outputStream.write(LZ4_HEADER); - return new LZ4BlockOutputStream(outputStream, 64 * 1024); + // 16KB block size to minimize the allocation of large buffers + return new LZ4BlockOutputStream(outputStream, 16 * 1024, LZ4Factory.safeInstance().fastCompressor()); } } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index df5bfd0a3e5bc..1883ba1c3cd1e 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -36,7 +36,7 @@ public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionPro return fallbackProfile; } else if (profile.getConnectTimeout() != null && profile.getHandshakeTimeout() != null && profile.getPingInterval() != null && profile.getCompressionEnabled() != null - && profile.getRawDataCompressionEnabled() != null) { + && profile.getIndexingDataCompressionEnabled() != null) { return profile; } else { ConnectionProfile.Builder builder = new ConnectionProfile.Builder(profile); @@ -52,8 +52,8 @@ public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionPro if (profile.getCompressionEnabled() == null) { 
builder.setCompressionEnabled(fallbackProfile.getCompressionEnabled()); } - if (profile.getRawDataCompressionEnabled() == null) { - builder.setRawDataCompressionEnabled(fallbackProfile.getRawDataCompressionEnabled()); + if (profile.getIndexingDataCompressionEnabled() == null) { + builder.setIndexingDataCompressionEnabled(fallbackProfile.getIndexingDataCompressionEnabled()); } return builder.build(); } @@ -76,7 +76,7 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) builder.setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)); builder.setPingInterval(TransportSettings.PING_SCHEDULE.get(settings)); builder.setCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS.get(settings)); - builder.setRawDataCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS_RAW_DATA.get(settings)); + builder.setIndexingDataCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA.get(settings)); builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); // if we are not master eligible we don't need a dedicated channel to publish the state @@ -114,7 +114,7 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption builder.setCompressionEnabled(compressionEnabled); } if (rawDataCompressionEnabled != null) { - builder.setRawDataCompressionEnabled(rawDataCompressionEnabled); + builder.setIndexingDataCompressionEnabled(rawDataCompressionEnabled); } return builder.build(); } @@ -125,18 +125,18 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption private final TimeValue handshakeTimeout; private final TimeValue pingInterval; private final Boolean compressionEnabled; - private final Boolean rawDataCompressionEnabled; + private final Boolean indexingDataCompressionEnabled; private ConnectionProfile(List handles, int numConnections, TimeValue connectTimeout, TimeValue handshakeTimeout, TimeValue pingInterval, Boolean compressionEnabled, - Boolean rawDataCompressionEnabled) { + Boolean indexingDataCompressionEnabled) { this.handles = handles; this.numConnections = numConnections; this.connectTimeout = connectTimeout; this.handshakeTimeout = handshakeTimeout; this.pingInterval = pingInterval; this.compressionEnabled = compressionEnabled; - this.rawDataCompressionEnabled = rawDataCompressionEnabled; + this.indexingDataCompressionEnabled = indexingDataCompressionEnabled; } /** @@ -149,7 +149,7 @@ public static class Builder { private TimeValue connectTimeout; private TimeValue handshakeTimeout; private Boolean compressionEnabled; - private Boolean rawDataCompressionEnabled; + private Boolean indexingDataCompressionEnabled; private TimeValue pingInterval; /** create an empty builder */ @@ -164,7 +164,7 @@ public Builder(ConnectionProfile source) { connectTimeout = source.getConnectTimeout(); handshakeTimeout = source.getHandshakeTimeout(); compressionEnabled = source.getCompressionEnabled(); - rawDataCompressionEnabled = source.getRawDataCompressionEnabled(); + indexingDataCompressionEnabled = source.getIndexingDataCompressionEnabled(); pingInterval = source.getPingInterval(); } /** @@ -206,10 +206,10 @@ public Builder setCompressionEnabled(boolean compressionEnabled) { } /** - * Sets raw data compression enabled for this connection profile + * Sets indexing data compression enabled for this connection profile */ - public Builder setRawDataCompressionEnabled(boolean rawDataCompressionEnabled) { - 
this.rawDataCompressionEnabled = rawDataCompressionEnabled;
+    public Builder setIndexingDataCompressionEnabled(boolean indexingDataCompressionEnabled) {
+        this.indexingDataCompressionEnabled = indexingDataCompressionEnabled;
         return this;
     }
 
@@ -244,7 +244,7 @@ public ConnectionProfile build() {
             throw new IllegalStateException("not all types are added for this connection profile - missing types: " + types);
         }
         return new ConnectionProfile(Collections.unmodifiableList(handles), numConnections, connectTimeout, handshakeTimeout,
-            pingInterval, compressionEnabled, rawDataCompressionEnabled);
+            pingInterval, compressionEnabled, indexingDataCompressionEnabled);
     }
 }

@@ -279,11 +279,11 @@ public Boolean getCompressionEnabled() {
     }
 
     /**
-     * Returns boolean indicating if raw data compression is enabled or null if no explicit raw data compression
-     * is set on this profile.
+     * Returns boolean indicating if indexing data compression is enabled or null if no explicit
+     * indexing data compression is set on this profile.
      */
-    public Boolean getRawDataCompressionEnabled() {
-        return rawDataCompressionEnabled;
+    public Boolean getIndexingDataCompressionEnabled() {
+        return indexingDataCompressionEnabled;
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
index c4e24d0a5152a..be1dedfd54832 100644
--- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
+++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java
@@ -129,7 +129,7 @@ private enum State {
     private boolean hasSkippedESHeader = false;
 
     public Lz4TransportDecompressor(PageCacheRecycler recycler) {
-        this.decompressor = LZ4Factory.fastestJavaInstance().fastDecompressor();
+        this.decompressor = LZ4Factory.safeInstance().fastDecompressor();
         this.recycler = recycler;
         this.pages = new ArrayDeque<>(4);
         this.checksum = null;
diff --git a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java
index dc3393eb57b6e..bc9a8c380aafa 100644
--- a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java
+++ b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java
@@ -27,8 +27,12 @@ public abstract class NetworkMessage {
         this.threadContext = threadContext.captureAsWriteable();
         this.version = version;
         this.requestId = requestId;
-        this.status = status;
-        this.compressionScheme = compressionScheme;
+        this.compressionScheme = adjustedScheme(version, compressionScheme);
+        if (this.compressionScheme != null) {
+            this.status = TransportStatus.setCompress(status);
+        } else {
+            this.status = status;
+        }
     }
 
     public Version getVersion() {
@@ -58,4 +62,8 @@ boolean isHandshake() {
     boolean isError() {
         return TransportStatus.isError(status);
     }
+
+    private static CompressionScheme adjustedScheme(Version version, CompressionScheme compressionScheme) {
+        return compressionScheme == CompressionScheme.LZ4 && version.before(CompressionScheme.LZ4_VERSION) ? null : compressionScheme;
+    }
 }
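To make the version fallback concrete, an illustrative sketch (not part of the patch) of how adjustedScheme is expected to behave; the concrete pre-LZ4 version constant below is an assumption for the example:

    // Hypothetical test-style sketch, written as if it lived inside NetworkMessage where
    // adjustedScheme is visible; Version.V_7_13_0 is assumed to predate CompressionScheme.LZ4_VERSION.
    static void illustrateAdjustedScheme() {
        Version preLz4Node = Version.V_7_13_0;
        assert adjustedScheme(preLz4Node, CompressionScheme.LZ4) == null;                          // downgraded to uncompressed
        assert adjustedScheme(Version.V_8_0_0, CompressionScheme.LZ4) == CompressionScheme.LZ4;    // peer understands LZ4
        assert adjustedScheme(preLz4Node, CompressionScheme.DEFLATE) == CompressionScheme.DEFLATE; // deflate is never downgraded
    }

With the constructor setting the compress status bit only when the adjusted scheme is non-null, a message to an older node is sent uncompressed instead of advertising a scheme the peer cannot decode.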
diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java
index fdb97a63f9dee..0e00e700414c2 100644
--- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java
+++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java
@@ -107,8 +107,7 @@ static class Request extends OutboundMessage {
 
         Request(ThreadContext threadContext, Writeable message, Version version, String action, long requestId,
                 boolean isHandshake, CompressionScheme compressionScheme) {
-            super(threadContext, version, setStatus(adjustedScheme(version, compressionScheme), isHandshake, message), requestId,
-                adjustedScheme(version, compressionScheme), message);
+            super(threadContext, version, setStatus(isHandshake), requestId, adjustCompressionScheme(compressionScheme, message), message);
             this.action = action;
         }
 
@@ -122,12 +121,18 @@ protected void writeVariableHeader(StreamOutput stream) throws IOException {
             stream.writeString(action);
         }
 
-        private static byte setStatus(CompressionScheme compressionScheme, boolean isHandshake, Writeable message) {
+        // Do not compress instances of BytesTransportRequest
+        private static CompressionScheme adjustCompressionScheme(CompressionScheme compressionScheme, Writeable message) {
+            if (message instanceof BytesTransportRequest) {
+                return null;
+            } else {
+                return compressionScheme;
+            }
+        }
+
+        private static byte setStatus(boolean isHandshake) {
             byte status = 0;
             status = TransportStatus.setRequest(status);
-            if (compressionScheme != null && OutboundMessage.canCompress(message)) {
-                status = TransportStatus.setCompress(status);
-            }
             if (isHandshake) {
                 status = TransportStatus.setHandshake(status);
             }
@@ -146,19 +151,15 @@ static class Response extends OutboundMessage {
 
         Response(ThreadContext threadContext, Writeable message, Version version, long requestId,
                  boolean isHandshake, CompressionScheme compressionScheme) {
-            super(threadContext, version, setStatus(adjustedScheme(version, compressionScheme), isHandshake, message), requestId,
-                adjustedScheme(version, compressionScheme), message);
+            super(threadContext, version, setStatus(isHandshake, message), requestId, compressionScheme, message);
         }
 
-        private static byte setStatus(CompressionScheme compressionScheme, boolean isHandshake, Writeable message) {
+        private static byte setStatus(boolean isHandshake, Writeable message) {
             byte status = 0;
             status = TransportStatus.setResponse(status);
             if (message instanceof RemoteTransportException) {
                 status = TransportStatus.setError(status);
             }
-            if (compressionScheme != null) {
-                status = TransportStatus.setCompress(status);
-            }
             if (isHandshake) {
                 status = TransportStatus.setHandshake(status);
             }
@@ -172,13 +173,4 @@ public String toString() {
                 + message.getClass() + "}";
         }
     }
-
-    private static CompressionScheme adjustedScheme(Version version, CompressionScheme compressionScheme) {
-        // TODO: Change after backport
-        return compressionScheme == CompressionScheme.LZ4 && version.before(Version.V_8_0_0) ?
null : compressionScheme; - } - - private static boolean canCompress(Writeable message) { - return message instanceof BytesTransportRequest == false; - } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 3866f8ee4c440..46424304bc480 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -95,10 +95,10 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl (ns, key) -> boolSetting(key, TransportSettings.TRANSPORT_COMPRESS, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope)); - public static final Setting.AffixSetting REMOTE_CLUSTER_COMPRESS_RAW_DATA = Setting.affixKeySetting( + public static final Setting.AffixSetting REMOTE_CLUSTER_COMPRESS_INDEXING_DATA = Setting.affixKeySetting( "cluster.remote.", - "transport.compress_raw_data", - (ns, key) -> boolSetting(key, TransportSettings.TRANSPORT_COMPRESS_RAW_DATA, + "transport.compress_indexing_data", + (ns, key) -> boolSetting(key, TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope)); private final boolean enabled; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java index 8f7a1530a393b..257f47426baa0 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java @@ -125,7 +125,7 @@ static ConnectionProfile buildConnectionProfile(String clusterAlias, Settings se .setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) .setCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings)) - .setRawDataCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS_RAW_DATA + .setIndexingDataCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS_INDEXING_DATA .getConcreteSettingForNamespace(clusterAlias).get(settings)) .setPingInterval(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterAlias).get(settings)) .addConnections(0, TransportRequestOptions.Type.BULK, TransportRequestOptions.Type.STATE, @@ -357,7 +357,7 @@ private List> getAndClearListeners() { private boolean connectionProfileChanged(ConnectionProfile oldProfile, ConnectionProfile newProfile) { return Objects.equals(oldProfile.getCompressionEnabled(), newProfile.getCompressionEnabled()) == false || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false - || Objects.equals(oldProfile.getRawDataCompressionEnabled(), newProfile.getRawDataCompressionEnabled()) == false; + || Objects.equals(oldProfile.getIndexingDataCompressionEnabled(), newProfile.getIndexingDataCompressionEnabled()) == false; } static class StrategyValidator implements Setting.Validator { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index ef2378ba67894..ed85a996e14ce 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ 
b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -201,7 +201,7 @@ public final class NodeChannels extends CloseableConnection {
             }
             version = handshakeVersion;
             compress = connectionProfile.getCompressionEnabled();
-            rawDataCompress = connectionProfile.getRawDataCompressionEnabled();
+            rawDataCompress = connectionProfile.getIndexingDataCompressionEnabled();
         }
 
         @Override
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
index 6c48d24c7602e..413f1218263e4 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
@@ -52,8 +52,8 @@ public final class TransportSettings {
         key -> intSetting(key, -1, -1, Setting.Property.NodeScope));
     public static final Setting<Boolean> TRANSPORT_COMPRESS =
         boolSetting("transport.compress", false, Setting.Property.NodeScope);
-    public static final Setting<Boolean> TRANSPORT_COMPRESS_RAW_DATA =
-        boolSetting("transport.compress_raw_data", false, Setting.Property.NodeScope);
+    public static final Setting<Boolean> TRANSPORT_COMPRESS_INDEXING_DATA =
+        boolSetting("transport.compress_indexing_data", false, Setting.Property.NodeScope);
     public static final Setting<CompressionScheme> TRANSPORT_COMPRESSION_SCHEME =
         enumSetting(CompressionScheme.class, "transport.compression_scheme", CompressionScheme.DEFLATE,
             Setting.Property.NodeScope);
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStatus.java b/server/src/main/java/org/elasticsearch/transport/TransportStatus.java
index 386921f199cf0..28d3397d55e07 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportStatus.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportStatus.java
@@ -47,6 +47,11 @@ public static byte setCompress(byte value) {
         return value;
     }
 
+    public static byte unSetCompress(byte value) {
+        value &= ~STATUS_COMPRESS;
+        return value;
+    }
+
     static boolean isHandshake(byte value) { // pkg private since it's only used internally
         return (value & STATUS_HANDSHAKE) != 0;
     }
diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java
index df70b54262899..6b1de351d6496 100644
--- a/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java
@@ -47,7 +47,7 @@ public void testBuildConnectionProfile() {
         }
         final boolean setRawDataCompress = randomBoolean();
         if (setRawDataCompress) {
-            builder.setRawDataCompressionEnabled(rawDataCompressionEnabled);
+            builder.setIndexingDataCompressionEnabled(rawDataCompressionEnabled);
         }
         final boolean setPingInterval = randomBoolean();
         if (setPingInterval) {
@@ -87,9 +87,9 @@ public void testBuildConnectionProfile() {
         }
 
         if (setRawDataCompress) {
-            assertEquals(rawDataCompressionEnabled, build.getRawDataCompressionEnabled());
+            assertEquals(rawDataCompressionEnabled, build.getIndexingDataCompressionEnabled());
         } else {
-            assertNull(build.getRawDataCompressionEnabled());
+            assertNull(build.getIndexingDataCompressionEnabled());
         }
 
         if (setPingInterval) {
@@ -186,7 +186,7 @@ public void testConnectionProfileResolve() {
         }
         final boolean connectionRawDataCompressSet = randomBoolean();
         if (connectionRawDataCompressSet) {
-            builder.setRawDataCompressionEnabled(randomBoolean());
+            builder.setIndexingDataCompressionEnabled(randomBoolean());
        }
        final
ConnectionProfile profile = builder.build(); @@ -203,8 +203,9 @@ public void testConnectionProfileResolve() { equalTo(pingIntervalSet ? profile.getPingInterval() : defaultProfile.getPingInterval())); assertThat(resolved.getCompressionEnabled(), equalTo(connectionCompressSet ? profile.getCompressionEnabled() : defaultProfile.getCompressionEnabled())); - assertThat(resolved.getRawDataCompressionEnabled(), - equalTo(connectionRawDataCompressSet ? profile.getRawDataCompressionEnabled() : defaultProfile.getRawDataCompressionEnabled())); + assertThat(resolved.getIndexingDataCompressionEnabled(), + equalTo(connectionRawDataCompressSet ? profile.getIndexingDataCompressionEnabled() : + defaultProfile.getIndexingDataCompressionEnabled())); } public void testDefaultConnectionProfile() { @@ -218,7 +219,7 @@ public void testDefaultConnectionProfile() { assertEquals(TransportSettings.CONNECT_TIMEOUT.get(Settings.EMPTY), profile.getConnectTimeout()); assertEquals(TransportSettings.CONNECT_TIMEOUT.get(Settings.EMPTY), profile.getHandshakeTimeout()); assertEquals(TransportSettings.TRANSPORT_COMPRESS.get(Settings.EMPTY), profile.getCompressionEnabled()); - assertEquals(TransportSettings.TRANSPORT_COMPRESS_RAW_DATA.get(Settings.EMPTY), profile.getRawDataCompressionEnabled()); + assertEquals(TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA.get(Settings.EMPTY), profile.getIndexingDataCompressionEnabled()); assertEquals(TransportSettings.PING_SCHEDULE.get(Settings.EMPTY), profile.getPingInterval()); profile = ConnectionProfile.buildDefaultConnectionProfile(nonMasterNode()); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index e9ffa12956828..6f32474628458 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -100,8 +100,7 @@ public void testPipelineHandling() throws IOException { if (randomBoolean()) { scheme = null; } else { - // TODO: Change after backport - if (version.onOrAfter(Version.V_8_0_0)) { + if (version.onOrAfter(CompressionScheme.LZ4_VERSION)) { scheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); } else { scheme = CompressionScheme.DEFLATE; diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index f6b8a675448b6..103d90813b8ad 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -123,8 +123,7 @@ public void testSendRequest() throws IOException { long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); boolean compress; - // TODO: Change after backport - if (compressionScheme == CompressionScheme.LZ4 && version.before(Version.V_8_0_0)) { + if (compressionScheme == CompressionScheme.LZ4 && version.before(CompressionScheme.LZ4_VERSION)) { compress = false; } else { compress = randomBoolean(); @@ -192,8 +191,7 @@ public void testSendResponse() throws IOException { long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); boolean compress; - // TODO: Change after backport - if (compressionScheme == CompressionScheme.LZ4 && version.before(Version.V_8_0_0)) { + if (compressionScheme == CompressionScheme.LZ4 && version.before(CompressionScheme.LZ4_VERSION)) { compress = false; } 
else { compress = randomBoolean(); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index eb9dbcdd9ae42..9a4ddc1669dfd 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -369,9 +369,9 @@ public void testChangeSettings() throws Exception { Settings.Builder settingsChange = Settings.builder(); TimeValue pingSchedule = TimeValue.timeValueSeconds(randomIntBetween(6, 8)); settingsChange.put("cluster.remote.cluster_1.transport.ping_schedule", pingSchedule); - boolean rawDataOption = randomBoolean(); - if (rawDataOption) { - settingsChange.put("cluster.remote.cluster_1.transport.compress_raw_data", true); + boolean indexingDataOption = randomBoolean(); + if (indexingDataOption) { + settingsChange.put("cluster.remote.cluster_1.transport.compress_indexing_data", true); } else { settingsChange.put("cluster.remote.cluster_1.transport.compress", true); } @@ -382,12 +382,12 @@ public void testChangeSettings() throws Exception { remoteClusterConnection = service.getRemoteClusterConnection("cluster_1"); ConnectionProfile connectionProfile = remoteClusterConnection.getConnectionManager().getConnectionProfile(); assertEquals(pingSchedule, connectionProfile.getPingInterval()); - if (rawDataOption) { + if (indexingDataOption) { assertEquals(false, connectionProfile.getCompressionEnabled()); - assertEquals(true, connectionProfile.getRawDataCompressionEnabled()); + assertEquals(true, connectionProfile.getIndexingDataCompressionEnabled()); } else { assertEquals(true, connectionProfile.getCompressionEnabled()); - assertEquals(false, connectionProfile.getRawDataCompressionEnabled()); + assertEquals(false, connectionProfile.getIndexingDataCompressionEnabled()); } } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java index 0f6092c68fb54..206ccaaf883be 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java @@ -46,7 +46,7 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { ClusterConnectionManager connectionManager = new ClusterConnectionManager(TestProfiles.LIGHT_PROFILE, mock(Transport.class)); assertEquals(TimeValue.MINUS_ONE, connectionManager.getConnectionProfile().getPingInterval()); assertEquals(false, connectionManager.getConnectionProfile().getCompressionEnabled()); - assertEquals(false, connectionManager.getConnectionProfile().getRawDataCompressionEnabled()); + assertEquals(false, connectionManager.getConnectionProfile().getIndexingDataCompressionEnabled()); RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); FakeConnectionStrategy first = new FakeConnectionStrategy("cluster-alias", mock(TransportService.class), remoteConnectionManager, RemoteConnectionStrategy.ConnectionStrategy.PROXY); @@ -64,8 +64,10 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { } else if (change.equals(compress)) { newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("cluster-alias").getKey(), true); } else if (change.equals(rawDataCompress)) { - 
newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_COMPRESS_RAW_DATA.getConcreteSettingForNamespace("cluster-alias").getKey(), - true); + newBuilder.put( + RemoteClusterService.REMOTE_CLUSTER_COMPRESS_INDEXING_DATA.getConcreteSettingForNamespace("cluster-alias").getKey(), + true + ); } else { throw new AssertionError("Unexpected option: " + change); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 3d8cf5bba5030..4df6e5797d3f3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -441,7 +441,7 @@ private static Settings getRandomNodeSettings(long seed) { Random random = new Random(seed); Builder builder = Settings.builder(); builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), rarely(random)); - builder.put(TransportSettings.TRANSPORT_COMPRESS_RAW_DATA.getKey(), random.nextBoolean()); + builder.put(TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA.getKey(), random.nextBoolean()); if (random.nextBoolean()) { builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), CompressionScheme.DEFLATE); } else { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java b/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java index 4a28c39d92e18..ae53e4a57e7f8 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java @@ -26,7 +26,7 @@ private TestProfiles() {} builder.setConnectTimeout(source.getConnectTimeout()); builder.setHandshakeTimeout(source.getHandshakeTimeout()); builder.setCompressionEnabled(source.getCompressionEnabled()); - builder.setRawDataCompressionEnabled(source.getRawDataCompressionEnabled()); + builder.setIndexingDataCompressionEnabled(source.getIndexingDataCompressionEnabled()); builder.setPingInterval(source.getPingInterval()); builder.addConnections(1, TransportRequestOptions.Type.BULK, diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 46bf48ad0b3bc..ed5023c6075d3 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -171,7 +171,7 @@ protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile con builder.setConnectTimeout(connectionProfile.getConnectTimeout()); builder.setPingInterval(connectionProfile.getPingInterval()); builder.setCompressionEnabled(connectionProfile.getCompressionEnabled()); - builder.setRawDataCompressionEnabled(connectionProfile.getRawDataCompressionEnabled()); + builder.setIndexingDataCompressionEnabled(connectionProfile.getIndexingDataCompressionEnabled()); return builder.build(); } From 223e1fb9887f45ca535697c5d1064beb89c6fa9d Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 16:30:16 -0600 Subject: [PATCH 21/29] Changes --- .../elasticsearch/transport/Compression.java | 54 +++++++++++++++++++ .../transport/CompressionScheme.java | 36 ------------- .../DeflateTransportDecompressor.java | 2 +- .../transport/Lz4TransportDecompressor.java | 4 +- .../transport/NetworkMessage.java | 8 +-- .../transport/OutboundHandler.java | 8 +-- 
 .../transport/OutboundMessage.java | 14 ++---
 .../elasticsearch/transport/TcpTransport.java | 5 +-
 .../transport/TransportDecompressor.java | 12 ++---
 .../transport/TransportSettings.java | 4 +--
 .../transport/InboundDecoderTests.java | 8 +--
 .../transport/InboundHandlerTests.java | 2 +-
 .../transport/InboundPipelineTests.java | 8 +--
 .../transport/OutboundHandlerTests.java | 8 +--
 .../transport/TcpTransportTests.java | 2 +-
 .../transport/TransportLoggerTests.java | 2 +-
 .../test/InternalTestCluster.java | 6 +--
 .../AbstractSimpleTransportTestCase.java | 3 +-
 .../transport/TestTransportChannels.java | 2 +-
 19 files changed, 103 insertions(+), 85 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/transport/Compression.java
 delete mode 100644 server/src/main/java/org/elasticsearch/transport/CompressionScheme.java

diff --git a/server/src/main/java/org/elasticsearch/transport/Compression.java b/server/src/main/java/org/elasticsearch/transport/Compression.java
new file mode 100644
index 0000000000000..8908fb9fc7200
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/transport/Compression.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.transport;
+
+import net.jpountz.lz4.LZ4BlockOutputStream;
+
+import net.jpountz.lz4.LZ4Factory;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.compress.DeflateCompressor;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+public class Compression {
+
+    public enum Scheme {
+        LZ4,
+        DEFLATE;
+    }
+
+    // TODO: Change after backport
+    static final Version LZ4_VERSION = Version.V_8_0_0;
+    static final byte[] DEFLATE_HEADER = DeflateCompressor.HEADER;
+    static final byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'};
+    static final int HEADER_LENGTH = 4;
+    static final int LZ4_BLOCK_SIZE;
+
+    static {
+        String blockSizeString = System.getProperty("es.transport.compression.lz4_block_size");
+        if (blockSizeString != null) {
+            int lz4BlockSize = Integer.parseInt(blockSizeString);
+            if (lz4BlockSize < 1024 || lz4BlockSize > (512 * 1024)) {
+                throw new IllegalArgumentException("lz4_block_size must be >= 1KB and <= 512KB");
+            }
+            LZ4_BLOCK_SIZE = lz4BlockSize;
+        } else {
+            // 16KB block size to minimize the allocation of large buffers
+            LZ4_BLOCK_SIZE = 16 * 1024;
+        }
+    }
+
+    public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException {
+        outputStream.write(LZ4_HEADER);
+        // use the configured block size (16KB by default) to minimize the allocation of large buffers
+        return new LZ4BlockOutputStream(outputStream, LZ4_BLOCK_SIZE, LZ4Factory.safeInstance().fastCompressor());
+    }
+}
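A usage sketch for reference (illustrative only, not part of the patch; assumes org.elasticsearch.transport.Compression is accessible on the classpath). The stream writes the 4-byte 'LZ4\0' scheme header that the inbound side uses to pick a decompressor, followed by LZ4 block frames of LZ4_BLOCK_SIZE:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;

    import org.elasticsearch.transport.Compression;

    class Lz4FramingSketch {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream sink = new ByteArrayOutputStream();
            // Closing the stream finishes the LZ4 frame with its end mark.
            try (OutputStream lz4 = Compression.lz4OutputStream(sink)) {
                lz4.write("example transport payload".getBytes(StandardCharsets.UTF_8));
            }
            byte[] framed = sink.toByteArray();
            // The first four bytes are the scheme header: 'L', 'Z', '4', 0.
            assert framed[0] == 'L' && framed[1] == 'Z' && framed[2] == '4' && framed[3] == 0;
        }
    }

Starting a node with -Des.transport.compression.lz4_block_size=65536 would raise the block size to 64KB, within the 1KB to 512KB bounds enforced by the static initializer above.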
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.transport; - -import net.jpountz.lz4.LZ4BlockOutputStream; - -import net.jpountz.lz4.LZ4Factory; - -import org.elasticsearch.Version; -import org.elasticsearch.common.compress.DeflateCompressor; - -import java.io.IOException; -import java.io.OutputStream; - -public enum CompressionScheme { - LZ4, - DEFLATE; - - // TODO: Change after backport - static Version LZ4_VERSION = Version.V_8_0_0; - static byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; - static byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; - static int HEADER_LENGTH = 4; - - public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { - outputStream.write(LZ4_HEADER); - // 16KB block size to minimize the allocation of large buffers - return new LZ4BlockOutputStream(outputStream, 16 * 1024, LZ4Factory.safeInstance().fastCompressor()); - } -} diff --git a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java index 677f7f18b49a4..d2ec61508c995 100644 --- a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java @@ -40,7 +40,7 @@ public int decompress(BytesReference bytesReference) throws IOException { int bytesConsumed = 0; if (hasSkippedHeader == false) { hasSkippedHeader = true; - int headerLength = CompressionScheme.HEADER_LENGTH; + int headerLength = Compression.HEADER_LENGTH; bytesReference = bytesReference.slice(headerLength, bytesReference.length() - headerLength); bytesConsumed += headerLength; } diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index be1dedfd54832..99e653ac0cf51 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -125,7 +125,7 @@ private enum State { private final PageCacheRecycler recycler; private final ArrayDeque> pages; private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - private byte[] compressedBuffer = new byte[0]; + private byte[] compressedBuffer = BytesRef.EMPTY_BYTES; private boolean hasSkippedESHeader = false; public Lz4TransportDecompressor(PageCacheRecycler recycler) { @@ -166,7 +166,7 @@ public int decompress(BytesReference bytesReference) throws IOException { int bytesConsumed = 0; if (hasSkippedESHeader == false) { hasSkippedESHeader = true; - int esHeaderLength = CompressionScheme.HEADER_LENGTH; + int esHeaderLength = Compression.HEADER_LENGTH; bytesReference = bytesReference.slice(esHeaderLength, bytesReference.length() - esHeaderLength); bytesConsumed += esHeaderLength; } diff --git a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java index bc9a8c380aafa..5060ac8b3c02f 100644 --- a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java @@ -21,9 +21,9 @@ public abstract class NetworkMessage { protected final Writeable threadContext; 
protected final long requestId; protected final byte status; - protected final CompressionScheme compressionScheme; + protected final Compression.Scheme compressionScheme; - NetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId, CompressionScheme compressionScheme) { + NetworkMessage(ThreadContext threadContext, Version version, byte status, long requestId, Compression.Scheme compressionScheme) { this.threadContext = threadContext.captureAsWriteable(); this.version = version; this.requestId = requestId; @@ -63,7 +63,7 @@ boolean isError() { return TransportStatus.isError(status); } - private static CompressionScheme adjustedScheme(Version version, CompressionScheme compressionScheme) { - return compressionScheme == CompressionScheme.LZ4 && version.before(CompressionScheme.LZ4_VERSION) ? null : compressionScheme; + private static Compression.Scheme adjustedScheme(Version version, Compression.Scheme compressionScheme) { + return compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.LZ4_VERSION) ? null : compressionScheme; } } diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java index 64a5d03b8c595..9d8c1b2ecbeb9 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -38,14 +38,14 @@ final class OutboundHandler { private final StatsTracker statsTracker; private final ThreadPool threadPool; private final BigArrays bigArrays; - private final CompressionScheme configuredCompressionScheme; + private final Compression.Scheme configuredCompressionScheme; private volatile long slowLogThresholdMs = Long.MAX_VALUE; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; OutboundHandler(String nodeName, Version version, StatsTracker statsTracker, ThreadPool threadPool, BigArrays bigArrays, - CompressionScheme compressionScheme) { + Compression.Scheme compressionScheme) { this.nodeName = nodeName; this.version = version; this.statsTracker = statsTracker; @@ -70,7 +70,7 @@ void sendRequest(final DiscoveryNode node, final TcpChannel channel, final long final TransportRequest request, final TransportRequestOptions options, final Version channelVersion, final boolean compressRequest, final boolean isHandshake) throws IOException, TransportException { Version version = Version.min(this.version, channelVersion); - final CompressionScheme compressionScheme; + final Compression.Scheme compressionScheme; if (compressRequest) { compressionScheme = configuredCompressionScheme; } else { @@ -101,7 +101,7 @@ void sendRequest(final DiscoveryNode node, final TcpChannel channel, final long void sendResponse(final Version nodeVersion, final TcpChannel channel, final long requestId, final String action, final TransportResponse response, final boolean compressResponse, final boolean isHandshake) throws IOException { Version version = Version.min(this.version, nodeVersion); - final CompressionScheme compressionScheme; + final Compression.Scheme compressionScheme; if (compressResponse) { compressionScheme = configuredCompressionScheme; } else { diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java index 0e00e700414c2..b2ec5c08da21d 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java +++ 
b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -26,7 +26,7 @@ abstract class OutboundMessage extends NetworkMessage { protected final Writeable message; - OutboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, CompressionScheme compressionScheme, + OutboundMessage(ThreadContext threadContext, Version version, byte status, long requestId, Compression.Scheme compressionScheme, Writeable message) { super(threadContext, version, status, requestId, compressionScheme); this.message = message; @@ -88,10 +88,10 @@ BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { // compressed stream wrapped bytes must be no-close wrapped since we need to close the compressed wrapper below to release // resources and write EOS marker bytes but must not yet release the bytes themselves private StreamOutput wrapCompressed(BytesStreamOutput bytesStream) throws IOException { - if (compressionScheme == CompressionScheme.DEFLATE) { + if (compressionScheme == Compression.Scheme.DEFLATE) { return new OutputStreamStreamOutput(CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.noCloseStream(bytesStream))); - } else if (compressionScheme == CompressionScheme.LZ4) { - return new OutputStreamStreamOutput(CompressionScheme.lz4OutputStream(Streams.noCloseStream(bytesStream))); + } else if (compressionScheme == Compression.Scheme.LZ4) { + return new OutputStreamStreamOutput(Compression.lz4OutputStream(Streams.noCloseStream(bytesStream))); } else { throw new IllegalArgumentException("Invalid compression scheme: " + compressionScheme); } @@ -106,7 +106,7 @@ static class Request extends OutboundMessage { private final String action; Request(ThreadContext threadContext, Writeable message, Version version, String action, long requestId, - boolean isHandshake, CompressionScheme compressionScheme) { + boolean isHandshake, Compression.Scheme compressionScheme) { super(threadContext, version, setStatus(isHandshake), requestId, adjustCompressionScheme(compressionScheme, message), message); this.action = action; } @@ -122,7 +122,7 @@ protected void writeVariableHeader(StreamOutput stream) throws IOException { } // Do not compress instances of BytesTransportRequest - private static CompressionScheme adjustCompressionScheme(CompressionScheme compressionScheme, Writeable message) { + private static Compression.Scheme adjustCompressionScheme(Compression.Scheme compressionScheme, Writeable message) { if (message instanceof BytesTransportRequest) { return null; } else { @@ -150,7 +150,7 @@ public String toString() { static class Response extends OutboundMessage { Response(ThreadContext threadContext, Writeable message, Version version, long requestId, boolean isHandshake, - CompressionScheme compressionScheme) { + Compression.Scheme compressionScheme) { super(threadContext, version, setStatus(isHandshake, message), requestId, compressionScheme, message); } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index ed85a996e14ce..cc420f7ba2b0c 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -104,7 +104,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements protected final NetworkService networkService; protected final Set profileSettings; private final CircuitBreakerService circuitBreakerService; - private final 
CompressionScheme compressionScheme;
     private final ConcurrentMap<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
     private final Map<String, List<TcpServerChannel>> serverChannels = newConcurrentMap();
@@ -134,11 +133,11 @@ public TcpTransport(Settings settings, Version version, ThreadPool threadPool, P
         this.pageCacheRecycler = pageCacheRecycler;
         this.circuitBreakerService = circuitBreakerService;
         this.networkService = networkService;
-        this.compressionScheme = TransportSettings.TRANSPORT_COMPRESSION_SCHEME.get(settings);
+        Compression.Scheme compressionScheme = TransportSettings.TRANSPORT_COMPRESSION_SCHEME.get(settings);
         String nodeName = Node.NODE_NAME_SETTING.get(settings);
         BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.IN_FLIGHT_REQUESTS);
-        this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, bigArrays, this.compressionScheme);
+        this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, bigArrays, compressionScheme);
         this.handshaker = new TransportHandshaker(version, threadPool,
             (node, channel, requestId, v) -> outboundHandler.sendRequest(node, channel, requestId,
                 TransportHandshaker.HANDSHAKE_ACTION_NAME, new TransportHandshaker.HandshakeRequest(version),
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java
index f208c02eedc20..855f6432dac36 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java
@@ -31,21 +31,21 @@ static TransportDecompressor getDecompressor(PageCacheRecycler recycler, BytesRe
         }
         byte firstByte = bytes.get(0);
         byte[] header;
-        if (firstByte == CompressionScheme.DEFLATE_HEADER[0]) {
-            header = CompressionScheme.DEFLATE_HEADER;
-        } else if (firstByte == CompressionScheme.LZ4_HEADER[0]) {
-            header = CompressionScheme.LZ4_HEADER;
+        if (firstByte == Compression.DEFLATE_HEADER[0]) {
+            header = Compression.DEFLATE_HEADER;
+        } else if (firstByte == Compression.LZ4_HEADER[0]) {
+            header = Compression.LZ4_HEADER;
         } else {
             throw createIllegalState(bytes);
         }
-        for (int i = 1; i < CompressionScheme.HEADER_LENGTH; ++i) {
+        for (int i = 1; i < Compression.HEADER_LENGTH; ++i) {
             if (bytes.get(i) != header[i]) {
                 throw createIllegalState(bytes);
             }
         }
-        if (header == CompressionScheme.DEFLATE_HEADER) {
+        if (header == Compression.DEFLATE_HEADER) {
             return new DeflateTransportDecompressor(recycler);
         } else {
             return new Lz4TransportDecompressor(recycler);
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
index 413f1218263e4..4947699fb067e 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java
@@ -54,8 +54,8 @@ public final class TransportSettings {
         intSetting("transport.publish_port", -1, -1, Setting.Property.NodeScope);
     public static final Setting.AffixSetting<Integer> PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port", key -> intSetting(key, -1, -1, Setting.Property.NodeScope));
     public static final Setting<Boolean> TRANSPORT_COMPRESS =
         boolSetting("transport.compress", false, Setting.Property.NodeScope);
     public static final Setting<Boolean> TRANSPORT_COMPRESS_INDEXING_DATA =
         boolSetting("transport.compress_indexing_data", false, Setting.Property.NodeScope);
-    public static final Setting<CompressionScheme> TRANSPORT_COMPRESSION_SCHEME =
-        enumSetting(CompressionScheme.class, "transport.compression_scheme", CompressionScheme.DEFLATE,
+    public static final Setting<Compression.Scheme> TRANSPORT_COMPRESSION_SCHEME =
+        enumSetting(Compression.Scheme.class, "transport.compression_scheme", Compression.Scheme.DEFLATE,
Setting.Property.NodeScope); // the scheduled internal ping interval setting, defaults to disabled (-1) public static final Setting PING_SCHEDULE = diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index 5440a76eb459d..6eef2f2ec63da 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -96,7 +96,7 @@ public void testDecode() throws IOException { public void testDecodePreHeaderSizeVariableInt() throws IOException { // TODO: Can delete test on 9.0 - CompressionScheme compressionScheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.DEFLATE, null); + Compression.Scheme compressionScheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.DEFLATE, null); String action = "test-request"; long requestId = randomNonNegativeLong(); final Version preHeaderVariableInt = Version.V_7_5_0; @@ -180,7 +180,7 @@ public void testCompressedDecode() throws IOException { } OutboundMessage message; TransportMessage transportMessage; - CompressionScheme scheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); + Compression.Scheme scheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); if (isRequest) { transportMessage = new TestRequest(randomAlphaOfLength(100)); message = new OutboundMessage.Request(threadContext, transportMessage, Version.CURRENT, action, requestId, false, scheme); @@ -240,7 +240,7 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { threadContext.putHeader(headerKey, headerValue); Version handshakeCompat = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); OutboundMessage message = new OutboundMessage.Request(threadContext, new TestRequest(randomAlphaOfLength(100)), - handshakeCompat, action, requestId, true, CompressionScheme.DEFLATE); + handshakeCompat, action, requestId, true, Compression.Scheme.DEFLATE); final BytesReference bytes = message.serialize(new BytesStreamOutput()); int totalHeaderSize = TcpHeader.headerSize(handshakeCompat); @@ -268,7 +268,7 @@ public void testVersionIncompatibilityDecodeException() throws IOException { long requestId = randomNonNegativeLong(); Version incompatibleVersion = Version.CURRENT.minimumCompatibilityVersion().minimumCompatibilityVersion(); OutboundMessage message = new OutboundMessage.Request(threadContext, new TestRequest(randomAlphaOfLength(100)), - incompatibleVersion, action, requestId, false, CompressionScheme.DEFLATE); + incompatibleVersion, action, requestId, false, Compression.Scheme.DEFLATE); final BytesReference bytes = message.serialize(new BytesStreamOutput()); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index 0a9e526420191..f7bfa120ec7e6 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -65,7 +65,7 @@ public void setUp() throws Exception { TransportHandshaker handshaker = new TransportHandshaker(version, threadPool, (n, c, r, v) -> {}); TransportKeepAlive keepAlive = new TransportKeepAlive(threadPool, TcpChannel::sendMessage); OutboundHandler outboundHandler = new OutboundHandler("node", version, new StatsTracker(), threadPool, - BigArrays.NON_RECYCLING_INSTANCE, 
randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4)); + BigArrays.NON_RECYCLING_INSTANCE, randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4)); requestHandlers = new Transport.RequestHandlers(); responseHandlers = new Transport.ResponseHandlers(); handler = new InboundHandler(threadPool, outboundHandler, namedWriteableRegistry, handshaker, keepAlive, requestHandlers, diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index 6f32474628458..868166631dc82 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -96,14 +96,14 @@ public void testPipelineHandling() throws IOException { final String value = randomAlphaOfLength(randomIntBetween(10, 200)); final boolean isRequest = randomBoolean(); - CompressionScheme scheme; + Compression.Scheme scheme; if (randomBoolean()) { scheme = null; } else { - if (version.onOrAfter(CompressionScheme.LZ4_VERSION)) { - scheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); + if (version.onOrAfter(Compression.LZ4_VERSION)) { + scheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); } else { - scheme = CompressionScheme.DEFLATE; + scheme = Compression.Scheme.DEFLATE; } } final long requestId = totalMessages++; diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 103d90813b8ad..a70c641740005 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -58,7 +58,7 @@ public class OutboundHandlerTests extends ESTestCase { private OutboundHandler handler; private FakeTcpChannel channel; private DiscoveryNode node; - private CompressionScheme compressionScheme; + private Compression.Scheme compressionScheme; @Before public void setUp() throws Exception { @@ -67,7 +67,7 @@ public void setUp() throws Exception { TransportAddress transportAddress = buildNewFakeTransportAddress(); node = new DiscoveryNode("", transportAddress, Version.CURRENT); StatsTracker statsTracker = new StatsTracker(); - compressionScheme = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4); + compressionScheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, BigArrays.NON_RECYCLING_INSTANCE, compressionScheme); @@ -123,7 +123,7 @@ public void testSendRequest() throws IOException { long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); boolean compress; - if (compressionScheme == CompressionScheme.LZ4 && version.before(CompressionScheme.LZ4_VERSION)) { + if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.LZ4_VERSION)) { compress = false; } else { compress = randomBoolean(); @@ -191,7 +191,7 @@ public void testSendResponse() throws IOException { long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); boolean compress; - if (compressionScheme == CompressionScheme.LZ4 && version.before(CompressionScheme.LZ4_VERSION)) { + if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.LZ4_VERSION)) { compress = false; } else { compress = randomBoolean(); diff --git 
a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index df822be122a73..fd14a51d3b209 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -403,7 +403,7 @@ private void testExceptionHandling(boolean startTransport, Exception exception, TcpTransport.handleException(channel, exception, lifecycle, new OutboundHandler(randomAlphaOfLength(10), Version.CURRENT, new StatsTracker(), testThreadPool, - BigArrays.NON_RECYCLING_INSTANCE, randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4))); + BigArrays.NON_RECYCLING_INSTANCE, randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4))); if (expectClosed) { assertTrue(listener.isDone()); diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index 7f0f4576dc5a8..26e11422ae298 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -78,7 +78,7 @@ public void testLoggingHandler() throws IOException { } private BytesReference buildRequest() throws IOException { - CompressionScheme compress = randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4, null); + Compression.Scheme compress = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4, null); try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { OutboundMessage.Request request = new OutboundMessage.Request(new ThreadContext(Settings.EMPTY), new ClusterStatsRequest(), Version.CURRENT, ClusterStatsAction.NAME, randomInt(30), false, compress); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 4df6e5797d3f3..3eff4220609c1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -97,7 +97,7 @@ import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.CompressionScheme; +import org.elasticsearch.transport.Compression; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportSettings; @@ -443,9 +443,9 @@ private static Settings getRandomNodeSettings(long seed) { builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), rarely(random)); builder.put(TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA.getKey(), random.nextBoolean()); if (random.nextBoolean()) { - builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), CompressionScheme.DEFLATE); + builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), Compression.Scheme.DEFLATE); } else { - builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), CompressionScheme.LZ4); + builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), Compression.Scheme.LZ4); } if (random.nextBoolean()) { builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values())); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
index 7a79b8c21e9b8..3ae53712b0a63 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
@@ -557,7 +557,8 @@ public void testVoidMessageCompressed() throws Exception {
             Settings settingsWithCompress = Settings.builder()
                 .put(TransportSettings.TRANSPORT_COMPRESS.getKey(), true)
-                .put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4))
+                .put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(),
+                    randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4))
                 .build();
             ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress);
             connectToNode(serviceC, serviceA.getLocalDiscoNode(), connectionProfile);
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java
index 228bc47ae4819..03dd1e2641d76 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java
@@ -20,7 +20,7 @@ public static TcpTransportChannel newFakeTcpTransportChannel(String nodeName, Tc
                                                                  String action, long requestId, Version version) {
         return new TcpTransportChannel(
             new OutboundHandler(nodeName, version, new StatsTracker(), threadPool, BigArrays.NON_RECYCLING_INSTANCE,
-                randomFrom(CompressionScheme.DEFLATE, CompressionScheme.LZ4)),
+                randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4)),
             channel, action, requestId, version, false, false, () -> {});
     }
 }

From a17f90045725a34b27467bfb7a140309c1cb9ddf Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Mon, 28 Jun 2021 17:18:46 -0600
Subject: [PATCH 22/29] Make transport.compress a tri-state enum and add
 compression scheme settings for remote clusters

---
 .../common/settings/ClusterSettings.java      |  3 +-
 .../common/settings/Setting.java              | 17 ++++++
 .../elasticsearch/transport/Compression.java  | 50 +++++++++--------
 .../transport/ConnectionProfile.java          | 54 +++++++++----------
 .../DeflateTransportDecompressor.java         |  2 +-
 .../transport/Lz4TransportDecompressor.java   |  2 +-
 .../transport/NetworkMessage.java             |  2 +-
 .../transport/OutboundMessage.java            |  2 +-
 .../transport/RemoteClusterService.java       | 11 ++--
 .../transport/RemoteConnectionStrategy.java   | 10 ++--
 .../elasticsearch/transport/TcpTransport.java |  7 ++-
 .../transport/TransportDecompressor.java      | 12 ++---
 .../transport/TransportSettings.java          |  6 +--
 .../ClusterConnectionManagerTests.java        |  2 +-
 .../transport/ConnectionProfileTests.java     | 40 ++++++++------
 .../transport/InboundPipelineTests.java       |  2 +-
 .../transport/OutboundHandlerTests.java       |  4 +-
 .../transport/RemoteClusterServiceTests.java  | 19 +++----
 .../RemoteConnectionStrategyTests.java        | 17 +++---
 .../test/InternalTestCluster.java             | 12 ++++-
 .../AbstractSimpleTransportTestCase.java      |  6 ++-
 .../elasticsearch/transport/TestProfiles.java |  2 +-
 .../transport/nio/MockNioTransport.java       |  2 +-
 .../xpack/ccr/IndexFollowingIT.java           |  7 ++-
 24 files changed, 169 insertions(+), 122 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
index b184bcf2cb3d3..6640f8821aa4e 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -296,7 +296,7 @@ public void apply(Settings value, Settings current, Settings previous) {
             RemoteClusterService.REMOTE_NODE_ATTRIBUTE,
             RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE,
             RemoteClusterService.REMOTE_CLUSTER_COMPRESS,
-            RemoteClusterService.REMOTE_CLUSTER_COMPRESS_INDEXING_DATA,
+            RemoteClusterService.REMOTE_CLUSTER_COMPRESSION_SCHEME,
             RemoteConnectionStrategy.REMOTE_CONNECTION_MODE,
             ProxyConnectionStrategy.PROXY_ADDRESS,
             ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS,
@@ -321,7 +321,6 @@ public void apply(Settings value, Settings current, Settings previous) {
             TransportSettings.PUBLISH_PORT,
             TransportSettings.PUBLISH_PORT_PROFILE,
             TransportSettings.TRANSPORT_COMPRESS,
-            TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA,
             TransportSettings.TRANSPORT_COMPRESSION_SCHEME,
             TransportSettings.PING_SCHEDULE,
             TransportSettings.CONNECT_TIMEOUT,
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
index d888f25231131..e1d1d4a4a7e70 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -1405,6 +1405,23 @@ public static <T extends Enum<T>> Setting<T> enumSetting(Class<T> clazz, String
         return new Setting<>(key, defaultValue.toString(), e -> Enum.valueOf(clazz, e.toUpperCase(Locale.ROOT)), properties);
     }

+    /**
+     * Creates a setting where the allowed values are defined as enum constants. All enum constants must be uppercase.
+     *
+     * @param clazz the enum class
+     * @param key the key for the setting
+     * @param fallbackSetting the fallback setting for this setting
+     * @param validator validator for this setting
+     * @param properties properties for this setting like scope, filtering...
+     * @param <T> the generics type parameter reflecting the actual type of the enum
+     * @return the setting object
+     */
+    public static <T extends Enum<T>> Setting<T> enumSetting(Class<T> clazz, String key, Setting<T> fallbackSetting,
+                                                             Validator<T> validator, Property... properties) {
+        return new Setting<>(new SimpleKey(key), fallbackSetting, fallbackSetting::getRaw,
+            e -> Enum.valueOf(clazz, e.toUpperCase(Locale.ROOT)), validator, properties);
+    }
+
     /**
      * Creates a setting which specifies a memory size.
This can either be * specified as an absolute bytes value or as a percentage of the heap diff --git a/server/src/main/java/org/elasticsearch/transport/Compression.java b/server/src/main/java/org/elasticsearch/transport/Compression.java index 8908fb9fc7200..9412870ce0368 100644 --- a/server/src/main/java/org/elasticsearch/transport/Compression.java +++ b/server/src/main/java/org/elasticsearch/transport/Compression.java @@ -23,32 +23,38 @@ public class Compression { public enum Scheme { LZ4, DEFLATE; - } - - // TODO: Change after backport - static final Version LZ4_VERSION = Version.V_8_0_0; - static final byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; - static final byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; - static final int HEADER_LENGTH = 4; - static final int LZ4_BLOCK_SIZE; - - static { - String blockSizeString = System.getProperty("es.transport.compression.lz4_block_size"); - if (blockSizeString != null) { - int lz4BlockSize = Integer.parseInt(blockSizeString); - if (lz4BlockSize < 1024 || lz4BlockSize > (512 * 1024)) { - throw new IllegalArgumentException("lz4_block_size must be >= 1KB and <= 512KB"); + // TODO: Change after backport + static final Version LZ4_VERSION = Version.V_8_0_0; + static final byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; + static final byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; + static final int HEADER_LENGTH = 4; + private static final int LZ4_BLOCK_SIZE; + + static { + String blockSizeString = System.getProperty("es.transport.compression.lz4_block_size"); + if (blockSizeString != null) { + int lz4BlockSize = Integer.parseInt(blockSizeString); + if (lz4BlockSize < 1024 || lz4BlockSize > (512 * 1024)) { + throw new IllegalArgumentException("lz4_block_size must be >= 1KB and <= 512KB"); + } + LZ4_BLOCK_SIZE = lz4BlockSize; + } else { + // 16KB block size to minimize the allocation of large buffers + LZ4_BLOCK_SIZE = 16 * 1024; } - LZ4_BLOCK_SIZE = lz4BlockSize; - } else { + } + + public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { + outputStream.write(LZ4_HEADER); // 16KB block size to minimize the allocation of large buffers - LZ4_BLOCK_SIZE = 16 * 1024; + return new LZ4BlockOutputStream(outputStream, LZ4_BLOCK_SIZE, LZ4Factory.safeInstance().fastCompressor()); } } - public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { - outputStream.write(LZ4_HEADER); - // 16KB block size to minimize the allocation of large buffers - return new LZ4BlockOutputStream(outputStream, LZ4_BLOCK_SIZE, LZ4Factory.safeInstance().fastCompressor()); + public enum Enabled { + TRUE, + INDEXING_DATA, + FALSE } + } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index 1883ba1c3cd1e..44b0083e72e27 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -36,7 +36,7 @@ public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionPro return fallbackProfile; } else if (profile.getConnectTimeout() != null && profile.getHandshakeTimeout() != null && profile.getPingInterval() != null && profile.getCompressionEnabled() != null - && profile.getIndexingDataCompressionEnabled() != null) { + && profile.getCompressionScheme() != null) { return profile; } else { ConnectionProfile.Builder builder = new ConnectionProfile.Builder(profile); @@ -52,8 +52,8 @@ public 
static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionPro if (profile.getCompressionEnabled() == null) { builder.setCompressionEnabled(fallbackProfile.getCompressionEnabled()); } - if (profile.getIndexingDataCompressionEnabled() == null) { - builder.setIndexingDataCompressionEnabled(fallbackProfile.getIndexingDataCompressionEnabled()); + if (profile.getCompressionScheme() == null) { + builder.setCompressionScheme(fallbackProfile.getCompressionScheme()); } return builder.build(); } @@ -76,7 +76,7 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) builder.setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)); builder.setPingInterval(TransportSettings.PING_SCHEDULE.get(settings)); builder.setCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS.get(settings)); - builder.setIndexingDataCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA.get(settings)); + builder.setCompressionScheme(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.get(settings)); builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); // if we are not master eligible we don't need a dedicated channel to publish the state @@ -94,8 +94,8 @@ public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) */ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, @Nullable TimeValue connectTimeout, @Nullable TimeValue handshakeTimeout, @Nullable TimeValue pingInterval, - @Nullable Boolean compressionEnabled, - @Nullable Boolean rawDataCompressionEnabled) { + @Nullable Compression.Enabled compressionEnabled, + @Nullable Compression.Scheme compressionScheme) { Builder builder = new Builder(); builder.addConnections(1, channelType); final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); @@ -113,8 +113,8 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption if (compressionEnabled != null) { builder.setCompressionEnabled(compressionEnabled); } - if (rawDataCompressionEnabled != null) { - builder.setIndexingDataCompressionEnabled(rawDataCompressionEnabled); + if (compressionScheme != null) { + builder.setCompressionScheme(compressionScheme); } return builder.build(); } @@ -124,19 +124,19 @@ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOption private final TimeValue connectTimeout; private final TimeValue handshakeTimeout; private final TimeValue pingInterval; - private final Boolean compressionEnabled; - private final Boolean indexingDataCompressionEnabled; + private final Compression.Enabled compressionEnabled; + private final Compression.Scheme compressionScheme; private ConnectionProfile(List handles, int numConnections, TimeValue connectTimeout, - TimeValue handshakeTimeout, TimeValue pingInterval, Boolean compressionEnabled, - Boolean indexingDataCompressionEnabled) { + TimeValue handshakeTimeout, TimeValue pingInterval, Compression.Enabled compressionEnabled, + Compression.Scheme compressionScheme) { this.handles = handles; this.numConnections = numConnections; this.connectTimeout = connectTimeout; this.handshakeTimeout = handshakeTimeout; this.pingInterval = pingInterval; this.compressionEnabled = compressionEnabled; - this.indexingDataCompressionEnabled = indexingDataCompressionEnabled; + this.compressionScheme = compressionScheme; } /** @@ -148,8 +148,8 @@ public static class Builder { private int 
numConnections = 0; private TimeValue connectTimeout; private TimeValue handshakeTimeout; - private Boolean compressionEnabled; - private Boolean indexingDataCompressionEnabled; + private Compression.Enabled compressionEnabled; + private Compression.Scheme compressionScheme; private TimeValue pingInterval; /** create an empty builder */ @@ -164,7 +164,7 @@ public Builder(ConnectionProfile source) { connectTimeout = source.getConnectTimeout(); handshakeTimeout = source.getHandshakeTimeout(); compressionEnabled = source.getCompressionEnabled(); - indexingDataCompressionEnabled = source.getIndexingDataCompressionEnabled(); + compressionScheme = source.getCompressionScheme(); pingInterval = source.getPingInterval(); } /** @@ -198,9 +198,9 @@ public Builder setPingInterval(TimeValue pingInterval) { } /** - * Sets compression enabled for this connection profile + * Sets compression enabled configuration for this connection profile */ - public Builder setCompressionEnabled(boolean compressionEnabled) { + public Builder setCompressionEnabled(Compression.Enabled compressionEnabled) { this.compressionEnabled = compressionEnabled; return this; } @@ -208,8 +208,8 @@ public Builder setCompressionEnabled(boolean compressionEnabled) { /** * Sets indexing data compression enabled for this connection profile */ - public Builder setIndexingDataCompressionEnabled(boolean indexingDataCompressionEnabled) { - this.indexingDataCompressionEnabled = indexingDataCompressionEnabled; + public Builder setCompressionScheme(Compression.Scheme compressionScheme) { + this.compressionScheme = compressionScheme; return this; } @@ -244,7 +244,7 @@ public ConnectionProfile build() { throw new IllegalStateException("not all types are added for this connection profile - missing types: " + types); } return new ConnectionProfile(Collections.unmodifiableList(handles), numConnections, connectTimeout, handshakeTimeout, - pingInterval, compressionEnabled, indexingDataCompressionEnabled); + pingInterval, compressionEnabled, compressionScheme); } } @@ -271,19 +271,19 @@ public TimeValue getPingInterval() { } /** - * Returns boolean indicating if compression is enabled or null if no explicit compression + * Returns the compression enabled configuration or null if no explicit compression configuration * is set on this profile. */ - public Boolean getCompressionEnabled() { + public Compression.Enabled getCompressionEnabled() { return compressionEnabled; } /** - * Returns boolean indicating if indexing data compression is enabled or null if no explicit - * indexing data compression is set on this profile. + * Returns the configured compression scheme or null if no explicit + * compression scheme is set on this profile. 
*/ - public Boolean getIndexingDataCompressionEnabled() { - return indexingDataCompressionEnabled; + public Compression.Scheme getCompressionScheme() { + return compressionScheme; } /** diff --git a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java index d2ec61508c995..c3f251d6a3455 100644 --- a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java @@ -40,7 +40,7 @@ public int decompress(BytesReference bytesReference) throws IOException { int bytesConsumed = 0; if (hasSkippedHeader == false) { hasSkippedHeader = true; - int headerLength = Compression.HEADER_LENGTH; + int headerLength = Compression.Scheme.HEADER_LENGTH; bytesReference = bytesReference.slice(headerLength, bytesReference.length() - headerLength); bytesConsumed += headerLength; } diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 99e653ac0cf51..6218a81eba838 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -166,7 +166,7 @@ public int decompress(BytesReference bytesReference) throws IOException { int bytesConsumed = 0; if (hasSkippedESHeader == false) { hasSkippedESHeader = true; - int esHeaderLength = Compression.HEADER_LENGTH; + int esHeaderLength = Compression.Scheme.HEADER_LENGTH; bytesReference = bytesReference.slice(esHeaderLength, bytesReference.length() - esHeaderLength); bytesConsumed += esHeaderLength; } diff --git a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java index 5060ac8b3c02f..dee4416b222e1 100644 --- a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java @@ -64,6 +64,6 @@ boolean isError() { } private static Compression.Scheme adjustedScheme(Version version, Compression.Scheme compressionScheme) { - return compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.LZ4_VERSION) ? null : compressionScheme; + return compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.Scheme.LZ4_VERSION) ? 
null : compressionScheme; } } diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java index b2ec5c08da21d..b70d41947909a 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -91,7 +91,7 @@ private StreamOutput wrapCompressed(BytesStreamOutput bytesStream) throws IOExce if (compressionScheme == Compression.Scheme.DEFLATE) { return new OutputStreamStreamOutput(CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.noCloseStream(bytesStream))); } else if (compressionScheme == Compression.Scheme.LZ4) { - return new OutputStreamStreamOutput(Compression.lz4OutputStream(Streams.noCloseStream(bytesStream))); + return new OutputStreamStreamOutput(Compression.Scheme.lz4OutputStream(Streams.noCloseStream(bytesStream))); } else { throw new IllegalArgumentException("Invalid compression scheme: " + compressionScheme); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 46424304bc480..b1a3e2d0394c5 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -45,6 +45,7 @@ import java.util.stream.Stream; import static org.elasticsearch.common.settings.Setting.boolSetting; +import static org.elasticsearch.common.settings.Setting.enumSetting; import static org.elasticsearch.common.settings.Setting.timeSetting; /** @@ -89,16 +90,16 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl (ns, key) -> timeSetting(key, TransportSettings.PING_SCHEDULE, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope)); - public static final Setting.AffixSetting REMOTE_CLUSTER_COMPRESS = Setting.affixKeySetting( + public static final Setting.AffixSetting REMOTE_CLUSTER_COMPRESS = Setting.affixKeySetting( "cluster.remote.", "transport.compress", - (ns, key) -> boolSetting(key, TransportSettings.TRANSPORT_COMPRESS, + (ns, key) -> enumSetting(Compression.Enabled.class, key, TransportSettings.TRANSPORT_COMPRESS, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope)); - public static final Setting.AffixSetting REMOTE_CLUSTER_COMPRESS_INDEXING_DATA = Setting.affixKeySetting( + public static final Setting.AffixSetting REMOTE_CLUSTER_COMPRESSION_SCHEME = Setting.affixKeySetting( "cluster.remote.", - "transport.compress_indexing_data", - (ns, key) -> boolSetting(key, TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA, + "transport.compression_scheme", + (ns, key) -> enumSetting(Compression.Scheme.class, key, TransportSettings.TRANSPORT_COMPRESSION_SCHEME, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope)); private final boolean enabled; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java index 257f47426baa0..59796fd3cc926 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionStrategy.java @@ -125,7 +125,7 @@ static ConnectionProfile buildConnectionProfile(String clusterAlias, Settings se 
.setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) .setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)) .setCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace(clusterAlias).get(settings)) - .setIndexingDataCompressionEnabled(RemoteClusterService.REMOTE_CLUSTER_COMPRESS_INDEXING_DATA + .setCompressionScheme(RemoteClusterService.REMOTE_CLUSTER_COMPRESSION_SCHEME .getConcreteSettingForNamespace(clusterAlias).get(settings)) .setPingInterval(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace(clusterAlias).get(settings)) .addConnections(0, TransportRequestOptions.Type.BULK, TransportRequestOptions.Type.STATE, @@ -278,7 +278,10 @@ boolean shouldRebuildConnection(Settings newSettings) { if (newMode.equals(strategyType()) == false) { return true; } else { - Boolean compressionEnabled = RemoteClusterService.REMOTE_CLUSTER_COMPRESS + Compression.Enabled compressionEnabled = RemoteClusterService.REMOTE_CLUSTER_COMPRESS + .getConcreteSettingForNamespace(clusterAlias) + .get(newSettings); + Compression.Scheme compressionScheme = RemoteClusterService.REMOTE_CLUSTER_COMPRESSION_SCHEME .getConcreteSettingForNamespace(clusterAlias) .get(newSettings); TimeValue pingSchedule = RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE @@ -288,6 +291,7 @@ boolean shouldRebuildConnection(Settings newSettings) { ConnectionProfile oldProfile = connectionManager.getConnectionProfile(); ConnectionProfile.Builder builder = new ConnectionProfile.Builder(oldProfile); builder.setCompressionEnabled(compressionEnabled); + builder.setCompressionScheme(compressionScheme); builder.setPingInterval(pingSchedule); ConnectionProfile newProfile = builder.build(); return connectionProfileChanged(oldProfile, newProfile) || strategyMustBeRebuilt(newSettings); @@ -357,7 +361,7 @@ private List> getAndClearListeners() { private boolean connectionProfileChanged(ConnectionProfile oldProfile, ConnectionProfile newProfile) { return Objects.equals(oldProfile.getCompressionEnabled(), newProfile.getCompressionEnabled()) == false || Objects.equals(oldProfile.getPingInterval(), newProfile.getPingInterval()) == false - || Objects.equals(oldProfile.getIndexingDataCompressionEnabled(), newProfile.getIndexingDataCompressionEnabled()) == false; + || Objects.equals(oldProfile.getCompressionScheme(), newProfile.getCompressionScheme()) == false; } static class StrategyValidator implements Setting.Validator { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index cc420f7ba2b0c..f711100caa2a8 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -184,8 +184,7 @@ public final class NodeChannels extends CloseableConnection { private final List channels; private final DiscoveryNode node; private final Version version; - private final boolean compress; - private final boolean rawDataCompress; + private final Compression.Enabled compress; private final AtomicBoolean isClosing = new AtomicBoolean(false); NodeChannels(DiscoveryNode node, List channels, ConnectionProfile connectionProfile, Version handshakeVersion) { @@ -200,7 +199,6 @@ public final class NodeChannels extends CloseableConnection { } version = handshakeVersion; compress = connectionProfile.getCompressionEnabled(); - rawDataCompress = connectionProfile.getIndexingDataCompressionEnabled(); } @Override 
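
The hunk below is the behavioral core of the enum change: once the profile's compress
value is tri-state, the per-request decision can no longer be a plain boolean OR of two
flags. Extracted as a standalone sketch (the helper name is hypothetical; the logic
mirrors the added lines in the next hunk):

    // TRUE          -> compress every outbound request
    // INDEXING_DATA -> compress only requests that implement RawDataTransportRequest
    // FALSE         -> never compress
    static boolean shouldCompress(Compression.Enabled compress, TransportRequest request) {
        return compress == Compression.Enabled.TRUE
            || (compress == Compression.Enabled.INDEXING_DATA && request instanceof RawDataTransportRequest);
    }
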
@@ -245,7 +243,8 @@ public void sendRequest(long requestId, String action, TransportRequest request, throw new NodeNotConnectedException(node, "connection already closed"); } TcpChannel channel = channel(options.type()); - boolean shouldCompress = compress || (rawDataCompress && request instanceof RawDataTransportRequest); + boolean shouldCompress = compress == Compression.Enabled.TRUE || + (compress == Compression.Enabled.INDEXING_DATA && request instanceof RawDataTransportRequest); outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), shouldCompress, false); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java index 855f6432dac36..33524a239c902 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java @@ -31,21 +31,21 @@ static TransportDecompressor getDecompressor(PageCacheRecycler recycler, BytesRe } byte firstByte = bytes.get(0); byte[] header; - if (firstByte == Compression.DEFLATE_HEADER[0]) { - header = Compression.DEFLATE_HEADER; - } else if (firstByte == Compression.LZ4_HEADER[0]) { - header = Compression.LZ4_HEADER; + if (firstByte == Compression.Scheme.DEFLATE_HEADER[0]) { + header = Compression.Scheme.DEFLATE_HEADER; + } else if (firstByte == Compression.Scheme.LZ4_HEADER[0]) { + header = Compression.Scheme.LZ4_HEADER; } else { throw createIllegalState(bytes); } - for (int i = 1; i < Compression.HEADER_LENGTH; ++i) { + for (int i = 1; i < Compression.Scheme.HEADER_LENGTH; ++i) { if (bytes.get(i) != header[i]) { throw createIllegalState(bytes); } } - if (header == Compression.DEFLATE_HEADER) { + if (header == Compression.Scheme.DEFLATE_HEADER) { return new DeflateTransportDecompressor(recycler); } else { return new Lz4TransportDecompressor(recycler); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java index 4947699fb067e..650fd7c6054dc 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportSettings.java @@ -50,10 +50,8 @@ public final class TransportSettings { intSetting("transport.publish_port", -1, -1, Setting.Property.NodeScope); public static final Setting.AffixSetting PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port", key -> intSetting(key, -1, -1, Setting.Property.NodeScope)); - public static final Setting TRANSPORT_COMPRESS = - boolSetting("transport.compress", false, Setting.Property.NodeScope); - public static final Setting TRANSPORT_COMPRESS_INDEXING_DATA = - boolSetting("transport.compress_indexing_data", false, Setting.Property.NodeScope); + public static final Setting TRANSPORT_COMPRESS = + enumSetting(Compression.Enabled.class, "transport.compress", Compression.Enabled.FALSE, Setting.Property.NodeScope); public static final Setting TRANSPORT_COMPRESSION_SCHEME = enumSetting(Compression.Scheme.class, "transport.compression_scheme", Compression.Scheme.DEFLATE, Setting.Property.NodeScope); diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java index 9f1543e21d10f..e32b5f9f3b015 100644 --- 
a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java @@ -56,7 +56,7 @@ public void createConnectionManager() { TimeValue oneSecond = new TimeValue(1000); TimeValue oneMinute = TimeValue.timeValueMinutes(1); connectionProfile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, oneSecond, oneSecond, - oneMinute, false, false); + oneMinute, Compression.Enabled.FALSE, null); } @After diff --git a/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java b/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java index 6b1de351d6496..a894349d3fbf8 100644 --- a/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ConnectionProfileTests.java @@ -31,8 +31,10 @@ public void testBuildConnectionProfile() { TimeValue connectTimeout = TimeValue.timeValueMillis(randomIntBetween(1, 10)); TimeValue handshakeTimeout = TimeValue.timeValueMillis(randomIntBetween(1, 10)); TimeValue pingInterval = TimeValue.timeValueMillis(randomIntBetween(1, 10)); - boolean compressionEnabled = randomBoolean(); - boolean rawDataCompressionEnabled = randomBoolean(); + Compression.Enabled compressionEnabled = + randomFrom(Compression.Enabled.TRUE, Compression.Enabled.FALSE, Compression.Enabled.INDEXING_DATA); + Compression.Scheme compressionScheme = + randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); final boolean setConnectTimeout = randomBoolean(); if (setConnectTimeout) { builder.setConnectTimeout(connectTimeout); @@ -41,13 +43,15 @@ public void testBuildConnectionProfile() { if (setHandshakeTimeout) { builder.setHandshakeTimeout(handshakeTimeout); } + final boolean setCompress = randomBoolean(); if (setCompress) { builder.setCompressionEnabled(compressionEnabled); } - final boolean setRawDataCompress = randomBoolean(); - if (setRawDataCompress) { - builder.setIndexingDataCompressionEnabled(rawDataCompressionEnabled); + + final boolean setCompressionScheme = randomBoolean(); + if (setCompressionScheme) { + builder.setCompressionScheme(compressionScheme); } final boolean setPingInterval = randomBoolean(); if (setPingInterval) { @@ -86,10 +90,10 @@ public void testBuildConnectionProfile() { assertNull(build.getCompressionEnabled()); } - if (setRawDataCompress) { - assertEquals(rawDataCompressionEnabled, build.getIndexingDataCompressionEnabled()); + if (setCompressionScheme) { + assertEquals(compressionScheme, build.getCompressionScheme()); } else { - assertNull(build.getIndexingDataCompressionEnabled()); + assertNull(build.getCompressionScheme()); } if (setPingInterval) { @@ -182,11 +186,15 @@ public void testConnectionProfileResolve() { } final boolean connectionCompressSet = randomBoolean(); if (connectionCompressSet) { - builder.setCompressionEnabled(randomBoolean()); + Compression.Enabled compressionEnabled = + randomFrom(Compression.Enabled.TRUE, Compression.Enabled.FALSE, Compression.Enabled.INDEXING_DATA); + builder.setCompressionEnabled(compressionEnabled); } - final boolean connectionRawDataCompressSet = randomBoolean(); - if (connectionRawDataCompressSet) { - builder.setIndexingDataCompressionEnabled(randomBoolean()); + final boolean connectionCompressionScheme = randomBoolean(); + if (connectionCompressionScheme) { + Compression.Scheme compressionScheme = + randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); + 
builder.setCompressionScheme(compressionScheme); } final ConnectionProfile profile = builder.build(); @@ -203,9 +211,9 @@ public void testConnectionProfileResolve() { equalTo(pingIntervalSet ? profile.getPingInterval() : defaultProfile.getPingInterval())); assertThat(resolved.getCompressionEnabled(), equalTo(connectionCompressSet ? profile.getCompressionEnabled() : defaultProfile.getCompressionEnabled())); - assertThat(resolved.getIndexingDataCompressionEnabled(), - equalTo(connectionRawDataCompressSet ? profile.getIndexingDataCompressionEnabled() : - defaultProfile.getIndexingDataCompressionEnabled())); + assertThat(resolved.getCompressionScheme(), + equalTo(connectionCompressionScheme ? profile.getCompressionScheme() : + defaultProfile.getCompressionScheme())); } public void testDefaultConnectionProfile() { @@ -219,7 +227,7 @@ public void testDefaultConnectionProfile() { assertEquals(TransportSettings.CONNECT_TIMEOUT.get(Settings.EMPTY), profile.getConnectTimeout()); assertEquals(TransportSettings.CONNECT_TIMEOUT.get(Settings.EMPTY), profile.getHandshakeTimeout()); assertEquals(TransportSettings.TRANSPORT_COMPRESS.get(Settings.EMPTY), profile.getCompressionEnabled()); - assertEquals(TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA.get(Settings.EMPTY), profile.getIndexingDataCompressionEnabled()); + assertEquals(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.get(Settings.EMPTY), profile.getCompressionScheme()); assertEquals(TransportSettings.PING_SCHEDULE.get(Settings.EMPTY), profile.getPingInterval()); profile = ConnectionProfile.buildDefaultConnectionProfile(nonMasterNode()); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index 868166631dc82..6008ead34f01d 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -100,7 +100,7 @@ public void testPipelineHandling() throws IOException { if (randomBoolean()) { scheme = null; } else { - if (version.onOrAfter(Compression.LZ4_VERSION)) { + if (version.onOrAfter(Compression.Scheme.LZ4_VERSION)) { scheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); } else { scheme = Compression.Scheme.DEFLATE; diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index a70c641740005..c5d9e7aaa5a38 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -123,7 +123,7 @@ public void testSendRequest() throws IOException { long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); boolean compress; - if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.LZ4_VERSION)) { + if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.Scheme.LZ4_VERSION)) { compress = false; } else { compress = randomBoolean(); @@ -191,7 +191,7 @@ public void testSendResponse() throws IOException { long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); boolean compress; - if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.LZ4_VERSION)) { + if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.Scheme.LZ4_VERSION)) { compress = false; } else { compress = randomBoolean(); 
diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 9a4ddc1669dfd..a3fb319f4394c 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -369,11 +369,12 @@ public void testChangeSettings() throws Exception { Settings.Builder settingsChange = Settings.builder(); TimeValue pingSchedule = TimeValue.timeValueSeconds(randomIntBetween(6, 8)); settingsChange.put("cluster.remote.cluster_1.transport.ping_schedule", pingSchedule); - boolean indexingDataOption = randomBoolean(); - if (indexingDataOption) { - settingsChange.put("cluster.remote.cluster_1.transport.compress_indexing_data", true); + boolean compressionScheme = randomBoolean(); + Compression.Enabled enabled = randomFrom(Compression.Enabled.TRUE, Compression.Enabled.INDEXING_DATA); + if (compressionScheme) { + settingsChange.put("cluster.remote.cluster_1.transport.compression_scheme", Compression.Scheme.LZ4); } else { - settingsChange.put("cluster.remote.cluster_1.transport.compress", true); + settingsChange.put("cluster.remote.cluster_1.transport.compress", enabled); } settingsChange.putList("cluster.remote.cluster_1.seeds", cluster1Seed.getAddress().toString()); service.validateAndUpdateRemoteCluster("cluster_1", settingsChange.build()); @@ -382,12 +383,12 @@ public void testChangeSettings() throws Exception { remoteClusterConnection = service.getRemoteClusterConnection("cluster_1"); ConnectionProfile connectionProfile = remoteClusterConnection.getConnectionManager().getConnectionProfile(); assertEquals(pingSchedule, connectionProfile.getPingInterval()); - if (indexingDataOption) { - assertEquals(false, connectionProfile.getCompressionEnabled()); - assertEquals(true, connectionProfile.getIndexingDataCompressionEnabled()); + if (compressionScheme) { + assertEquals(Compression.Enabled.FALSE, connectionProfile.getCompressionEnabled()); + assertEquals(Compression.Scheme.LZ4, connectionProfile.getCompressionScheme()); } else { - assertEquals(true, connectionProfile.getCompressionEnabled()); - assertEquals(false, connectionProfile.getIndexingDataCompressionEnabled()); + assertEquals(enabled, connectionProfile.getCompressionEnabled()); + assertEquals(Compression.Scheme.DEFLATE, connectionProfile.getCompressionScheme()); } } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java index 206ccaaf883be..0ecfaf3676cb4 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java @@ -45,8 +45,8 @@ public void testSameStrategyChangeMeansThatStrategyDoesNotNeedToBeRebuilt() { public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { ClusterConnectionManager connectionManager = new ClusterConnectionManager(TestProfiles.LIGHT_PROFILE, mock(Transport.class)); assertEquals(TimeValue.MINUS_ONE, connectionManager.getConnectionProfile().getPingInterval()); - assertEquals(false, connectionManager.getConnectionProfile().getCompressionEnabled()); - assertEquals(false, connectionManager.getConnectionProfile().getIndexingDataCompressionEnabled()); + assertEquals(Compression.Enabled.FALSE, 
connectionManager.getConnectionProfile().getCompressionEnabled()); + assertEquals(Compression.Scheme.DEFLATE, connectionManager.getConnectionProfile().getCompressionScheme()); RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); FakeConnectionStrategy first = new FakeConnectionStrategy("cluster-alias", mock(TransportService.class), remoteConnectionManager, RemoteConnectionStrategy.ConnectionStrategy.PROXY); @@ -56,17 +56,18 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { newBuilder.put(ProxyConnectionStrategy.PROXY_ADDRESS.getConcreteSettingForNamespace("cluster-alias").getKey(), "127.0.0.1:9300"); String ping = "ping"; String compress = "compress"; - String rawDataCompress = "raw_data_compress"; - String change = randomFrom(ping, compress, rawDataCompress); + String compressionScheme = "compression_scheme"; + String change = randomFrom(ping, compress, compressionScheme); if (change.equals(ping)) { newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE.getConcreteSettingForNamespace("cluster-alias").getKey(), TimeValue.timeValueSeconds(5)); } else if (change.equals(compress)) { - newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("cluster-alias").getKey(), true); - } else if (change.equals(rawDataCompress)) { + newBuilder.put(RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("cluster-alias").getKey(), + randomFrom(Compression.Enabled.INDEXING_DATA, Compression.Enabled.TRUE)); + } else if (change.equals(compressionScheme)) { newBuilder.put( - RemoteClusterService.REMOTE_CLUSTER_COMPRESS_INDEXING_DATA.getConcreteSettingForNamespace("cluster-alias").getKey(), - true + RemoteClusterService.REMOTE_CLUSTER_COMPRESSION_SCHEME.getConcreteSettingForNamespace("cluster-alias").getKey(), + Compression.Scheme.LZ4 ); } else { throw new AssertionError("Unexpected option: " + change); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 3eff4220609c1..64ee7d36f2757 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -142,6 +142,7 @@ import static org.elasticsearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE; import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; +import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.NodeRoles.dataOnlyNode; import static org.elasticsearch.test.NodeRoles.masterOnlyNode; @@ -440,8 +441,15 @@ public Collection> getPlugins() { private static Settings getRandomNodeSettings(long seed) { Random random = new Random(seed); Builder builder = Settings.builder(); - builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), rarely(random)); - builder.put(TransportSettings.TRANSPORT_COMPRESS_INDEXING_DATA.getKey(), random.nextBoolean()); + if (randomBoolean()) { + builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.FALSE); + } else { + if (rarely(random)) { + builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.TRUE); + } else { + builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.INDEXING_DATA); + } + } if 
(random.nextBoolean()) { builder.put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), Compression.Scheme.DEFLATE); } else { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 3ae53712b0a63..bb875b8257397 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -556,7 +556,7 @@ public void testVoidMessageCompressed() throws Exception { }); Settings settingsWithCompress = Settings.builder() - .put(TransportSettings.TRANSPORT_COMPRESS.getKey(), true) + .put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.TRUE) .put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4)) .build(); @@ -602,7 +602,9 @@ public void testHelloWorldCompressed() throws Exception { } }); - Settings settingsWithCompress = Settings.builder().put(TransportSettings.TRANSPORT_COMPRESS.getKey(), true).build(); + Settings settingsWithCompress = Settings.builder() + .put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.TRUE) + .build(); ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); connectToNode(serviceC, serviceA.getLocalDiscoNode(), connectionProfile); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java b/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java index ae53e4a57e7f8..637f09943b185 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/TestProfiles.java @@ -26,7 +26,7 @@ private TestProfiles() {} builder.setConnectTimeout(source.getConnectTimeout()); builder.setHandshakeTimeout(source.getHandshakeTimeout()); builder.setCompressionEnabled(source.getCompressionEnabled()); - builder.setIndexingDataCompressionEnabled(source.getIndexingDataCompressionEnabled()); + builder.setCompressionScheme(source.getCompressionScheme()); builder.setPingInterval(source.getPingInterval()); builder.addConnections(1, TransportRequestOptions.Type.BULK, diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index ed5023c6075d3..31ea88e8421f0 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -171,7 +171,7 @@ protected ConnectionProfile maybeOverrideConnectionProfile(ConnectionProfile con builder.setConnectTimeout(connectionProfile.getConnectTimeout()); builder.setPingInterval(connectionProfile.getPingInterval()); builder.setCompressionEnabled(connectionProfile.getCompressionEnabled()); - builder.setIndexingDataCompressionEnabled(connectionProfile.getIndexingDataCompressionEnabled()); + builder.setCompressionScheme(connectionProfile.getCompressionScheme()); return builder.build(); } diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 0c06b9df5f32a..2e2ddba38ba2b 100644 --- 
a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -78,6 +78,7 @@ import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.Compression; import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.SniffConnectionStrategy; @@ -1321,7 +1322,8 @@ public void testUpdateRemoteConfigsDuringFollowing() throws Exception { ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); - Setting<Boolean> compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); + Setting<Compression.Enabled> compress = + RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); Setting<List<String>> seeds = SniffConnectionStrategy.REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace("leader_cluster"); settingsRequest.persistentSettings(Settings.builder().put(compress.getKey(), true).put(seeds.getKey(), address)); assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); @@ -1352,7 +1354,8 @@ public void testUpdateRemoteConfigsDuringFollowing() throws Exception { } finally { ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest().masterNodeTimeout(TimeValue.MAX_VALUE); String address = getLeaderCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); - Setting<Boolean> compress = RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); + Setting<Compression.Enabled> compress = + RemoteClusterService.REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("leader_cluster"); Setting<List<String>> seeds = SniffConnectionStrategy.REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace("leader_cluster"); settingsRequest.persistentSettings(Settings.builder().put(compress.getKey(), compress.getDefault(Settings.EMPTY)) .put(seeds.getKey(), address)); From c45326fd16a2393dd01dfac9e655d5657ed70a84 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 18:00:57 -0600 Subject: [PATCH 23/29] Changes --- .../modules/remote-clusters.asciidoc | 9 +- docs/reference/modules/transport.asciidoc | 17 +- .../transport/Lz4TransportDecompressor.java | 8 +- .../transport/RawDataTransportRequest.java | 5 + .../transport/TLLZ4BlockOutputStream.java | 325 ++++++++++++++++++ .../ClusterConnectionManagerTests.java | 2 +- .../test/InternalTestCluster.java | 10 +- 7 files changed, 361 insertions(+), 15 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/transport/TLLZ4BlockOutputStream.java diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 923580133a76d..ef9aee6edfcd4 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -288,11 +288,12 @@ separately. `cluster.remote.<cluster_alias>.transport.compress`:: - Per cluster boolean setting that enables you to configure compression for - requests to a specific remote cluster.
This setting impacts only requests + Per cluster setting that enables you to configure compression for requests + to a specific remote cluster. This setting impacts only requests sent to the remote cluster. If the inbound request is compressed, - Elasticsearch compresses the response. If unset, the global - `transport.compress` is used as the fallback setting. + Elasticsearch compresses the response. The setting options are `true`, + `indexing_data`, and `false`. The option `indexing_data` is experimental. + If unset, the global `transport.compress` is used as the fallback setting. [discrete] [[remote-cluster-sniff-settings]] diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 58727ca9f2a32..be70b7e30c61a 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -49,8 +49,16 @@ time setting format). Defaults to `30s`. `transport.compress`:: (<>) -Set to `true` to enable compression (`DEFLATE`) between -all nodes. Defaults to `false`. +Set to `true`, `indexing_data`, or `false` to configure transport compression +between nodes. The option `true` will compress all data. The option +`indexing_data` will compress only the raw index data sent between nodes during +ingest, ccr following, and shard recovery. The option `indexing_data` is +experimental. Defaults to `false`. + +`transport.compress_scheme`:: +(<>) +Configures the compression scheme for `transport.compress`. The options are +`deflate` or `lz4`. The option `lz4` is experimental. Defaults to `deflate`. `transport.ping_schedule`:: (<>) @@ -172,6 +180,11 @@ normally makes sense for local cluster communication as compression has a noticeable CPU cost and local clusters tend to be set up with fast network connections between nodes. +The `transport.compress` configuration option `indexing_data` will only +compress requests that relate to the transport of raw indexing source data +between nodes. This is primarily the case for ingest, ccr, and shard recovery +traffic. This option is experimental. + The `transport.compress` setting always configures local cluster request compression and is the fallback setting for remote cluster request compression. If you want to configure remote request compression differently than local diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 6218a81eba838..e390f42995399 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -50,7 +50,8 @@ */ public class Lz4TransportDecompressor implements TransportDecompressor { - private final ThreadLocal uncompressed = ThreadLocal.withInitial(() -> new byte[64 * 1024]); + private final ThreadLocal uncompressed = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); + private final ThreadLocal compressed = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); /** * Magic number of LZ4 block. 
@@ -337,11 +338,12 @@ private int decodeBlock(BytesReference reference) throws IOException { } private byte[] getCompressedBuffer(int requiredSize) { + byte[] compressedBuffer = this.compressed.get(); if (compressedBuffer.length >= requiredSize) { return compressedBuffer; } else { - this.compressedBuffer = new byte[requiredSize]; - return compressedBuffer; + this.compressed.set(new byte[requiredSize]); + return this.compressed.get(); } } diff --git a/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java b/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java index bf87290fdd3e6..647ead226e375 100644 --- a/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java @@ -8,5 +8,10 @@ package org.elasticsearch.transport; +/** + * Requests that implement this interface will be compressed when {@link TransportSettings#TRANSPORT_COMPRESS} + * is configured to {@link Compression.Enabled#INDEXING_DATA}. This is primarily intended for + * requests/responses composed mostly of raw source data. + */ public interface RawDataTransportRequest { } diff --git a/server/src/main/java/org/elasticsearch/transport/TLLZ4BlockOutputStream.java b/server/src/main/java/org/elasticsearch/transport/TLLZ4BlockOutputStream.java new file mode 100644 index 0000000000000..80e860e1add8f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/TLLZ4BlockOutputStream.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +/* + * Copyright 2020 Adrien Grand and the lz4-java contributors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.elasticsearch.transport; + +import java.io.FilterOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.zip.Checksum; + +import net.jpountz.lz4.LZ4BlockInputStream; +import net.jpountz.lz4.LZ4Compressor; +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4FrameOutputStream; +import net.jpountz.util.SafeUtils; +import net.jpountz.xxhash.StreamingXXHash32; +import net.jpountz.xxhash.XXHashFactory; + +import org.apache.lucene.util.BytesRef; + +/** + * Streaming LZ4 (not compatible with the LZ4 Frame format). + * This class compresses data into fixed-size blocks of compressed data. + * This class uses its own format and is not compatible with the LZ4 Frame format. + * For interoperability with other LZ4 tools, use {@link LZ4FrameOutputStream}, + * which is compatible with the LZ4 Frame format. This class remains for backward compatibility.
+ * @see LZ4BlockInputStream + * @see LZ4FrameOutputStream + */ +public class TLLZ4BlockOutputStream extends FilterOutputStream { + + private static class ArrayBox { + private byte[] uncompressed = BytesRef.EMPTY_BYTES; + private byte[] compressed = BytesRef.EMPTY_BYTES; + private boolean owned = false; + + private void markOwnership(int uncompressedBlockSize, int compressedMaxSize) { + assert owned == false; + owned = true; + if (uncompressedBlockSize > uncompressed.length) { + uncompressed = new byte[uncompressedBlockSize]; + } + if (compressedMaxSize > compressed.length) { + compressed = new byte[compressedMaxSize]; + } + } + + private void release() { + owned = false; + } + } + + private final ThreadLocal threadLocalArrays = ThreadLocal.withInitial(ArrayBox::new); + + static final byte[] MAGIC = new byte[] { 'L', 'Z', '4', 'B', 'l', 'o', 'c', 'k' }; + static final int MAGIC_LENGTH = MAGIC.length; + + static final int HEADER_LENGTH = + MAGIC_LENGTH // magic bytes + + 1 // token + + 4 // compressed length + + 4 // decompressed length + + 4; // checksum + + static final int COMPRESSION_LEVEL_BASE = 10; + static final int MIN_BLOCK_SIZE = 64; + static final int MAX_BLOCK_SIZE = 1 << (COMPRESSION_LEVEL_BASE + 0x0F); + + static final int COMPRESSION_METHOD_RAW = 0x10; + static final int COMPRESSION_METHOD_LZ4 = 0x20; + + static final int DEFAULT_SEED = 0x9747b28c; + + private static int compressionLevel(int blockSize) { + if (blockSize < MIN_BLOCK_SIZE) { + throw new IllegalArgumentException("blockSize must be >= " + MIN_BLOCK_SIZE + ", got " + blockSize); + } else if (blockSize > MAX_BLOCK_SIZE) { + throw new IllegalArgumentException("blockSize must be <= " + MAX_BLOCK_SIZE + ", got " + blockSize); + } + int compressionLevel = 32 - Integer.numberOfLeadingZeros(blockSize - 1); // ceil of log2 + assert (1 << compressionLevel) >= blockSize; + assert blockSize * 2 > (1 << compressionLevel); + compressionLevel = Math.max(0, compressionLevel - COMPRESSION_LEVEL_BASE); + assert compressionLevel >= 0 && compressionLevel <= 0x0F; + return compressionLevel; + } + + private final int blockSize; + private final int compressionLevel; + private final LZ4Compressor compressor; + private final Checksum checksum; + private final ArrayBox arrayBox; + private final byte[] buffer; + private final byte[] compressedBuffer; + private final boolean syncFlush; + private boolean finished; + private int o; + + /** + * Creates a new {@link OutputStream} with configurable block size. Large + * blocks require more memory at compression and decompression time but + * should improve the compression ratio. + * + * @param out the {@link OutputStream} to feed + * @param blockSize the maximum number of bytes to try to compress at once, + * must be >= 64 and <= 32 M + * @param compressor the {@link LZ4Compressor} instance to use to compress + * data + * @param checksum the {@link Checksum} instance to use to check data for + * integrity. 
+ * @param syncFlush true if pending data should also be flushed on {@link #flush()} + */ + public TLLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, boolean syncFlush) { + super(out); + this.blockSize = blockSize; + this.compressor = compressor; + this.checksum = checksum; + this.compressionLevel = compressionLevel(blockSize); + final int compressedBlockSize = HEADER_LENGTH + compressor.maxCompressedLength(blockSize); + this.arrayBox = threadLocalArrays.get(); + arrayBox.markOwnership(blockSize, compressedBlockSize); + this.buffer = arrayBox.uncompressed; + this.compressedBuffer = arrayBox.compressed; + this.syncFlush = syncFlush; + o = 0; + finished = false; + System.arraycopy(MAGIC, 0, compressedBuffer, 0, MAGIC_LENGTH); + } + + /** + * Creates a new instance which checks stream integrity using + * {@link StreamingXXHash32} and doesn't sync flush. + * + * @param out the {@link OutputStream} to feed + * @param blockSize the maximum number of bytes to try to compress at once, + * must be >= 64 and <= 32 M + * @param compressor the {@link LZ4Compressor} instance to use to compress + * data + * + * @see #TLLZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean) + * @see StreamingXXHash32#asChecksum() + */ + public TLLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) { + this(out, blockSize, compressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), false); + } + + /** + * Creates a new instance which compresses with the standard LZ4 compression + * algorithm. + * + * @param out the {@link OutputStream} to feed + * @param blockSize the maximum number of bytes to try to compress at once, + * must be >= 64 and <= 32 M + * + * @see #TLLZ4BlockOutputStream(OutputStream, int, LZ4Compressor) + * @see LZ4Factory#fastCompressor() + */ + public TLLZ4BlockOutputStream(OutputStream out, int blockSize) { + this(out, blockSize, LZ4Factory.fastestInstance().fastCompressor()); + } + + /** + * Creates a new instance which compresses into blocks of 64 KB. 
+ * + * @param out the {@link OutputStream} to feed + * + * @see #TLLZ4BlockOutputStream(OutputStream, int) + */ + public TLLZ4BlockOutputStream(OutputStream out) { + this(out, 1 << 16); + } + + private void ensureNotFinished() { + if (finished) { + throw new IllegalStateException("This stream is already closed"); + } + } + + @Override + public void write(int b) throws IOException { + ensureNotFinished(); + if (o == blockSize) { + flushBufferedData(); + } + buffer[o++] = (byte) b; + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + SafeUtils.checkRange(b, off, len); + ensureNotFinished(); + + while (o + len > blockSize) { + final int l = blockSize - o; + System.arraycopy(b, off, buffer, o, blockSize - o); + o = blockSize; + flushBufferedData(); + off += l; + len -= l; + } + System.arraycopy(b, off, buffer, o, len); + o += len; + } + + @Override + public void write(byte[] b) throws IOException { + ensureNotFinished(); + write(b, 0, b.length); + } + + @Override + public void close() throws IOException { + try { + if (finished == false) { + finish(); + } + if (out != null) { + out.close(); + out = null; + } + } finally { + arrayBox.release(); + } + } + + private void flushBufferedData() throws IOException { + if (o == 0) { + return; + } + checksum.reset(); + checksum.update(buffer, 0, o); + final int check = (int) checksum.getValue(); + int compressedLength = compressor.compress(buffer, 0, o, compressedBuffer, HEADER_LENGTH); + final int compressMethod; + if (compressedLength >= o) { + compressMethod = COMPRESSION_METHOD_RAW; + compressedLength = o; + System.arraycopy(buffer, 0, compressedBuffer, HEADER_LENGTH, o); + } else { + compressMethod = COMPRESSION_METHOD_LZ4; + } + + compressedBuffer[MAGIC_LENGTH] = (byte) (compressMethod | compressionLevel); + writeIntLE(compressedLength, compressedBuffer, MAGIC_LENGTH + 1); + writeIntLE(o, compressedBuffer, MAGIC_LENGTH + 5); + writeIntLE(check, compressedBuffer, MAGIC_LENGTH + 9); + assert MAGIC_LENGTH + 13 == HEADER_LENGTH; + out.write(compressedBuffer, 0, HEADER_LENGTH + compressedLength); + o = 0; + } + + /** + * Flushes this compressed {@link OutputStream}. + * + * If the stream has been created with syncFlush=true, pending + * data will be compressed and appended to the underlying {@link OutputStream} + * before calling {@link OutputStream#flush()} on the underlying stream. + * Otherwise, this method just flushes the underlying stream, so pending + * data might not be available for reading until {@link #finish()} or + * {@link #close()} is called. + */ + @Override + public void flush() throws IOException { + if (out != null) { + if (syncFlush) { + flushBufferedData(); + } + out.flush(); + } + } + + /** + * Same as {@link #close()} except that it doesn't close the underlying stream. + * This can be useful if you want to keep on using the underlying stream. + * + * @throws IOException if an I/O error occurs. 
+ */ + public void finish() throws IOException { + ensureNotFinished(); + flushBufferedData(); + compressedBuffer[MAGIC_LENGTH] = (byte) (COMPRESSION_METHOD_RAW | compressionLevel); + writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 1); + writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 5); + writeIntLE(0, compressedBuffer, MAGIC_LENGTH + 9); + assert MAGIC_LENGTH + 13 == HEADER_LENGTH; + out.write(compressedBuffer, 0, HEADER_LENGTH); + finished = true; + out.flush(); + } + + private static void writeIntLE(int i, byte[] buf, int off) { + buf[off++] = (byte) i; + buf[off++] = (byte) (i >>> 8); + buf[off++] = (byte) (i >>> 16); + buf[off++] = (byte) (i >>> 24); + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(out=" + out + ", blockSize=" + blockSize + + ", compressor=" + compressor + ", checksum=" + checksum + ")"; + } + +} + diff --git a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java index e32b5f9f3b015..43d294489d4ab 100644 --- a/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ClusterConnectionManagerTests.java @@ -56,7 +56,7 @@ public void createConnectionManager() { TimeValue oneSecond = new TimeValue(1000); TimeValue oneMinute = TimeValue.timeValueMinutes(1); connectionProfile = ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, oneSecond, oneSecond, - oneMinute, Compression.Enabled.FALSE, null); + oneMinute, Compression.Enabled.FALSE, Compression.Scheme.DEFLATE); } @After diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 64ee7d36f2757..40f7aa79e0c47 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -15,6 +15,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; @@ -44,26 +45,26 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.core.Releasables; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import 
org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -142,7 +143,6 @@ import static org.elasticsearch.discovery.FileBasedSeedHostsProvider.UNICAST_HOSTS_FILE; import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.ESTestCase.assertBusy; -import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomFrom; import static org.elasticsearch.test.NodeRoles.dataOnlyNode; import static org.elasticsearch.test.NodeRoles.masterOnlyNode; @@ -441,7 +441,7 @@ public Collection> getPlugins() { private static Settings getRandomNodeSettings(long seed) { Random random = new Random(seed); Builder builder = Settings.builder(); - if (randomBoolean()) { + if (random.nextBoolean()) { builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.FALSE); } else { if (rarely(random)) { From 97047ec53032c1621ad3c3ead400f899a2d25b93 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 18:21:34 -0600 Subject: [PATCH 24/29] Changes --- .../DeflateTransportDecompressor.java | 13 +- .../transport/Lz4TransportDecompressor.java | 4 +- .../Lz4TransportDecompressorTests.java | 115 ++++++++++++++++++ 3 files changed, 122 insertions(+), 10 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java diff --git a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java index c3f251d6a3455..04fe07d50880b 100644 --- a/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/DeflateTransportDecompressor.java @@ -52,24 +52,23 @@ public int decompress(BytesReference bytesReference) throws IOException { bytesConsumed += ref.length; boolean continueInflating = true; while (continueInflating) { - final Recycler.V page; final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; if (isNewPage) { pageOffset = 0; - page = recycler.bytePage(false); - } else { - page = pages.getLast(); + pages.add(recycler.bytePage(false)); } + final Recycler.V page = pages.getLast(); + byte[] output = page.v(); try { int bytesInflated = inflater.inflate(output, pageOffset, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); pageOffset += bytesInflated; if (isNewPage) { if (bytesInflated == 0) { - page.close(); + Recycler.V removed = pages.pollLast(); + assert removed == page; + removed.close(); pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - } else { - pages.add(page); } } } catch (DataFormatException e) { diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index e390f42995399..b4a1c2b01462a 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -126,7 +126,6 @@ private enum State { private final PageCacheRecycler recycler; private final ArrayDeque> 
pages; private int pageOffset = PageCacheRecycler.BYTE_PAGE_SIZE; - private byte[] compressedBuffer = BytesRef.EMPTY_BYTES; private boolean hasSkippedESHeader = false; public Lz4TransportDecompressor(PageCacheRecycler recycler) { @@ -304,13 +303,12 @@ private int decodeBlock(BytesReference reference) throws IOException { int bytesToCopy = decompressedLength; int uncompressedOffset = 0; while (bytesToCopy > 0) { - final Recycler.V page; final boolean isNewPage = pageOffset == PageCacheRecycler.BYTE_PAGE_SIZE; if (isNewPage) { pageOffset = 0; pages.add(recycler.bytePage(false)); } - page = pages.getLast(); + final Recycler.V page = pages.getLast(); int toCopy = Math.min(bytesToCopy, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); System.arraycopy(uncompressed, uncompressedOffset, page.v(), pageOffset, toCopy); diff --git a/server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java b/server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java new file mode 100644 index 0000000000000..6ac6f3eaa0ce4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.CompositeBytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.OutputStream; + +public class Lz4TransportDecompressorTests extends ESTestCase { + + public void testSimpleCompression() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + byte randomByte = randomByte(); + try (OutputStream lz4BlockStream = Compression.Scheme.lz4OutputStream(Streams.noCloseStream(output))) { + lz4BlockStream.write(randomByte); + } + + BytesReference bytes = output.bytes(); + + Lz4TransportDecompressor decompressor = new Lz4TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); + int bytesConsumed = decompressor.decompress(bytes); + assertEquals(bytes.length(), bytesConsumed); + ReleasableBytesReference releasableBytesReference = decompressor.pollDecompressedPage(true); + assertEquals(randomByte, releasableBytesReference.get(0)); + releasableBytesReference.close(); + + } + } + + public void testMultiPageCompression() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + try (StreamOutput lz4BlockStream = new OutputStreamStreamOutput(Compression.Scheme.lz4OutputStream( + Streams.flushOnCloseStream(output)))) { + for (int i = 0; i < 10000; ++i) { + lz4BlockStream.writeInt(i); + } + } + + BytesReference bytes = output.bytes(); + + Lz4TransportDecompressor decompressor = new 
Lz4TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); + int bytesConsumed = decompressor.decompress(bytes); + assertEquals(bytes.length(), bytesConsumed); + ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(true); + assertNull(decompressor.pollDecompressedPage(true)); + BytesReference composite = CompositeBytesReference.of(reference1, reference2, reference3); + assertEquals(4 * 10000, composite.length()); + StreamInput streamInput = composite.streamInput(); + for (int i = 0; i < 10000; ++i) { + assertEquals(i, streamInput.readInt()); + } + Releasables.close(reference1, reference2, reference3); + } + } + + public void testIncrementalMultiPageCompression() throws IOException { + try (BytesStreamOutput output = new BytesStreamOutput()) { + try (StreamOutput lz4BlockStream = new OutputStreamStreamOutput( + Compression.Scheme.lz4OutputStream(Streams.flushOnCloseStream(output)))) { + for (int i = 0; i < 10000; ++i) { + lz4BlockStream.writeInt(i); + } + } + + BytesReference bytes = output.bytes(); + + Lz4TransportDecompressor decompressor = new Lz4TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); + + int split1 = (int) (bytes.length() * 0.3); + int split2 = (int) (bytes.length() * 0.65); + BytesReference inbound1 = bytes.slice(0, split1); + BytesReference inbound2 = bytes.slice(split1, split2 - split1); + BytesReference inbound3 = bytes.slice(split2, bytes.length() - split2); + + int bytesConsumed1 = decompressor.decompress(inbound1); + BytesReference next = CompositeBytesReference.of(inbound1.slice(bytesConsumed1, inbound1.length() - bytesConsumed1), inbound2); + int bytesConsumed2 = decompressor.decompress(next); + BytesReference next2 = CompositeBytesReference.of(next.slice(bytesConsumed2, next.length() - bytesConsumed2), inbound3); + int bytesConsumed3 = decompressor.decompress(next2); + + assertEquals(bytes.length(), bytesConsumed1 + bytesConsumed2 + bytesConsumed3); + ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(false); + ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(true); + assertNull(decompressor.pollDecompressedPage(false)); + BytesReference composite = CompositeBytesReference.of(reference1, reference2, reference3); + assertEquals(4 * 10000, composite.length()); + StreamInput streamInput = composite.streamInput(); + for (int i = 0; i < 10000; ++i) { + assertEquals(i, streamInput.readInt()); + } + Releasables.close(reference1, reference2, reference3); + + } + } +} From 4300ca14f9df61655cd142e67a0807b091f96065 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 18:40:30 -0600 Subject: [PATCH 25/29] Changes --- .../elasticsearch/transport/Compression.java | 4 +-- .../transport/Lz4TransportDecompressor.java | 16 +++++----- ... 
=> ReuseBuffersLZ4BlockOutputStream.java} | 29 ++++++++++++------- 3 files changed, 28 insertions(+), 21 deletions(-) rename server/src/main/java/org/elasticsearch/transport/{TLLZ4BlockOutputStream.java => ReuseBuffersLZ4BlockOutputStream.java} (89%) diff --git a/server/src/main/java/org/elasticsearch/transport/Compression.java b/server/src/main/java/org/elasticsearch/transport/Compression.java index 9412870ce0368..996e4b41a1318 100644 --- a/server/src/main/java/org/elasticsearch/transport/Compression.java +++ b/server/src/main/java/org/elasticsearch/transport/Compression.java @@ -8,8 +8,6 @@ package org.elasticsearch.transport; -import net.jpountz.lz4.LZ4BlockOutputStream; - import net.jpountz.lz4.LZ4Factory; import org.elasticsearch.Version; @@ -47,7 +45,7 @@ public enum Scheme { public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { outputStream.write(LZ4_HEADER); // 16KB block size to minimize the allocation of large buffers - return new LZ4BlockOutputStream(outputStream, LZ4_BLOCK_SIZE, LZ4Factory.safeInstance().fastCompressor()); + return new ReuseBuffersLZ4BlockOutputStream(outputStream, LZ4_BLOCK_SIZE, LZ4Factory.safeInstance().fastCompressor()); } } diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index b4a1c2b01462a..cb4baf9f8317d 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -41,7 +41,7 @@ import java.util.zip.Checksum; /** - * This file is forked from the https://netty.io project. In particular it forks the follow file + * This file is forked from the https://netty.io project. In particular it forks the following file * io.netty.handler.codec.compression.Lz4FrameDecoder. * * It modifies the original netty code to operate on byte arrays opposed to ByteBufs. @@ -50,8 +50,8 @@ */ public class Lz4TransportDecompressor implements TransportDecompressor { - private final ThreadLocal uncompressed = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); - private final ThreadLocal compressed = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); + private static final ThreadLocal UNCOMPRESSED = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); + private static final ThreadLocal COMPRESSED = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); /** * Magic number of LZ4 block. 
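This patch also promotes the scratch buffers to statics, shared per thread across decompressor instances, and in the rename diff that follows it guards the output stream's buffer pair with an ownership flag so that nested use on one thread fails fast instead of silently corrupting data. A hedged sketch of that guard follows; BufferPair, acquire, and release are illustrative names, not from the patch.

    // Sketch of a per-thread owned buffer pair in the spirit of the ArrayBox
    // shown below. A stream acquires the pair for its whole lifetime and
    // releases it on close; the assert (with -ea) trips if two live streams on
    // one thread would end up sharing the same arrays.
    final class BufferPair {

        private static final ThreadLocal<BufferPair> PAIR = ThreadLocal.withInitial(BufferPair::new);

        private byte[] uncompressed = new byte[0];
        private byte[] compressed = new byte[0];
        private boolean owned;

        static BufferPair acquire(int uncompressedSize, int compressedSize) {
            BufferPair pair = PAIR.get();
            assert pair.owned == false : "nested use of the thread-local buffers";
            pair.owned = true;
            // Grow-only: keep the largest arrays this thread has ever needed.
            if (pair.uncompressed.length < uncompressedSize) {
                pair.uncompressed = new byte[uncompressedSize];
            }
            if (pair.compressed.length < compressedSize) {
                pair.compressed = new byte[compressedSize];
            }
            return pair;
        }

        void release() {
            owned = false;
        }

        public static void main(String[] args) {
            BufferPair pair = BufferPair.acquire(64 * 1024, 64 * 1024 + 13);
            try {
                System.out.println(pair.uncompressed.length); // 65536
            } finally {
                pair.release(); // the next stream on this thread reuses the arrays
            }
        }
    }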
@@ -252,10 +252,10 @@ private int decodeBlock(BytesReference reference) throws IOException { } final Checksum checksum = this.checksum; - byte[] uncompressed = this.uncompressed.get(); + byte[] uncompressed = UNCOMPRESSED.get(); if (decompressedLength > uncompressed.length) { uncompressed = new byte[decompressedLength]; - this.uncompressed.set(uncompressed); + UNCOMPRESSED.set(uncompressed); } try { @@ -336,12 +336,12 @@ private int decodeBlock(BytesReference reference) throws IOException { } private byte[] getCompressedBuffer(int requiredSize) { - byte[] compressedBuffer = this.compressed.get(); + byte[] compressedBuffer = COMPRESSED.get(); if (compressedBuffer.length >= requiredSize) { return compressedBuffer; } else { - this.compressed.set(new byte[requiredSize]); - return this.compressed.get(); + COMPRESSED.set(new byte[requiredSize]); + return COMPRESSED.get(); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TLLZ4BlockOutputStream.java b/server/src/main/java/org/elasticsearch/transport/ReuseBuffersLZ4BlockOutputStream.java similarity index 89% rename from server/src/main/java/org/elasticsearch/transport/TLLZ4BlockOutputStream.java rename to server/src/main/java/org/elasticsearch/transport/ReuseBuffersLZ4BlockOutputStream.java index 80e860e1add8f..ed0c02f4ed035 100644 --- a/server/src/main/java/org/elasticsearch/transport/TLLZ4BlockOutputStream.java +++ b/server/src/main/java/org/elasticsearch/transport/ReuseBuffersLZ4BlockOutputStream.java @@ -39,6 +39,14 @@ import org.apache.lucene.util.BytesRef; /** + * This file is forked from https://github.com/lz4/lz4-java. In particular it forks the following file + * net.jpountz.lz4.LZ4BlockOutputStream. + * + * It modifies the original lz4-java code to allow the reuse of thread-local byte arrays. This prevents + * the need to allocate two new byte arrays every time a new stream is created. For the Elasticsearch use case, + * a single thread should fully compress the stream in one go to avoid memory corruption. + * + * * Streaming LZ4 (not compatible with the LZ4 Frame format). * This class compresses data into fixed-size blocks of compressed data. * This class uses its own format and is not compatible with the LZ4 Frame format. @@ -47,7 +55,7 @@ * @see LZ4BlockInputStream * @see LZ4FrameOutputStream */ -public class TLLZ4BlockOutputStream extends FilterOutputStream { +public class ReuseBuffersLZ4BlockOutputStream extends FilterOutputStream { private static class ArrayBox { private byte[] uncompressed = BytesRef.EMPTY_BYTES; @@ -70,7 +78,7 @@ private void release() { } } - private final ThreadLocal<ArrayBox> threadLocalArrays = ThreadLocal.withInitial(ArrayBox::new); + private static final ThreadLocal<ArrayBox> ARRAY_BOX = ThreadLocal.withInitial(ArrayBox::new); static final byte[] MAGIC = new byte[] { 'L', 'Z', '4', 'B', 'l', 'o', 'c', 'k' }; static final int MAGIC_LENGTH = MAGIC.length; @@ -130,14 +138,15 @@ private static int compressionLevel(int blockSize) { * integrity.
* @param syncFlush true if pending data should also be flushed on {@link #flush()} */ - public TLLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, boolean syncFlush) { + public ReuseBuffersLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor, Checksum checksum, + boolean syncFlush) { super(out); this.blockSize = blockSize; this.compressor = compressor; this.checksum = checksum; this.compressionLevel = compressionLevel(blockSize); final int compressedBlockSize = HEADER_LENGTH + compressor.maxCompressedLength(blockSize); - this.arrayBox = threadLocalArrays.get(); + this.arrayBox = ARRAY_BOX.get(); arrayBox.markOwnership(blockSize, compressedBlockSize); this.buffer = arrayBox.uncompressed; this.compressedBuffer = arrayBox.compressed; @@ -157,10 +166,10 @@ public TLLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor com * @param compressor the {@link LZ4Compressor} instance to use to compress * data * - * @see #TLLZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean) + * @see #ReuseBuffersLZ4BlockOutputStream(OutputStream, int, LZ4Compressor, Checksum, boolean) * @see StreamingXXHash32#asChecksum() */ - public TLLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) { + public ReuseBuffersLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor compressor) { this(out, blockSize, compressor, XXHashFactory.fastestInstance().newStreamingHash32(DEFAULT_SEED).asChecksum(), false); } @@ -172,10 +181,10 @@ public TLLZ4BlockOutputStream(OutputStream out, int blockSize, LZ4Compressor com * @param blockSize the maximum number of bytes to try to compress at once, * must be >= 64 and <= 32 M * - * @see #TLLZ4BlockOutputStream(OutputStream, int, LZ4Compressor) + * @see #ReuseBuffersLZ4BlockOutputStream(OutputStream, int, LZ4Compressor) * @see LZ4Factory#fastCompressor() */ - public TLLZ4BlockOutputStream(OutputStream out, int blockSize) { + public ReuseBuffersLZ4BlockOutputStream(OutputStream out, int blockSize) { this(out, blockSize, LZ4Factory.fastestInstance().fastCompressor()); } @@ -184,9 +193,9 @@ public TLLZ4BlockOutputStream(OutputStream out, int blockSize) { * * @param out the {@link OutputStream} to feed * - * @see #TLLZ4BlockOutputStream(OutputStream, int) + * @see #ReuseBuffersLZ4BlockOutputStream(OutputStream, int) */ - public TLLZ4BlockOutputStream(OutputStream out) { + public ReuseBuffersLZ4BlockOutputStream(OutputStream out) { this(out, 1 << 16); } From 99ab30e41eba8292a9e65f5e2090bf71c81769ae Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 18:49:38 -0600 Subject: [PATCH 26/29] Test --- .../upgrades/FullClusterRestartIT.java | 39 ++++++++++++++++++- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 9d36621267e5e..f1311f88709dc 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -10,31 +10,35 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; 
import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; -import org.elasticsearch.core.Booleans; -import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; +import org.elasticsearch.transport.Compression; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; import java.util.Base64; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -51,6 +55,7 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -1588,6 +1593,36 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { } } + public void testTransportCompressionSetting() throws IOException { + if (isRunningAgainstOldCluster()) { + final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("persistent"); + { + builder.field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")); + builder.field("cluster.remote.foo.transport.compression", "true"); + } + builder.endObject(); + } + builder.endObject(); + putSettingsRequest.setJsonEntity(Strings.toString(builder)); + } + client().performRequest(putSettingsRequest); + } else { + final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); + final Response getSettingsResponse = client().performRequest(getSettingsRequest); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) { + final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser); + final Settings settings = clusterGetSettingsResponse.getPersistentSettings(); + assertThat( + REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("foo").get(settings), + equalTo(Compression.Enabled.TRUE)); + } + } + } + public static void assertNumHits(String index, int numHits, int totalShards) throws IOException { Map resp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); assertNoFailures(resp); From d67213a67d4650d807b5d09647f00aa06aef4ee4 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 
19:39:24 -0600 Subject: [PATCH 27/29] Fix --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index f1311f88709dc..7593b96626e78 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -1602,7 +1602,7 @@ public void testTransportCompressionSetting() throws IOException { builder.startObject("persistent"); { builder.field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")); - builder.field("cluster.remote.foo.transport.compression", "true"); + builder.field("cluster.remote.foo.transport.compress", "true"); } builder.endObject(); } From c912529ba575e39047bb92390bf6105f0e27bcf9 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 28 Jun 2021 21:51:08 -0600 Subject: [PATCH 28/29] Changes --- docs/reference/modules/transport.asciidoc | 8 ++-- .../common/compress/DeflateCompressor.java | 2 +- .../elasticsearch/transport/Compression.java | 38 ++++++++++++++++--- .../transport/Lz4TransportDecompressor.java | 13 ++++--- .../transport/TransportDecompressor.java | 24 +++--------- .../test/InternalTestCluster.java | 8 ++-- 6 files changed, 53 insertions(+), 40 deletions(-) diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index be70b7e30c61a..1c2258ce89d15 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -52,8 +52,8 @@ time setting format). Defaults to `30s`. Set to `true`, `indexing_data`, or `false` to configure transport compression between nodes. The option `true` will compress all data. The option `indexing_data` will compress only the raw index data sent between nodes during -ingest, ccr following, and shard recovery. The option `indexing_data` is -experimental. Defaults to `false`. +ingest, ccr, and shard recovery. The option `indexing_data` is experimental. +Defaults to `false`. `transport.compress_scheme`:: (<>) @@ -182,8 +182,8 @@ connections between nodes. The `transport.compress` configuration option `indexing_data` will only compress requests that relate to the transport of raw indexing source data -between nodes. This is primarily the case for ingest, ccr, and shard recovery -traffic. This option is experimental. +between nodes. This option primarily compresses data sent during ingest, +ccr, and shard recovery. This option is experimental. The `transport.compress` setting always configures local cluster request compression and is the fallback setting for remote cluster request compression. 
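(For orientation, and not something the patch adds: the fallback relationship described above can be expressed directly in node settings. A minimal sketch in Java, assuming the remote cluster alias `foo` used by the test earlier; the setting keys are the ones documented above.)

    import org.elasticsearch.common.settings.Settings;

    // Local cluster traffic stays uncompressed, while requests to the remote
    // cluster registered under the alias "foo" compress raw indexing data only.
    Settings settings = Settings.builder()
        .put("transport.compress", "false")
        .put("cluster.remote.foo.transport.compress", "indexing_data")
        .build();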
diff --git a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java index c44ce128d7dd3..da94385118155 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java +++ b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java @@ -34,7 +34,7 @@ public class DeflateCompressor implements Compressor { // It needs to be different from other compressors and to not be specific // enough so that no stream starting with these bytes could be detected as // a XContent - public static final byte[] HEADER = new byte[]{'D', 'F', 'L', '\0'}; + private static final byte[] HEADER = new byte[]{'D', 'F', 'L', '\0'}; // 3 is a good trade-off between speed and compression ratio private static final int LEVEL = 3; // We use buffering on the input and output of in/def-laters in order to diff --git a/server/src/main/java/org/elasticsearch/transport/Compression.java b/server/src/main/java/org/elasticsearch/transport/Compression.java index 996e4b41a1318..24a053c47eb61 100644 --- a/server/src/main/java/org/elasticsearch/transport/Compression.java +++ b/server/src/main/java/org/elasticsearch/transport/Compression.java @@ -11,7 +11,7 @@ import net.jpountz.lz4.LZ4Factory; import org.elasticsearch.Version; -import org.elasticsearch.common.compress.DeflateCompressor; +import org.elasticsearch.common.bytes.BytesReference; import java.io.IOException; import java.io.OutputStream; @@ -21,11 +21,12 @@ public class Compression { public enum Scheme { LZ4, DEFLATE; + // TODO: Change after backport static final Version LZ4_VERSION = Version.V_8_0_0; - static final byte[] DEFLATE_HEADER = DeflateCompressor.HEADER; - static final byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; static final int HEADER_LENGTH = 4; + private static final byte[] DEFLATE_HEADER = new byte[]{'D', 'F', 'L', '\0'}; + private static final byte[] LZ4_HEADER = new byte[]{'L', 'Z', '4', '\0'}; private static final int LZ4_BLOCK_SIZE; static { @@ -37,14 +38,39 @@ public enum Scheme { } LZ4_BLOCK_SIZE = lz4BlockSize; } else { - // 16KB block size to minimize the allocation of large buffers - LZ4_BLOCK_SIZE = 16 * 1024; + LZ4_BLOCK_SIZE = 64 * 1024; + } + } + + public static boolean isDeflate(BytesReference bytes) { + byte firstByte = bytes.get(0); + if (firstByte != Compression.Scheme.DEFLATE_HEADER[0]) { + return false; + } else { + return validateHeader(bytes, DEFLATE_HEADER); + } + } + + public static boolean isLZ4(BytesReference bytes) { + byte firstByte = bytes.get(0); + if (firstByte != Scheme.LZ4_HEADER[0]) { + return false; + } else { + return validateHeader(bytes, LZ4_HEADER); + } + } + + private static boolean validateHeader(BytesReference bytes, byte[] header) { + for (int i = 1; i < Compression.Scheme.HEADER_LENGTH; ++i) { + if (bytes.get(i) != header[i]) { + return false; + } } + return true; } public static OutputStream lz4OutputStream(OutputStream outputStream) throws IOException { outputStream.write(LZ4_HEADER); - // 16KB block size to minimize the allocation of large buffers return new ReuseBuffersLZ4BlockOutputStream(outputStream, LZ4_BLOCK_SIZE, LZ4Factory.safeInstance().fastCompressor()); } } diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index cb4baf9f8317d..6a60ef689cb92 100644 --- 
a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -47,6 +47,8 @@ * It modifies the original netty code to operate on byte arrays as opposed to ByteBufs. * Additionally, it integrates the decompression code to work in the Elasticsearch transport * pipeline. Finally, it replaces the custom Netty decoder exceptions. + * + * This class is necessary because Netty is not a dependency of the Elasticsearch server module. */ public class Lz4TransportDecompressor implements TransportDecompressor { @@ -228,7 +230,7 @@ private int decodeBlock(BytesReference reference) throws IOException { int currentChecksum = Integer.reverseBytes(in.readInt()); bytesConsumed += HEADER_LENGTH; - if (decompressedLength == 0 && compressedLength == 0) { + if (decompressedLength == 0) { if (currentChecksum != 0) { throw new IllegalStateException("stream corrupted: checksum error"); } @@ -337,12 +339,11 @@ private int decodeBlock(BytesReference reference) throws IOException { private byte[] getCompressedBuffer(int requiredSize) { byte[] compressedBuffer = COMPRESSED.get(); - if (compressedBuffer.length >= requiredSize) { - return compressedBuffer; - } else { - COMPRESSED.set(new byte[requiredSize]); - return COMPRESSED.get(); + if (requiredSize > compressedBuffer.length) { + compressedBuffer = new byte[requiredSize]; + COMPRESSED.set(compressedBuffer); } + return compressedBuffer; } /** diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java index 33524a239c902..7ad4c953ddc75 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; -import org.elasticsearch.common.compress.DeflateCompressor; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.Releasable; @@ -26,29 +25,16 @@ public interface TransportDecompressor extends Releasable { void close(); static TransportDecompressor getDecompressor(PageCacheRecycler recycler, BytesReference bytes) throws IOException { - if (bytes.length() < DeflateCompressor.HEADER.length) { + if (bytes.length() < Compression.Scheme.HEADER_LENGTH) { return null; } - byte firstByte = bytes.get(0); - byte[] header; - if (firstByte == Compression.Scheme.DEFLATE_HEADER[0]) { - header = Compression.Scheme.DEFLATE_HEADER; - } else if (firstByte == Compression.Scheme.LZ4_HEADER[0]) { - header = Compression.Scheme.LZ4_HEADER; - } else { - throw createIllegalState(bytes); - } - - for (int i = 1; i < Compression.Scheme.HEADER_LENGTH; ++i) { - if (bytes.get(i) != header[i]) { - throw createIllegalState(bytes); - } - } - if (header == Compression.Scheme.DEFLATE_HEADER) { + if (Compression.Scheme.isDeflate(bytes)) { return new DeflateTransportDecompressor(recycler); - } else { + } else if (Compression.Scheme.isLZ4(bytes)) { return new Lz4TransportDecompressor(recycler); + } else { + throw createIllegalState(bytes); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 40f7aa79e0c47..95733052ffd2a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++
b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -441,11 +441,11 @@ public Collection> getPlugins() { private static Settings getRandomNodeSettings(long seed) { Random random = new Random(seed); Builder builder = Settings.builder(); - if (random.nextBoolean()) { - builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.FALSE); + if (rarely(random)) { + builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.TRUE); } else { - if (rarely(random)) { - builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.TRUE); + if (random.nextBoolean()) { + builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.FALSE); } else { builder.put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.INDEXING_DATA); } From 72797cb9aba21e45bf4bdf84f719b1ec967b3f13 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 29 Jun 2021 10:57:45 -0600 Subject: [PATCH 29/29] Changes --- docs/reference/modules/transport.asciidoc | 9 +- .../upgrades/FullClusterRestartIT.java | 5 + .../action/bulk/BulkShardRequest.java | 4 +- .../RecoveryTranslogOperationsRequest.java | 4 +- .../transport/ConnectionProfile.java | 2 +- .../transport/Lz4TransportDecompressor.java | 30 +++-- .../transport/NetworkMessage.java | 2 +- ...a => RawIndexingDataTransportRequest.java} | 2 +- .../elasticsearch/transport/TcpTransport.java | 2 +- .../transport/TransportDecompressor.java | 6 + .../transport/TransportStatus.java | 5 - .../transport/InboundPipelineTests.java | 23 ++-- .../Lz4TransportDecompressorTests.java | 104 ++++++++++++++---- .../transport/OutboundHandlerTests.java | 23 ++-- .../AbstractSimpleTransportTestCase.java | 2 + .../xpack/ccr/action/ShardChangesAction.java | 4 +- 16 files changed, 146 insertions(+), 81 deletions(-) rename server/src/main/java/org/elasticsearch/transport/{RawDataTransportRequest.java => RawIndexingDataTransportRequest.java} (92%) diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 1c2258ce89d15..a86febd2b905e 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -52,13 +52,16 @@ time setting format). Defaults to `30s`. Set to `true`, `indexing_data`, or `false` to configure transport compression between nodes. The option `true` will compress all data. The option `indexing_data` will compress only the raw index data sent between nodes during -ingest, ccr, and shard recovery. The option `indexing_data` is experimental. +ingest, ccr following (excluding bootstrap), and operations-based shard recovery +(excluding transferring Lucene files). The `indexing_data` option is experimental. Defaults to `false`. -`transport.compress_scheme`:: +`transport.compression_scheme`:: (<>) Configures the compression scheme for `transport.compress`. The options are -`deflate` or `lz4`. The option `lz4` is experimental. Defaults to `deflate`. +`deflate` or `lz4`. The option `lz4` is experimental. If `lz4` is configured and +the remote node has not been upgraded to a version supporting `lz4`, the traffic +will be sent uncompressed. Defaults to `deflate`.
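(A minimal sketch, assuming the setting keys documented above, of enabling `indexing_data` compression together with the experimental `lz4` scheme; as noted, a peer that predates LZ4 support receives the traffic uncompressed.)

    import org.elasticsearch.common.settings.Settings;

    // Compress only raw indexing data (ingest, ccr following, operations-based
    // shard recovery) and use LZ4 for those requests when the peer supports it.
    Settings nodeSettings = Settings.builder()
        .put("transport.compress", "indexing_data")
        .put("transport.compression_scheme", "lz4")
        .build();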
`transport.ping_schedule`:: (<>) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 7593b96626e78..93e824f3e15e5 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -1593,6 +1593,11 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { } } + /** + * In 7.14 the cluster.remote.*.transport.compress setting was changed from a boolean to an enum setting + * with true/false as options. This test ensures that the old boolean setting in cluster state is + * translated properly. This test can be removed in 9.0. + */ public void testTransportCompressionSetting() throws IOException { if (isRunningAgainstOldCluster()) { final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index 3984c03b1cdeb..b28a22ad96a3a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -18,14 +18,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.transport.RawDataTransportRequest; +import org.elasticsearch.transport.RawIndexingDataTransportRequest; import java.io.IOException; import java.util.HashSet; import java.util.Set; import java.util.stream.Stream; -public class BulkShardRequest extends ReplicatedWriteRequest implements Accountable, RawDataTransportRequest { +public class BulkShardRequest extends ReplicatedWriteRequest implements Accountable, RawIndexingDataTransportRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(BulkShardRequest.class); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java index 6a6c2a29a7356..d962062c198be 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -13,12 +13,12 @@ import org.elasticsearch.index.seqno.RetentionLeases; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.transport.RawDataTransportRequest; +import org.elasticsearch.transport.RawIndexingDataTransportRequest; import java.io.IOException; import java.util.List; -public class RecoveryTranslogOperationsRequest extends RecoveryTransportRequest implements RawDataTransportRequest { +public class RecoveryTranslogOperationsRequest extends RecoveryTransportRequest implements RawIndexingDataTransportRequest { private final long recoveryId; private final ShardId shardId; diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index 44b0083e72e27..7523b6ca2a420 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++
b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -206,7 +206,7 @@ public Builder setCompressionEnabled(Compression.Enabled compressionEnabled) { } /** - * Sets indexing data compression enabled for this connection profile + * Sets compression scheme for this connection profile */ public Builder setCompressionScheme(Compression.Scheme compressionScheme) { this.compressionScheme = compressionScheme; diff --git a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java index 6a60ef689cb92..9f545c07fd86e 100644 --- a/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/Lz4TransportDecompressor.java @@ -52,7 +52,7 @@ */ public class Lz4TransportDecompressor implements TransportDecompressor { - private static final ThreadLocal UNCOMPRESSED = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); + private static final ThreadLocal DECOMPRESSED = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); private static final ThreadLocal COMPRESSED = ThreadLocal.withInitial(() -> BytesRef.EMPTY_BYTES); /** @@ -254,17 +254,13 @@ private int decodeBlock(BytesReference reference) throws IOException { } final Checksum checksum = this.checksum; - byte[] uncompressed = UNCOMPRESSED.get(); - if (decompressedLength > uncompressed.length) { - uncompressed = new byte[decompressedLength]; - UNCOMPRESSED.set(uncompressed); - } + byte[] decompressed = getThreadLocalBuffer(DECOMPRESSED, decompressedLength); try { switch (blockType) { case BLOCK_TYPE_NON_COMPRESSED: try (StreamInput streamInput = reference.streamInput()) { - streamInput.readBytes(uncompressed, 0, decompressedLength); + streamInput.readBytes(decompressed, 0, decompressedLength); } break; case BLOCK_TYPE_COMPRESSED: @@ -275,13 +271,13 @@ private int decodeBlock(BytesReference reference) throws IOException { compressed = ref.bytes; compressedOffset = ref.offset; } else { - compressed = getCompressedBuffer(compressedLength); + compressed = getThreadLocalBuffer(COMPRESSED, compressedLength); compressedOffset = 0; try (StreamInput streamInput = reference.streamInput()) { streamInput.readBytes(compressed, 0, compressedLength); } } - decompressor.decompress(compressed, compressedOffset, uncompressed, 0, decompressedLength); + decompressor.decompress(compressed, compressedOffset, decompressed, 0, decompressedLength); break; default: throw new IllegalStateException(String.format(Locale.ROOT, @@ -293,7 +289,7 @@ private int decodeBlock(BytesReference reference) throws IOException { if (checksum != null) { checksum.reset(); - checksum.update(uncompressed, 0, decompressedLength); + checksum.update(decompressed, 0, decompressedLength); final int checksumResult = (int) checksum.getValue(); if (checksumResult != currentChecksum) { throw new IllegalStateException(String.format(Locale.ROOT, @@ -313,7 +309,7 @@ private int decodeBlock(BytesReference reference) throws IOException { final Recycler.V page = pages.getLast(); int toCopy = Math.min(bytesToCopy, PageCacheRecycler.BYTE_PAGE_SIZE - pageOffset); - System.arraycopy(uncompressed, uncompressedOffset, page.v(), pageOffset, toCopy); + System.arraycopy(decompressed, uncompressedOffset, page.v(), pageOffset, toCopy); pageOffset += toCopy; bytesToCopy -= toCopy; uncompressedOffset += toCopy; @@ -337,13 +333,13 @@ private int decodeBlock(BytesReference reference) throws IOException { return bytesConsumed; } - private 
byte[] getCompressedBuffer(int requiredSize) { - byte[] compressedBuffer = COMPRESSED.get(); - if (requiredSize > compressedBuffer.length) { - compressedBuffer = new byte[requiredSize]; - COMPRESSED.set(compressedBuffer); + private byte[] getThreadLocalBuffer(ThreadLocal threadLocal, int requiredSize) { + byte[] buffer = threadLocal.get(); + if (requiredSize > buffer.length) { + buffer = new byte[requiredSize]; + threadLocal.set(buffer); } - return compressedBuffer; + return buffer; } /** diff --git a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java index dee4416b222e1..976da4268c3de 100644 --- a/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/NetworkMessage.java @@ -28,7 +28,7 @@ public abstract class NetworkMessage { this.version = version; this.requestId = requestId; this.compressionScheme = adjustedScheme(version, compressionScheme); - if (compressionScheme != null) { + if (this.compressionScheme != null) { this.status = TransportStatus.setCompress(status); } else { this.status = status; diff --git a/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java b/server/src/main/java/org/elasticsearch/transport/RawIndexingDataTransportRequest.java similarity index 92% rename from server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java rename to server/src/main/java/org/elasticsearch/transport/RawIndexingDataTransportRequest.java index 647ead226e375..8969bd6a43fad 100644 --- a/server/src/main/java/org/elasticsearch/transport/RawDataTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/transport/RawIndexingDataTransportRequest.java @@ -13,5 +13,5 @@ * is configured to {@link Compression.Enabled#INDEXING_DATA}. This is primarily intended for * requests/responses composed of raw source data.
*/ -public interface RawDataTransportRequest { +public interface RawIndexingDataTransportRequest { } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index f711100caa2a8..97a77f4861f88 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -244,7 +244,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, } TcpChannel channel = channel(options.type()); boolean shouldCompress = compress == Compression.Enabled.TRUE || - (compress == Compression.Enabled.INDEXING_DATA && request instanceof RawDataTransportRequest); + (compress == Compression.Enabled.INDEXING_DATA && request instanceof RawIndexingDataTransportRequest); outboundHandler.sendRequest(node, channel, requestId, action, request, options, getVersion(), shouldCompress, false); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java index 7ad4c953ddc75..652f55432c1d3 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportDecompressor.java @@ -17,6 +17,12 @@ public interface TransportDecompressor extends Releasable { + /** + * Decompress the provided bytes + * + * @param bytesReference to decompress + * @return number of compressed bytes consumed + */ int decompress(BytesReference bytesReference) throws IOException; ReleasableBytesReference pollDecompressedPage(boolean isEOS); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStatus.java b/server/src/main/java/org/elasticsearch/transport/TransportStatus.java index 28d3397d55e07..386921f199cf0 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStatus.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStatus.java @@ -47,11 +47,6 @@ public static byte setCompress(byte value) { return value; } - public static byte unSetCompress(byte value) { - value |= STATUS_COMPRESS; - return value; - } - static boolean isHandshake(byte value) { // pkg private since it's only used internally return (value & STATUS_HANDSHAKE) != 0; } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index 6008ead34f01d..8b4ca35f0a01c 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -93,19 +93,16 @@ public void testPipelineHandling() throws IOException { try (BytesStreamOutput streamOutput = new BytesStreamOutput()) { while (streamOutput.size() < BYTE_THRESHOLD) { final Version version = randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()); - final String value = randomAlphaOfLength(randomIntBetween(10, 200)); + final String value = randomRealisticUnicodeOfCodepointLength(randomIntBetween(200, 400)); final boolean isRequest = randomBoolean(); Compression.Scheme scheme; if (randomBoolean()) { scheme = null; } else { - if (version.onOrAfter(Compression.Scheme.LZ4_VERSION)) { - scheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); - } else { - scheme = Compression.Scheme.DEFLATE; - } + scheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); } + 
boolean isCompressed = isCompressed(version, scheme); final long requestId = totalMessages++; final MessageData messageData; @@ -114,17 +111,17 @@ public void testPipelineHandling() throws IOException { OutboundMessage message; if (isRequest) { if (rarely()) { - messageData = new MessageData(version, requestId, true, scheme != null, breakThisAction, null); + messageData = new MessageData(version, requestId, true, isCompressed, breakThisAction, null); message = new OutboundMessage.Request(threadContext, new TestRequest(value), version, breakThisAction, requestId, false, scheme); expectedExceptionClass = new CircuitBreakingException("", CircuitBreaker.Durability.PERMANENT); } else { - messageData = new MessageData(version, requestId, true, scheme != null, actionName, value); + messageData = new MessageData(version, requestId, true, isCompressed, actionName, value); message = new OutboundMessage.Request(threadContext, new TestRequest(value), version, actionName, requestId, false, scheme); } } else { - messageData = new MessageData(version, requestId, false, scheme != null, null, value); + messageData = new MessageData(version, requestId, false, isCompressed, null, value); message = new OutboundMessage.Response(threadContext, new TestResponse(value), version, requestId, false, scheme); } @@ -175,6 +172,14 @@ public void testPipelineHandling() throws IOException { } } + private static boolean isCompressed(Version version, Compression.Scheme scheme) { + if (version.before(Compression.Scheme.LZ4_VERSION) && scheme == Compression.Scheme.LZ4) { + return false; + } else { + return scheme != null; + } + } + public void testDecodeExceptionIsPropagated() throws IOException { BiConsumer messageHandler = (c, m) -> {}; final StatsTracker statsTracker = new StatsTracker(); diff --git a/server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java b/server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java index 6ac6f3eaa0ce4..a1da456a096bc 100644 --- a/server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java +++ b/server/src/test/java/org/elasticsearch/transport/Lz4TransportDecompressorTests.java @@ -23,6 +23,8 @@ import java.io.IOException; import java.io.OutputStream; +import static org.hamcrest.Matchers.lessThan; + public class Lz4TransportDecompressorTests extends ESTestCase { public void testSimpleCompression() throws IOException { @@ -40,48 +42,88 @@ public void testSimpleCompression() throws IOException { ReleasableBytesReference releasableBytesReference = decompressor.pollDecompressedPage(true); assertEquals(randomByte, releasableBytesReference.get(0)); releasableBytesReference.close(); - } } public void testMultiPageCompression() throws IOException { + int intsToWrite = 50000; + int uncompressedLength = intsToWrite * 4; + try (BytesStreamOutput output = new BytesStreamOutput()) { try (StreamOutput lz4BlockStream = new OutputStreamStreamOutput(Compression.Scheme.lz4OutputStream( Streams.flushOnCloseStream(output)))) { - for (int i = 0; i < 10000; ++i) { - lz4BlockStream.writeInt(i); + for (int i = 0; i < intsToWrite; ++i) { + int lowByte = (i & 0xFF); + if (lowByte < 128) { + lz4BlockStream.writeInt(0); + } else if (lowByte < 200) { + lz4BlockStream.writeInt(1); + } else { + lz4BlockStream.writeInt(i); + } } } BytesReference bytes = output.bytes(); + // Since 200 / 255 data is repeated, we should get a compression ratio of at least 50% + assertThat(bytes.length(), lessThan(uncompressedLength / 2)); Lz4TransportDecompressor 
decompressor = new Lz4TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); int bytesConsumed = decompressor.decompress(bytes); assertEquals(bytes.length(), bytesConsumed); - ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(false); - ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(false); - ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(true); + + int numOfUncompressedPages = uncompressedLength / PageCacheRecycler.BYTE_PAGE_SIZE; + if (uncompressedLength % PageCacheRecycler.BYTE_PAGE_SIZE > 0) { + numOfUncompressedPages += 1; + } + + ReleasableBytesReference[] polledReferences = new ReleasableBytesReference[numOfUncompressedPages]; + for (int i = 0; i < numOfUncompressedPages - 1; ++i) { + polledReferences[i] = decompressor.pollDecompressedPage(false); + } + + polledReferences[numOfUncompressedPages - 1] = decompressor.pollDecompressedPage(true); assertNull(decompressor.pollDecompressedPage(true)); - BytesReference composite = CompositeBytesReference.of(reference1, reference2, reference3); - assertEquals(4 * 10000, composite.length()); + + BytesReference composite = CompositeBytesReference.of(polledReferences); + assertEquals(uncompressedLength, composite.length()); StreamInput streamInput = composite.streamInput(); - for (int i = 0; i < 10000; ++i) { - assertEquals(i, streamInput.readInt()); + for (int i = 0; i < intsToWrite; ++i) { + int lowByte = (i & 0xFF); + if (lowByte < 128) { + assertEquals(0, streamInput.readInt()); + } else if (lowByte < 200) { + assertEquals(1, streamInput.readInt()); + } else { + assertEquals(i, streamInput.readInt()); + } } - Releasables.close(reference1, reference2, reference3); + Releasables.close(polledReferences); } } public void testIncrementalMultiPageCompression() throws IOException { + int intsToWrite = 50000; + int uncompressedLength = intsToWrite * 4; + try (BytesStreamOutput output = new BytesStreamOutput()) { try (StreamOutput lz4BlockStream = new OutputStreamStreamOutput( Compression.Scheme.lz4OutputStream(Streams.flushOnCloseStream(output)))) { - for (int i = 0; i < 10000; ++i) { - lz4BlockStream.writeInt(i); + for (int i = 0; i < intsToWrite; ++i) { + int lowByte = (i & 0xFF); + if (lowByte < 128) { + lz4BlockStream.writeInt(0); + } else if (lowByte < 200) { + lz4BlockStream.writeInt(1); + } else { + lz4BlockStream.writeInt(i); + } } } BytesReference bytes = output.bytes(); + // Since 200 / 255 data is repeated, we should get a compression ratio of at least 50% + assertThat(bytes.length(), lessThan(uncompressedLength / 2)); Lz4TransportDecompressor decompressor = new Lz4TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE); @@ -96,19 +138,35 @@ public void testIncrementalMultiPageCompression() throws IOException { int bytesConsumed2 = decompressor.decompress(next); BytesReference next2 = CompositeBytesReference.of(next.slice(bytesConsumed2, next.length() - bytesConsumed2), inbound3); int bytesConsumed3 = decompressor.decompress(next2); assertEquals(bytes.length(), bytesConsumed1 + bytesConsumed2 + bytesConsumed3); - ReleasableBytesReference reference1 = decompressor.pollDecompressedPage(false); - ReleasableBytesReference reference2 = decompressor.pollDecompressedPage(false); - ReleasableBytesReference reference3 = decompressor.pollDecompressedPage(true); - assertNull(decompressor.pollDecompressedPage(false)); - BytesReference composite = CompositeBytesReference.of(reference1, reference2, reference3); - assertEquals(4 * 10000, composite.length()); + + int
numOfUncompressedPages = uncompressedLength / PageCacheRecycler.BYTE_PAGE_SIZE; + if (uncompressedLength % PageCacheRecycler.BYTE_PAGE_SIZE > 0) { + numOfUncompressedPages += 1; + } + + ReleasableBytesReference[] polledReferences = new ReleasableBytesReference[numOfUncompressedPages]; + for (int i = 0; i < numOfUncompressedPages - 1; ++i) { + polledReferences[i] = decompressor.pollDecompressedPage(false); + } + + polledReferences[numOfUncompressedPages - 1] = decompressor.pollDecompressedPage(true); + assertNull(decompressor.pollDecompressedPage(true)); + + BytesReference composite = CompositeBytesReference.of(polledReferences); + assertEquals(uncompressedLength, composite.length()); StreamInput streamInput = composite.streamInput(); - for (int i = 0; i < 10000; ++i) { - assertEquals(i, streamInput.readInt()); + for (int i = 0; i < intsToWrite; ++i) { + int lowByte = (i & 0xFF); + if (lowByte < 128) { + assertEquals(0, streamInput.readInt()); + } else if (lowByte < 200) { + assertEquals(1, streamInput.readInt()); + } else { + assertEquals(i, streamInput.readInt()); + } } - Releasables.close(reference1, reference2, reference3); + Releasables.close(polledReferences); } } diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index c5d9e7aaa5a38..622f750e3b10c 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -122,12 +122,9 @@ public void testSendRequest() throws IOException { String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - boolean compress; - if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.Scheme.LZ4_VERSION)) { - compress = false; - } else { - compress = randomBoolean(); - } + boolean compress = randomBoolean(); + boolean compressUnsupportedDueToVersion = compressionScheme == Compression.Scheme.LZ4 + && version.before(Compression.Scheme.LZ4_VERSION); String value = "message"; threadContext.putHeader("header", "header_value"); TestRequest request = new TestRequest(value); @@ -174,7 +171,7 @@ public void onRequestSent(DiscoveryNode node, long requestId, String action, Tra } else { assertFalse(header.isHandshake()); } - if (compress) { + if (compress && compressUnsupportedDueToVersion == false) { assertTrue(header.isCompressed()); } else { assertFalse(header.isCompressed()); } @@ -190,12 +187,10 @@ public void testSendResponse() throws IOException { String action = "handshake"; long requestId = randomLongBetween(0, 300); boolean isHandshake = randomBoolean(); - boolean compress; - if (compressionScheme == Compression.Scheme.LZ4 && version.before(Compression.Scheme.LZ4_VERSION)) { - compress = false; - } else { - compress = randomBoolean(); - } + boolean compress = randomBoolean(); + boolean compressUnsupportedDueToVersion = compressionScheme == Compression.Scheme.LZ4 + && version.before(Compression.Scheme.LZ4_VERSION); + String value = "message"; threadContext.putHeader("header", "header_value"); TestResponse response = new TestResponse(value); @@ -238,7 +233,7 @@ public void onResponseSent(long requestId, String action, TransportResponse resp } else { assertFalse(header.isHandshake()); } - if (compress) { + if (compress && compressUnsupportedDueToVersion == false) { assertTrue(header.isCompressed()); } else { assertFalse(header.isCompressed()); diff --git
a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index bb875b8257397..06a871b9a1ca6 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -604,6 +604,8 @@ public void testHelloWorldCompressed() throws Exception { Settings settingsWithCompress = Settings.builder() .put(TransportSettings.TRANSPORT_COMPRESS.getKey(), Compression.Enabled.TRUE) + .put(TransportSettings.TRANSPORT_COMPRESSION_SCHEME.getKey(), + randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4)) .build(); ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); connectToNode(serviceC, serviceA.getLocalDiscoNode(), connectionProfile); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index 0eace656f304b..c49ed04585461 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.RawDataTransportRequest; +import org.elasticsearch.transport.RawIndexingDataTransportRequest; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.ccr.Ccr; @@ -67,7 +67,7 @@ private ShardChangesAction() { super(NAME, ShardChangesAction.Response::new); } - public static class Request extends SingleShardRequest implements RawDataTransportRequest { + public static class Request extends SingleShardRequest implements RawIndexingDataTransportRequest { private long fromSeqNo; private int maxOperationCount;