From a221fa8920750a912d49749f830847c5f70b5011 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 11 Aug 2016 12:43:54 -0400 Subject: [PATCH 1/3] Default max local storage nodes to one This commit defaults the max local storage nodes to one. The motivation for this change is that a default value greater than one is dangerous as users sometimes end up unknowingly starting a second node and thinking that they have encountered data loss. --- .../gradle/test/ClusterFormationTasks.groovy | 1 + .../elasticsearch/env/NodeEnvironment.java | 14 ++++++++++--- .../org/elasticsearch/tribe/TribeService.java | 3 +++ .../cluster/ClusterInfoServiceIT.java | 2 ++ .../DiscoveryWithServiceDisruptionsIT.java | 2 ++ .../env/NodeEnvironmentTests.java | 16 ++++++--------- .../memory/breaker/CircuitBreakerNoopIT.java | 2 ++ .../java/org/elasticsearch/tribe/TribeIT.java | 5 ++++- .../migration/migrate_5_0/settings.asciidoc | 11 ++++++++++ .../elasticsearch/test/ESIntegTestCase.java | 2 ++ .../test/InternalTestCluster.java | 5 +++-- .../ClusterDiscoveryConfiguration.java | 3 ++- .../test/test/InternalTestClusterTests.java | 20 ++++++++++++++----- 13 files changed, 64 insertions(+), 22 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 442882dfe9910..b1f07265019c6 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -261,6 +261,7 @@ class ClusterFormationTasks { 'node.attr.testattr' : 'test', 'repositories.url.allowed_urls': 'http://snapshot.test*' ] + esConfig['node.max_local_storage_nodes'] = node.config.numNodes esConfig['http.port'] = node.config.httpPort esConfig['transport.tcp.port'] = node.config.transportPort esConfig.putAll(node.config.settings) diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 367131d93cd3c..59ef122760b1d 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -68,6 +68,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Random; import java.util.Set; @@ -151,7 +152,7 @@ public String toString() { /** * Maximum number of data nodes that should run in an environment. */ - public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1, + public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1, Property.NodeScope); /** @@ -244,8 +245,15 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } if (locks[0] == null) { - throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: " - + Arrays.toString(environment.dataWithClusterFiles()), lastException); + final String message = String.format( + Locale.ROOT, + "failed to obtain node locks, tried [%s] with lock id%s;" + + " maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?", + Arrays.toString(environment.dataWithClusterFiles()), + maxLocalStorageNodes == 1 ?
" [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]", + MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), + maxLocalStorageNodes); + throw new IllegalStateException(message, lastException); } this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths); this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId())); diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index 40c805e0b002e..3ca801552703a 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -117,6 +117,9 @@ public static Settings processSettings(Settings settings) { sb.put(Node.NODE_MASTER_SETTING.getKey(), false); sb.put(Node.NODE_DATA_SETTING.getKey(), false); sb.put(Node.NODE_INGEST_SETTING.getKey(), false); + if (!NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.exists(settings)) { + sb.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), nodesSettings.size()); + } sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery // nothing is going to be discovered, since no master will be elected sb.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index e1f5a1a719b49..3cafff08a07a5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; @@ -118,6 +119,7 @@ public void blockActions(String... actions) { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() // manual collection or upon cluster forming. 
+ .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") .build(); } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index ab42abd4aacdb..e5de81f7380e3 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -55,6 +55,7 @@ import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.store.IndicesStoreIntegrationIT; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -207,6 +208,7 @@ private void configureUnicastCluster( // TODO: Rarely use default settings form some of these Settings nodeSettings = Settings.builder() .put(settings) + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 4) .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode) .build(); diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 96c52d9dc8e66..ee403bfe91056 100644 --- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -74,18 +74,14 @@ public void testNodeLockSillySettings() { } public void testNodeLockSingleEnvironment() throws IOException { - final Settings settings = buildEnvSettings(Settings.builder() - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 1).build()); + final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 1).build()); NodeEnvironment env = newNodeEnvironment(settings); List dataPaths = Environment.PATH_DATA_SETTING.get(settings); - try { - // Reuse the same location and attempt to lock again - new NodeEnvironment(settings, new Environment(settings)); - fail("env has already locked all the data directories it is allowed"); - } catch (IllegalStateException ex) { - assertThat(ex.getMessage(), containsString("Failed to obtain node lock")); - } + // Reuse the same location and attempt to lock again + IllegalStateException ex = + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(settings, new Environment(settings))); + assertThat(ex.getMessage(), containsString("failed to obtain node lock")); // Close the environment that holds the lock and make sure we can get the lock after release env.close(); @@ -121,7 +117,7 @@ public void testSegmentInfosTracing() { } public void testNodeLockMultipleEnvironment() throws IOException { - final Settings settings = buildEnvSettings(Settings.EMPTY); + final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build()); final NodeEnvironment first = newNodeEnvironment(settings); List dataPaths = Environment.PATH_DATA_SETTING.get(settings); NodeEnvironment second = new NodeEnvironment(settings, new Environment(settings)); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java 
b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java index 02eab6dc0aad2..627eb74007bae 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -40,6 +41,7 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index e1df7201fbe82..8d90829641b14 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -127,7 +128,10 @@ private void setupTribeNode(Settings settings) { tribe1Defaults.putArray("tribe.t1." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(internalCluster().client())); tribe1Defaults.putArray("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(cluster2.client())); + int maxLocalStorageNodes = internalCluster().size() + 2; Settings merged = Settings.builder() + .put(internalCluster().getDefaultSettings()) + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), maxLocalStorageNodes) .put("tribe.t1.cluster.name", internalCluster().getClusterName()) .put("tribe.t2.cluster.name", cluster2.getClusterName()) .put("tribe.t1.transport.type", "local") @@ -142,7 +146,6 @@ private void setupTribeNode(Settings settings) { .put(tribe1Defaults.build()) .put(tribe2Defaults.build()) - .put(internalCluster().getDefaultSettings()) .put("node.name", "tribe_node") // make sure we can identify threads from this node .build(); diff --git a/docs/reference/migration/migrate_5_0/settings.asciidoc b/docs/reference/migration/migrate_5_0/settings.asciidoc index 76ee65a6abe20..f6875f60c60c3 100644 --- a/docs/reference/migration/migrate_5_0/settings.asciidoc +++ b/docs/reference/migration/migrate_5_0/settings.asciidoc @@ -310,3 +310,14 @@ The unit 'w' representing weeks is no longer supported. Fractional time values (e.g., 0.5s) are no longer supported. For example, this means when setting timeouts "0.5s" will be rejected and should instead be input as "500ms". 
+ +==== Node max local storage nodes + +Previous versions of Elasticsearch defaulted to allowing multiple nodes to share the same data +directory (up to 50). This can be confusing when users accidentally start up multiple nodes and end +up thinking that they've lost data because the second node will start with an empty data directory. +While the default of allowing multiple nodes is convenient for experimenting with a small cluster on +a laptop, and end-users do sometimes run multiple nodes on the same host, this tends to be the +exception. In keeping with Elasticsearch's continual movement towards safer out-of-the-box defaults, +and optimizing for the norm instead of the exception, the default for +`node.max_local_storage_nodes` is now one. diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 63aa44842553c..c11024d26006d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.client.RestClientBuilder; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.script.ScriptService; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.action.ActionListener; @@ -1632,6 +1633,7 @@ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 8) .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index c121164fb016f..0c9e55766a3b4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -175,8 +175,8 @@ public final class InternalTestCluster extends TestCluster { public final int HTTP_BASE_PORT = GLOBAL_HTTP_BASE_PORT + CLUSTER_BASE_PORT_OFFSET; - static final int DEFAULT_LOW_NUM_MASTER_NODES = 1; - static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3; + public static final int DEFAULT_LOW_NUM_MASTER_NODES = 1; + public static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3; static final int DEFAULT_MIN_NUM_DATA_NODES = 1; static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ?
6 : 3; @@ -300,6 +300,7 @@ public InternalTestCluster(long clusterSeed, Path baseDir, builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString()); } } + builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Math.max(1, maxNumDataNodes + numClientNodes)); builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom")); builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir); builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos")); diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java index 67d7b99171d54..b393498ec89f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.transport.TransportSettings; @@ -108,7 +109,7 @@ private static int calcBasePort() { @Override public Settings nodeSettings(int nodeOrdinal) { - Settings.Builder builder = Settings.builder(); + Settings.Builder builder = Settings.builder().put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numOfNodes); String[] unicastHosts = new String[unicastHostOrdinals.length]; if (nodeOrdinal >= unicastHostPorts.length) { diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index d4af031aa84b3..f0b7454fe9d1f 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -128,12 +128,16 @@ public void testBeforeTest() throws Exception { boolean masterNodes = randomBoolean(); int minNumDataNodes = randomIntBetween(0, 3); int maxNumDataNodes = randomIntBetween(minNumDataNodes, 4); + int numClientNodes = randomIntBetween(0, 2); final String clusterName1 = "shared1"; final String clusterName2 = "shared2"; NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put( + NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), + 2 * ((masterNodes ? 
InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)) .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); @@ -145,7 +149,7 @@ public Settings transportClientSettings() { .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); } }; - int numClientNodes = randomIntBetween(0, 2); + boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "foobar"; @@ -187,13 +191,17 @@ public void testDataFolderAssignmentAndCleaning() throws IOException, Interrupte long clusterSeed = randomLong(); boolean masterNodes = randomBoolean(); // we need one stable node - int minNumDataNodes = 2; - int maxNumDataNodes = 2; + final int minNumDataNodes = 2; + final int maxNumDataNodes = 2; + final int numClientNodes = randomIntBetween(0, 2); final String clusterName1 = "shared1"; NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false) + .put( + NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), + 2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes) .put(NetworkModule.TRANSPORT_TYPE_KEY, "local") .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") .build(); @@ -203,7 +211,7 @@ public Settings transportClientSettings() { return Settings.builder() .put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build(); } - }; int numClientNodes = randomIntBetween(0, 2); + }; boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "test"; Path baseDir = createTempDir(); @@ -269,11 +277,13 @@ private Path[] getNodePaths(InternalTestCluster cluster, String name) { public void testDifferentRolesMaintainPathOnRestart() throws Exception { final Path baseDir = createTempDir(); + final int numNodes = 5; InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, true, 0, 0, "test", new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes) .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put(NetworkModule.TRANSPORT_TYPE_KEY, "local") .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") @@ -289,7 +299,7 @@ public Settings transportClientSettings() { cluster.beforeTest(random(), 0.0); try { Map> pathsPerRole = new HashMap<>(); - for (int i = 0; i < 5; i++) { + for (int i = 0; i < numNodes; i++) { final DiscoveryNode.Role role = randomFrom(MASTER, DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST); final String node; switch (role) { From fe563c5a940c5fc77fe59993b3f54253ae89fa2e Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 11 Aug 2016 18:44:47 -0400 Subject: [PATCH 2/3] Adjust test node max local storage nodes settings This commit adjusts the node max local storage node settings value for some tests - the provided value for ESIntegTestCase is derived from the value of the annotations - the default value for the InternalTestCluster is more carefully calculated - the value for the tribe unit tests is adjusted to reflect that there are two clusters in play --- .../elasticsearch/tribe/TribeUnitTests.java | 4 ++- .../elasticsearch/test/ESIntegTestCase.java | 33 +++++++++++-------- .../test/InternalTestCluster.java | 4 ++- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git 
a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java index 95550f6f654e2..247caa42210c2 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java @@ -60,7 +60,9 @@ public static void createTribes() { .put(NetworkModule.HTTP_ENABLED.getKey(), false) .put("transport.type", "local") .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) + .build(); tribe1 = new TribeClientNode( Settings.builder() diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index c11024d26006d..50c551dbd7428 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1631,20 +1631,25 @@ private boolean randomDynamicTemplates() { */ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() - // Default the watermarks to absurdly low to prevent the tests - // from failing on nodes without enough disk space - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 8) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") - .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) - .put("script.stored", "true") - .put("script.inline", "true") - // by default we never cache below 10k docs in a segment, - // bypass this limit so that caching gets some testing in - // integration tests that usually create few documents - .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0) - // wait short time for other active shards before actually deleting, default 30s not needed in tests - .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)); + .put( + NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), + (getSupportsDedicatedMasters() ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + + getMaxNumDataNodes() + + ((getNumClientNodes() == InternalTestCluster.DEFAULT_NUM_CLIENT_NODES) ? 
+ InternalTestCluster.DEFAULT_MAX_NUM_CLIENT_NODES : getNumClientNodes())) + // Default the watermarks to absurdly low to prevent the tests + // from failing on nodes without enough disk space + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) + .put("script.stored", "true") + .put("script.inline", "true") + // by default we never cache below 10k docs in a segment, + // bypass this limit so that caching gets some testing in + // integration tests that usually create few documents + .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0) + // wait short time for other active shards before actually deleting, default 30s not needed in tests + .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)); return builder.build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 0c9e55766a3b4..6c3920191c599 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -300,7 +300,9 @@ public InternalTestCluster(long clusterSeed, Path baseDir, builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString()); } } - builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Math.max(1, maxNumDataNodes + numClientNodes)); + builder.put( + NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), + Math.max(1, numSharedDedicatedMasterNodes + maxNumDataNodes + numSharedCoordOnlyNodes)); builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom")); builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir); builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos")); From fdee065f854406d07d6fa01c56100b88fb56515f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Fri, 12 Aug 2016 06:33:11 -0400 Subject: [PATCH 3/3] Simplify tests handling of max local storage nodes This commit simplifies the handling of max local storage nodes in integration tests by just setting the default max local storage nodes to be the maximum possible integer. --- .../java/org/elasticsearch/tribe/TribeIT.java | 2 -- .../elasticsearch/test/ESIntegTestCase.java | 33 ++++++++----------- .../test/InternalTestCluster.java | 4 +-- 3 files changed, 15 insertions(+), 24 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 8d90829641b14..01e6a490dad77 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -128,10 +128,8 @@ private void setupTribeNode(Settings settings) { tribe1Defaults.putArray("tribe.t1." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(internalCluster().client())); tribe1Defaults.putArray("tribe.t2." 
+ UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(cluster2.client())); - int maxLocalStorageNodes = internalCluster().size() + 2; Settings merged = Settings.builder() .put(internalCluster().getDefaultSettings()) - .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), maxLocalStorageNodes) .put("tribe.t1.cluster.name", internalCluster().getClusterName()) .put("tribe.t2.cluster.name", cluster2.getClusterName()) .put("tribe.t1.transport.type", "local") diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 50c551dbd7428..dffa285101fa1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1631,25 +1631,20 @@ private boolean randomDynamicTemplates() { */ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() - .put( - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), - (getSupportsDedicatedMasters() ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + - getMaxNumDataNodes() + - ((getNumClientNodes() == InternalTestCluster.DEFAULT_NUM_CLIENT_NODES) ? - InternalTestCluster.DEFAULT_MAX_NUM_CLIENT_NODES : getNumClientNodes())) - // Default the watermarks to absurdly low to prevent the tests - // from failing on nodes without enough disk space - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") - .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) - .put("script.stored", "true") - .put("script.inline", "true") - // by default we never cache below 10k docs in a segment, - // bypass this limit so that caching gets some testing in - // integration tests that usually create few documents - .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0) - // wait short time for other active shards before actually deleting, default 30s not needed in tests - .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)); + .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE) + // Default the watermarks to absurdly low to prevent the tests + // from failing on nodes without enough disk space + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") + .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000) + .put("script.stored", "true") + .put("script.inline", "true") + // by default we never cache below 10k docs in a segment, + // bypass this limit so that caching gets some testing in + // integration tests that usually create few documents + .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), nodeOrdinal % 2 == 0) + // wait short time for other active shards before actually deleting, default 30s not needed in tests + .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)); return builder.build(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 6c3920191c599..cc78afb987f04 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -300,9 +300,7 @@ public InternalTestCluster(long clusterSeed, Path baseDir, builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString()); } } - builder.put( - NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), - Math.max(1, numSharedDedicatedMasterNodes + maxNumDataNodes + numSharedCoordOnlyNodes)); + builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE); builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom")); builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir); builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos"));
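
To make the behavior change concrete, here is a minimal, illustrative sketch that is not part of the patch series. It uses only APIs that already appear in the diffs above (Settings.builder(), Environment, NodeEnvironment, and NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING); the class name and the filesystem paths are hypothetical placeholders, and the exact exception message is taken from the new NodeEnvironment code in patch 1.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;

// Hypothetical demo class; paths below are placeholders.
public class MaxLocalStorageNodesDemo {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), "/tmp/es-home") // placeholder home dir
                .put("path.data", "/tmp/es-data")                            // placeholder shared data dir
                // uncomment to deliberately allow two nodes to share this data directory:
                // .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
                .build();

        // The first "node" acquires node lock 0 under the data directory.
        try (NodeEnvironment first = new NodeEnvironment(settings, new Environment(settings))) {
            try {
                // With the new default of one, a second environment on the same path cannot
                // obtain a node lock and the constructor throws.
                new NodeEnvironment(settings, new Environment(settings)).close();
            } catch (IllegalStateException e) {
                // Expected, per patch 1: "failed to obtain node locks, tried [...] with lock id [0];
                // maybe these locations are not writable or multiple nodes were started without
                // increasing [node.max_local_storage_nodes] (was [1])?"
                System.out.println(e.getMessage());
            }
        }
    }
}

As the migration note above states, deployments that intentionally run several nodes against the same data directory (as the tribe service and the test infrastructure in these patches do) must now raise node.max_local_storage_nodes explicitly.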